content — string (39 to 14.9k chars)
sha1 — string (40 chars)
id — int64 (0 to 710k)
import torch

def _spherical_harmonics_l0(xyz):
    """Compute the l=0 Spherical Harmonics

    Args:
        xyz : array (Nbatch, Nelec, Nrbf, Ndim) x, y, z of (Point - Center)

    Returns:
        Y00 = 1/2 \sqrt(1 / \pi)
    """
    return 0.2820948 * torch.ones_like(xyz[..., 0])
83831d0a140d85dc356ae04bcbaeeda74c5a9fee
12,884
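A minimal usage sketch (not part of the dataset record; shapes are illustrative): every point evaluates to the constant 1/2 * sqrt(1/pi) β‰ˆ 0.2820948.

xyz = torch.randn(2, 3, 4, 3)        # (Nbatch, Nelec, Nrbf, Ndim)
y00 = _spherical_harmonics_l0(xyz)   # shape (2, 3, 4), every entry 0.2820948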
def index_all(elm, lst):
    """Return list[int] of all positions where elm appears in lst.
    Empty list if not found."""
    if not isinstance(lst, list):
        lst = list(lst)
    return [i for i, e in enumerate(lst) if e == elm]
0db325714d6c8de5b1a1fbb516287c56931cf0a9
12,886
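Illustrative calls (assuming the corrected isinstance check above); non-list sequences are converted before scanning.

assert index_all('a', ['a', 'b', 'a']) == [0, 2]
assert index_all('x', 'xyx') == [0, 2]   # a string is converted to a list first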
def get_lines(file_path):
    """Get all lines in the file, stripped of surrounding whitespace."""
    with open(file_path, encoding='utf-8') as data_file:
        lines = [line.strip() for line in data_file.readlines()]
    return lines
caf42286bc985c609076cb31432242552483a661
12,887
def summary_table(params, proteins):
    """
    Returns a string representing a simple summary table of protein
    classifications.
    """
    out = ""
    counts = {}
    for seqid in proteins:
        category = proteins[seqid]['category']
        if category not in counts:
            counts[category] = 1
        else:
            counts[category] += 1
    out += "\n\n# Number of proteins in each class:\n"
    pse_total = 0
    for c in counts:
        if "PSE-" in c:
            pse_total += counts[c]
    counts["PSE(total)"] = pse_total
    for c in sorted(counts):
        spacer = " " if "PSE-" in c else ""
        out += "# %s%-15s\t%i\n" % (spacer, c, counts[c])
    return out
1fa3f16f799964bf9f1c0f1c593020902ab2219f
12,894
def on_board(i, j):
    """Return True if the location is on board

    >>> on_board(0, 0)
    True
    >>> on_board(-1, 17)
    False
    """
    return 0 <= i < 8 and 0 <= j < 8
107f9e614c965b29c0c872738fd4ea5407971a29
12,896
def multiplicative_inverse(e, phi):
    """
    Euclid's extended algorithm for finding the multiplicative inverse
    of two numbers.
    """
    d, next_d, temp_phi = 0, 1, phi
    while e > 0:
        quotient = temp_phi // e
        d, next_d = next_d, d - quotient * next_d
        temp_phi, e = e, temp_phi - quotient * e
    if temp_phi > 1:
        raise ValueError('e is not invertible by modulo phi.')
    if d < 0:
        d += phi
    return d
9934f6e2f86ff0ef4165728d59f11ba0d1cad928
12,897
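A quick sanity check with small textbook RSA numbers (illustrative only):

e, phi = 7, 40
d = multiplicative_inverse(e, phi)   # 23, since 7 * 23 = 161 = 4 * 40 + 1
assert (e * d) % phi == 1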
import random

def generate_token(length=64):
    """Generate a random token.

    Args:
        length (int): Number of characters in the token.

    Returns:
        Random character sequence.
    """
    _allowed = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+-_$'
    token = ''.join(random.SystemRandom().choice(_allowed) for _ in range(length))
    return token
b1cc37216ed43e8d1d3c86e9817e87ab9833bfdd
12,903
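A brief usage sketch: length is the only knob, and SystemRandom draws from the OS entropy pool rather than the seeded PRNG.

token = generate_token(16)
assert len(token) == 16
assert all(c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+-_$' for c in token)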
from datetime import datetime

def calc_max_uptime(reboots):
    """Parse the passed-in reboots output, extracting the datetimes.

    Calculate the highest uptime between reboots = the largest diff
    between extracted reboot datetimes.

    Return a tuple of this max uptime in days (int) and the date (str)
    this record was hit.

    For the output above it would be (30, '2019-02-17'), but we use
    different outputs in the tests as well ...
    """
    timestamps = []
    for line in reboots.splitlines()[1:]:
        timestamps.append(datetime.strptime(line[-16:], "%a %b %d %H:%M"))
    timestamps = sorted(i.replace(year=2020) for i in timestamps)
    timedeltas = []
    for i in range(1, len(timestamps)):
        timedeltas.append((timestamps[i] - timestamps[i - 1], timestamps[i]))
    to_return = max(timedeltas)
    return int(to_return[0].days), str(to_return[1].date())
6f7d02bc17c16d4185e566646a70abe840934af3
12,904
import string

def get_number(s, cast=int):
    """
    Try to get a number out of a string, and cast it.
    """
    d = "".join(x for x in str(s) if x in string.digits)
    return cast(d) if d else s
f788bfd13cc7234ca5f290ed047ec9b8bf8acc9b
12,905
def create_avg_stoplines_df(stoplines_df_name):
    """Create an aggregated stoplines_avg_df with the average stopline
    X, Y for each link and number of lanes."""
    stopline_avg_df = stoplines_df_name.groupby('Link_ID')['stopline_X'].mean().reset_index(name='mean_X')
    stopline_avg_df['mean_Y'] = stoplines_df_name.groupby('Link_ID')['stopline_Y'].mean().reset_index(name='mean_Y').iloc[:, 1]
    stopline_avg_df['n_lanes'] = stoplines_df_name.groupby('Link_ID')['Lane'].count().reset_index().iloc[:, 1]
    stopline_avg_df['link_direction'] = stoplines_df_name.groupby('Link_ID')['Link_direction'].first().reset_index().iloc[:, 1]
    return stopline_avg_df
284d22d121cb495b95e61da4c45c107b9cfd7a24
12,911
def create_grid(dataset: list) -> str:
    """
    Create a table grid of the github users

    Args:
        dataset (list): The dataset to create the table from, in this
            case, the list of github users.

    Returns (str): table grid of github users
    """
    row_length = 7
    html_card = ''
    for i in range(0, len(dataset), row_length):
        html_card += '<tr>'
        for element in dataset[i:i + row_length]:
            html_card += f'\n<td>{element} </td>\n'
        html_card += '</tr>\n'
    return html_card
8cd9efaa01ece44ae06400e1e14456f80b2137fa
12,912
def get_runtime(job):
    """Returns the runtime in milliseconds or None if the job is still running."""
    finished_time = job.metadata.get("finished_time")
    if finished_time is None:
        return None
    start_time = job.metadata.get("scrapystats")["start_time"]
    return float(finished_time - start_time)
710d9d135ae7097bb85b0c3e8ea88d142da593e2
12,914
def get_residue_ranges(numbers):
    """Given a list of integers, creates a list of ranges with the
    consecutive numbers found in the list.

    Parameters
    ----------
    numbers: list
        A list of integers

    Returns
    -------
    list
        A list with the ranges of consecutive numbers found in the list
    """
    nums = sorted(set(numbers))
    gaps = [[s, e] for s, e in zip(nums, nums[1:]) if s + 3 < e]
    edges = iter(nums[:1] + sum(gaps, []) + nums[-1:])
    return list(zip(edges, edges))
0f263ee265574e64bec655ec2b5f35fdef2cf8af
12,916
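Illustrative calls: note that numbers within 3 of each other are folded into the same range, so "consecutive" here is tolerant of small gaps.

assert get_residue_ranges([1, 2, 3, 10, 11]) == [(1, 3), (10, 11)]
assert get_residue_ranges([1, 2, 3, 6]) == [(1, 6)]   # a gap of 3 or less is bridged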
def instance_get_host(instance):
    """
    Retrieve the host the instance is located on.
    """
    return instance['OS-EXT-SRV-ATTR:host']
0fef8cdf9e2ba7ac26e8a1ef2b2a1786cbe73d9d
12,917
import csv

def loadcsv(filename):
    """
    Reads an input CSV file.

    Args:
        filename (str): input file path.

    Returns:
        List containing all rows from the CSV file without headers.
    """
    with open(filename, "r", encoding="utf-8") as f:
        return list(filter(None, list(csv.reader(f))[1:]))
442d0fdf7bcc160e98c83d7c848ec9477cf757fe
12,919
from typing import Dict
import re

def apply_list_replace(input_str: str, replacements: Dict[str, str]) -> str:
    """
    Apply a series of replacements on the input.

    :param input_str: the string to be modified
    :param replacements: a Dict regex -> replacement. Each item will be passed to re.sub()
    :return: the modified string
    """
    temp = input_str
    if isinstance(replacements, dict):
        for pattern, repl in replacements.items():
            temp = re.sub(pattern, repl, temp)
    return temp
287e1a7763e7f56719adf566358c62156bcf668c
12,920
from typing import Union

def serialize_attribute_value(value: Union[str, int, float, bool, None]):
    """
    Serialize a value to be stored in a Magento attribute.
    """
    if isinstance(value, bool):
        return "1" if value else "0"
    elif value is None:
        return ""
    return str(value)
a9d5b4f6d507672b594eb1c88d4d86c9bfc6bc11
12,925
def mk_uni_port_num(intf_id, onu_id):
    """
    Create a unique virtual UNI port number based upon PON and ONU ID

    :param intf_id:
    :param onu_id: (int) ONU ID (0..max)
    :return: (int) UNI Port number
    """
    return intf_id << 11 | onu_id << 4
3d348b9d6dc40f54d6f3667a8a5310a3d52b3d5d
12,926
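A worked example of the bit layout (IDs are illustrative): intf_id lands in bits 11 and up, onu_id in bits 4 through 10.

port = mk_uni_port_num(2, 5)   # (2 << 11) | (5 << 4) = 4096 + 80
assert port == 4176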
from pathlib import Path

def find_files(base_root: Path):
    """
    Search the given base directory for the actual dataset root.
    This makes it a little easier for the dataset manager.

    :param base_root:
    :return:
    """
    # These are folders we expect within the dataset folder structure.
    # If we hit them, we've gone too far.
    excluded_folders = {'depth', 'rgb', '__MACOSX'}
    to_search = {Path(base_root)}
    while len(to_search) > 0:
        candidate_root = to_search.pop()

        # Make the derivative paths we're looking for. All of these must exist.
        rgb_path = candidate_root / 'rgb.txt'
        trajectory_path = candidate_root / 'groundtruth.txt'
        depth_path = candidate_root / 'depth.txt'

        # If all the required files are present, return that root and the file paths.
        if rgb_path.is_file() and trajectory_path.is_file() and depth_path.is_file():
            return candidate_root, rgb_path, depth_path, trajectory_path

        # This was not the directory we were looking for, search the subdirectories.
        for child_path in candidate_root.iterdir():
            if child_path.is_dir() and child_path.name not in excluded_folders:
                to_search.add(child_path)

    # Could not find the necessary files to import, raise an exception.
    raise FileNotFoundError("Could not find a valid root directory within '{0}'".format(base_root))
d54ab18fb1bc7b49a7bf486b11d020a5d17e3cdb
12,927
def summult(list1, list2):
    """
    Multiplies elements in list1 and list2, element by element, and
    returns the sum of all resulting multiplications. Must provide
    equal-length lists.

    Usage: summult(list1, list2)
    """
    if len(list1) != len(list2):
        raise ValueError("Lists not equal length in summult.")
    s = 0
    for item1, item2 in zip(list1, list2):
        s = s + item1 * item2
    return s
2b1c4543867998c8edf372c2388df13ce07df910
12,928
def fun_dfun(obj, space, d):
    """
    Computes the posterior predictive and posterior predictive gradients
    of the provided GPyOpt object.

    Parameters
    ----------
    obj: GPyOpt object
        The GPyOpt object with a surrogate probabilistic model.
    space: GPyOpt space
        A GPyOpt object that contains information about the design domain.
    d: np.ndarray
        Proposed design.
    """
    mask = space.indicator_constraints(d)
    pred = obj.model.predict_withGradients(d)[0][0][0]
    d_pred = obj.model.predict_withGradients(d)[2][0]
    return float(pred * mask), d_pred * mask
14c39d96dc810d8078ff2dd602fc6216214e8d3f
12,935
def partition_opm_license_list(df):
    """This function partitions the OPM Granted Licenses list by license
    type, storing the result in a Python dictionary.

    Parameters
    ----------
    df (DataFrame): OPM Granted Licenses list

    Returns
    -------
    license_list_dict (dict): OPM Granted Licenses list partitioned by
        license type
    """
    license_list_dict = {}
    license_list_dict['PBI'] = df[~df['Power BI'].isna()]
    license_list_dict['P1'] = df[~df['Essentials License (Project Plan Essential)'].isna()]
    license_list_dict['P3'] = df[~df['Professional (Project Plan 3)'].isna()]
    license_list_dict['P5'] = df[~df['Premium (Project Plan 5)'].isna()]
    return license_list_dict
4f9bab49385e23732e5e56e3f0dbf8762eb05438
12,937
def get_upload_commands(system, release, package):
    """Returns the required package_cloud commands to upload this package."""
    repos = ["datawireio/stable", "datawireio/telepresence"]
    res = []
    for repo in repos:
        res.append(
            "package_cloud push {}/{}/{} {}".format(repo, system, release, package)
        )
    return res
db3fccb3302657367be121f4bb64affcd4879180
12,939
import math

def step_decay(epoch: int):
    """
    Learning rate scheduler.

    Parameters
    ----------
    epoch: int
        Index of the current epoch

    Returns
    -------
    float
        Learning rate
    """
    initial_lrate = 1e-3
    drop = 0.5
    epochs_drop = 50.0
    lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
    return lrate
80af442b6ce0c7b454969896b7ac41efdf63eaf6
12,945
def _internal_is_knet_ascii(buf):
    """
    Checks if the file is a valid K-NET/KiK-net ASCII file.

    :param buf: File to read.
    :type buf: Open file or open file-like object.
    """
    first_string = buf.read(11).decode()
    # File has less than 11 characters
    if len(first_string) != 11:
        return False
    if first_string == 'Origin Time':
        return True
    return False
9ca251ef9c5eab64a97f69e1acd8b6aa746ea09b
12,946
def getInviteAcceptedRedirect(entity, _):
    """Returns the redirect for accepting an invite."""
    return '/%s/accept_invite/%s/%s' % (
        entity.role, entity.scope_path, entity.link_id)
39affb109481cb96bb4ca992911e6a6a1957962a
12,950
def format_month(month):
    """Formats a month to the first 3 characters of the month input.

    Args:
        month: user input month

    Returns:
        A 3-character month; raises ValueError if the input is not a month.
    """
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    if month.isdigit():
        if len(month) > 2:
            raise ValueError
        month = int(month)
        if month > 12 or month <= 0:
            raise ValueError
        return months[month - 1]
    elif not (month.istitle() or month.islower() or month.isupper()):
        raise ValueError
    elif month.capitalize() in months:
        return month.capitalize()
    else:
        raise ValueError
da7ffd8bc801377ecebcc76e972219633ae21566
12,951
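Illustrative calls against the cleaned-up version above: numeric strings index into the month list, and abbreviations match case-insensitively.

assert format_month('3') == 'Mar'
assert format_month('dec') == 'Dec'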
def get_data(fn, split=False, split_char=None, filter_blanks=False):
    """
    :param fn: filename to open
    :param split: if you want to split the data read
    :param split_char: character you want to split the data on
    :param filter_blanks: remove empty strings if split=True

    Example:
    >>> data = get_data('file.txt', split=True, split_char=",")
    >>> print(data)
    [1, 2, 3, 4]
    """
    with open(fn, encoding='utf-8') as f:
        data = f.read()
        if split:
            if split_char:
                data = data.split(split_char)
            if filter_blanks:
                data = [s.strip() for s in data if s.strip() != '']
    return data
6ff198281d41f96fb48c45c51525245cffe8e7ec
12,952
def maybe_add(d, exclude_false=False, **kws):
    """
    Adds keyword arguments to a dict if their values are not None.

    Parameters
    ----------
    d: dict
        The dictionary to add to.
    exclude_false: bool
        Exclude keys whose values are False.
    kws: dict
        The keys to maybe add
    """
    for k, v in kws.items():
        if v is None:
            continue
        if exclude_false and v is False:
            continue
        d[k] = v
    return d
bc7bc09261f37fe052d30d6b0c2452937f33c563
12,955
import shutil

def cut_paste(src_path, dst_path):
    """Cut-paste functionality just copied from shutil. Works on files or dirs.

    Args:
        src_path (string): Source path to be cut
        dst_path (string): Destination path to paste to
    """
    shutil.move(src_path, dst_path)
    return True
295eee15f8e31f510333967a45c683ff99c25efb
12,960
def get_cache_key_counter(bound_method, *args, **kwargs):
    """Return the cache, key and stat counter for the given call."""
    model = bound_method.__self__
    ormcache = bound_method.clear_cache.__self__
    cache, key0, counter = ormcache.lru(model)
    key = key0 + ormcache.key(model, *args, **kwargs)
    return cache, key, counter
241b13b29b3dce0888f2eeb40361dfa03d5f8389
12,964
def extract(mod, models):
    """
    Returns models with mod removed.
    """
    if mod == {}:
        # No mod to delete, return models as it is.
        return models
    return [model for model in models if model != mod]
5aa24ccaa238fe85f4f037c8af318e95a77b7b55
12,965
def _is_no_cache(request):
    """Returns True if the request should skip the cache."""
    cache_control = request.headers.get('Cache-Control') or ''
    return 'no-cache' in cache_control or 'max-age=0' in cache_control
bf8421a3f9a654a877cdf518aa9a4de532098f89
12,967
def get_or_else(data, key, default_value=None):
    """
    Tries to get a value from data with key. Returns default_value in
    case of key not found.
    """
    if not data:
        return default_value
    try:
        return data[key]
    except (KeyError, IndexError, TypeError):
        return default_value
53bc769a3331684c46127b08976fd62ca1ab47df
12,969
import math

def pformat_bar(
        value, width=40, prefix="", vmin=0., vmax=1.,
        border=True, fill=' ', reverse=False):
    """Return a progressbar-like str representation of value.

    Parameters
    ==========
    value : float
        Value to be represented.
    width: int
        Bar width (in characters).
    prefix: string
        Text to be prepended to the bar.
    vmin : float
        Minimum value.
    vmax : float
        Maximum value.
    """
    # This code is based on https://gist.github.com/rougier/c0d31f5cbdaac27b876c  # noqa: E501
    # The original license:
    # -------------------------------------------------------------------------
    # Copyright (c) 2016, Nicolas P. Rougier
    # Distributed under the (new) BSD License.
    # -------------------------------------------------------------------------

    # Block progression is 1/8
    if reverse:
        blocks = ' β–β–‚β–ƒβ–„β–…β–†β–‡β–ˆ'
    else:
        blocks = ["", "▏", "β–Ž", "▍", "β–Œ", "β–‹", "β–Š", "β–‰", "β–ˆ"]
    vmin = vmin or 0.0
    vmax = vmax or 1.0
    if border:
        lsep, rsep = "▏", "β–•"
    else:
        lsep, rsep = " ", " "

    # Normalize value
    value = min(max(value, vmin), vmax)
    value = (value - vmin) / (vmax - vmin)

    v = value * width
    x = math.floor(v)   # integer part
    y = v - x           # fractional part
    i = int(round(y * (len(blocks) - 1)))
    bar = "β–ˆ" * x
    barfrac = blocks[i]
    n = width - x - 1
    nobar = fill * n
    if reverse:
        bar = f'{lsep}{nobar}{barfrac}{bar}{rsep}'
    else:
        bar = f'{lsep}{bar}{barfrac}{nobar}{rsep}'
    return bar
6b12a5bb2bd4d051224b2f5960a0378b0593b2f7
12,973
import csv

def process_csv(csv_file):
    """Turn the contents of the CSV file into a list of lists."""
    print("Processing {}".format(csv_file))
    with open(csv_file, "r") as datafile:
        data = list(csv.reader(datafile))
    return data
87603c21d09332cc1e1d25a3de6074f984f95426
12,975
import re

def new_exposures(out):
    """Scan rsync output for exposures to be transferred.

    Parameters
    ----------
    out : :class:`str`
        Output from :command:`rsync`.

    Returns
    -------
    :class:`set`
        The unique exposure numbers detected in `out`.
    """
    e = set()
    e_re = re.compile(r'([0-9]{8})/?')
    for l in out.split('\n'):
        m = e_re.match(l)
        if m is not None:
            e.add(m.groups()[0])
    return e
8fc8817fe0ad79c177473ec676853569d733ec65
12,976
def midpoint(pair1, pair2):
    """Find and return the midpoint between the two given points."""
    x = (pair1[0] + pair2[0]) / 2
    y = (pair1[1] + pair2[1]) / 2
    return x, y
760aca99b1dad002fb8b6f483515f222dee77160
12,982
def separate_answers(albert_text, cls='[CLS]', sep='[SEP]'):
    """
    Separates the sentences of sequence classification used for BERT.

    :param albert_text: list of ALBERT word tokens
    :param cls: string of cls token
    :param sep: string of sep token
    :return: separated strings
    """
    # Fix SPIECE_underline
    cls_idx = albert_text.index(cls) + 4
    sep_1_idx = albert_text.index(sep) + 4
    ans1 = albert_text[cls_idx + 1:sep_1_idx - 4]
    ans2 = albert_text[sep_1_idx + 1:albert_text.index(sep, sep_1_idx + 1)]
    return ans1, ans2
66a9b1e5bcd2c096187db12f31b6e66336552f22
12,984
def find_allowed_size(nx_size):
    """
    Finds the next largest "allowed size" for the Fried Phase Screen method.

    Parameters:
        nx_size (int): Requested size

    Returns:
        int: Next allowed size
    """
    n = 0
    while (2 ** n + 1) < nx_size:
        n += 1
    nx_size = 2 ** n + 1
    return nx_size
b7d53ba805ed3c4f543bbb3ff0a94056b76f9738
12,988
def expandRectangle(rect, xfactor=3, yfactor=3):
    """
    Takes a (x, y, w, h) rectangle tuple and returns a new bounding
    rectangle that is centered on the center of the origin rectangle,
    but has a width/height that is larger by a given factor.

    The returned coordinates are rounded to integers.
    """
    x, y, w, h = rect
    # Horizontal expansion
    x -= ((xfactor - 1) / 2) * w
    w *= xfactor
    # Vertical expansion
    y -= ((yfactor - 1) / 2) * h
    h *= yfactor
    return (int(round(x)), int(round(y)), int(round(w)), int(round(h)))
dbc37c87f7fe69c846a3089ab0543e4810ee3c21
12,997
import re

def baby_names_table_rows_from_babyfile_string(babyfile_string):
    """
    babyfile_string sample excerpt with lines of html
        <tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
        <tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
        <tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>

    return list of dictionaries
        row = {'rank': rank, 'name_boy': name_boy, 'name_girl': name_girl}
    """
    print('baby_names_table_rows_from_babyfile_string')
    table_rows = []
    # findall with regular expression with () groups returns a list of tuples.
    baby_tuples = re.findall(r'<tr align="right"><td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', babyfile_string)
    for rank, name_boy, name_girl in baby_tuples:
        row = {'rank': rank, 'name_boy': name_boy, 'name_girl': name_girl}
        table_rows.append(row)
    return table_rows
a0bb831ee908e9fae0c125b2452977afc23058ce
12,998
import json

def error_response(title, status='500'):
    """Generates an error response to return to API Gateway.

    Parameters
    ----------
    title: string
        The title description of the error.
    status: string
        The http status code.

    Returns
    -------
    dict: A response back to the api gateway with the error.
    """
    return {
        'statusCode': status,
        'body': json.dumps({
            'errors': [{
                'status': status,
                'title': title
            }]
        })}
16a62ec46396bee5fe3b240febe4bd0ac2cf9de9
13,005
def create_plotly_blob(data_list, xlabel, ylabel, title):
    """
    Create plotly line plot object, useful for jupyter plots or
    generation of interactive html plots. E.g.

        import plotly.graph_objects as go
        blob = create_plotly_blob([(xdata1, ydata1, label1, True),
                                   (xdata2, ydata2, label2, False)],
                                  'x', 'y', 'title')
        fig = go.Figure(blob)
        fig.show()

    4-element tuples in data_list must follow (xdata, ydata, label, visible):
        xdata: 1d array of x-axis data
        ydata: 1d array of y-axis data
        label: str label description
        visible: bool, if False plot is only given in legend but not turned on

    :param data_list: list of 4 element tuples (xdata, ydata, label, visible)
    :param xlabel: str x-axis label
    :param ylabel: str y-axis label
    :param title: str plot title
    :return: dict
    """
    # Convert title
    title = title.replace('\n', '<br>')

    # Create json blob
    auto_blob = {
        'data': [],
        'layout': {'font': {'family': 'Courier New, monospace', 'size': 18},
                   'legend': {'title': {'text': 'Scannables'}},
                   'title': {'text': title},
                   'xaxis': {'title': {'text': xlabel}},
                   'yaxis': {'title': {'text': ylabel}}}
    }

    for item in data_list:
        vis = True if item[3] else 'legendonly'
        trace = {
            'mode': 'markers+lines',
            'name': item[2],
            'type': 'scatter',
            'visible': vis,
            'x': list(item[0]),
            'y': list(item[1]),
        }
        auto_blob['data'] += [trace]
    return auto_blob
a19ece2ba0a1fc6a1db6a167b266d1b3099927c1
13,006
def get_indices_of_A_in_B(A, B):
    """Return the set of indices into B of the elements in A that occur in B.

    Parameters
    ----------
    A : list
        The "needles"
    B : list
        The "haystack"

    Returns
    -------
    list
        Indices into B of elements in A occurring in B
    """
    s = set(B)
    return [i for i, e in enumerate(A) if e in s]
88948acd14d7979f7e5ce067776c847e1d0b2a24
13,010
def get_usrid_from_league(league):
    """
    Get user ids from a league and put them into a list.

    :param league: LeagueDto: an object that contains league information
    :return: usrid
    """
    entries = league['entries']
    usrid = []
    for entry in entries:
        usrid.append(entry['playerOrTeamId'])
    usrid = list(set(usrid))
    return usrid
ce91f7f1f0afcc064b8f59c90721b60be47ab4b9
13,016
import string

def apply_object_attributes_to_template(template, value_object):
    """Generate a string from the template by applying values from the given object.

    If the template provided is not a template (does not have any placeholders),
    this will not have any effect and template will be returned unchanged.

    If value_object is None, this will not have any effect.

    Arguments:
        template -- A string that may or may not be templated. If templated,
            the placeholders will be populated with values from the attributes
            of the value_object.
        value_object -- Any object that supports the __dict__ method. Most
            classes have a __dict__ method that returns a mapping of all the
            attributes and the associated values.

    Returns:
        string -- This will be the template, with the values from value_object
            applied.
    """
    # Parse the template and extract the field names.
    # We'll use the field names to explicitly look up attributes in the value_object.
    # The reason for this is that it works for @property attributes as well as
    # normal attributes.
    field_names = [field_name for _, field_name, _, _ in string.Formatter().parse(template)
                   if field_name is not None]

    template_values = {}
    for field_name in field_names:
        try:
            template_values[field_name] = getattr(value_object, field_name)
        except AttributeError as e:
            raise AttributeError(('Unable to apply object to template. Could not look up '
                                  'attribute \'{}\' in the object \'{}\'. '
                                  'Error: {}').format(field_name, str(value_object), str(e)))
    return template.format(**template_values)
266085845a4e4283d9ffa897b4b0b7d2f8669001
13,019
def urlconf(patterns, do_not_autogenerate=True):
    """
    A custom url configuration for this action, just like in Django's urls.py.

    The custom urlconf applies after the urlconf for the controller, unless
    do_not_autogenerate is true.

    Example: `["/user/(?P<user_id>\d+)/"]`

    :param patterns: a url pattern or a list of url patterns
    :param do_not_autogenerate: erase the urlconf that was automatically generated
    """
    if type(patterns) not in (list, tuple):
        patterns = (patterns,)

    def decorator(action_function):
        action_function.urlconf = patterns
        action_function.urlconf_erase = do_not_autogenerate
        return action_function

    return decorator
1d6f8c9e840979cbaab54112b3203eebebed5f82
13,022
def note2ratio(note, cents=0):
    """Converts semitones to a frequency ratio."""
    ratio = 2 ** ((note + cents / 100) / 12)
    return ratio
1c78697d3978d122d8be39e406bc8be8b7684f9d
13,023
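A quick check of the equal-temperament formula: 12 semitones doubles the frequency, and cents are hundredths of a semitone.

assert note2ratio(12) == 2.0                # one octave
assert note2ratio(11, 100) == 2.0           # 11 semitones + 100 cents = one octave
assert abs(note2ratio(7) - 1.4983) < 1e-4   # a perfect fifth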
def prompt(message, validator=lambda x: True, input_to_option=lambda x: x,
           default_value=None, options_to_print=None):
    """
    Prompt the user a message with optionally some options.

    :param message: the message to show to the user
    :param validator: a function that predicates if the input is correct
    :param input_to_option: a function that given the input transforms it in something else
    :param default_value: the value to return as the default if the user doesn't insert anything
    :param options_to_print: the options to print if necessary
    :return: the value inserted by the user, validated
    """
    if options_to_print:
        print("Allowed values for {0}:".format(message))
        for item in options_to_print:
            print(item)
    user_prompt = "{0} [{1}]: ".format(message, default_value if default_value is not None else "")

    valid_user_input = False
    result = default_value
    # Give the user the possibility to try again if wrong
    while not valid_user_input:
        user_input = input(user_prompt).strip() or default_value
        result = input_to_option(user_input)
        if validator(result):
            valid_user_input = True
        else:
            print("ERROR: {0} is not an acceptable value for {1}".format(user_input, message))
    return result
3dcb9ab47330dfb0ea52b8c9704d22b02b245174
13,027
import re

def get_pull_no(ref):
    """
    Get the pull request number from a given git ref

    >>> get_pull_no('refs/pull/12345/head')
    12345
    >>> get_pull_no('refs/pull/6789/merge')
    6789
    """
    match = re.search('refs/pull/([0-9]+)/', ref)
    if match:
        return int(match[1])
    raise ValueError("Unable to get pull request number from ref {}".format(ref))
dbd4ca5f241c735976f992765c1e8b507dab53a0
13,030
import math

def total_sample_splits_categorical(no_of_values):
    """
    Compute the total number of sample splits that can be generated by
    categoricals.

    Parameters
    ----------
    no_of_values : Int.

    Returns
    -------
    no_of_splits: Int.
    """
    no_of_splits = 0
    for i in range(1, no_of_values):
        no_of_splits += math.factorial(no_of_values) / (
            math.factorial(no_of_values - i) * math.factorial(i))
    return no_of_splits / 2
705a565998c5cde4a37370e6787aa0f07d973987
13,035
def normalize_commit(commit, **kwargs):
    """
    This takes commits either in the JSON format provided by a GitHub
    webhook, or the object format provided by github3.py, and returns a
    normalized Python dict.
    """
    if isinstance(commit, dict):
        # If GitHub webhook payload:
        sender = kwargs.get("sender", {})
        avatar_url = ""
        if sender.get("avatar_url") and sender.get("login") == commit["author"]["username"]:
            avatar_url = sender["avatar_url"]
        return {
            "id": commit["id"],
            "timestamp": commit["timestamp"],
            "author": {
                "name": commit["author"]["name"],
                "email": commit["author"]["email"],
                "username": commit["author"]["username"],
                "avatar_url": avatar_url,
            },
            "message": commit["message"],
            "url": commit["url"],
        }
    else:
        # If github3.py object:
        return {
            "id": commit.sha,
            "timestamp": commit.commit.author.get("date", ""),
            "author": {
                "name": commit.commit.author.get("name", ""),
                "email": commit.commit.author.get("email", ""),
                "username": commit.author.login if commit.author else "",
                "avatar_url": commit.author.avatar_url if commit.author else "",
            },
            "message": commit.message,
            "url": commit.html_url,
        }
7728c9d9f42929fce8193580ef374d1e55e4df7f
13,037
def concat_drive_path(dest_path: str, end_point: str, default_folder: str = 'mydrive') -> str:
    """
    Generate a file path.

    :param dest_path: parent path
    :param end_point: file_name or folder_name
    :param default_folder: if dest_path is None, use your drive home folder instead
    :return:
    """
    if dest_path is None:
        display_path = f"/{default_folder}/{end_point}"
    elif dest_path.startswith('id'):
        display_path = f"{dest_path}/folder_name"
    else:
        # add leading /
        dest_path = f"/{dest_path}" if not dest_path.startswith('/') else dest_path
        # add trailing /
        dest_path = f"{dest_path}/" if not dest_path.endswith('/') else dest_path
        display_path = f"{dest_path}{end_point}"
    return display_path
09dd261e825379604c0c7546b3a6a68d1b35505f
13,042
def temp_gradient(bottom_hole_temperature, surface_temperature, bottom_hole_depth):
    """
    Temperature gradient calculation.

    Parameters
    ----------
    bottom_hole_temperature : float
        Bottom hole temperature (deg F or deg C)
    surface_temperature : float
        Surface temperature (deg F or deg C)
    bottom_hole_depth : float
        Bottom hole depth (ft or m)

    Returns
    -------
    float
        Returns temperature gradient in deg per depth unit (degF/ft or degC/m)
    """
    gradient = (bottom_hole_temperature - surface_temperature) / bottom_hole_depth
    return gradient
f99a9215fd63a8ef564a1fc91de75ea146091712
13,043
def cross2D(v1, v2):
    """Calculates the scalar cross product magnitude of two 2D vectors, v1 and v2."""
    return v1[0] * v2[1] - v1[1] * v2[0]
6bbe95ac483b349cda500a0c5045181035b46314
13,045
def qualified_name_to_object(qualified_name: str, default_module_name='builtins'):
    """
    Convert a fully qualified name into a Python object.

    It is true that ``qualified_name_to_object(object_to_qualified_name(obj)) is obj``.

    >>> qualified_name_to_object('unittest.TestCase')
    <class 'unittest.case.TestCase'>

    See also :py:func:`object_to_qualified_name`.

    :param qualified_name: fully qualified name of the form [<module>'.'{<name>'.'}]<name>
    :param default_module_name: default module name to be used if the name does not contain one
    :return: the Python object
    :raise ImportError: If the module could not be imported
    :raise AttributeError: If the name could not be found
    """
    parts = qualified_name.split('.')
    if len(parts) == 1:
        module_name = default_module_name
    else:
        module_name = parts[0]
        parts = parts[1:]
    value = __import__(module_name)
    for name in parts:
        value = getattr(value, name)
    return value
f8758a80aaa4196aa559b18a5850840ba4bbd69b
13,046
def build_url(hub_base_url: str, org: str, workspace: str) -> str:
    """
    Build the base URL of the Chaos Hub workspace where the experiment
    and the journal will be stored and be made visible.
    """
    return '/'.join([hub_base_url, 'api', org, workspace])
116b9b7f6211ca50b48550689b7e161cee0b4d9d
13,053
from typing import Any, Callable
import functools

def agent_must_be_ready(f: Callable[..., Any]) -> Callable[..., Any]:
    """Any Agent method decorated with this will raise if the agent is not ready."""

    @functools.wraps(f)
    def decorated(self, *args: Any, **kwargs: Any) -> Any:
        if not self.is_ready():
            raise Exception(
                "Agent needs to be prepared before usage. You need to set a "
                "processor and a tracker store."
            )
        return f(self, *args, **kwargs)

    return decorated
bec74702355d6aaa7796527a05d9510048513910
13,057
def calc_exec_time(block):
    """
    Calculate the total time to execute all observing blocks by exp. time.
    Excludes any readout time and overheads.

    :param block: The observing Block document.
    :type block: Dict

    :rtype: float
    """
    if "parameters" not in block:
        return 0

    exp1 = 0
    exp2 = 0
    sci_blk = block["parameters"]

    if sci_blk.keys() >= {"det1_exptime", "det1_nexp"}:
        if sci_blk['det1_exptime'] and sci_blk['det1_nexp']:
            exp1 = sci_blk['det1_exptime'] * sci_blk['det1_nexp']

    if sci_blk.keys() >= {"det1_exptime", "det2_exptime", "det1_nexp", "det2_nexp"}:
        if sci_blk['det2_exptime'] and sci_blk['det2_nexp']:
            exp2 = sci_blk['det2_exptime'] * sci_blk['det2_nexp']

    return max(exp1, exp2)
deaf6649173de784a77601da4ef4df1b7c10cb93
13,058
def _parse_instructors(details):
    """
    Extract instructor names from the course detail page.

    Args:
        details (Tag): BeautifulSoup Tag for course details

    Returns:
        list of dict: List of first & last names of each instructor
    """
    try:
        instructors = details.findAll(
            "div", {"class": "field--name-field-learn-more-links"}
        )[-1].findAll("div", {"class": "field__item"})
        return [
            instructor.get_text().strip().split(",", 1)[0].split(" ", 1)
            for instructor in instructors
        ]
    except (AttributeError, IndexError):
        return []
3dc4f4ddf62dc9dae894d218e249a279941eca5f
13,060
def useful_person_for_subjects(
        person_main_topics: list,
        scopus_topics: list,
        k_relevant: int = 1) -> bool:
    """
    Get the status of whether a person can be used for the given subjects.

    Args:
        person_main_topics:
        scopus_topics: topics list in ScopusScienceTopicSearch class format
        k_relevant: count of reviewed topics

    Returns:
        Status
    """
    if len(scopus_topics) == 0 or len(person_main_topics) == 0:
        return False
    for topic in scopus_topics[:k_relevant]:
        if topic.split(',')[0] in person_main_topics:
            return True
    return False
df567a755dc6a5f4df57ed3654f407fb27655cd5
13,069
def format_line_obj(line_obj, ordered_attributes, delimiter):
    """Formats an object into a delimited string.

    If an attribute is not found, a ':(' will be inserted instead.
    """
    return str(delimiter).join(
        str(line_obj.__dict__.get(attr, ':(')) for attr in ordered_attributes)
be7da1fa6bd080047bdd235d332b72e770dcfa48
13,073
import struct

def parse_images(filename):
    """
    Reading images information and converting from bytearray to list of
    ints with help of python's struct module.
    """
    images = []
    with open(filename, 'rb') as imgs_file:
        # Get only the size of following data
        size = struct.unpack(">IIII", imgs_file.read(16))[1]
        for _ in range(size):
            # Read whole image pixel and unpack it from unsigned bytes to integers
            barray = imgs_file.read(784)
            img = list(struct.unpack("<" + "B" * 784, barray))
            images.append(img)
    return images
bf17dded918051d52cc807f01aed350d94897077
13,076
def matmul_cupydense(left, right, scale=1, out=None):
    """
    Perform the operation ``out := scale * (left @ right) + out`` where
    `left`, `right` and `out` are matrices. `scale` is a complex scalar,
    defaulting to 1.
    """
    # This may be done more naturally with gemm from cupy.cublas,
    # but the GPU timings are not very different.
    if out is None:
        return (left @ right) * scale
    else:
        out += (left @ right) * scale
        return out
244a956221054b7fa330100badf8ec5e15e20f8a
13,079
def either(*funcs):
    """Return ``True`` if any of the functions evaluate true."""
    return lambda x: any(f(x) for f in funcs)
32f037748a1fb4cf06e5fdce7c1bad20e8200d61
13,082
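A small composition sketch: the returned predicate is the logical OR of its arguments.

is_alnum_like = either(str.isdigit, str.isalpha)
assert is_alnum_like('123') and is_alnum_like('abc')
assert not is_alnum_like('!?')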
def split(matrix):
    """
    Splits a given matrix into quarters.

    Input: nxn matrix
    Output: tuple containing 4 n/2 x n/2 matrices corresponding to a, b, c, d
    """
    row, col = matrix.shape
    row2, col2 = row // 2, col // 2
    return (matrix[:row2, :col2], matrix[:row2, col2:],
            matrix[row2:, :col2], matrix[row2:, col2:])
6284c4a8ff6747b005971a7a37f0327aa5815a7c
13,084
import re

def normalize_tokenize(string):
    """
    Takes a string, normalizes it (makes it lowercase and removes
    punctuation), and then splits it into a list of words.

    Note that everything in this function is plain Python without using
    NLTK (although as noted below, NLTK provides some more sophisticated
    tokenizers we could have used).
    """
    # make lowercase
    norm = string.lower()
    # remove punctuation
    norm = re.sub(r'(?u)[^\w\s]', '', norm)
    # split into words
    tokens = norm.split()
    return tokens
800be8deabbc109f7d9459e5cf2da9fb134557ae
13,086
def _IsDisallowedPropertiesError(error):
    """Checks if an error is due to properties that were not in the schema.

    Args:
        error: A ValidationError

    Returns:
        Whether the error was due to disallowed properties
    """
    prop_validator = 'additionalProperties'
    prop_message = 'Additional properties are not allowed'
    return error.validator == prop_validator and prop_message in error.message
2042806486e861cf97d0e2ec168a167610749ebd
13,087
def notqdm(iterable, *args, **kwargs):
    """
    Silent replacement for `tqdm`.
    """
    return iterable
f37a5f84cf02987e4ada0ba8c0ee324c7a676903
13,091
def slice2limits(slices):
    """
    Create a tuple of min, max limits from a set of slices.

    Parameters:
        * slices: list of slices

    Returns:
        Tuple of minimum and maximum indices
    """
    mins = [s.start for s in slices]
    maxs = [s.stop - 1 for s in slices]
    return mins, maxs
1196e67fea135fb3c363def9d5fe53a0ada9b0af
13,097
def solution(array, n_rotations):
    """
    Returns the Cyclic Rotation of array with n_rotations positions to the right.
    """
    n = len(array)
    n_rotations = n_rotations % n if n > 0 else 0
    return array[n - n_rotations:] + array[:n - n_rotations]
6f5edc0c8ec3d0e58830466e61577e80a725a624
13,098
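Illustrative rotations, including the wrap-around and empty-input cases:

assert solution([1, 2, 3, 4], 1) == [4, 1, 2, 3]
assert solution([1, 2, 3, 4], 5) == [4, 1, 2, 3]   # 5 % 4 == 1, so same result
assert solution([], 3) == []                       # empty input is handled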
import yaml
import json

def load_data(file_path, use_yaml=True):
    """Load data from a given YAML (or JSON) file into a Python dictionary.

    :param file_path: path to your YAML file.
    :param use_yaml: use yaml or json
    :return: contents of the file as Python dict.
    """
    with open(file_path, "r") as f:
        if use_yaml:
            data = yaml.safe_load(f)
        else:
            data = json.load(f)
    return data
b136e16cd614cfdb3ca791a6d64d513cf52dbf7c
13,100
import random

def get_best_move(board, scores):
    """
    Function that computes the best move for the machine given the
    actual board state.
    """
    max_score = max(scores[row][col] for row, col in board.get_empty_squares())
    candidates = [(row, col) for row, col in board.get_empty_squares()
                  if scores[row][col] == max_score]
    return random.choice(candidates)
afabd281ad1547b118cab10ccc912bdf4956f9c4
13,104
import pathlib
import json

def load_json(filepath):
    """
    Loads the metrics in a dictionary.

    :param filepath: path of the JSON file to load (str or pathlib.Path);
        a '.json' suffix is appended if missing
    :return: A dict with the metrics
    """
    if isinstance(filepath, pathlib.Path):
        filepath = str(filepath)
    if ".json" not in filepath:
        filepath = f"{filepath}.json"
    with open(filepath, "rb") as json_file:
        metrics_dict = json.loads(json_file.read())
    return metrics_dict
1dd8ff7822228bc0173a66df626902a723338d55
13,109
from typing import Callable
import random

def choose(option1: Callable = lambda: None, option2: Callable = lambda: None):
    """
    Randomly run either option 1 or option 2.

    :param option1: a possible function to run
    :param option2: another possible function to run
    :return: the result of the function
    """
    if random.getrandbits(1):
        return option1()
    else:
        return option2()
0f0ecbc945de9f6d5698cd86103265eccf5708e6
13,110
def transform_response_to_context_format(data: dict, keys: list) -> dict:
    """
    Transform API response data to suitable XSOAR context data.
    Remove the 'x-ms' prefix and replace '-' with '_' for more readable
    and conventional variables.

    Args:
        data (dict): Data to exchange.
        keys (list): Keys to filter.

    Returns:
        dict: Processed data.
    """
    return {key.replace('x-ms-', '').replace('-', '_').lower(): value
            for key, value in data.items() if key in keys}
34f1a613654deb71581bcd33757b8741840ae44f
13,111
import torch

def one_hot(y, K, smooth_eps=0):  # pylint: disable=invalid-name
    """One-hot encodes a tensor with optional label smoothing.

    Args:
        y: A tensor containing the ground-truth labels of shape (N,),
            i.e. one label for each element in the batch.
        K: The number of classes.
        smooth_eps: Label smoothing factor in [0, 1] range.

    Returns:
        A one-hot encoded tensor.
    """
    assert 0 <= smooth_eps <= 1
    assert y.ndim == 1, "Label tensor must be rank 1."
    y_hot = torch.eye(K)[y] * (1 - smooth_eps) + (smooth_eps / (K - 1))
    return y_hot.to(y.device)
ee47f9c778d875834c49c098ded4936edb104887
13,113
def import_name(modulename, name=None):
    """
    Import identifier ``name`` from module ``modulename``.

    If ``name`` is omitted, ``modulename`` must contain the name after the
    module path, delimited by a colon.

    Parameters:
        modulename (str): Fully qualified module name, e.g. ``x.y.z``.
        name (str): Name to import from ``modulename``.

    Returns:
        object: Requested object.
    """
    if name is None:
        modulename, name = modulename.rsplit(':', 1)
    module = __import__(modulename, globals(), {}, [name])
    return getattr(module, name)
a320063e878db935f8a2409c7487617e2c9f1802
13,117
import json

def _get_needed_packages(json_file, test=None):
    """
    Returns a dict with needed packages based on a JSON file.

    If a test is specified it will return the dict just for that test.
    """
    needed_packages = {}
    with open(json_file) as f:
        test_packages = json.load(f)
    for key, value in test_packages.items():
        needed_packages[key] = value
    if test:
        if test in needed_packages:
            needed_packages = needed_packages[test]
        else:
            needed_packages = {}
    return needed_packages
b929e57beda05372b03936598db275e87a318962
13,126
def get_factory_log_recipients(entry):
    """Read the log recipients specified by the Factory in its configuration.

    Args:
        entry: dict-like object representing the entry configuration

    Returns:
        list: list containing the URLs of the log servers, empty if none present
    """
    entr_attrs = entry.get_child_list("attrs")
    for attr in entr_attrs:
        if attr["name"] == "LOG_RECIPIENTS_FACTORY":
            return attr["value"].split()
    return []
f3e776ea9b8102247b5c7e817762523467d9f953
13,129
def calc_num_pixels(num_pixels, stride):
    """
    Converts the current number of pixels to the number there will be
    given a specific stride.
    """
    return 1 + (num_pixels - 1) // stride
b6ae056339913c496017251709381c19f551a074
13,134
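Worked examples of the stride arithmetic:

assert calc_num_pixels(224, 2) == 112   # 1 + (223 // 2)
assert calc_num_pixels(7, 3) == 3       # 1 + (6 // 3)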
def rewrite_arcs(label_map, nfa):
    """Rewrite the label arcs in an NFA according to the input remapping."""
    states = [[(label_map[label], tostate) for (label, tostate) in arcs]
              for arcs in nfa[2]]
    return (nfa[0], nfa[1], states, nfa[3], nfa[4])
2bd9911a5c65ce7711848746614a3a6ceb37f8d2
13,141
import copy

def merge_params(base_params, partial_params=None):
    """Merge a partial change to the base configuration.

    Parameters
    ----------
    base_params
        The base parameters
    partial_params
        The partial parameters

    Returns
    -------
    final_params
        The final parameters
    """
    if partial_params is None:
        return base_params
    elif base_params is None:
        return partial_params
    else:
        if not isinstance(partial_params, dict):
            return partial_params
        assert isinstance(base_params, dict)
        final_params = copy.deepcopy(base_params)
        for key in partial_params:
            if key in base_params:
                final_params[key] = merge_params(base_params[key], partial_params[key])
            else:
                final_params[key] = partial_params[key]
        return final_params
4c52d492358106b7c5e6df0c1e099d45043a7935
13,142
def flatten(list_of_lists):
    """Single-level flattening of a list."""
    return sum((list(sublist) for sublist in list_of_lists), [])
d7a2b9b75a1bd920f50d78cc725f8e551d6bb2f5
13,144
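An illustrative call; any iterable of iterables works since each sublist is re-wrapped with list():

assert flatten([[1, 2], (3,), [4]]) == [1, 2, 3, 4]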
def maximum() -> int:
    """Returns 9."""
    return 9
dfd9a240bdaf985f89ca1b90c2b6e01f5426b2b0
13,146
def load_motion_masks(motion_root):
    """Load motion masks from disk.

    Args:
        motion_root (Path): Points to a directory which contains a
            subdirectory for each sequence, which in turn contains a .png
            for each frame in the sequence.

    Returns:
        motion_masks (dict): Map sequence to dict mapping frame name to
            motion mask path.
    """
    motion_mask_paths = {}
    for sequence_path in motion_root.iterdir():
        if not sequence_path.is_dir():
            continue
        sequence = sequence_path.stem
        motion_mask_paths[sequence] = {}
        for motion_path in sequence_path.glob('*.png'):
            # Pavel's ICCV 2017 method outputs an extra set of soft masks
            # that start with 'raw_' or 'input_'; ignore them.
            if (motion_path.stem.startswith('raw_')
                    or motion_path.stem.startswith('input_')):
                continue
            motion_mask_paths[sequence][motion_path.stem] = motion_path
    return motion_mask_paths
babe7c83b7f2c7a1cade2d4fcb3de8461d88ba00
13,156
def parse_genetic_models(models_info, case_id):
    """Parse the genetic models entry of a vcf.

    Args:
        models_info(str): The raw vcf information
        case_id(str)

    Returns:
        genetic_models(list)
    """
    genetic_models = []
    if models_info:
        for family_info in models_info.split(","):
            splitted_info = family_info.split(":")
            if splitted_info[0] == case_id:
                genetic_models = splitted_info[1].split("|")
    return genetic_models
9d8d94d9008e2f287a875aa9bd15330c82bbf8b5
13,157
def get_message_after(response, index):
    """Returns the next message of the search response after index, or None."""
    try:
        return response[index + 1].object
    except IndexError:
        return None
0208fd1f4397e77636df11b62054e86eb84fb682
13,164
import time
import json

def result_running_callback(request, context):  # pylint: disable=unused-argument
    """
    Callback function returning 'running' JSON.
    """
    job_result_running = {
        'status': 'running',
    }
    time.sleep(1)
    return json.dumps(job_result_running)
cb38b44b86cdcc96706dbf40a764d8d2059de449
13,165
def compute_intersection_length(A, B):
    """Compute the intersection length of two tuples.

    Args:
        A: a (speaker, start, end) tuple of type (string, float, float)
        B: a (speaker, start, end) tuple of type (string, float, float)

    Returns:
        a float number of the intersection between `A` and `B`
    """
    max_start = max(A[1], B[1])
    min_end = min(A[2], B[2])
    return max(0.0, min_end - max_start)
6cf038e1febbc1a7aa19eb104f5628ff2f935174
13,173
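A worked example with two overlapping speaker turns (values are illustrative):

A = ('spk1', 0.0, 5.0)
B = ('spk2', 3.0, 8.0)
assert compute_intersection_length(A, B) == 2.0                    # overlap is [3.0, 5.0]
assert compute_intersection_length(A, ('spk3', 6.0, 7.0)) == 0.0   # disjoint turns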
import socket

def bindsocket(port, host=''):
    """
    Creates a socket assigned to the IP address (host, port).

    Parameters
    ----------
    port : int
        port assigned to the socket.
    host : str, optional
        host assigned to the socket. The default is ''.

    Returns
    -------
    tcpsock : socket
        socket bound at (host, port).
    """
    tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    tcpsock.bind((host, port))
    return tcpsock
48be29952a3d35af0ec0886e3ca332f9280384f1
13,175
import six

def _UnitsByMagnitude(units, type_abbr):
    """Returns a list of the units in scales sorted by magnitude."""
    scale_items = sorted(six.iteritems(units),
                         key=lambda value: (value[1], value[0]))
    return [key + type_abbr for key, _ in scale_items if key]
972a17b51901a133444ddb77989c4ebc372fc35e
13,180
import functools
import traceback

def check_workchain_step(func):
    """
    Decorator for workchain steps that logs (and re-raises) errors
    occurring within that step.
    """

    @functools.wraps(func)
    def inner(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as e:
            self.report(
                '{} in {}: {}.\nTraceback:\n{}'.format(
                    type(e).__name__, func.__name__, e, traceback.format_exc())
            )
            raise e

    return inner
d6e5834b233075fbb1097a84f7e3010c3f292194
13,183
def get_setuptools_package_version(setuptools_version: str) -> str:
    """
    Generate the right setuptools argument for a pip install command.

    :param setuptools_version: Setuptools version obtained from tox.ini
    :return: A string formatted for the pip install command (e.g. setuptools==58.0.0)
    """
    setuptools_version = setuptools_version.lower().strip()
    # tox.ini: setuptools_version = setuptools==19.0
    if setuptools_version.startswith("setuptools"):
        return setuptools_version
    # tox.ini: setuptools_version = 19.0
    return f"setuptools=={setuptools_version}"
99554292253752545d1bbf82edfef92a925b1746
13,186
import torch

def log1pMSELoss(log_predicted_counts, true_counts):
    """A MSE loss on the log(x+1) of the inputs.

    This loss will accept tensors of predicted counts and a vector of true
    counts and return the MSE on the log of the labels. The squared error
    is calculated for each position in the tensor and then averaged over
    the last dimension.

    Note: The predicted counts are in log space but the true counts are in
    the original count space.

    Parameters
    ----------
    log_predicted_counts: torch.tensor, shape=(n, ...)
        A tensor of log predicted counts where the first axis is the number
        of examples. Important: these values are already in log space.

    true_counts: torch.tensor, shape=(n, ...)
        A tensor of the true counts where the first axis is the number of
        examples.

    Returns
    -------
    loss: torch.tensor
        The MSE loss on the log of the two inputs, averaged over the last
        dimension.
    """
    log_true = torch.log(true_counts + 1)
    return torch.mean(torch.square(log_true - log_predicted_counts), dim=-1)
ba7d244885303fa6755c4c25f1f991afae6d10ed
13,189
import logging

def check_connection(ssh_conn):
    """
    This will check if the connection is still available.

    Return (bool): True if it's still alive, False otherwise.
    """
    try:
        ssh_conn.exec_command("ls", timeout=5)
        return True
    except Exception as e:
        logging.error(
            "unable to execute a simple command on remote connection. "
            "Error: {}".format(e.__str__())
        )
        return False
f443d6788eb4a79db7011f0ca9dc4ae15cdf6145
13,190
import re

def parse_show_ip_bgp_route_map(raw_result):
    """
    Parse the 'show ip bgp route-map' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show ip bgp route-map command in a
        dictionary of the form:

     ::

        {
            '1': {
                'action': 'deny',
                'set_parameters': '',
                'as_path_exclude': '20 30 40',
                'match_parameters': '',
                'prefix_list': 'List2',
                'ipv6_prefix_list': 'List2-6'
            },
            '2': {
                'action': 'permit',
                'set_parameters': '',
                'match_parameters': '',
                'prefix_list': None,
            },
            '3': {
                'action': 'permit',
                'set_parameters': '',
                'match_parameters': '',
                'prefix_list': 'List1',
            }
        }
    """
    rmap_re = (
        r'Entry\s(?P<entry_number>\d+):\n'
        r'\s+action\s:\s(?P<action>\w+) \n'
        r'\s+Set\sparameters\s:(?P<set_parameters>[\S]*)\n'
        r'(\s+as_path_exclude\s:\s(?P<as_path_exclude>[\d ]+))?'
        r'\s+Match\sparameters\s:(?P<match_parameters>[\S]*)\n?'
        r'(\s+prefix_list\s:\s(?P<prefix_list>[\w-]+) \n?)?'
        r'(\s+ipv6_prefix_list\s:\s(?P<ipv6_prefix_list>[\w_\-]+) \n?)?'
    )

    result = {}
    for output in re.finditer(rmap_re, raw_result):
        entry = output.groupdict()
        result[entry['entry_number']] = entry
        if result[entry['entry_number']]['prefix_list'] is None:
            del result[entry['entry_number']]['prefix_list']
        if result[entry['entry_number']]['ipv6_prefix_list'] is None:
            del result[entry['entry_number']]['ipv6_prefix_list']
        if result[entry['entry_number']]['as_path_exclude'] is None:
            del result[entry['entry_number']]['as_path_exclude']
        del result[entry['entry_number']]['entry_number']

    assert result
    return result
48aecdc76da27fbbc991c30405d1efef82bcbfc3
13,191
def find_nearest_date(items, pivot):
    """This function will return the datetime in items which is the
    closest to the date pivot.

    See https://stackoverflow.com/questions/32237862/find-the-closest-date-to-a-given-date

    Parameters
    ----------
    items : list
        List containing datetimes
    pivot : datetime.datetime
        Datetime to be found

    Returns
    -------
    datetime.datetime
    """
    return min(items, key=lambda x: abs(x - pivot))
7b719357c92210729857957e5b4ed8aee4a1f466
13,196
def get_time_tied_leading_trailing(event, previous_score, last_goal_time):
    """
    Calculate time of previous score state according to current event
    time and time of last goal scored.
    """
    if previous_score['home'] == previous_score['road']:
        return 'tied', event['time'] - last_goal_time
    elif previous_score['home'] > previous_score['road']:
        return 'home_leading', event['time'] - last_goal_time
    elif previous_score['home'] < previous_score['road']:
        return 'road_leading', event['time'] - last_goal_time
d53172e0beb9ab3155f02f3feca3680d25e5bcd0
13,197