content: string, 39 to 14.9k characters
sha1: string, 40 characters
id: int64, 0 to 710k
def userToJson(user):
    """Returns a serializable User dict

    :param user: User to get info for
    :type user: User
    :returns: dict
    """
    obj = {
        "id": user.id,
        "username": user.username,
        "name": user.get_full_name(),
        "email": user.email,
    }
    return obj
da5e11bbc2e8cbdffb25e32e6a47e0ccabe8d33a
701,604
def find_block(csv, name):
    """For an Illumina SampleSheet.csv, return a tuple of the index of the
    line containing the header specified by name, and the index of the line
    just past the end of the data block. `range(*r)` will index all lines
    for the block, starting at the header line.
    """
    start = None
    end = None

    def blockend(f):
        maxfieldlen = max([0] + [len(x) for x in f])
        if len(f) > 0 and len(f[0]) > 0 and f[0][0] == "[" and f[0][-1] == "]":
            return True
        return 0 == maxfieldlen

    for i, fields in enumerate(csv):
        if len(fields) > 0 and fields[0] == name:
            start = i
        elif start is not None and blockend(fields):
            return start, i
    if start is not None:
        end = len(csv)
    return start, end
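# Illustrative usage sketch (hypothetical SampleSheet rows, already split
# into fields; not part of the original dataset row):
rows = [["[Header]"], ["IEMFileVersion", "4"], [""], ["[Data]"], ["Sample_ID"], ["S1"]]
assert find_block(rows, "[Data]") == (3, 6)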
7a1fc119e6e3e889d9a18884028cb2e6a67e0cd5
701,606
def cancel_and_stop_intent_handler(handler_input):
    """Single handler for Cancel and Stop Intent."""
    # type: (HandlerInput) -> Response
    speech_text = "Alla prossima!"
    return handler_input.response_builder.speak(speech_text).response
73a9171b5ea01fedb7fc470dd7d6bcd232a605a0
701,607
import json


def load_jsonl(filename):
    """Load json lines formatted file"""
    with open(filename, "r") as f:
        return list(map(json.loads, f))
7acb3513cf885139e62af56d17c19aa87e1c85ca
701,608
from typing import List


def _read_get_graph_source_citation_section(jcamp_dict: dict) -> List[str]:
    """
    Extract and translate from the JCAMP-DX dictionary the SciData JSON-LD
    citations in the 'sources' section from the '@graph' section.

    :param jcamp_dict: JCAMP-DX dictionary to extract citations from
    :return: List for citation from SciData JSON-LD
    """
    citation = []
    if "$ref author" in jcamp_dict:
        citation.append(f'{jcamp_dict["$ref author"]} :')
    if "$ref title" in jcamp_dict:
        citation.append(f'{jcamp_dict["$ref title"]}.')
    if "$ref journal" in jcamp_dict:
        citation.append(f'{jcamp_dict["$ref journal"]}')
    if "$ref volume" in jcamp_dict:
        citation.append(f'{jcamp_dict["$ref volume"]}')
    if "$ref date" in jcamp_dict:
        citation.append(f'({jcamp_dict["$ref date"]})')
    if "$ref page" in jcamp_dict:
        citation.append(f'{jcamp_dict["$ref page"]}')
    return citation
dafe4fd793dd0e47b690d6c1fd745ca89265de39
701,610
def deltaify_traces(traces, final_byte_duration=9999):
    """Convert absolute start times in traces to durations.

    Traces returned by `read_traces_csv` pair bytes with start times. This
    function computes how long each byte remains on the bus and replaces the
    start time with this value in its output. Note that the final duration
    can't be calculated and will be given the duration `final_byte_duration`.

    Args:
        traces: Traces to "deltaify" as described.
        final_byte_duration: Duration to assign to the final byte.

    Returns:
        "Deltaified" traces as described.
    """
    deltaified_traces = []
    for i in range(len(traces) - 1):
        dt = traces[i + 1][0] - traces[i][0]
        deltaified_traces.append((dt, traces[i][1]))
    deltaified_traces.append((final_byte_duration, traces[-1][1]))
    return deltaified_traces
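# Illustrative usage sketch (hypothetical (start_time, byte) pairs, not from
# the original dataset row):
assert deltaify_traces([(0, 0x10), (5, 0x20), (9, 0x30)]) == [(5, 0x10), (4, 0x20), (9999, 0x30)]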
8185a9825d4706bdf8a579fcefec5e27ca8c3baa
701,612
def rzpad(value, total_length):
    """
    Right zero pad `value` to at least `total_length` bytes.
    """
    return value + b"\x00" * max(0, total_length - len(value))
76a0884e9f8a65e0ff3efac56223dfa2fbed31b4
701,613
def compose(*funcs):
    """Returns a function that is the composition of multiple functions."""
    def wrapper(x):
        for func in reversed(funcs):
            x = func(x)
        return x
    return wrapper
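# Illustrative usage sketch: the rightmost function is applied first.
add_one = lambda x: x + 1
double = lambda x: x * 2
assert compose(double, add_one)(3) == 8  # double(add_one(3))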
d93d59f2f1979fa35638357fcac5130710e0fda3
701,616
from functools import reduce


def compose(*functions):
    """
    Compose all the function arguments together

    :param functions: Functions to compose
    :return: Single composed function
    """
    # pylint: disable=undefined-variable
    return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
d69ab8953d8e846fffd50aa9c0925935e38e9e38
701,617
def mul_fft(f_fft, g_fft):
    """Multiplication of two polynomials (FFT representation, pointwise)."""
    deg = len(f_fft)
    return [f_fft[i] * g_fft[i] for i in range(deg)]
67db62dc812827b6aa7c7406a068ae9e47f97a65
701,618
import re


def clean_text(text):
    """
    Cleans abstract text from scopus documents.

    Args:
        text (str): Unformatted abstract text.

    Returns:
        (str) Abstract text with formatting issues removed.
    """
    if text is None:
        return None
    try:
        cleaned_text = re.sub(r"© ([0-9])\w* The Author(s)*\.( )*", "", text)
        cleaned_text = re.sub(r"Published by Elsevier Ltd\.", "", cleaned_text)
        cleaned_text = re.sub("\n ", "", cleaned_text)
        cleaned_text = re.sub("\n ", "", cleaned_text)
        cleaned_text = " ".join("".join(cleaned_text.split("\n ")).split())
        cleaned_text = cleaned_text.replace("Abstract ", '', 1)
        return cleaned_text
    except Exception:
        return None
7ffbf3a6ebe0c0caac203cea109e939b5c861724
701,620
from datetime import datetime


def undersc_str2dt(undersc):
    """Converts the format with underscores to a datetime instance

    Args:
        undersc(str): time in underscores-format

    Returns:
        `datetime`: datetime instance
    """
    (mydate, mytime) = undersc.split("_")
    ymd = mydate.split("-")
    ymd = [int(i) for i in ymd]
    hms = mytime.split("-")
    hms = [int(i) for i in hms]
    if len(hms) == 3:
        return datetime(ymd[0], ymd[1], ymd[2], hms[0], hms[1], hms[2])
    else:
        return datetime(ymd[0], ymd[1], ymd[2], hms[0], hms[1], hms[2], hms[3])
36988c6af6a20590d781f7ec1ea1ee2e8713941e
701,622
import yaml


def load_yaml_config(filepath):
    """Load Krake base configuration settings from YAML file

    Args:
        filepath (os.PathLike): Path to YAML configuration file

    Raises:
        FileNotFoundError: If no configuration file can be found

    Returns:
        dict: Krake YAML file configuration
    """
    with open(filepath, "r") as fd:
        return yaml.safe_load(fd)
a5970ab968a9da7c89733077834a88b05ca2d6a0
701,625
def max_cum_build_rule(mod, g, p):
    """
    **Constraint Name**: GenNewLin_Max_Cum_Build_Constraint
    **Enforced Over**: GEN_NEW_LIN_VNTS_W_MAX_CONSTRAINT

    Can't build more than certain amount of capacity by period p.
    """
    return mod.GenNewLin_Capacity_MW[g, p] \
        <= mod.gen_new_lin_max_cumulative_new_build_mw[g, p]
9122a05867ccccbe36378c36d34e98462c62f85d
701,626
from typing import List


def available_years() -> List[int]:
    """List available years with datasets."""
    return [2012, 2013, 2014, 2015, 2016, 2017, 2018]
898fd02e0cde1ac71251cd70069c52c481a66acf
701,629
import aiohttp


async def http_call(url, method, data=None, headers=None):
    """
    Performs an http request

    Args:
        url (str): The URL to send the request to
        method (str): The HTTP method to use
        data (dict): The data to send with the request
        headers (dict): The headers to send with the request
    """
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.request(method, url, data=data) as resp:
            return await resp.json()
10d72f32312b7fe9071a8776f784a1ce7c4954d6
701,630
def coordinates_contain_point(coordinates, point):
    """
    This function uses the `Crossing number method`
    (http://geomalgorithms.com/a03-_inclusion.html) to check whether a
    polygon contains a point or not.

    For each segment with the coordinates `pt1` and `pt2`, we check to see
    if the y coordinate of `point` is between y of `pt1` and `pt2`. If so,
    then we check to see if `point` is to the left of the edge; i.e. a line
    drawn from `point` to the right will intersect the edge. If the line
    intersects the polygon an odd number of times, it is inside.
    """
    contains = False
    j = -1
    for i, pt1 in enumerate(coordinates):
        pt2 = coordinates[j]
        check_y = lambda: (pt1[1] <= point[1] < pt2[1]) or (pt2[1] <= point[1] < pt1[1])
        # The following checks if `point` is to the left of the current segment
        check_x = lambda: point[0] < (point[1] - pt1[1]) * (pt2[0] - pt1[0]) / (pt2[1] - pt1[1]) + pt1[0]
        if check_y() and check_x():
            contains = not contains
        j = i
    return contains
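# Illustrative usage sketch (hypothetical 2x2 square, not from the original
# dataset row):
square = [(0, 0), (2, 0), (2, 2), (0, 2)]
assert coordinates_contain_point(square, (1, 1)) is True
assert coordinates_contain_point(square, (3, 1)) is False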
97176955324be459595503d204e0e13ce9983562
701,631
def parse_response(response):
    """
    :param response: output of boto3 rds client describe_db_instances
    :return: an array, each element is a 3-element array with
        DBInstanceIdentifier, Engine, and Endpoint Address

    Example:
    [
        ['devdb-ldn-test1', 'mysql', 'devdb-ldn-test.cjjimtutptto.eu-west-2.rds.amazonaws.com'],
        ['devdb-ldn-test2', 'postgres', 'devdb-ldn-test.cjjimtutptto.eu-west-2.rds.amazonaws.com'],
        ...
    ]
    """
    res = []
    # json output parse
    for db in response['DBInstances']:
        res.append([db['DBInstanceIdentifier'], db['Engine'], db['Endpoint']['Address']])
    return res
edaa4abbb695adb06c43dc93d70178bc10a82445
701,633
def format_csv(factor_name, entry):
    """Format a data entry as a csv line."""
    return "%s, %s, %s" % (entry[factor_name], entry['quarter'], entry['count'])
8d3f4f794f58f6aa0c6d259fcda124340df8d4da
701,634
def _remove_quotes(values):
    """Remove any quotes from quoted values."""
    removed = []
    for value in values:
        if value.startswith('"') and value.endswith('"'):
            value = value[1:-1]
        removed.append(value)
    return removed
a75bd25198a56a28748af059647e62c26df74232
701,635
import json


def get_dimensions(cube_id):
    """
    For this test data we will use a predefined dimension object matching
    cube-420 in the acceptance environment, but this could be read from the
    TAP service.
    """
    dims = json.loads(
        '{"axes": [{"name": "RA", "numPixels": "4096", "pixelSize": "5.5555555555560e-04", "pixelUnit": "deg"},' +
        '{"name": "DEC", "numPixels": "4096", "pixelSize": "5.5555555555560e-04", "pixelUnit": "deg"},' +
        '{"name": "STOKES", "numPixels": "1", "pixelSize": "1.0000000000000e+00", "pixelUnit": " ",' +
        '"min": "5.0000000000000e-01", "max": "1.5000000000000e+00", "centre": "1.0000000000000e+00"},' +
        '{"name": "FREQ", "numPixels": "16416", "pixelSize": "1.0000000000000e+00", "pixelUnit": "Hz",' +
        '"min": "1.2699999995000e+09", "max": "1.2700164155000e+09", "centre": "1.2700082075000e+09"}],' +
        '"corners": [{"RA": "1.8942941872444e+02", "DEC": "5.3846168509499e+01"},' +
        '{"RA": "1.8557152279432e+02", "DEC": "5.3846183833748e+01"},' +
        '{"RA": "1.8545899454910e+02", "DEC": "5.6120973603008e+01"},' +
        '{"RA": "1.8954200183991e+02", "DEC": "5.6120957384947e+01"}],' +
        '"centre": {"RA": "1.8750048428742e+02", "DEC": "5.4999722221261e+01"}}')
    return dims
aab639236510785649fd02ff80c795d7941007fd
701,638
from pathlib import Path

import yaml


def load_yaml_config(file_path: str | Path) -> dict:
    """
    Parameters
    ----------
    file_path : str or Path
        Yaml config file name. The file is assumed to be in the repo's
        config directory.

    Returns
    -------
    config : dict
        Configuration parameters stored in a dictionary.
    """
    file_path = Path(file_path)
    with open(file_path) as file:
        config = yaml.load(file, Loader=yaml.CLoader)
    return config
88a137807d6d1caabd3b8f7f7a03a3be3f04bdfe
701,639
import re


def has_sh_placeholders(message):
    """Returns true if the message has placeholders."""
    return re.search(r'\$\{(\w+)\}', message) is not None
9bd5b4a22c89cfa1d45ea28bf7121cd4171828ee
701,642
import re


def _find_char(input_char):
    """find english char in input string"""
    result = re.findall(r'[a-zA-Z=_/0-9.]+', str(input_char))
    return result
b89bc97e0b73c71ec6a1a875414b73e16c9d6036
701,643
import functools


def verifyrun(func):
    """Prints whether the decorated function ran."""
    @functools.wraps(func)
    def wrapper_verifyrun(*args, **kwargs):
        print(f'Ran {func.__name__!r} from {func.__module__}.')
        value = func(*args, **kwargs)
        return value
    return wrapper_verifyrun
5f2d1289573a9069f283e508b1ce133eccfe3529
701,644
def create_empty_gid_matrix(width, height):
    """Creates a matrix of the given size initialized with all zeroes."""
    return [[0] * width for row_index in range(height)]
8f1a0adf9e45bb6fc267a5cec3079657dbace51d
701,647
def value_to_none_low_medium_high(confidence_value):
    """
    This method will transform an integer value into the None / Low / Med /
    High scale string representation. The scale for this confidence
    representation is the following:

    .. list-table:: STIX Confidence to None, Low, Med, High
        :header-rows: 1

        * - Range of Values
          - None/ Low/ Med/ High
        * - 0
          - None
        * - 1-29
          - Low
        * - 30-69
          - Med
        * - 70-100
          - High

    Args:
        confidence_value (int): An integer value between 0 and 100.

    Returns:
        str: A string corresponding to the None / Low / Med / High scale.

    Raises:
        ValueError: If `confidence_value` is out of bounds.
    """
    if confidence_value == 0:
        return 'None'
    elif 29 >= confidence_value >= 1:
        return 'Low'
    elif 69 >= confidence_value >= 30:
        return 'Med'
    elif 100 >= confidence_value >= 70:
        return 'High'
    else:
        raise ValueError("Range of values out of bounds: %s" % confidence_value)
ac3b39ae12591408fca2f8b4e844b535e7b1aaa3
701,648
from typing import Iterable


def iterify(x):
    """Return an iterable form of a given value."""
    if isinstance(x, Iterable):
        return x
    else:
        return (x,)
85373e5ac0e03caf2115096088ce92ca27b65b4a
701,650
def load_ignore(fname):
    """Loads patterns signalling lines to ignore

    Args:
        fname: File name containing patterns

    Returns:
        A list of patterns
    """
    values = []
    with open(fname, 'r') as f:
        lines = f.readlines()
        for line in lines:
            values.append(line.rstrip('\n'))
    return values
6a2b4aad3bb4f2747e91a0b1a9a58b70187d1d1e
701,653
def heapsort(values):
    """Heapsorts a list of values in nondecreasing order."""
    length = len(values)

    def pick_child(parent):
        left = parent * 2 + 1
        if left >= length:
            return None
        right = left + 1
        if right == length or values[left] >= values[right]:
            return left
        return right

    def sift_down(parent):
        while True:
            child = pick_child(parent)
            if child is None or values[parent] >= values[child]:
                break
            values[parent], values[child] = values[child], values[parent]
            parent = child

    # Convert the list into a maxheap.
    for parent in range(length // 2, -1, -1):
        sift_down(parent)

    # Extract each element from the maxheap, placing them from right to left.
    while length > 1:
        length -= 1
        values[0], values[length] = values[length], values[0]
        sift_down(0)
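# Illustrative usage sketch: sorts the list in place.
data = [5, 1, 4, 2, 3]
heapsort(data)
assert data == [1, 2, 3, 4, 5]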
74e1afaf33e474611e842e97032a60a28fe5664d
701,658
def get_array_info(subs, dictofsubs):
    """
    Returns information needed to create and access members of the numpy
    array based upon the string names given to them in the model file.

    Parameters
    ----------
    subs : Array of strings of subscripts
        These should be all of the subscript names that are needed to
        create the array

    dictofsubs : dictionary

    Returns
    -------
    A dictionary of the dimensions associating their names with their
    numpy indices:
        directory = {'dimension name 1': 0, 'dimension name 2': 1}

    A list of the length of each dimension. Equivalently, the shape of
    the array:
        shape = [5, 4]
    """
    # subscript references here are lists of array 'coordinate' names
    if isinstance(subs, list):
        element = subs[0]
    else:
        element = subs
    # we collect the references used in each dimension as a set,
    # so we can compare contents
    position = []
    directory = {}
    dirpos = 0
    elements = element.replace('!', '').replace(' ', '').split(',')
    for element in elements:
        if element in dictofsubs.keys():
            if isinstance(dictofsubs[element], list):
                dir, pos = get_array_info(dictofsubs[element][-1], dictofsubs)
                position.append(pos[0])
                directory[dictofsubs[element][-1]] = dirpos
                dirpos += 1
            else:
                position.append(len(dictofsubs[element]))
                directory[element] = dirpos
                dirpos += 1
        else:
            for famname, value in dictofsubs.items():
                try:
                    value[element]
                except Exception:
                    pass
                else:
                    position.append(len(value))
                    directory[famname] = dirpos
                    dirpos += 1
    return directory, position
ad97545278eddd12e8098dc623024beb797baebc
701,659
import click


def validate_nonempty(ctx, param, value):
    """Validate parameter is not an empty string."""
    if not value.strip():
        raise click.BadParameter('value cannot be empty')
    return value
a8dd9a81c7fc7b0d0064fe34e849d35c0ce52a04
701,660
import re


def get_genres_from_soup(soup):
    """Get the genres of a book.

    Parameters
    ----------
    soup : BeautifulSoup
        BeautifulSoup object created from a book page.

    Returns
    -------
    list
        Book genres.
    """
    genres_elements = soup.find_all('a', {'href': re.compile('/genres/')}, class_='bookPageGenreLink')
    return list(map(lambda element: element.get_text(), genres_elements))
16db0fc8cb58cdcf19aa89ea8fef27078d33a390
701,661
import torch


def accuracy(predictions, labels):
    """
    Evaluate accuracy from model predictions against ground truth labels.
    """
    ind = torch.argmax(predictions, 1)
    # provide labels only for samples where a prediction is available (during
    # training, not every sample's prediction is returned for efficiency
    # reasons)
    labels = labels[-predictions.size()[0]:]
    accuracy = torch.sum(torch.eq(ind, labels)).item() / \
        labels.size()[0] * 100.0
    return accuracy
484ba64b2239363daddd206e747f6c1456e236c9
701,663
def inet_ntoa(i):
    """Convert an int to dotted quad."""
    return '.'.join(map(str, [(i >> (3 - j) * 8) & 0xff for j in range(4)]))
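# Illustrative usage sketch:
assert inet_ntoa(0xC0A80001) == '192.168.0.1'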
b83a6b08118bcd7858cb588f53b71daaf31d358e
701,664
def cleanup_string(string):
    """
    >>> cleanup_string(u', Road - ')
    u'road'
    >>> cleanup_string(u',Lighting - ')
    u'lighting'
    >>> cleanup_string(u', Length - ')
    u'length'
    >>> cleanup_string(None)
    ''
    >>> cleanup_string(' LIT ..')
    'lit'
    >>> cleanup_string('poor.')
    'poor'
    """
    if string is None:
        return ''
    string = string.replace(',', '')
    string = string.replace('.', '')
    string = string.replace(' ', '')
    string = string.replace('-', '')
    string = string.lower()
    return string
5f9a369a52b798ff8c26bea56fbfe585b3612db0
701,665
def is_field_allowed(name, field_filter=None):
    """
    Check if a field name is eligible for being split.
    For example, '__str__' is not, but 'related__field' is.
    """
    if field_filter in ["year", "month", "week", "day", "hour", "minute", "second"]:
        return False
    return isinstance(name, str) and not name.startswith('__') and not name.endswith('__') and '__' in name
8be38b79bab3aeb49219155db0159cc143c38111
701,666
def lucas(n):
    """compute the nth Lucas number"""
    a, b = 2, 1  # the only change from fib is this pair of seed values
    if n == 0:
        return a
    for _ in range(n - 1):
        a, b = b, a + b
    return b
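# Illustrative usage sketch: the Lucas sequence starts 2, 1, 3, 4, 7, 11, ...
assert [lucas(n) for n in range(6)] == [2, 1, 3, 4, 7, 11]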
9d9404edf59690cafc49ba70d7dc776376d1f020
701,674
from typing import Tuple
from typing import Union


def _verify_data_shape(data, shape, path=None) -> Tuple[bool, Union[str, None]]:
    """
    _verify_data_shape(
        {'data': []},
        {'data': list}
    ) == (True, None)

    _verify_data_shape(
        {'data': ''},
        {'data': list}
    ) == (False, '.data')

    _verify_data_shape(
        {'data': '', 'empty': 10},
        {'data': list}
    ) == (False, '.data')

    This function is what handles the data shape verification. You can use
    this function, or the decorator on uploaded data to verify its use
    before usage. You can basically write out what the shape should look
    like. This function supports nested dictionaries.

    Here, we will return a tuple of a boolean indicating success or failure,
    and an error string. If there was an error validating a given field, the
    error string will be a path to the unvalidated field. An example would be:

    _verify_data_shape(
        {'data': ''},
        {'data': list}
    ) -> (False, '.data')

    :return: success as bool, error path
    """
    if path is None:
        path = ""

    if shape is dict or shape is list:  # Free, just need a match
        if isinstance(data, shape):
            return True, None
        return False, path

    # Verify if data is constant
    for _t in [int, str, float]:
        if isinstance(data, _t):
            return (True, None) if shape == _t else (False, path)

    if isinstance(data, dict):
        # Verify dict keys
        for s_key, s_value in shape.items():

            # Verify key is included
            if s_key not in data:
                return False, path + "." + s_key

            # Supported basic types
            for _t in [int, str, float]:
                # Check free strings are strings and lists
                if s_value is _t:
                    if not isinstance(data[s_key], s_value):
                        return False, path + "." + s_key
                # Check explicit strings and lists
                elif isinstance(s_value, _t):
                    if not isinstance(data[s_key], type(s_value)):
                        return False, path + "." + s_key

            # Recurse on other dicts
            if isinstance(s_value, dict):
                # Free dict ( no need to verify more )
                if s_value == dict:
                    return True, None
                # Explicit Dict ( need to recurse )
                elif isinstance(s_value, dict):
                    # Recurse on dict
                    r, e = _verify_data_shape(data[s_key], s_value, path + "." + s_key)
                    if r is False:
                        return r, e
                # Type s_value was not dict ( type mismatch )
                else:
                    return False, path + "." + s_key

            # Recurse on lists
            if isinstance(s_value, list):
                # Free list ( no need to verify more )
                if s_value == list:
                    return True, None
                # Explicit list ( need to recurse )
                elif isinstance(s_value, list):
                    # If we have a type specified in the list,
                    # we should iterate, then recurse on the
                    # elements of the data. Otherwise there's
                    # nothing to do.
                    if len(s_value) == 1:
                        s_value = s_value[0]
                        for item in data[s_key]:
                            # Recurse on list item
                            r, e = _verify_data_shape(
                                item, s_value, path + ".[" + s_key + "]"
                            )
                            if r is False:
                                return r, e
                # Type s_value was not dict ( type mismatch )
                else:
                    return False, path + "." + s_key

            if s_value is list or s_value is dict:
                if isinstance(data[s_key], s_value):
                    return True, None
                return (
                    False,
                    path + ".[" + s_key + "]" if s_value is list else path + "." + s_key + "",
                )
    return True, None
3e76362938146972d96e34a22373b43dca23381b
701,680
def create_dict_playlists_playlistids(p_list, pid_list):
    """
    Create a dictionary of playlists and playlist ids
    """
    playlists_and_playlist_ids = {}
    for i in range(len(p_list)):
        playlists_and_playlist_ids[p_list[i]] = pid_list[i]
    return playlists_and_playlist_ids
173850ed85b3dc774ddea14674e22e701991c807
701,685
def required_input(message, method=input):
    """
    Collect input from user and repeat until they answer.
    """
    result = method(message)
    while len(result) < 1:
        result = method(message)
    return result
c9a21d6e63ab6bdde081db471cf0d2420f9047ea
701,686
def is_cat(filename: str) -> bool:
    """
    Returns true if filename is an image of a cat.

    In the dataset we are using this is indicated by the first letter of the
    filename; cats are labeled with uppercase letters, dogs with lowercase
    ones.
    """
    result = filename[0].isupper()
    # print(f"File: {filename}, initial: '{filename[0]}', result: {result}")
    return result
ad56c7c3ae28951fc31bcf70fece29bf934e4cec
701,690
def analyze_text(filename):
    """
    Calculate the number of lines and characters in a file

    :param filename: the name of the file to analyze
    :raises: IOError: if ``filename`` does not exist or can't be read
    :return: a tuple where the first element is the number of lines in the
        file and the second element is the number of characters
    """
    lines = 0
    chars = 0
    with open(filename, 'r') as f:
        for line in f:
            lines += 1
            chars += len(line)
    return lines, chars
1670d3bff0402482e9e33be401e8914eea117f6c
701,691
def auto_adapt_batch(train_size, val_size, batch_count_multiple=1, max_size=256):
    """
    Returns a suitable batch size according to train and val dataset size.

    Say max_size = 128, and val_size is smaller than train_size:
    if val_size < 128, the batch_size to be returned is val_size;
    if 128 < val_size <= 256, the batch size is 1/2 of val_size, and at most
    1 validation sample cannot be used;
    if 256 < val_size <= 384, the batch size is 1/3 of val_size, and at most
    2 validation samples cannot be used;
    ...

    :param train_size: the number of training samples in the training set
    :param val_size: the number of validation samples in the validation set
    :param max_size: the maximum batch_size that is allowed to be returned
    :param batch_count_multiple: force the batch count to be a multiple of
        this number, default = 1
    :return: a suitable batch_size for the input
    """
    print('Auto adapting batch size...')
    numerator = min(train_size, val_size)
    denominator = 0
    while True:
        denominator += batch_count_multiple
        batch_size = numerator // denominator
        if batch_size <= max_size:
            return batch_size
d0a6fa9e6bde3d563bd7fad5e2bbcf7068f9ff65
701,692
def two_fer(name="you"):
    """Returns a string in the two-fer format."""
    return "One for " + name + ", one for me."
a7f10a45b214ea1ea79a6956148b3c6677f27e21
701,695
import math


def create_pagination(page, results_per_page, total_results):
    """Create pagination to filter results to manageable amounts."""
    pagination = {}
    # For UI
    pagination['page'] = page
    pagination['total_results'] = total_results
    pagination['total_pages'] = math.ceil(total_results / results_per_page)
    # For database
    pagination['limit'] = results_per_page
    pagination['offset'] = (page - 1) * results_per_page
    return pagination
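# Illustrative usage sketch (hypothetical values):
assert create_pagination(page=2, results_per_page=10, total_results=45) == {
    'page': 2, 'total_results': 45, 'total_pages': 5, 'limit': 10, 'offset': 10
}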
d58bf2adee3e090e88aa82a5a91560e8fb1631e0
701,697
import math


def a_raininess_oracle(timestep):
    """Mimics an external data source for raininess

    Arguments
    =========
    timestep : int
        Requires a year between 2010 and 2050

    Returns
    =======
    raininess : int
    """
    msg = "timestep {} is outside of the range [2010, 2050]".format(timestep)
    assert timestep in [x for x in range(2010, 2051, 1)], msg
    raininess = math.floor((timestep - 2000) / 10)
    return raininess
e1b4f32f62fe19f95ac876a0acf03fe533858366
701,701
import torch


def matrix_to_homogeneous(batch: torch.Tensor) -> torch.Tensor:
    """
    Transforms a given transformation matrix to a homogeneous
    transformation matrix.

    Args:
        batch: the batch of matrices to convert [N, dim, dim]

    Returns:
        torch.Tensor: the converted batch of matrices
    """
    if batch.size(-1) == batch.size(-2):
        missing = batch.new_zeros(size=(*batch.shape[:-1], 1))
        batch = torch.cat([batch, missing], dim=-1)

    missing = torch.zeros(
        (batch.size(0), *[1 for tmp in batch.shape[1:-1]], batch.size(-1)),
        device=batch.device,
        dtype=batch.dtype
    )
    missing[..., -1] = 1

    return torch.cat([batch, missing], dim=-2)
ab3bf1acf1e8fab2d4a4fcdcfd062821bc891b9d
701,702
def find_ch_interest_dict(show_channel_dict: dict, usr_pref_dict: dict):
    """Pass in show_channel_dict {show: channels} and usr_pref_dict
    {show: rating}. Returns dictionary {channel: total rating}"""
    ch_interest_dict = {}
    for show in usr_pref_dict:
        if show in show_channel_dict:
            if show_channel_dict[show] in ch_interest_dict:
                ch_interest_dict[show_channel_dict[show]] += usr_pref_dict[show]
            else:
                ch_interest_dict[show_channel_dict[show]] = usr_pref_dict[show]
    return ch_interest_dict
9928b03c0ceea3ea38c3808a5fd4053553f4e5c4
701,706
def is_valid_int(s: str) -> bool:
    """
    Return true if s can be converted into a valid integer, and false otherwise.

    :param s: value to check if can be converted into a valid integer
    :return: true if s can be converted into a valid integer, false otherwise

    >>> is_valid_int("hello")
    False
    >>> is_valid_int("506")
    True
    """
    try:
        int(s)
    except ValueError:
        return False
    else:
        return True
9d2c849839f6fdcf729a7c1503a3eac3daa5f000
701,707
def clean_software_config(config):
    """Take an individual `config` data structure (as specified by
    config_validation.SoftwareSchema) and return a 'clean' version suitable
    for internal use.

    This allows for a simplified schema to be available to users whilst
    preserving consistent internal data structures by e.g. replacing null
    values with empty lists etc.

    args:
        config (dict): A validated SoftwareSchema

    returns:
        (dict): A cleaned version of `config`
    """
    config = config.copy()
    if not config["input_files"]["required"]:
        config["input_files"]["required"] = []
    if not config["input_files"]["optional"]:
        config["input_files"]["optional"] = []
    return config
c89ad5a4b61e4214d4b79ce6782e4fe5a86311bf
701,708
def miller_rabin_d(n: int) -> bool:
    """Check if n is a prime number via deterministic Miller-Rabin test.

    Miller showed that it is possible to make the algorithm deterministic by
    only checking all bases ≤ O(lg(n)^2). Bach later gave a concrete bound:
    it is only necessary to test all bases a ≤ 2 lg(n)^2.

    It turns out, for testing a 32 bit integer it is only necessary to check
    the first 4 prime bases: 2, 3, 5 and 7. The smallest composite number
    that fails this test is 3,215,031,751 = 151⋅751⋅28351.

    And for testing a 64 bit integer it is enough to check the first 12
    prime bases: 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, and 37."""
    if n < 2:
        return False
    s, d = 0, n - 1
    while d & 1 == 0:
        s += 1
        d >>= 1
    for a in 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37:
        if n == a:
            return True
        v = pow(a, d, n)
        if v != 1 and v != n - 1:
            for j in range(1, s):
                v = pow(v, 2, n)
                if v == n - 1:
                    break
            if v != n - 1:
                return False
    return True
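# Illustrative usage sketch: the 12-base test is exact for 64-bit integers,
# so it catches the 4-base pseudoprime mentioned in the docstring.
assert [n for n in range(2, 20) if miller_rabin_d(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
assert miller_rabin_d(3215031751) is False  # composite: 151 * 751 * 28351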
ac668cb55e417a6784ba52d1c5da1dc26d3693ad
701,709
from typing import List
from typing import Callable


def evaluate_predictions(preds: List[List[float]],
                         targets: List[List[float]],
                         metric_func: Callable) -> List[float]:
    """
    Evaluates predictions using a metric function and filtering out invalid targets.

    :param preds: A list of lists of shape (data_size, num_tasks) with model predictions.
    :param targets: A list of lists of shape (data_size, num_tasks) with targets.
    :param metric_func: Metric function which takes in a list of targets and a list of predictions.
    :return: A list with the score for each task based on `metric_func`.
    """
    data_size, num_tasks = len(preds), len(preds[0])

    # Filter out empty targets
    # valid_preds and valid_targets have shape (num_tasks, data_size)
    valid_preds = [[] for _ in range(num_tasks)]
    valid_targets = [[] for _ in range(num_tasks)]
    for i in range(num_tasks):
        for j in range(data_size):
            if targets[j][i] is not None:  # Skip those without targets
                valid_preds[i].append(preds[j][i])
                valid_targets[i].append(targets[j][i])

    # Compute metric
    results = []
    for i in range(num_tasks):
        # Skip if all targets are identical
        if all(target == 0 for target in valid_targets[i]) or \
                all(target == 1 for target in valid_targets[i]):
            continue
        results.append(metric_func(valid_targets[i], valid_preds[i]))

    return results
7b7f550a0983cbb8af90f13b214a195cdb8cbfe3
701,711
from typing import Union


def flatten(x: Union[list, tuple]) -> list:
    """
    Flattening function for nested lists and tuples

    Args:
        x: List or tuple

    Returns:
        object (list): Flat list
    """
    if not isinstance(x, (list, tuple)):
        raise TypeError("input must be a list or tuple")
    out: list = []
    for item in x:
        if isinstance(item, (list, tuple)):
            out.extend(flatten(item))
        else:
            out.append(item)
    return out
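# Illustrative usage sketch:
assert flatten([1, (2, 3), [[4], 5]]) == [1, 2, 3, 4, 5]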
36c35dfbef4214ccf0f6d355f36865996fd6d88e
701,713
from typing import Mapping


def update_nested(original_dict, update_dict):
    """Update a nested dictionary with another nested dictionary.

    Has equivalent behaviour to :obj:`dict.update(self, update_dict)`.

    Args:
        original_dict (dict): The original dictionary to update.
        update_dict (dict): The dictionary from which to extract updates.

    Returns:
        original_dict (dict): The original dictionary after updates.
    """
    for k, v in update_dict.items():
        nested_dict = v
        if isinstance(v, Mapping):  # Mapping ~= any dict-like object
            nested_dict = original_dict.get(k, {})
            if nested_dict is not None:
                nested_dict = update_nested(nested_dict, v)
        original_dict[k] = nested_dict
    return original_dict
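# Illustrative usage sketch: nested keys are merged rather than overwritten
# wholesale.
assert update_nested({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}}) == {'a': {'x': 1, 'y': 3}, 'b': 2}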
a1a372ac4d26066c3fe32cd4ee1a49fff6972cd9
701,714
def asInteger(epsg):
    """convert EPSG code to integer"""
    return int(epsg)
18a14944f5f29ec09585757f0edc912b896a12ba
701,720
def mag(initial, current):
    """
    Calculates the magnification of a specified value

    **Parameters**

        initial: *float*
            initial value (magnification of 1)
        current: *float*
            current value

    **Returns**

        magnification: *float*
            the magnification of the current value
    """
    return float(initial) / float(current)
abc8d3603f11e62f57a62c47dc372b4b9ea19b0c
701,722
import re


def capitalize(word):
    """Only capitalize the first letter of a word, even when written in
    CamelCase.

    Args:
        word (str): Input string.

    Returns:
        str: Input string with first letter capitalized.
    """
    return re.sub('([a-zA-Z])', lambda x: x.groups()[0].upper(), word, 1)
4f254696e00c24a85a20ea74fc66a32fceb541c6
701,723
import re


def cassandra_ddl_repr(data):
    """Generate a string representation of a map suitable for use in
    Cassandra DDL."""
    if isinstance(data, str):
        return "'" + re.sub(r"(?<!\\)'", "\\'", data) + "'"
    elif isinstance(data, dict):
        pairs = []
        for k, v in data.items():
            if not isinstance(k, str):
                raise ValueError('DDL map keys must be strings')
            pairs.append(cassandra_ddl_repr(k) + ': ' + cassandra_ddl_repr(v))
        return '{' + ', '.join(pairs) + '}'
    elif isinstance(data, bool):
        # checked before int, since bool is a subclass of int
        if data:
            return 'true'
        else:
            return 'false'
    elif isinstance(data, int):
        return str(data)
    else:
        raise ValueError('Cannot convert data to a DDL representation')
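# Illustrative usage sketch (hypothetical replication map):
assert cassandra_ddl_repr({'class': 'SimpleStrategy', 'replication_factor': 3}) == \
    "{'class': 'SimpleStrategy', 'replication_factor': 3}"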
c81ad24c0185ef10646644b82399c202c2261a1a
701,724
import csv


def read_csv_file(file_name):
    """
    Given a CSV file, read the data into a nested list

    Input: String corresponding to comma-separated CSV file
    Output: Nested list consisting of the fields in the CSV file
    """
    with open(file_name, newline='') as csv_file:
        # don't need to explicitly close the file now
        csv_table = []
        csv_reader = csv.reader(csv_file, delimiter=',')
        for row in csv_reader:
            csv_table.append(row)
    return csv_table
65f9d2edb9ecf020d773a8d8516f31247fa680ed
701,726
def get_all_entries(df, pdbid, cdr):
    """
    Get all entries of a given PDBID and CDR.

    :param df: pandas.DataFrame
    :rtype: pandas.DataFrame
    """
    return df[(df['input_tag'].str.contains(pdbid)) & (df['CDR'] == cdr)]
414eca4481bde0ccc5cdd6e143f7d4b06216a102
701,728
def merge_result(res):
    """
    Merges all items in `res` into a list.

    This command is used when sending a command to multiple nodes
    and the result from each node should be merged into a single list.
    """
    if not isinstance(res, dict):
        raise ValueError("Value should be of dict type")

    result = set()
    for _, v in res.items():
        for value in v:
            result.add(value)
    return list(result)
28d21ca00316303c0e2fc0400599921154253236
701,730
def get_file_section_name(section_key, section_label=None):
    """Build a section name as in the config file, given section key and label."""
    return section_key + (" {0}".format(section_label) if section_label else "")
01e1f46d2a949315ba2e927ddfab610064539e3b
701,731
def _final_frame_length(header, final_frame_bytes):
    """Calculates the length of a final ciphertext frame, given a complete
    header and the number of bytes of ciphertext in the final frame.

    :param header: Complete message header object
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param int final_frame_bytes: Bytes of ciphertext in the final frame
    :rtype: int
    """
    final_frame_length = 4  # Sequence Number End
    final_frame_length += 4  # Sequence Number
    final_frame_length += header.algorithm.iv_len  # IV
    final_frame_length += 4  # Encrypted Content Length
    final_frame_length += final_frame_bytes  # Encrypted Content
    final_frame_length += header.algorithm.auth_len  # Authentication Tag
    return final_frame_length
b7029e3b705194ee7daa02b4400d124ffe6efc2a
701,736
def check_is_paired(df, subject, group):
    """
    Check if samples are paired.

    :param df: pandas dataframe with samples as rows and protein identifiers
        as columns (with additional columns 'group', 'sample' and 'subject').
    :param str subject: column with subject identifiers
    :param str group: column with group identifiers
    :return: True if paired samples.
    :rtype: bool
    """
    is_pair = False
    if subject is not None:
        count_subject_groups = df.groupby(subject)[group].count()
        is_pair = (count_subject_groups > 1).all()
    return is_pair
38f9b0722e77edb88ff44a7bc73eb24a8f1aa097
701,738
def specific_heat(mat):
    """Calculate specific heat"""
    cw = 4183
    mr = mat['m_heat']
    mw = mat['m_w']
    Tr = mat['Tr']
    Tw = mat['Tw']
    Te = mat['Te']
    return (mw * cw * (Te - Tw)) / (mr * (Tr - Te))
7d3fbe3f67b3df593c94c93ab7d8523242d17b46
701,743
def _flatten(suitable_for_isinstance):
    """
    isinstance() can accept a bunch of really annoying different types:
        * a single type
        * a tuple of types
        * an arbitrary nested tree of tuples

    Return a flattened tuple of the given argument.
    """
    types = set()
    if not isinstance(suitable_for_isinstance, tuple):
        suitable_for_isinstance = (suitable_for_isinstance,)
    for thing in suitable_for_isinstance:
        if isinstance(thing, tuple):
            types.update(_flatten(thing))
        else:
            types.add(thing)
    return tuple(types)
5ba63f39b2d22da78f5a362ce6821239714a9e6a
701,745
import six


def bitcast_to_bytes(s):
    """
    Take a string and return a string(PY2) or a bytes(PY3) object.

    The returned object contains the exact same bytes as the input string.
    (latin1 <-> unicode transformation is an identity operation for the
    first 256 code points).
    """
    return s if six.PY2 else s.encode("latin1")
b902550be03f447a286490653a2a1361257ac88c
701,748
from typing import Type
from typing import Hashable


def _lookup(key: str, typ: Type, *args, **kwargs) -> Hashable:
    """
    Gets the value of the given key in args, defaulting to the first positional.

    :param key: key to find value of in args.
    :param typ: type that dispatch is being performed on.
    :param args: positional args.
    :param kwargs: keyword args.
    :return: value of the key in the given args.
    """
    if key in kwargs:
        value = kwargs[key]
    else:
        try:
            if typ.__qualname__.endswith('.__new__'):
                value = args[1]
            else:
                value = args[0]
        except IndexError:
            raise TypeError(f'missing dispatch parameter {key!r} on {typ.__name__}')
    return value
195e95cd1d77137890d0608195677c344e698ad7
701,750
import re


def validate_time_string(value, is_list):
    """
    Checks that a "time string" quant is correctly formatted.

    A time string can contain three kinds of expressions, separated by + or -:
        Base values, which are just numeric (with an optional exponent on e form)
        Delta values, which are the same as above with a '*i' or 'i' suffix
        Variables, which are strings that are formatted according to the
        variable name rules

    If the string is correctly formatted, a polished version is returned.
    Otherwise, the original string is returned with an 'INVALID: ' prefix.
    """
    # Strip the input down to the essential part
    value = value.replace('INVALID:', '').strip()
    input_str = value.replace(' ', '')

    # Split the string if the quant accepts multiple values
    if is_list:
        strings = input_str.split(',')
    else:
        strings = [input_str]

    result = ''
    for idx, s in enumerate(strings):
        accum_string = '' if idx == 0 else ', '
        first = True

        if len(s) == 0:
            return f'INVALID: {value}'

        while len(s) > 0:
            prefix = '[+-]?' if first else '[+-]'
            var_rex = re.compile(prefix + r'[A-Za-z_]+[0-9A-Za-z_]*(\*i)?')
            var_match = var_rex.match(s)
            if var_match:
                # Remove the matched part from input
                match_str = var_match.group()
                match_len = len(match_str)
                s = s[match_len:]
                first = False
                if match_str[0] in ('+', '-'):
                    match_str = f'{match_str[0]} {match_str[1:]}'
                # Add a space after variable name
                accum_string += f'{match_str} '
                continue

            # No variable match, check for numeric value
            num_rex = re.compile(prefix + r'(([0-9]*\.[0-9]+)|([0-9]+))(e-?[0-9]+)?(\*?i)?', re.I)
            num_match = num_rex.match(s)
            if num_match:
                # Remove the matched part from input
                match_str = num_match.group()
                match_len = len(match_str)
                s = s[match_len:]
                first = False
                # Temporarily remove first char if it's a + or -
                if match_str[0] in ('+', '-'):
                    # Put a space after the sign
                    prefix_char = f'{match_str[0]} '
                    match_str = match_str[1:]
                else:
                    prefix_char = ''
                # Perform some cleanup
                while match_str.startswith('0') and len(match_str) > 1 and match_str[1].isnumeric():
                    match_str = match_str[1:]
                # Insert a zero if the number starts with a period
                if match_str.startswith('.'):
                    match_str = '0' + match_str
                match_str = f'{prefix_char}{match_str} '
                match_str = match_str.replace('I', 'i')
                match_str = match_str.replace('e', 'E')
                accum_string += match_str
                continue

            # No match, invalid input
            return f'INVALID: {value}'

        result += accum_string.strip()
    return result
802e7503f19ed1a5bb47ad887d1fe15219225fe1
701,753
async def async_setup(hass, hass_config):
    """Set up the Plaato component."""
    return True
b2c620c58aabcf788310e3aaf9cdf836f9be15ba
701,755
def read_ffindex(file):
    """Read a ffindex and return a list of all the lines in the file.

    Args:
        file (string): path to the ffindex

    Returns:
        list of list of string: the file read line by line, each line split
        into its whitespace-separated fields
    """
    fh = open(file, "r")
    index = []
    for line in fh:
        index.append(line.rstrip().split())
    fh.close()
    return index
fae6494ddbda63abae1161f9fd22c8a94f506407
701,760
import importlib


def resolve(module_name, obj_name):
    """
    Resolve a named object in a module.
    """
    return getattr(importlib.import_module(module_name), obj_name)
87ccef3456d28615b82a89a8e4ce405403eaade9
701,762
def get_num_shorts(string_list):
    """
    Returns the number of occurrences of 'Short' in an input string list.

    Args:
        string_list (list of string objects)

    Returns:
        numShorts (int): Number of occurrences of 'Short'
    """
    numShorts = 0
    for marker in string_list:
        if marker == 'Short':
            numShorts += 1
    return numShorts
b8e9da454590a8b29965696be3265053cfc78729
701,763
import jinja2


def create_j2env(template_dir) -> jinja2.Environment:
    """
    Create a Jinja2 environment instance used when template building the
    containerlab topology file.

    Parameters
    ----------
    template_dir: str
        The file path where the Jinja2 template file is located.

    Returns
    -------
    A Jinja2 environment instance that will be used to template build the
    containerlab topology file.
    """
    env = jinja2.Environment(
        trim_blocks=True,
        lstrip_blocks=True,
        keep_trailing_newline=True,
        loader=jinja2.FileSystemLoader([template_dir]),
        undefined=jinja2.StrictUndefined,
    )
    return env
1d6559eae0346c0b9fd6171c18da5f2d94e1db86
701,765
import torch


def compute_accuracy(logits, labels, mask):
    """Compute the accuracy"""
    logits = logits[mask]
    labels = labels[mask]
    _, indices = torch.max(logits, dim=1)
    correct = torch.sum(indices == labels)
    return correct.item() * 1.0 / len(labels)
a7ee234837024598fc95fa9c54c55802ea411577
701,770
def gt(value, other):
    """Greater than"""
    return value > other
943d50ded0dfcb248eefb060d28850154693956b
701,782
def point_dist(a, b):
    """
    Distance between two points.
    """
    return ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5
ad3490e25fb21a555ee2d12bead5fa476f682566
701,783
import math


def exempt_milliwatts_sar(cm: float, ghz: float) -> float:
    """Calculate power threshold for exemption from routine radio frequency
    exposure evaluation. Note: narrow range of applicable frequencies. FCC
    formula is "based on localized specific absorption rate (SAR) limits."
    Source: FCC 19-126 p.23

    :param cm: Distance from antenna to person (centimeters)
    :param ghz: Frequency of RF source (gigahertz, NOTE different unit from
        other functions)
    :return: time-averaged power threshold for exemption (milliwatts)
    :raises ValueError: if frequency or distance out of range
        (0.3 - 6 GHz; 0 - 40 cm)
    """
    if 0.3 <= ghz < 1.5:
        erp20 = 2040 * ghz
    elif 1.5 <= ghz <= 6:
        erp20 = 3060
    else:
        raise ValueError("frequency out of range: %s GHz" % str(ghz))
    x = -1 * math.log10(60 / (erp20 * math.sqrt(ghz)))
    if 0 <= cm <= 20:
        p_threshold = erp20 * (cm / 20) ** x
    elif 20 < cm <= 40:
        p_threshold = erp20
    else:
        raise ValueError("distance out of range: %s cm" % str(cm))
    return p_threshold
d705c3fd2388204d188e95d9013fe0c574f9e82a
701,789
import uuid


def random_string() -> str:
    """
    Create a 36-character random string.

    Returns
    -------
    str
    """
    return str(uuid.uuid4())
f01e291f6d36a8468a256af0fad83e2d468b471d
701,791
def _extent(x, y):
    """Get data extent for pyplot imshow.

    Parameters
    ----------
    x: list
        Data array on X-axis.
    y: list
        Data array on Y-axis.

    Returns
    -------
    [float, float, float, float]
        X and Y extent.
    """
    dx, dy = .5 * (x[1] - x[0]), .5 * (y[1] - y[0])
    return [x[0] - dx, x[-1] + dx, y[-1] + dy, y[0] - dy]
1b8a062e2060dc99d3fcdc32fe6fd1952f468a6a
701,792
def _normalize_newlines(text: str) -> str:
    r"""Normalizes the newlines in a string to use \n (instead of \r or \r\n).

    :param text: the text to normalize the newlines in
    :return: the text with the newlines normalized
    """
    return "\n".join(text.splitlines())
6b53b42e8cec72a8e63ec0065776f47de2fba835
701,796
def pick_from_greatests(dictionary, wobble):
    """
    Picks the left- or rightmost positions of the greatests list in a window
    determined by the wobble size. Whether the left or the rightmost
    positions are desired can be set by the user, and the list is ordered
    accordingly.
    """
    previous = -100
    is_picked_list = []
    for pos, is_greatest in dictionary.items():
        is_picked = False
        if is_greatest:
            if previous not in range(pos - wobble, pos + wobble + 1):
                is_picked = True
                previous = pos
        is_picked_list.append(is_picked)
    return is_picked_list
52685877620ab7f58a27eb6997ec25f3b499e3a4
701,797
def replace_layer(model, layer_name, replace_fn):
    """Replace single layer in a (possibly nested) torch.nn.Module using
    `replace_fn`.

    Given a module `model` and a layer specified by `layer_name` replace the
    layer using `new_layer = replace_fn(old_layer)`. Here `layer_name` is a
    list of strings, each string indexing a level of the nested model."""
    if layer_name:
        nm = layer_name.pop()
        model._modules[nm] = replace_layer(model._modules[nm], layer_name, replace_fn)
    else:
        model = replace_fn(model)
    return model
2e0ee082d6ab8b48979aa49e303a0e12583812b7
701,798
def genInvSBox(SBox):
    """
    genInvSBox - generates inverse of an SBox.

    Args:
        SBox: The SBox to generate the inverse.

    Returns:
        The inverse SBox.
    """
    InvSBox = [0] * 0x100
    for i in range(0x100):
        InvSBox[SBox[i]] = i
    return InvSBox
8ddf7e338e914f6cb6c309dc25705601326160c0
701,799
def get_unassigned(values: dict, unassigned: dict):
    """
    Select Unassigned Variable

    It uses minimum remaining values (MRV) and degree as heuristics.

    Returns a tuple of: unassigned key and a list of the possible values
    e.g. ('a1', [1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    values_sort = dict()  # empty dictionary to store length of possible values array
    for key in values.keys():
        length = len(values[key])  # get the length of possible values array
        values_sort[key] = length  # add to dictionary for sorting in next step
    # sort the dictionary including lengths of possible values from small to large
    # this is to later on assign the items with the minimum number of remaining values first
    values_sorted = dict(sorted(values_sort.items(), key=lambda item: item[1], reverse=False))
    for key in values_sorted.keys():
        if unassigned[key] == True:
            length = values_sorted[key]
            if length > 1:
                vars = values[key]
                return key, vars
32ff78f7a6443bfbf4406c420ba87e254e88d5c3
701,800
from datetime import datetime


def datetime_to_year(dt: datetime) -> float:
    """
    Convert a DateTime instance to decimal year

    For example, 1/7/2010 would be approximately 2010.5

    :param dt: The datetime instance to convert
    :return: Equivalent decimal year
    """
    # By Luke Davis from https://stackoverflow.com/a/42424261
    year_part = dt - datetime(year=dt.year, month=1, day=1)
    year_length = datetime(year=dt.year + 1, month=1, day=1) - datetime(year=dt.year, month=1, day=1)
    return dt.year + year_part / year_length
5f4ae29d57d13a344e70016ab59dbc0a619db4d8
701,802
def ek_R56Q(cell):
    """
    Returns the R56Q reversal potential (in mV) for the given integer index
    ``cell``.
    """
    reversal_potentials = {
        1: -96.0,
        2: -95.0,
        3: -90.5,
        4: -94.5,
        5: -94.5,
        6: -101.0
    }
    return reversal_potentials[cell]
a61d33426e4c14147677c29b8e37381981f0d1db
701,803
import re


def _sort_nd2_files(files):
    """
    The script used on the Nikon scopes is not handling > 100 file names
    correctly and is generating a pattern like:
        ESN_2021_01_08_00_jsp116_00_P_009.nd2
        ESN_2021_01_08_00_jsp116_00_P_010.nd2
        ESN_2021_01_08_00_jsp116_00_P_0100.nd2
        ESN_2021_01_08_00_jsp116_00_P_011.nd2

    So this function parses the last number and treats it as an int for
    sorting
    """
    pat = re.compile(r"(.*_)(\d+)(\.nd2)$")
    file_splits = []
    did_split = None
    for file in files:
        g = pat.match(file)
        if g is not None:
            file_splits += [(g.group(1), g.group(2), g.group(3))]
            assert did_split is True or did_split is None
            did_split = True
        else:
            assert did_split is False or did_split is None
            did_split = False

    if did_split:
        numerically_sorted = sorted(file_splits, key=lambda x: int(x[1]))
        return ["".join(i) for i in numerically_sorted]
    else:
        return sorted(files)
17a034323412174beab3fd9cfb23e315a26d4d5a
701,806
def _suppression_polynomial(halo_mass, z, log_half_mode_mass, c_scale, c_power):
    """
    :param halo_mass: halo mass
    :param z: halo redshift
    :param log_half_mode_mass: log10 of half-mode mass
    :param c_scale: the scale where the relation turns over
    :param c_power: the steepness of the turnover

    The functional form is:
    c_wdm / c_cdm = (1 + c_scale * mhm / m)^c_power * redshift_factor
    where redshift_factor = (1+z)^(0.026 * z - 0.04)
    (Bose et al. 2016)

    :return: the ratio c_wdm over c_cdm
    """
    if c_power > 0:
        raise Exception('c_power parameters > 0 are unphysical')
    if c_scale < 0:
        raise Exception('c_scale parameters < 0 are unphysical')

    mhm = 10 ** log_half_mode_mass
    mass_ratio = mhm / halo_mass

    concentration_factor = (1 + c_scale * mass_ratio) ** c_power
    redshift_factor = (1 + z) ** (0.026 * z - 0.04)
    rescale = redshift_factor * concentration_factor

    return rescale
e0d72ae6c092ff01864cfb74d18143b070f075c9
701,807
import torch


def KLDiv_loss(x, y):
    """Wrapper for PyTorch's KLDivLoss function"""
    x_log = x.log()
    return torch.nn.functional.kl_div(x_log, y)
e288c3e60fab90a30693b6d5856bd2964f421599
701,808
def limit_vals(input_value, low_limit, high_limit):
    """
    Apply limits to an input value.

    Parameters
    ----------
    input_value : float
        Input value.
    low_limit : float
        Low limit. If value falls below this limit it will be set to this value.
    high_limit : float
        High limit. If value falls above this limit it will be set to this value.

    Returns
    -------
    float
        Returns input value unless it falls above or below the entered limits.
    """
    if input_value < low_limit:
        return low_limit
    elif input_value > high_limit:
        return high_limit
    else:
        return input_value
4520da27c81338631ea0d35651c7e3170de0524c
701,809
import torch


def get_mask(ref, sub, pyg=False):
    """
    Get the mask for a reference list based on a subset list.

    Args:
        ref: reference list
        sub: subset list
        pyg: boolean; whether to return torch tensor for PyG

    Return:
        mask: list or torch.BoolTensor
    """
    mask = [item in sub for item in ref]
    if pyg:
        mask = torch.BoolTensor(mask)
    return mask
07e4b092b5becaaec8bc070990259701bc3a00b7
701,810
def get_merged_overlapping_coords(start_end):
    """merges overlapping spans, assumes sorted by start"""
    result = [start_end[0]]
    prev_end = result[0][-1]
    for i in range(1, len(start_end)):
        curr_start, curr_end = start_end[i]
        # if we're beyond previous, add and continue
        if curr_start > prev_end:
            prev_end = curr_end
            result.append([curr_start, curr_end])
        elif curr_end > prev_end:
            prev_end = curr_end
            result[-1][-1] = prev_end
        else:
            pass  # we lie completely within previous span
    return result
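# Illustrative usage sketch:
assert get_merged_overlapping_coords([[0, 5], [3, 8], [10, 12]]) == [[0, 8], [10, 12]]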
22220778a66e069d98d1129b32f024e61fde1859
701,811
def html_to_spreadsheet_cell(html_element):
    """
    Parse HTML element, like <a href=www.google.com>Google</a>
    to =HYPERLINK(www.google.com, Google)
    """
    link = html_element.find("a")
    if link:
        return '=HYPERLINK("{}", "{}")'.format(link['href'], link.contents[0])
    else:
        return html_element.text
f0c797f59ed55d1ce6aab32ff8c40cf83577ee27
701,814
from typing import Tuple


def parseOptionWithArgs(plugin: str) -> Tuple[str, str]:
    """Parse the plugin name into name and parameter

    @type plugin: str
    @param plugin: The plugin argument
    @returns tuple[str, str]: The plugin name and parameter
    """
    if '=' in plugin:
        plugin, param = plugin.split('=', 1)
    else:
        param = ''
    return (plugin, param)
0e1f85f2e31349bf7ddcdc2d35b9ba815a61ec06
701,816
def knapsack(p, v, cmax):
    """Knapsack problem: select maximum value set of items if total size not
    more than capacity

    :param p: table with size of items
    :param v: table with value of items
    :param cmax: capacity of bag
    :requires: number of items non-zero
    :returns: value optimal solution, list of item indexes in solution
    :complexity: O(n * cmax), for n = number of items
    """
    n = len(p)
    opt = [[0] * (cmax + 1) for _ in range(n + 1)]
    sel = [[False] * (cmax + 1) for _ in range(n + 1)]
    # --- basic case
    for cap in range(p[0], cmax + 1):
        opt[0][cap] = v[0]
        sel[0][cap] = True
    # --- induction case
    for i in range(1, n):
        for cap in range(cmax + 1):
            if cap >= p[i] and opt[i - 1][cap - p[i]] + v[i] > opt[i - 1][cap]:
                opt[i][cap] = opt[i - 1][cap - p[i]] + v[i]
                sel[i][cap] = True
            else:
                opt[i][cap] = opt[i - 1][cap]
                sel[i][cap] = False
    # --- reading solution
    cap = cmax
    solution = []
    for i in range(n - 1, -1, -1):
        if sel[i][cap]:
            solution.append(i)
            cap -= p[i]
    return (opt[n - 1][cmax], solution)
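# Illustrative usage sketch (hypothetical items): sizes [2, 3, 4], values
# [3, 4, 5], capacity 5; the best choice is items 0 and 1 for total value 7.
assert knapsack([2, 3, 4], [3, 4, 5], 5) == (7, [1, 0])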
484ae09e663c834e42a9e05d684138d4fb6ddf08
701,817
def get_usage_data_labels(usage_data):
    """Return dict with human readable labels for each of the groups in each
    of the given usage data timeframes."""
    labels = {}
    timeframe_formats = {"day": "%H", "week": "%b %d", "month": "%b %d", "year": "%b %Y"}
    for timeframe, timeframe_data in usage_data.items():
        labels[timeframe] = [group["datetime"].strftime(timeframe_formats[timeframe]) for group in timeframe_data]
    # Modify hourly labels to signify that it is covering a period of an hour.
    labels["day"] = [f"{label}:00-{int(label) + 1:02}:00" for label in labels["day"]]
    return labels
7c8610a7feb8a3a4a6454f508bfb9c95ccba55ff
701,819
def format_currency(amount):
    """Convert float to string currency with 2 decimal places."""
    str_format = '{0:.2f}'.format(amount)
    return '{0} USD'.format(str_format)
b90d064fe6b095227e5c590ad8d175f072e07957
701,823
def _get_delay_time(session):
    """
    Helper function to extract the delay time from the session.

    :param session: Pytest session object.
    :return: Returns the delay time for each test loop.
    """
    return session.config.option.delay
466ce191962df90ee8cdc1a5f8a004094eb9e79f
701,825