Columns: content (string, 39 – 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 – 710k)
import time


def timeit(func):
    """
    Simple wrapper to time a function.
    Prints the execution time after the method finishes.
    """
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        print('Execution Time ({0}): {1:.5f} seconds'.format(
            func.__name__, end_time - start_time))
        return result
    return wrapper
37a657ac013739329a84b619153fdfa781181bd8
23,251
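A minimal usage sketch for the timeit decorator above; the decorated slow_add function is hypothetical and only illustrates the printed output.

import time

@timeit
def slow_add(a, b):
    time.sleep(0.1)  # simulate work
    return a + b

slow_add(1, 2)  # prints something like: Execution Time (slow_add): 0.10012 seconds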
def check_repeat_x(V):
    """
    Check whether there are repeated x values.

    Parameters
    ----------
    V : dict
        dictionary which contains X and Y values

    Returns
    -------
    bool
        True if there are repeated x values, False otherwise
    """
    xlabel, _ = V.keys()
    return not (len(V[xlabel]) == len(set(V[xlabel])))
e840a596353fc01523f94ba9fc03d7940206b01a
23,253
from typing import Dict

import jinja2


def render(path: str, template_name: str, parameters: Dict[str, str]) -> str:
    """Returns a rendered Dockerfile.

    path indicates where in the filesystem the Dockerfiles are.
    template_name references a Dockerfile.<template_name> to render.
    """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(path),
        undefined=jinja2.StrictUndefined
    )
    template = "Dockerfile"
    if template_name is not None:
        template = "Dockerfile.{}".format(template_name)
    return env.get_template(template).render(parameters)
11850e6a093eb72d970462745184946e9500c440
23,255
from typing import Dict
import random


def randomValue(interval: Dict[str, int]) -> int:
    """Generate a random integer value from a given interval."""
    if not isinstance(interval, dict):
        raise ValueError('value has to be dict')
    return random.randrange(interval['min'], interval['max'], 1) // 1
d2445ab2127065fa5586080270757e9049246a6d
23,262
def _categorizeVector(v):
    """
    Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
    of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
    it returns a single zero still.

    >>> _categorizeVector((0,0))
    ('0', (0,))
    >>> _categorizeVector((1,0))
    ('h', (1,))
    >>> _categorizeVector((0,2))
    ('v', (2,))
    >>> _categorizeVector((1,2))
    ('r', (1, 2))
    """
    if not v[0]:
        if not v[1]:
            return '0', v[:1]
        else:
            return 'v', v[1:]
    else:
        if not v[1]:
            return 'h', v[:1]
        else:
            return 'r', v
3532e4920eb7d58aca2dbf360cea6939d94ab730
23,263
def _NormalizeString(string):
    """Normalizes a string to account for things like case."""
    return string.strip().upper() if string else None
24b8c525df9b080716119ee013cf45eb8c7b892a
23,267
def median_iqr(series):
    """
    The interquartile range (Q3 - Q1) and median are computed on a pandas series.

    :param series: pandas Series of numeric values
    :return: (median, iqr)
    """
    iqr_median = [.25, .5, .75]
    series = series.quantile(iqr_median)
    iqr = series.iloc[2] - series.iloc[0]
    median = series.iloc[1]
    return median, iqr
095f4d33fd4069cf888eedbfb3570099fc592772
23,272
def zerocross(eigenvec):
    """
    Compute the amount of zero-crossings of an eigenvector matrix (for each eigenvector).

    Parameters
    ----------
    eigenvec : numpy.ndarray
        The eigenvectors from a decomposition.

    Returns
    -------
    numpy.ndarray
        A 1D array with the amount of zero-crossings for each eigenvector.
    """
    return (eigenvec[:-1, ...] * eigenvec[1:, ...] < 0).sum(axis=0)
79fd04940261167336c088027a3550abad45a464
23,273
def calculate_tdew_from_rh(rh, T, temperature_metric="celsius", verbose=False):
    """Calculate dew point temperature from relative humidity and temperature.

    Args:
        rh (pd series): air relative humidity in %
        T (pd series): air temperature in °C
        temperature_metric (str, optional): Input temperature unit. Defaults to "celsius".

    Returns:
        pandas series: dew point temperature timeseries (in °C or K)
    """
    if verbose:
        print(
            "Calculating dew point temperature (dewp_temp) from relative humidity and temp."
        )
    if temperature_metric != "celsius":
        if verbose:
            print("Assuming input temperature unit is Kelvin for rh calculation.")
        T = T - 273  # K to °C

    # inspired from humidity.to.dewpoint in:
    # https://github.com/geanders/weathermetrics/blob/master/R/moisture_conversions.R
    Tdew = (rh / 100) ** (1 / 8) * (112 + (0.9 * T)) - 112 + (0.1 * T)  # in °C

    if temperature_metric != "celsius":
        Tdew = Tdew + 273  # °C to K
    return Tdew
079e4a871e8343378e8d0e95dc3ddeed9366e874
23,277
def to_pc(val):
    """Convert float value in [0, 1] to percentage"""
    return r'%.2f\%%' % (val * 100)
ca6b59b437537beef85018089c6448572ece2b32
23,279
def simpson_index(species_num_array):
    """Calculate Simpson's Diversity Index: 1 - ∑ pi**2

    The Simpson index is a dominance index because it gives more weight to
    common or dominant species. In this case, a few rare species with only a
    few representatives will not affect the diversity.

    p is the proportion (n/N) of individuals of one particular species found
    (n) divided by the total number of individuals found (N). The value of
    this index ranges between 0 and 1; the greater the value, the greater the
    sample diversity.

    Args:
        species_num_array: An array that stores the number of individuals of
            each species.

    Returns:
        Simpson's diversity index of this population.
    """
    ratio_ = species_num_array / species_num_array.sum()
    simpson_index_diversity = 1 - sum(ratio_ ** 2)
    return float('%0.4f' % simpson_index_diversity)
22c0c2074a2d389ef6e245ce0b3aa27aea0590b5
23,280
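A quick check of the simpson_index function above, assuming a NumPy array as input; the counts are made up for illustration.

import numpy as np

counts = np.array([10, 10, 10, 10])  # four equally abundant species
# proportions are 0.25 each, so the index is 1 - 4 * 0.25**2 = 0.75
print(simpson_index(counts))  # 0.75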
def can_embed(bin_msg: str, width: int, height: int) -> bool:
    """Determines whether the image can hold the message.

    Parameters:
    -----------
    bin_msg: string:
        A string of 1's and 0's representing the characters in the msg.
    width: int:
        The width of the image.
    height: int:
        The height of the image.

    Returns:
    --------
    embed boolean:
        States whether the message can fit in the specified image.
    """
    embed = len(bin_msg) + 8 < 3 * width * height  # + 8 for terminating 00000000
    return embed
8489994fe239a920ece07dd69d97e85b9bc7ce60
23,281
def addition(*args):
    """Add an arbitrary number of integer arguments.

    Args:
        args (int): Numbers to add

    Returns:
        int: The result of the addition
    """
    result = 0
    for arg in args:
        result += int(arg)
    return result
0c0f9bc333cbcb1ce55e692cd2f6c1bb07461396
23,282
def precision_single_class(correctly_assigned, total_assigned):
    """
    Computes the precision for a single class

    :rtype : float
    :param correctly_assigned: Samples correctly assigned to the class
    :param total_assigned: Total samples assigned to the class
    :return: The precision value
    """
    # simply returning the precision value
    return float(correctly_assigned) / float(total_assigned)
49ab9693c4b0a59384a55b89e9ecd45dbe1da028
23,286
def get_bucket_ix(seq_length, bucket_range):
    """
    Returns the index of the bucket for a sequence with a given length when
    the bucket range is bucket_range.

    Args:
        seq_length: length of the sequence
        bucket_range: range of a bucket

    Returns:
        index of a bucket
    """
    return seq_length // bucket_range + (1 if seq_length % bucket_range != 0 else 0)
705dfbfeeb87adb7b5f39d6b1db46b380d58b276
23,288
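A couple of worked values for get_bucket_ix above, assuming buckets of width 10.

print(get_bucket_ix(5, 10))   # 1: 5 // 10 == 0, plus 1 for the remainder
print(get_bucket_ix(10, 10))  # 1: 10 // 10 == 1, no remainder
print(get_bucket_ix(23, 10))  # 3: 23 // 10 == 2, plus 1 for the remainder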
def parse_response(response):
    """
    Utility function to parse a response into a list.
    """
    elements = []
    for element in response['responses'][0]['labelAnnotations']:
        elements.append(element['description'].capitalize())
    return elements
f8b40a43ad00af68d5d13d3b78f00f33cec85270
23,289
def billing_mode_from_summary(billing_mode_summary):
    """Extract BillingMode field from BillingModeSummary object."""
    return billing_mode_summary["BillingMode"]
9ada170f42e1f0f1eec3378a5f25f0fc884b033e
23,292
def cartesian(lst1, lst2):
    """Return the cartesian product of 2 lists"""
    lst = []
    for item1 in lst1:
        for item2 in lst2:
            lst.append([item1, item2])
    return lst
57bf192770d65143b0cf6c669b18c3baee6cd360
23,294
import re


def check_column_name(column_name):
    """
    Check whether a column is a temperature-related column.

    :param column_name: name of the column
    :return: BOOL
    """
    # Data before 2019, e.g. P1_T, denotes temperature without film cover
    pat1 = re.compile(r'P\d+_T', re.I)
    # Data before 2019, e.g. P11m_T, denotes temperature with film cover
    pat2 = re.compile(r'P\d+m_T', re.I)
    # 2020 data, e.g. P1_8, denotes temperature without film cover
    pat3 = re.compile(r'P\d+-\d+', re.I)
    # 2020 data, e.g. P1_8m, denotes temperature with film cover
    pat4 = re.compile(r'P\d+-\d+m', re.I)
    if (pat1.match(column_name) is not None
            or pat2.match(column_name) is not None
            or pat3.match(column_name) is not None
            or pat4.match(column_name) is not None):
        return True
    else:
        return False
63d501c028beb12be983892383873ffb166e387f
23,297
def is_null_str(value):
    """
    Indicate if a string is None or 'None' or 'N/A'

    :param value: A string value
    :return: True if a string is None or 'None' or 'N/A'
    """
    return not value or value == str(None) or value == 'N/A'
e8edb22c77ddf712a039f92529d453b7a4947173
23,298
import math


def solveQuad(a, b, c):
    """
    Solve a quadratic equation. Returns a list of solutions from length 0 to 2

    :param a:
    :param b:
    :param c:
    :return:
    """
    discriminant = (b ** 2) - (4 * a * c)
    divisor = 2 * a

    if discriminant < 0.0:
        return []
    elif divisor == 0.0:
        if b == 0.0:
            return []
        else:
            return [-c / b]
    elif discriminant > 0.0:
        sdiscriminant = math.sqrt(discriminant)
        return [(-b - sdiscriminant) / divisor, (-b + sdiscriminant) / divisor]
    else:
        return [-b / divisor]
199041cb7a6e6da8787511e29bdfbcee7d73640b
23,301
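A few spot checks for solveQuad above, covering the two-root, repeated-root, and degenerate linear cases.

print(solveQuad(1, -3, 2))  # [1.0, 2.0]: x**2 - 3x + 2 has roots 1 and 2
print(solveQuad(1, 2, 1))   # [-1.0]: repeated root of x**2 + 2x + 1
print(solveQuad(0, 2, -4))  # [2.0]: a == 0 degenerates to the linear 2x - 4 = 0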
def is_point_inside_object(obj, obj_BVHtree, point):
    """ Checks whether the given point is inside the given object.

    This only works if the given object is watertight and has correct normals

    :param obj: The object
    :param obj_BVHtree: A bvh tree of the object
    :param point: The point to check
    :return: True, if the point is inside the object
    """
    # Look for closest point on object
    nearest, normal, _, _ = obj_BVHtree.find_nearest(point)
    # Compute direction
    p2 = nearest - point
    # Compute dot product between direction and normal vector
    a = p2.normalized().dot((obj.rotation_euler.to_matrix() @ normal).normalized())
    return a >= 0.0
d838aece6338858fc4d7dd945418bd6db3a48c42
23,305
def preparation_time_in_minutes(layers):
    """Calculate prep time

    :param layers: Number of layers in the cake
    :return: int Total number of minutes to prepare the cake
    """
    return 2 * layers
b088c09e5ea43d6fc924d9aecb6b90d91c46fcf2
23,306
def get_product_from_time_scale(time_scale):
    """
    get the USGS nwis product that is appropriate for the time scale

    :param time_scale: str - Pandas-like time string for the time scale at which
        the data will be aggregated (e.g., 'H' for hourly or 'D' for daily)
    :return:
    """
    iv_scales = ['15T', 'T', 'H']
    dv_scale = ['D']
    if time_scale in iv_scales:
        return 'iv'
    elif time_scale in dv_scale:
        return 'dv'
    else:
        raise ValueError("time scale must be '15T', 'T', 'H', or 'D'")
425b0cbec0b20b79493dd53805cf3fd6f31fae95
23,313
def identity(arg):
    """
    This function simply returns its argument. It serves as a
    replacement for ConfigParser.optionxform, which by default
    changes arguments to lower case. The identity function is a
    better choice than str() or unicode(), because it is
    encoding-agnostic.
    """
    return arg
a5a5adfbc87ec25619eb4540dda995e49a03ba7a
23,314
def _do_request(client, method, path, data=None, query=None):
    """Make a request to the endpoint with `data` in the body.

    `client` is a flask test client
    `method` is the request method
    `path` is the path of the request
    `data` a dictionary containing body arguments for non-GET methods that
        will be converted to JSON -- the server will expect valid json, but
        we want to write test cases with invalid input as well.
    `query` a dictionary containing query arguments (ie - those after the ?)
        for the GET method.
    """
    # The arguments for this method are documented at:
    # http://werkzeug.pocoo.org/docs/0.11/test/#werkzeug.test.EnvironBuilder
    return client.open(method=method, path=path, query_string=query, data=data)
b9409ad20674f5fc21c2e21e0580d5c837272767
23,317
def int_to_binary(int_num):
    """ Converts int to binary """
    return bin(int_num)
9d72474454b0cdd8dee6cfecc3fba02410cdf959
23,322
from typing import Sequence
from typing import List


def read_present_files(paths: Sequence[str]) -> str:
    """Read the content of those files that are present."""
    contents: List[str] = []
    for path in paths:
        try:
            with open(path, "r") as f:
                contents += ["\n".join(map(str.strip, f.readlines()))]
        except FileNotFoundError:
            continue
    return "\n\n".join(contents)
7dce316db22405e8482b8bbd2890b2213a931cfd
23,325
def get_jaccard_similarity(s, t):
    """Computes the Jaccard Similarity of two sets"""
    return len(s.intersection(t)) / len(s.union(t))
12c17780e3ca51b9948b7b89b8b22f89aed6690d
23,343
def strip_list_items(items):
    """Apply str.strip to all items in a list"""
    return list(map(str.strip, items))
f34e5d483eb15b4936bff6df57afd0574b05397b
23,345
import random


def best_index(seq):
    """best_index(seq)

    Given a sequence, find the position of the largest value.
    Ties are broken randomly.
    """
    largest = max(seq)
    indices = []
    for idx, val in enumerate(seq):
        if val == largest:
            indices.append(idx)
    # Randomize if necessary
    if len(indices) > 1:
        random.shuffle(indices)
    return indices[0]
7d9506220d9b216c4016102f48853783d049f33d
23,349
import ast


def _node_filter_for_globals(globals):
    """Filters ast nodes in support of setting globals for exec.

    Removes initial assigns of any variables occurring in `globals`.
    This is to allow globals to provide the initial value. Subsequent
    assigns are not removed under the assumption that they re-define
    the initial variable value.
    """
    names = set(globals.keys())
    removed = set()

    def f(node):
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if not isinstance(target, ast.Name) or target.id in removed:
                    return True
                if target.id in names:
                    removed.add(target.id)
                    return False
        return True

    return f
5ffc5fb128ab297d0cbc2efef6684fb455dfaa2e
23,353
from typing import List
from typing import Dict


def get_initial_state(procs: int, tasks: List[int]) -> Dict[int, List[int]]:
    """generate the initial state of the system

    the initial state is easy to generate: all processors have an empty
    tasklist except the first one which has it all

    example:
    {
        0: [1, 2, 2, 2, 2, 3, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 9],
        1: [],
        2: []
    }

    :param procs: number of available processors
    :param tasks: list of tasks to distribute
    :return: the initial state
    """
    if procs < 1:
        exit('Must provide at least one processor')
    state = {i: [] for i in range(procs)}
    state[0] = sorted(tasks)
    return state
04ac8b0d99d7b248772894ea5356065e555c5250
23,357
from typing import List


def list2str(l: List[int]) -> str:
    """ Converts list to a string"""
    return ' '.join([str(x) for x in l])
fde657c2143ab73fbf0e9a28f3cf47fa377de7dc
23,358
import math


def _pos_sqrt(value: float) -> float:
    """Returns sqrt of value or raises ValueError if negative."""
    if value < 0:
        raise ValueError('Attempt to take sqrt of negative value: {}'.format(value))
    return math.sqrt(value)
7b4e76f67b2f3f3dfab3e3c441f084685464a994
23,360
from typing import List
from typing import Dict


def generate_create_sqls(tbl_name: str, columns: List[tuple], keys: Dict) -> str:
    """
    tbl_name: string a table name after pyjj_{tbl_name}
    columns: list of tuples (column_name, data_type, options)
    keys: dict of keys key: (columns, options)
    """
    assert tbl_name
    column_stmt = ",".join(f"{key} {val} {opt}" for key, val, opt in columns)
    key_stmt = (
        ","
        + ",".join(
            f"{key} ({','.join(value[0])}) {value[1]}"
            for key, value in keys.items()
        )
        if keys
        else ""
    )
    return f"CREATE TABLE IF NOT EXISTS pyjj_{tbl_name} ({column_stmt} {key_stmt});"
2bcf1e568ce093426666b8e10f9fa28128b7e995
23,368
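A small usage sketch for generate_create_sqls above; the table and column names are made up for illustration, and the extra spaces in the output come from the empty option strings.

sql = generate_create_sqls(
    tbl_name="links",
    columns=[("id", "INT", "NOT NULL"), ("url", "TEXT", "")],
    keys={"PRIMARY KEY": (["id"], "")},
)
print(sql)
# CREATE TABLE IF NOT EXISTS pyjj_links (id INT NOT NULL,url TEXT  ,PRIMARY KEY (id) );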
def get_node_datatext(node):
    """Returns a string with data node text if it exists on the node,
    otherwise returns an empty string"""
    datatext = ""
    if node.attributes["id"].value:
        for data_node in node.getElementsByTagName('data'):
            if data_node.attributes["key"].value == "d5":
                if data_node.firstChild:
                    datatext = data_node.firstChild.wholeText
    return datatext
b686cc52e0194440f0c86b9e6dfb12e4b7f2a1b4
23,380
def make_constructed_utts(utt_tokens):
    """
    Converts utterances into correct form for composition

    Args:
        utt_tokens: List of utterances (utterance = list of tokens)
    Returns:
        utts: List of utterances for individual tokens and compositions
        compose_idx: List of indices in utts to be composed

    [["green", "-ish"]] =>
        [["#start#", "green", "#end#"],
         ["#start#", "-ish", "#end#"],
         ["#start#", "green", "-ish", "#end#"]],
        [(0,2)]
    """
    START = "#start#"
    END = "#end#"

    utts = []
    compose_idx = []
    for utt in utt_tokens:
        compose_idx.append((len(utts), len(utts) + len(utt)))  # [start, end)
        for tok in utt:
            utts.append([START, tok, END])
        utts.append([START] + utt + [END])
    return utts, compose_idx
57eaa008883b2cfde95b438251dc305478dac3f9
23,383
import re


def isbase64(value):
    """
    Return whether or not given value is base64 encoded.

    If the value is base64 encoded, this function returns ``True``, otherwise ``False``.

    Examples::

        >>> isbase64('U3VzcGVuZGlzc2UgbGVjdHVzIGxlbw==')
        True

        >>> isbase64('Vml2YW11cyBmZXJtZtesting123')
        False

    :param value: string to validate base64 encoding
    """
    base64 = re.compile(r"^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})$")
    return bool(base64.match(value))
02da2cfe0b32288aa599534dea08f4bc85a41123
23,386
def find_next_biggest_with_same_1s(n):
    """Finds the next biggest number with the same number of 1 bits.

    - Flips the rightmost 0 that has ones on its right (increases the value)
    - Rearranges 1s on its right to the lowest positions and flips the highest
      of them (decreases the value and keeps the same number of 1s)

    Example:
        xxxx_0_111_0000 --> xxxx_1_111_0000 --> xxxx_1_000_0011

    Args:
        n: A positive integer.

    Raises:
        ValueError on non-positive input.

    Returns:
        Next biggest number with same number of 1s.
    """
    if n <= 0:
        raise ValueError('Input argument has to be positive.')

    temp = n
    # Count number of rightmost 0s
    num_of_zeros = 0
    while temp & 1 == 0:
        temp >>= 1
        num_of_zeros += 1
    # Count number of 1s to the left of the 0s
    num_of_ones = 0
    while temp & 1 == 1:
        temp >>= 1
        num_of_ones += 1
    # Flip next 0 to 1
    n = n ^ (1 << (num_of_ones + num_of_zeros))
    # Create a 0...01...1 mask, then invert it to get 1...10...0
    mask = ~((1 << (num_of_ones + num_of_zeros)) - 1)
    n = n & mask
    # Create a 0...01...1 mask with number of 1s = (num_of_ones - 1)
    mask = (1 << (num_of_ones - 1)) - 1
    n = n | mask
    return n
d323a3d828929f9f6ae0c0ad6f2849e067a2eb8d
23,388
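A worked value for find_next_biggest_with_same_1s above, mirroring the docstring's bit diagram (0b01110000 == 112).

n = 0b01110000                         # 112, three 1 bits
nxt = find_next_biggest_with_same_1s(n)
print(bin(nxt), nxt)                   # 0b10000011 131, still three 1 bits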
from typing import List
from typing import Dict


def get_single_color_dicts(self) -> List[List[Dict]]:
    """
    Converts the text in the editor based on the line_string_list into a list
    of lists of dicts. Every line is one sublist. Since only one color is
    being applied, we create a list with one dict per line.
    """
    rendering_list = []
    for line in self.line_string_list:
        # appends a single-item list
        rendering_list.append([{'chars': line, 'type': 'normal', 'color': self.textColor}])
    return rendering_list
9ebcb28e59f6b05591c0c5465513aa9408952a62
23,393
def is_last_ts_in_thousands(timestamps):
    """Detect if the last timestamp in a sequence is a multiple of 1000.

    Args:
        timestamps (list): A list of timestamps (in picoseconds).

    Returns:
        True if the last timestamp is a multiple of 1000, False otherwise.
    """
    if timestamps is None:
        return True
    last_timestamp = max(timestamps)
    return last_timestamp % 1000 == 0
cb26d134eada9407415fefb4761ec265fa3c8f28
23,394
from typing import List


def argv_pars(arguments: List[str]) -> int:
    """Returns the second argv value as an int (at least 1), or 30 on failure.

    Args:
        arguments (List[str]): sys.argv

    Returns:
        int: i >= 1, or 30
    """
    try:
        return max(int(arguments[1]), 1)
    except Exception:
        return 30
b96340f9d547e3fabd959f2fe9feb8cd0d3f4c47
23,399
def get_pentagonal_number(n: int) -> int:
    """Get Pentagonal number `P_n = n*(3n - 1)/2` for a given number `n`."""
    return (n * (3 * n - 1)) // 2
4ec89c0428ea83ede1084877790edadb7b61b6d5
23,400
def cpf_checksum(cpf):
    """ CPF Checksum algorithm. """
    if cpf in map(lambda x: str(x) * 11, range(0, 10)):
        return False

    def dv(partial):
        s = sum(b * int(v) for b, v in zip(range(len(partial) + 1, 1, -1), partial))
        return s % 11

    dv1 = 11 - dv(cpf[:9])
    q2 = dv(cpf[:10])
    dv2 = 11 - q2 if q2 >= 2 else 0
    return dv1 == int(cpf[9]) and dv2 == int(cpf[10])
cbe634578d50687d2b2aec5aa996123f25a03327
23,407
def _findBeginning(file_name, loc):
    """Scan given TRY dat file and find end of header (start of data)

    Arguments:
        file_name {str} -- Name of TRY data file
        loc {str} -- Location of TRY data file

    Returns:
        (int, string) -- (lines before data starts, column header)
    """
    with open(loc + file_name, 'r') as dat_file:
        last_line = dat_file.readline()
        current_line = dat_file.readline()
        dat_start = 2
        while current_line[:3] != '***':
            last_line = current_line  # save column header
            current_line = dat_file.readline()
            dat_start += 1
            if dat_start == 100:
                break
        # get header as list of string
        last_line = last_line.split()
    return (dat_start, last_line)
20be3ccefafc61bfa3f769b993e5f53c667b37ba
23,409
def mhz_to_freq_khz(mhz):
    """ Convert MHz to exact frequency in kHz """
    return {
        14: 14100,
        18: 18110,
        21: 21150,
        24: 24930,
        28: 28200
    }[mhz]
d7ec477c88b7e212e852aef407f2a31064d806a0
23,410
def price_table_to_price_mapping(table):
    """Convert price table to a dict mapping from region
    to instance type to instance info
    """
    region_price_mapping = {}
    for region_table in table['config']['regions']:
        types = {}
        for type_category in region_table['instanceTypes']:
            for size in type_category['sizes']:
                types[size['size']] = size
        region_price_mapping[region_table['region']] = types
    return region_price_mapping
d39887b82be8ae37a20d73c830e7ef724553600e
23,413
import hashlib


def hash16(data):
    """
    Return a hex string of the data's hash. Currently uses md5.
    """
    hash_object = hashlib.md5(bytes(data, 'utf-8'))
    return hash_object.hexdigest()
bed6d51832f1354990c08c0c27f883e99f00ddd7
23,414
def dict_to_css(css_dict, pretty=False):
    """Takes a dictionary and creates CSS from it

    :param css_dict: python dictionary containing css rules
    :param pretty: if css should be generated as pretty
    :return: css as string
    """
    seperator = '\n'
    tab = '\t'
    if not pretty:
        seperator = ''
        tab = ''
    css_rules = []
    for selector, rules in css_dict.items():
        tmp = selector + '{' + seperator
        tmp_rules = []
        if isinstance(rules, dict):
            for rule, value in rules.items():
                tmp_rules.append(tab + rule + ':' + value + ';')
        tmp += seperator.join(tmp_rules)
        tmp = tmp + '}'
        css_rules.append(tmp)
    return seperator.join(css_rules)
03c489e2cb3f855fad2e2476c33fc2806be9043f
23,419
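A quick usage sketch for dict_to_css above; the selector and rules are made up, and dict insertion order is assumed to be preserved (Python 3.7+).

css = dict_to_css({'h1': {'color': 'red', 'margin': '0'}}, pretty=True)
# css == 'h1{\n\tcolor:red;\n\tmargin:0;}'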
def avg(iterable):
    """Simple arithmetic average function.

    Returns `None` if the length of `iterable` is 0 or no items except None exist.
    """
    items = [item for item in iterable if item is not None]
    if len(items) == 0:
        return None
    return float(sum(items)) / len(items)
1489cf0a828e8c1613453d04abb8773658d60e8e
23,422
def dict_from_tokens(tokens, value):
    """Build a dict-tree from a list of tokens defining a unique branch within
    the tree.

    Args:
        tokens (list): A list of tokens defining a branch within the nested dict
        value (any): An object set as the leaf of a branch

    Returns:
        dict: A nested dictionary
    """
    if len(tokens) == 0:
        return value
    key = tokens.pop(0).lower()
    return {key: dict_from_tokens(tokens, value)}
dd3b9e72208ba404354f9bd98c8a4a8d80609611
23,427
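An example of the nested structure dict_from_tokens above builds; note that it consumes the token list in place via pop.

print(dict_from_tokens(["A", "B", "C"], 42))
# {'a': {'b': {'c': 42}}}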
def mk_falls_description(data_id, data):  # measurement group 8
    """
    transforms a h-falls-description.json form into the triples used by
    insertMeasurementGroup to store each measurement that is in the form

    :param data_id: unique id from the json form
    :param data: data array from the json form
    :return: The list of (typeid, valType, value) triples that are used by
        insertMeasurementGroup to add the measurements
    """
    return [(220, 2, data_id),
            (48, 7, data['fallint']),
            (49, 2, data['falldesc']),
            (50, 2, data['fallinjury'])]
1e0c304538159b9bb01d677bdacdfa9a0b3b4e4d
23,428
def addMessage(row_num, valid, new_msg, messages):
    """ Add error message to the list of errors and set the validity"""
    if new_msg:
        if "Error" in new_msg:
            valid = False
        match = False
        for msg in messages:
            if new_msg == msg[1]:
                match = True
                if row_num + 1 != msg[0][-1]:
                    msg[0].append(row_num + 1)
                return valid, messages
        if match == False:
            messages.append([[row_num + 1], new_msg])
    return valid, messages
899f2c4168ccfbc1fe66ffd35f6b4ad008d6f032
23,432
from typing import Dict


def word_count_helper(results: Dict) -> int:
    """
    Helper function that computes word count for OCR results on a single image

    Parameters
    ----------
    results: Dict
        OCR results from a clapperboard instance

    Returns
    -------
    Int
        Number of words computed from OCR results
    """
    count = 0
    for element in results:
        words_list = element["text"].split(" ")
        count += len(words_list)
    return count
aaf4b9b6e430c13b804ed96374eecddb134a31ae
23,434
def get_num_train_images(hparams):
    """Returns the number of training images according to the dataset."""
    num_images_map = {
        'imagenet': 1281167,
        'cifar10': 50000,
    }
    if hparams.input_data.input_fn not in num_images_map:
        raise ValueError(
            f'Unknown dataset size for input_fn {hparams.input_data.input_fn}')

    num_images = num_images_map[hparams.input_data.input_fn]
    if hparams.input_data.max_samples > 0:
        return min(num_images, hparams.input_data.max_samples)
    return num_images
e75e827026b247158ca76890990b04d51cd99a6a
23,439
def _same_file_up_to_epsilon(filen1, filen2, eps=1e-9):
    """_same_file_up_to_epsilon

    Return True if filen1 and filen2 contain the same float data up to epsilon

    Args:
        filen1 (str): The path and name of the first filename
        filen2 (str): The path and name of the second filename
        eps (float): The maximum tolerance for asserting that two floating
            point numbers are different

    Returns:
        A bool indicating if the two files contain the same data
    """
    assert filen1 != filen2, "File names must be different."
    with open(filen1, "r") as filep_ref:
        with open(filen2, "r") as filep_test:
            line_ref = next(filep_ref)
            line_test = next(filep_test)
            assert line_ref == line_test, \
                "Invalid line generated for " + filen1 + ":\n" + line_test + \
                "\nthat is different from the reference file " + filen2 + ":\n" + line_ref
            for line_ref, line_test in zip(filep_ref, filep_test):
                # Checks that the 38 3D landmarks generated are equal up to epsilon to the reference
                for val_ref, val_test in zip(line_ref.split(",")[1:38 * 3 + 1],
                                             line_test.split(",")[1:38 * 3 + 1]):
                    assert abs(float(val_ref) - float(val_test)) < eps, \
                        "Invalid value detected for " + filen1 + ":\n" + line_test + \
                        "\nthat is different from the reference file " + filen2 + ":\n" + line_ref
    return True
5aea46a44ce5d5df0e8c32ea4c59f34b4751d492
23,442
def unite_statuses(statuses, update):
    """ Takes two dictionaries <hostname, hoststatus> and returns a dictionary with united entries
    (returncode is set to the max value per host, logs per host are concatenated)
    """
    result = {}
    for key, value in statuses.items():
        if key in update:
            upd_status = update[key]
            res_status = {
                "exitstatus": max(value["exitstatus"], upd_status["exitstatus"]),
                "log": value["log"] + "\n" + upd_status["log"]
            }
            result[key] = res_status
        else:
            result[key] = value
    return result
c0454e3fdc0ccda0c6cabf3ac2ee479aacbfee27
23,448
def stringify(value):
    """
    Escapes a string to be usable as cell content of a CSV formatted data.
    """
    stringified = '' if value is None else str(value)
    if ',' in stringified:
        stringified = stringified.replace('"', '""')
        stringified = f'"{stringified}"'
    return stringified
74d5683a79e7efab48ec24767d1c912b66c0e65b
23,450
import math


def get_bpd(log_p, dimentions=28 * 28):
    """
    bpd = (nll_val / num_pixels) / numpy.log(2)

    log_p: log probability
    dimentions: dimensions (resolution) of image
    """
    return ((-log_p / dimentions) / math.log(2)).mean().item()
a1ba8c8e688988ef0b02ec555e5b31ffc5408d2a
23,452
def nb_coverage_distance(epitope, peptide, mmTolerance=0):
    """Determines whether peptide covers epitope and can handle epitopes and
    peptides of different lengths.

    To be a consistent distance matrix:
        covered = 0
        not-covered = 1

    If epitope is longer than peptide it is not covered.
    Otherwise coverage is determined based on a mmTolerance

    Parameters
    ----------
    epitope : np.array
    peptide : np.array
    mmTolerance : int
        Number of mismatches tolerated
        If dist <= mmTolerance then it is covered

    Returns
    -------
    covered : int
        Covered (0) or not-covered (1)"""
    LEpitope, LPeptide = len(epitope), len(peptide)
    if LEpitope > LPeptide:
        return 1

    for starti in range(LPeptide - LEpitope + 1):
        mm = 0
        for k in range(LEpitope):
            if epitope[k] != peptide[starti + k]:
                mm = mm + 1
                if mm > mmTolerance:
                    """If this peptide is already over the tolerance then goto next one"""
                    break
        if mm <= mmTolerance:
            """If this peptide is below tolerance then return covered (0)"""
            return 0
    """If no peptides meet mmTolerance then return not covered"""
    return 1
46b88f83934465e8bb4b30f144b5acc2791c809a
23,455
def construct_unsent_berichten_query(naar_uri, max_sending_attempts):
    """
    Construct a SPARQL query for retrieving all messages for a given recipient
    that haven't been received yet by the other party.

    :param naar_uri: URI of the recipient for which we want to retrieve
        messages that have yet to be sent.
    :returns: string containing SPARQL query
    """
    q = """
        PREFIX schema: <http://schema.org/>
        PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>

        SELECT DISTINCT ?referentieABB ?dossieruri ?bericht ?betreft ?uuid ?van ?verzonden ?inhoud
        WHERE {{
            GRAPH ?g {{
                ?conversatie a schema:Conversation;
                    schema:identifier ?referentieABB;
                    schema:about ?betreft;
                    schema:hasPart ?bericht.
                ?bericht a schema:Message;
                    <http://mu.semte.ch/vocabularies/core/uuid> ?uuid;
                    schema:dateSent ?verzonden;
                    schema:text ?inhoud;
                    schema:sender ?van;
                    schema:recipient <{0}>.
                FILTER NOT EXISTS {{ ?bericht schema:dateReceived ?ontvangen. }}
                OPTIONAL {{ ?conversatie ext:dossierUri ?dossieruri. }}
                BIND(0 AS ?default_attempts)
                OPTIONAL {{ ?bericht ext:failedSendingAttempts ?attempts. }}
                BIND(COALESCE(?attempts, ?default_attempts) AS ?result_attempts)
                FILTER(?result_attempts < {1})
            }}
        }}
        """.format(naar_uri, max_sending_attempts)
    return q
ff380f1dd2edc77f6ee41376f6c1d8ee50448d43
23,458
def _roman_to_int(r):
    """
    Convert a Roman numeral to an integer.
    """
    if not isinstance(r, str):
        raise TypeError(f'Expected string, got {type(r)}')
    r = r.upper()
    nums = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    integer = 0
    for i in range(len(r)):
        try:
            value = nums[r[i]]
            if i + 1 < len(r) and nums[r[i + 1]] > value:
                integer -= value
            else:
                integer += value
        except KeyError:
            raise ValueError('Input is not a valid Roman numeral: %s' % r)
    return integer
f91c88cbdd6ca31ae811a2300b45fb5a6df3ea91
23,459
import re


def match_absolute_path(path: str) -> bool:
    """
    Return true if the path starts with ``http(s)://``, false otherwise.

    Args:
        path (str): URL

    Returns:
        bool: True for absolute URL, false for relative URL.
    """
    return re.match(r"https?://", path, re.IGNORECASE) is not None
8b508b76fc0f5102c687a202a0ffab26631eaf8b
23,463
def fps(branch):
    """
    extracts function #, process #, and scan # from the idstring of a spectrum branch
    returns function, process, scan as integers
    """
    idstring = branch.getAttribute('id').split()  # pull id string from scan attribute
    return [int(x.split('=')[1]) for x in idstring]
a470d609d2c8c15c88bbaba539c587410c03394a
23,464
def partition(alist, indices):
    """A function to split a list based on item indices

    Parameters:
    -----------------------------
    : alist (list): a list to be split
    : indices (list): list of indices on which to divide the input list

    Returns:
    -----------------------------
    : splits (list): a list of subreads based on cut sites
    """
    return [alist[i:j] for i, j in zip([0] + indices, indices + [None])]
b14040058d96d66acf81e0bff8eedfe23df20738
23,466
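A worked example of partition above, splitting at indices 2 and 4.

print(partition(['a', 'b', 'c', 'd', 'e'], [2, 4]))
# [['a', 'b'], ['c', 'd'], ['e']]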
from typing import Counter


def count_elements(data_lst):
    """Count how often each element occurs in a list.

    Parameters
    ----------
    data_lst : list
        List of items to count.

    Returns
    -------
    counts : collections.Counter
        Counts for how often each item occurs in the input list.
    """
    counts = Counter(data_lst)
    try:
        counts.pop(None)
    except KeyError:
        pass
    return counts
a12f0a35a228e8a8627a8fcfc703d3231984e3f4
23,467
import re


def clean_text_from_private_unicode(line):
    """Cleans the line from private unicode characters and replaces these with space."""
    line = re.sub(r"([\uE000-\uF8FF]|\uD83C[\uDF00-\uDFFF]|\uD83D[\uDC00-\uDDFF])", " ", line)
    return line
0ad7f47446dfb91069003c3fce0d1129dcb71113
23,469
def get_unique_actions_set(classifier_set):
    """Returns a set containing the unique actions advocated by the
    classifiers in the classifier set.
    """
    unique_actions = set()
    for classifier in classifier_set:
        unique_actions.add(classifier.rule.action)
    return unique_actions
b6274413764bd8f2bac18700556e00e2855a0534
23,471
def mass_surface_solid(
        chord,
        span,
        density=2700,  # kg/m^3, defaults to that of aluminum
        mean_t_over_c=0.08
):
    """
    Estimates the mass of a lifting surface constructed out of a solid piece of material.

    Warning: Not well validated; spar sizing is a guessed scaling and not based on structural analysis.

    :param chord: wing mean chord [m]
    :param span: wing span [m]
    :param mean_t_over_c: wing thickness-to-chord ratio [unitless]
    :return: estimated surface mass [kg]
    """
    mean_t = chord * mean_t_over_c
    volume = chord * span * mean_t
    return density * volume
1bb2e1c571b6b9fee137bcd226d517151dd51fbd
23,483
def largest(die):
    """Return the largest value die can take on."""
    return max(die)
17ad51ecb0960d3c83c2c8c5e8e67465046d8745
23,484
import math


def dist(x1, y1, x2, y2):
    """
    Computes Euclidean distance between two points.

    Parameters
    ----------
    x1: float
        point A x
    y1: float
        point A y
    x2: float
        point B x
    y2: float
        point B y

    Returns
    -------
    float
        distance from A to B
    """
    return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
ea2cf04ec831f0e045407cb7cb2b4c8a68bebf37
23,485
from typing import Tuple
from typing import Optional


def restore_postgres_db(
    backup_file: str, postgres_db: str
) -> Tuple[Optional[str], bytes]:
    """Restore postgres db from a file."""
    try:
        # restore postgres with pg_restore and terminal
        pass
        # check if command succeeded
        return None, bytes()
    except Exception as e:
        return f"Issue with the db restore : {e}", bytes()
9b9b9a43d97118807d190b1a1f317e73b9445484
23,486
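A hedged sketch of what the elided body above could look like using pg_restore via subprocess; the exact flags and connection handling are assumptions, not part of the original snippet.

import subprocess

def restore_postgres_db_sketch(backup_file, postgres_db):
    # Assumed invocation: restore a custom-format dump into the target database.
    proc = subprocess.run(
        ["pg_restore", "--clean", "--dbname", postgres_db, backup_file],
        capture_output=True,
    )
    if proc.returncode != 0:
        return f"Issue with the db restore : {proc.stderr.decode()}", proc.stdout
    return None, proc.stdout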
def json_pointer(jsonpointer: str) -> str:
    """Replace escape characters in a JSON pointer (per RFC 6901: ``~1`` before ``~0``)."""
    return jsonpointer.replace("~1", "/").replace("~0", "~")
a0bd02c5cdc6d97ce76ac38ef0b254f0ed592b43
23,493
def expand_buses(pins_nets_buses):
    """
    Take list of pins, nets, and buses and return a list of only pins and nets.
    """
    # This relies on the fact that a bus is an iterable of its nets,
    # and pins/nets return an iterable containing only a single pin/net.
    pins_nets = []
    for pnb in pins_nets_buses:
        pins_nets.extend(pnb)
    return pins_nets
6addae03775fba5b12f5e7c691a5426c12f7e0f7
23,494
def loss_batch(model, loss_func, x, y, opt=None):
    """
    The function to perform backpropagation

    :param model: Training model
    :param loss_func: Function to calculate training loss
    :param x: Feature in train dataset
    :param y: Labels in train dataset
    :param opt: Optimizer
    :return: Loss per epoch, length of the features
    """
    loss = loss_func(model(x), y)

    if opt is not None:
        loss.backward()
        opt.step()
        opt.zero_grad()

    return loss.item(), len(x)
a24a379bbfad4db0e7a549f23a9be500cead1610
23,496
def XlaLaunchOpCount(labels):
    """Count how many XlaLaunch labels are present."""
    return sum("XlaLaunch(" in x for x in labels)
e3b083de64bf1627ca98c427a268412cacf9f43b
23,499
def read_parse_sql_file(path_sql_script):
    """
    Function to read and parse a sql file

    Parameters:
        path_sql_script (str): path of the sql script to read and parse

    Returns:
        (list): list of string of sql requests
    """
    with open(path_sql_script, 'r') as dml_file:
        dml = dml_file.read().strip().split(';')[:-1]
    return dml
719ae91664c9f61fc1e0af44edb150e91e0f99e7
23,505
def parse_service_url(url):
    """Given a URL, extract the 'group' and 'specific_path'.
    (See the get_service function.)
    """
    parts = url.split('/phylotastic_ws/')
    return (parts[0].split(':')[1], parts[1])
cb8578418deabebc1cfbca1a363bf774acb1d745
23,507
import random


def random_simple_split(data, split_proportion=0.8):
    """Splits incoming data into two sets, randomly and with no overlapping.

    Returns the two resulting data objects along with two arrays containing
    the original indices of each element.

    Args:
        data: the data to be split
        split_proportion: proportion of the data to be assigned to the first
            split subset. As this function returns two subsets, this parameter
            must be strictly between 0.0 and 1.0 (Default value = 0.8)

    Returns:
        the two resulting datasets and the original index lists
    """
    assert 0.0 < split_proportion < 1.0
    indices = list(range(len(data)))  # all indices in data
    random.shuffle(indices)
    split_index = int(len(data) * split_proportion)
    return (data[indices[:split_index]], data[indices[split_index:]],
            indices[:split_index], indices[split_index:])
547900ccda505416b10a07a94836649a4c84172a
23,521
from datetime import datetime
from pathlib import Path


def create_run_path(checkpoints_path):
    """create the run path to save the checkpoints of the model

    Arguments:
        checkpoints_path {str} -- the path to save the checkpoints

    Returns:
        Path -- the path to save the checkpoints
    """
    run_folder = 'run_' + datetime.now().strftime('%Y%m%d_%H:%M.%S')
    run_path = Path(checkpoints_path) / run_folder
    return run_path
5930ff28ade4edb2c9b02b483b1e41934a519aeb
23,523
def elo_expected(d: float, f: float = 400) -> float:
    """ Expected points scored in a match by White player

    :param d: Difference in rating (Black minus White)
    :param f: "F"-Factor
    :return:
    """
    if d / f > 8:
        return 0.0
    elif d / f < -8:
        return 1.0
    else:
        return 1. / (1 + 10 ** (d / f))
739b01e6c641e1d0bd163cc104deb50dcf76187a
23,528
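Two spot checks for elo_expected above.

print(elo_expected(0))    # 0.5: equal ratings give an expected score of one half
print(elo_expected(400))  # ~0.0909: White rated 400 points below Black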
from typing import Any
from typing import Iterable


def to_iterable(val: Any) -> Iterable:
    """Get something we can iterate over from an unknown type

    >>> i = to_iterable([1, 2, 3])
    >>> next(iter(i))
    1
    >>> i = to_iterable(1)
    >>> next(iter(i))
    1
    >>> i = to_iterable(None)
    >>> next(iter(i)) is None
    True
    >>> i = to_iterable('foobar')
    >>> next(iter(i))
    'foobar'
    >>> i = to_iterable((1, 2, 3))
    >>> next(iter(i))
    1
    """
    if isinstance(val, Iterable) and not isinstance(val, (str, bytes)):
        return val
    return (val,)
6c24de85d822a5511adb26149ec863197164c61b
23,531
import random


def random_or_none(s):
    """Return a random element of sequence S, or return None if S is empty."""
    if s:
        return random.choice(s)
05ed40d21d754422c3a9ca45d52867f6947d23a4
23,532
from typing import Union
from typing import List


def comma_separated_string_to_list(line: str) -> Union[List[str], str]:
    """
    Converts a comma-separated string to a List of strings. If the input is a
    single item (no comma), it will be returned unchanged.
    """
    values = line.split(",")
    return values[0] if len(values) <= 1 else values
998a4a93d8a6cdcd31ec6ff53cbd171224aba782
23,534
def arr_to_dict(arr):
    """
    takes in a 2D numpy array or list and returns a dictionary mapping indices to values
    """
    d = {}
    for i in range(len(arr)):
        for j in range(len(arr[0])):
            d[(i, j)] = arr[i][j]
    return d
bf9382eaf9ca20b4dfff80e4a18afa055d78ec00
23,537
def compute_intersection_over_union(gt_bbox, pred_bbox):
    """
    Compute the intersection over union for a ground truth bounding box and a
    prediction bounding box

    Params:
        gt_bbox (RectangleProto): single ground truth bbox
        pred_bbox (RectangleProto): single prediction bbox

    Returns:
        iou_width (double): intersection over union for width dimension
        iou_height (double): intersection over union for height dimension
        iou_area (double): intersection over union area
    """
    intersection_width = min(gt_bbox.max.y, pred_bbox.max.y) - max(gt_bbox.min.y, pred_bbox.min.y)
    intersection_height = min(gt_bbox.max.x, pred_bbox.max.x) - max(gt_bbox.min.x, pred_bbox.min.x)
    intersection_area = intersection_width * intersection_height

    gt_bbox_width = gt_bbox.max.y - gt_bbox.min.y
    gt_bbox_height = gt_bbox.max.x - gt_bbox.min.x
    gt_bbox_area = gt_bbox_width * gt_bbox_height

    pred_bbox_width = pred_bbox.max.y - pred_bbox.min.y
    pred_bbox_height = pred_bbox.max.x - pred_bbox.min.x
    pred_bbox_area = pred_bbox_width * pred_bbox_height

    union_width = gt_bbox_width + pred_bbox_width - intersection_width
    union_height = gt_bbox_height + pred_bbox_height - intersection_height
    union_area = gt_bbox_area + pred_bbox_area - intersection_area

    iou_width = intersection_width / union_width
    iou_height = intersection_height / union_height
    iou_area = intersection_area / union_area

    return iou_width, iou_height, iou_area
b556b9f2d36118c05de14949a54b8531dbfe1baa
23,538
def spandex_dataset_ids(input_file):
    """
    Read all dataset IDs currently in the Spandex index.

    Args:
        input_file (str): The path to a flat file with a list of dataset IDs
            currently in spandex

    Returns:
        A set of dataset IDs
    """
    return {x.strip() for x in open(input_file, "r") if x.strip()}
b1ae62221a7534264ac39881b8365749f4b8d625
23,540
def sortAndReturnQuantiles(values):
    """Returns minimum, 0.25-quantile, median, 0.75-quantile, maximum"""
    values.sort()
    N = len(values)
    return (values[0], values[N // 4], values[N // 2], values[(3 * N) // 4], values[N - 1])
067aec1fc88cfcf33f9bab4201b81dfede82f61d
23,549
def generate_diff(old_list, new_list):
    """Returns 3 lists of added, deleted and unchanged elements.

    Added elements are elements present in new_list but not in old_list.
    Deleted elements are elements present in old_list but not in new_list.

    Args:
        old_list (list): Old list.
        new_list (list): New list.

    Returns:
        tuple: Contains 3 elements:
            - A list of added elements;
            - A list of deleted elements;
            - A list of unchanged elements.
    """
    old_set = set(old_list)
    new_set = set(new_list)
    added = new_set - old_set
    deleted = old_set - new_set
    unchanged = old_set.intersection(new_set)
    return list(added), list(deleted), list(unchanged)
f915c0ca33b6f9fa53450dc3d40b042271ca3fc2
23,551
def gcd(a, b):
    """Euclid's greatest common divisor algorithm."""
    if abs(a) < abs(b):
        return gcd(b, a)

    while abs(b) > 0:
        q, r = divmod(a, b)
        a, b = b, r

    return a
f0b023ad587c896d99ff196f9bb5a7491b529106
23,561
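A couple of quick values for gcd above.

print(gcd(48, 18))  # 6
print(gcd(18, 48))  # 6: the swap branch handles abs(a) < abs(b)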
import asyncio


def sync_version(async_coroutine):
    """
    Decorates asyncio coroutine in order to make it synchronous.

    Args:
        async_coroutine: asyncio coroutine to wrap.

    Returns:
        Synchronous version of the method.
    """
    def sync(*args, **kwargs):
        event_loop = asyncio.get_event_loop()
        result = event_loop.run_until_complete(async_coroutine(*args, **kwargs))
        return result
    return sync
b8a6e034186faa080c02c21c2433678634b1f155
23,563
def arithmetic_mean(X):
    r"""Computes the arithmetic mean of the sequence `X`.

    Let:

        * `n = len(X)`.
        * `u` denote the arithmetic mean of `X`.

    .. math::

        u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
    """
    return sum(X) / len(X)
cf6f2300442afe961e96a5f10943393cf071eb5b
23,566
def get_lat_array(bw, bw_array, lat_array):
    """
    Returns the latency for measured bandwidth using bandwidth-latency dependency.

    @params:
        bw        - Required : measured bandwidth (Float)
        bw_array  - Required : array of measured bandwidths for bw-lat dependency (List of floats)
        lat_array - Required : array of measured latencies for bw-lat dependency (List of floats)
    """
    if bw > bw_array[len(bw_array) - 1]:
        return lat_array[len(bw_array) - 1]
    i = 0
    while bw > bw_array[i]:
        i += 1
        if i == len(bw_array):
            return 0
    if i == 0:
        return lat_array[0]
    else:
        bw_percent = (bw - bw_array[i - 1]) / (bw_array[i] - bw_array[i - 1])
        latency = lat_array[i - 1] + bw_percent * (lat_array[i] - lat_array[i - 1])
        return latency
d92c63f8f3a1cb96220a4c13b377244522c32d6a
23,568
def get_quoted_name_for_wlst(name):
    """
    Return a wlst required string for a name value in format ('<name>')

    :param name: to represent in the formatted string
    :return: formatted string
    """
    result = name
    if name is not None and '/' in name:
        result = '(' + name + ')'
    return result
1ead7ae2a0b5c1d3dc3d5fc1e8e07a51697597b4
23,570
def get_reverse_bits(bytes_array):
    """
    Reverse all bits in an arbitrary-length bytes array
    """
    num_bytes = len(bytes_array)
    formatstring = "{0:0%db}" % (num_bytes * 8)
    bit_str = formatstring.format(int.from_bytes(bytes_array, byteorder='big'))
    return int(bit_str[::-1], 2).to_bytes(num_bytes, byteorder='big')
c4c64624a9fab5d9c564b8f781885a47984f1eaf
23,577
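Two small checks of get_reverse_bits above: the whole bit string is mirrored, not the bytes individually.

print(get_reverse_bits(b'\x01'))      # b'\x80': 00000001 -> 10000000
print(get_reverse_bits(b'\x0f\x00'))  # b'\x00\xf0': 0000111100000000 -> 0000000011110000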
def get_reg_dict(kwd_df):
    """
    Create a dictionary of domains. All regexes belonging to a domain are
    joined into one regex.

    Parameters
    ----------
    kwd_df: DataFrame
        dataframe with the columns `domain` and `regex`

    Returns
    -------
    dict
        dictionary with the domain as key and a "joined domain regex" as value
    """
    reg_dict = dict()
    for domain in kwd_df.domain.unique():
        reg_dict[domain] = '|'.join(kwd_df.query("domain == @domain").regex)
    return reg_dict
2fb3e86e3f77329d88731f9d6743eafcc999133d
23,579
def regex_ignore_case(term_values):
    """
    turn items in list "term_values" into regexes with ignore case
    """
    output = []
    for item in term_values:
        output.append(r'(?i)' + item)
    return output
dc9fd3cb9e54896bacb7e2a296d5583a16aaf3ec
23,580
import glob
import re


def get_calcium_stack_lenghts(folder):
    """
    Function to extract calcium stack lengths from the imageJ macro files
    associated to the stacks.

    params:
        - folder: path of the folder containing the IJ macro files

    return:
        - list of stack lengths
    """
    record_lenghts = []
    pattern_nFrame = r".*number=(\d*) .*"
    for fn in glob.glob(folder + "/*.txt"):
        with open(fn) as f:
            line = f.readline()
            record_lenghts.append(int(re.findall(pattern_nFrame, line)[0]))
    return record_lenghts
a9985bc6427ac31e7e1e3d941c227ac302af9206
23,581
def retry_if_value_error(exception):
    """
    Helper to let retry know whether to re-run

    :param exception: Type of exception received
    :return: <bool> True if test failed with ValueError
    """
    return isinstance(exception, ValueError)
a4f184da177fa26dee55f8f7ed76e3c7b8dbfc87
23,582