Columns: content (string, lengths 39 to 14.9k), sha1 (string, length 40), id (int64, 0 to 710k)
def deserialize_utf8(value, partition_key): """A deserializer accepting bytes arguments and returning utf-8 strings Can be used as `pykafka.simpleconsumer.SimpleConsumer(deserializer=deserialize_utf8)`, or similarly in other consumer classes """ # allow UnicodeError to be raised here if the decoding fails if value is not None: value = value.decode('utf-8') if partition_key is not None: partition_key = partition_key.decode('utf-8') return value, partition_key
21cc61d6048b5f7d9333ceadb86d03666719f05f
25,860
from typing import Iterable from typing import Union from typing import List from typing import Generator def make_single_arguments(iterable_of_args: Iterable, generator: bool = True) -> Union[List, Generator]: """ Converts an iterable of single arguments to an iterable of single argument tuples :param iterable_of_args: A numpy array or an iterable containing tuples of arguments to pass to a worker, which passes it to the function :param generator: Whether or not to return a generator, otherwise a materialized list will be returned :return: Iterable of single argument tuples """ gen = ((arg,) for arg in iterable_of_args) return gen if generator else list(gen)
4469b7bb1a18e72948380ebea90c52459f528be1
25,861
def FindPosCol( iDotData ): """Find the column representing the SNP position, based on our conventions.""" return 'Pos' if 'Pos' in iDotData.headings else 'pos'
7048a0fd3d662f392e5a8b36acd94d1b92b674ba
25,863
def get_media_info(player): """ Function to fetch the current playing track and artist information""" media = player.get_media() info = str(media.get_meta(12)) info = info.split("-") artist = info[0] track = info[1] return artist, track
32463946e98d453ae399b25dc7d6b8555812f319
25,867
def get_step_time_shift(step): """Calculates the time shift generated in each filtering step Parameters ---------- step: dict Dictionary object holding information about a given filter step Returns ------- shift: float Time shift value """ input_sample_period = step["input_sample_period"] numtaps = len(step["window"]) shift = input_sample_period * ((numtaps - 1) / 2) return shift
b6c2d299337e37a13e0fe6472577da3e3663aae1
25,876
def query_row_args_2_dict(args): """ change query_row_args to dict :param args: query row args :type args: QueryRowArgs :return: :rtype dict """ return { 'rowkey': args.rowkey, 'maxVersions': args.max_versions, 'cells': args.cells }
50f7131052cf17fba960759a5d6a27b3203e4008
25,881
def basin_ensemble_mean(dict_by_basin, basin_name, case): """Compute the multi-GCM ensemble mean SPEI for a given basin and case Parameters ---------- dict_by_basin : DICT Stores SPEI per basin basin_name : STR Which basin to study case : STR 'WRunoff', 'NRunoff', 'diff' Returns ------- em: pandas.Series object Ensemble mean SPEI for this basin and case """ basin_df = dict_by_basin[basin_name][case] em = basin_df.mean(axis=1) #compute mean among all models at each timestep return em
b7c9c2bebca2639f260c65abddea0e30c3ecef7f
25,883
def timedelta_to_seconds(td): """Convert a timedelta to seconds. The returned seconds will be rounded to the nearest whole number. """ return round(td.total_seconds())
66cbe5409ac378825590ba96fc5affc469267f3d
25,884
import copy def remove_whitespace(dictionary): """ Remove entries whose values are empty or contain only whitespace from the dictionary """ nd = copy.deepcopy(dictionary) bad_keys = [] for k, v in nd.items(): if hasattr(v, 'strip'): stripped = v.strip() if stripped == '': bad_keys.append(k) for bad_key in bad_keys: del nd[bad_key] return nd
7f06794cafff4754430a45ee39b38735263fc357
25,886
import hashlib def sha256(filepath,blocksize=2**20): """ Return the sha256 hash of a file. `blocksize` adjusts how much of the file is read into memory at a time. This is useful for large files. 2**20: 1 mb 2**12: 4 kb """ hasher = hashlib.sha256() with open(filepath, 'rb') as afile: buf = afile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = afile.read(blocksize) return hasher.hexdigest()
086d6284953709a87b1a14c5c5b8c56b28b1d9d5
25,887
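A minimal usage sketch for the sha256 helper above (illustrative only, not part of the dataset record; the filename is a placeholder): file_digest = sha256("example.bin")  # hypothetical path; the file is read in 1 MB chunks
print(file_digest)  # 64-character hex digest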
def sum_risk(method): """ Sum risk of all nodes in the method graph. """ risk = 0 for node_name in method.graph.nodes_iter(): node = method.graph.node[node_name] if "entry_point" in node: continue if "risk" in node: risk += node["risk"] return risk
5ae3bc3138a7f4b9d905c5f2122c7a1ecd9bbc09
25,889
def split_labels(data, label_idx=-1): """ Split labels from numerical data :param data: array of input data :type data: nd.array :param label_idx: index where the label is located in the array. It can only be at the start or at the end of the array :type label_idx: int :return: data without labels, labels :rtype: nd.array, nd.array """ # return (Data, Labels) if label_idx == -1 or label_idx == data.shape[-1]: return data[..., :-1], data[..., -1] elif label_idx == 0: return data[..., 1:], data[..., 0] else: raise RuntimeError('Labels must be on axis 0 or 1')
d3b59dca790255ae14269836ace58df151f84684
25,890
import ast def init_quantizer(name: str) -> ast.Assign: """ Generate quantization node initialization to add to the end of __init__() :param name: generated name of the node :return: quantization init ast node """ quant_linear = ast.Attribute(value=ast.Name(id="quant_nn", ctx=ast.Load()), attr="QuantLinear", ctx=ast.Load()) default_quant_desc_input = ast.Attribute(value=quant_linear, attr="default_quant_desc_input", ctx=ast.Load()) tensor_quant = ast.Name(id="TensorQuantizer", ctx=ast.Load()) quant_value = ast.Attribute(value=ast.Name(id="self", ctx=ast.Load()), attr=name, ctx=ast.Store()) return ast.Assign( targets=[quant_value], value=ast.Call(func=tensor_quant, args=[default_quant_desc_input], keywords=[]), )
b6159998556dba27bbb46049d7aeb8cbd390d2c7
25,891
def str2link(s, directory, title=''): """ Used by the --html options, this takes a string and makes it into an html <a href...> link without a closing </a>. """ if directory == '': return '' else: if title: return '<a href="%s/%s" title="%s">' % (directory, s, title) else: return '<a href="%s/%s">' % (directory, s)
512af2dd4308ac65b24f84e24aec95cb8144f8a1
25,893
import re def extract_only_numbers(text): """ Little function to extract only numbers from a string. Useful in OCR when we want to detect only digits. Also converts 'o's and 'O's to 0, since tesseract tends to misread them even when told to parse only digits. :param text: A string :type text: str :return: string with only the digits :rtype: str """ text = text.replace('O', '0') text = text.replace('o', '0') text = re.findall(r'\d+', text) return ''.join(text)
a51520376c492a0629c564e8b8d54a19c1d5041e
25,894
import fnmatch def does_pattern_exists(patterns_list, value): """ Checks a list of patterns against a value. :param: patterns_list : A list of regular glob expression strings :type: str :returns: Returns True if any of the patterns match the value, False otherwise. :type: boolean """ for pattern in patterns_list: if fnmatch.fnmatch(str(value), str(pattern)): return True return False
cb1d6c8ea079aa25f980b42c2755db9253f68284
25,896
def top_n_countries(world_confirmed, feature='Confirmed', n=10): """Return list of top n countries with highest feature (Confirmed, Deaths, Recovered, Daily ...)""" top_n = (world_confirmed .groupby('Country/Region') .agg('max') .sort_values(feature, ascending=False) .head(n) .index .values) return list(top_n)
dd50882f8378e2405c8d6b2eadbe878d11b43351
25,900
def selectors_escape(s): """ Escapes all selectors used in html, css, js & jquery :param s: string The string to escape :return: string The escaped string """ selectors_reserved = '\!"#$%&\'()*+,./:;<=>?@[]^``{|}~' for char in selectors_reserved: s = s.replace(char, f'\\\\{char}') return s
fd95d9ebd5de2461bcf836f4e0792b17433451ce
25,901
def d(series, n=1): """Difference over n periods""" return series-series.shift(n)
b585f13851a9cff9655cad29564b909ecffbd238
25,903
def isnumber(num): """ Checks whether argument is number""" try: float(num) return True except ValueError: return False
4ea3b253ff1ffdd8f5adf532ffa194525c8dfaf0
25,911
def update(day, day_to_implement, new_social_distance, new_stay_at_home): """ updates current policies, panic, quarantine policiy and stay at home order Parameters ---------- day : int current day of the simulation. day_to_implement : int day from which measures are implemented. new_social_distance : int radius of square in which agent will look for other less populated areas. new_stay_at_home : int determines if stay at home order will be activated. Returns ------- updated panic index, quarantine policy and stay at home order """ panic = 0 quarantine_policy = 0 stay_home = 0 if (day >= day_to_implement-1): panic = new_social_distance quarantine_policy = 0 stay_home = new_stay_at_home return panic, quarantine_policy, stay_home
fd0e3232f8ef7b7e7c4ed6fb0716ba851635b2e4
25,912
def get_options_at_path(user_selected_path: str, file_list: list): """ Returns all the alternative items (folders and files) on the provided path """ # Get the number of folders at the current level n_folders = len(user_selected_path.split("/")) # Get all the files at level subfolder_options = [] for filepath in file_list: subfolder = "/".join(filepath.split("/")[:(n_folders+1)]) if filepath.startswith(user_selected_path) and subfolder not in subfolder_options: subfolder_options.append(subfolder) options = [_.split("/")[-1] for _ in subfolder_options] return options
31fc39005fdd3b9151cdcea75738c4ed06b30504
25,914
def format_title(host: str) -> str: """Format the title for config entries.""" return "Controller ({})".format(host)
99ca5c97007a8f0373184d8d9c5c55bb5d7d64b9
25,916
def parse_bool(data): """Parse a string value to bool""" if data.lower() in ('yes', 'true',): return True elif data.lower() in ('no', 'false',): return False else: err = f'"{data}" could not be interpreted as a boolean' raise TypeError(err)
60f35d69178fa6322f3ff29b64b96deb7bd43f94
25,919
def read_last_seen(FILE_NAME: str) -> int: """ Gets the id of the last seen tweet Args: FILE_NAME: static file name which stores the last seen id Returns: last_seen_id: id of the tweet, or 0 if the file is empty """ with open(FILE_NAME, 'r') as file_read: contents = file_read.read() if contents != "": return int(contents.strip()) return 0
d7d919deabc133d8500ab0a6f6ea786e9a291edd
25,921
def reverse_chain(joints): """ Reverses the hierarchy of the joint chain. :param joints: List of joints in the chain to reverse :return: the same list of joints in reverse order """ # -- Store the base parent so we can reparent the chain # -- back under it base_parent = joints[0].getParent() # -- Start by clearing all the hierarchy of the chain for joint in joints: joint.setParent(None) # -- Now build up the hierarchy in the reverse order for idx in range(len(joints)): try: joints[idx].setParent(joints[idx + 1]) except IndexError: pass # -- Finally we need to set the base parent once # -- again joints[-1].setParent(base_parent) joints.reverse() return joints
8961c0031c7fd230935ffd212eca63f9f2ff6112
25,923
def _node_like(test_dict: dict): """ Evaluates whether a dict can be converted to a node safely. test_dict : dict | Dict to check """ if not isinstance(test_dict, dict): return False keys = list(test_dict.keys()) try: keys.remove("ParameterName") keys.remove("ParameterValue") keys.remove("ParameterInfo") except ValueError: return False if keys: return False if not isinstance(test_dict["ParameterName"], str): return False if not isinstance(test_dict["ParameterInfo"], str): return False return True
c03d2f6d5824068646ef011b8b3d9dd2fca58767
25,924
from typing import Set from typing import Tuple def get_line_between_points(x_0: int, y_0: int, x_1: int, y_1: int) -> Set[Tuple[int, int]]: """ Gets a line between two coordinate pairs represented by a list of tuple[x, y]. The line is found using Bresenham's line algorithm. See: https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm :param x_0: The x-coordinate of the first point. :param y_0: The y-coordinate of the first point. :param x_1: The x-coordinate of the second point. :param y_1: The y-coordinate of the second point. :return: A list of coordinates that make up the line between the two points. """ coordinates = set() dx = x_1 - x_0 dy = y_1 - y_0 x_step = 1 if dx >= 0 else -1 y_step = 1 if dy >= 0 else -1 dx = abs(dx) dy = -abs(dy) x = x_0 y = y_0 error = dx + dy while True: coordinates.add((x, y)) if x == x_1 and y == y_1: break error2 = error * 2 if error2 >= dy: error += dy x += x_step if error2 <= dx: error += dx y += y_step return coordinates
98324b303268a6e8b48aa7e7dba2e1fa85f7b463
25,931
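A minimal usage sketch for get_line_between_points above (illustrative only; the endpoints are arbitrary sample values): line = get_line_between_points(0, 0, 3, 2)
print(sorted(line))  # [(0, 0), (1, 1), (2, 1), (3, 2)]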
import json def parse_json_data(filename) -> dict: """Returns the contents of a JSON file as a dict""" with open(filename, "r") as f: return json.loads("".join(f.readlines()))
d061043cf29d1ed8d790a51d135c251e2226de77
25,940
def get_marble(value=0, prev=None, next=None): """Get new marble, with value, prev and next.""" return {'value': value, 'prev': prev, 'next': next}
2cf13f5a46111b56c154a1bba0eec33bfb3ad8bc
25,944
from typing import List def add_to_leftmost_int(stack: List, x: int) -> List: """ Add x to the leftmost int in stack. If there is no int in stack, do nothing. Return the modified stack. """ int_locations = [isinstance(i, int) for i in stack] if not any(int_locations): return stack index = int_locations.index(True) stack[index] += x return stack
c110df6643b98ddf7a3c18933fc1911bc247a138
25,965
def squareT(A): """ Returns (A Aᵀ) """ return A.dot(A.T)
ea214c8cccfff2146b66013fc00f06d6cbfc0337
25,966
def GetVectorValue(item): """Given an smtk.attribute.Item, return a list containing its values.""" N = item.numberOfValues() return [item.value(i) for i in range(N)]
573468b6416126553774a97de70b99c069390754
25,971
import platform import inspect def _get_mro(cls): """ Returns the bases classes for cls sorted by the MRO. Works around an issue on Jython where inspect.getmro will not return all base classes if multiple classes share the same name. Instead, this function will return a tuple containing the class itself, and the contents of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024. """ if platform.python_implementation() == "Jython": return (cls,) + cls.__bases__ return inspect.getmro(cls)
d6e402e774078788020553c41a2a693591dac88a
25,972
def format_metrics(d: dict) -> dict: """ Format the floats in a dict with nested dicts, for display. :param d: dict containing floats to format :return: new dict matching the original, except with formatted floats """ new = {} for key in d: if isinstance(d[key], dict): new[key] = format_metrics(d[key]) elif isinstance(d[key], float): new[key] = float("{:.8f}".format(d[key])) else: new[key] = d[key] return new
1be92878ce97db21b7775345a9a830c42265a5c8
25,973
def titlecomment(line): """Condition for a line to be a title comment""" return line.startswith('//') and len(line.lstrip('//').strip()) > 0
a1a1f44e01399c4a3511670146d1e2587d9ead26
25,974
def session(context_name="session", request=None, **kwargs): """Returns the session associated with the current request""" return request and request.context.get(context_name, None)
e379e2fbf313557aa8c5087c8c0d75a5738243fe
25,975
def extract_tags(cgx_dict): """ This function looks at a CloudGenix config object, and gets tags. :param cgx_dict: CloudGenix config dict, expects "tags" keys supported in root. :return: list of tags present. """ # tags exist, return them. tags = cgx_dict.get("tags", []) if tags is None: tags = [] # return unique tags. return list(set(tags))
b2b78f46c65d4ca7c9c3ae0ed54a9d2a4da80724
25,980
def split_outfile(line): """ Split the output file from a command. """ redirect = line.find('>') if redirect == -1: return line, None else: return line[:redirect].rstrip(), line[redirect+1:].lstrip()
0ba0f76132256d87c61c6e98d9e5e6d310b04d6c
25,987
def calculate_turnaroundtime( burst_time: list[int], no_of_processes: int, waiting_time: list[int] ) -> list[int]: """ Calculate the turnaround time of each process. Return: The turnaround time for each process. >>> calculate_turnaroundtime([0,1,2], 3, [0, 10, 15]) [0, 11, 17] >>> calculate_turnaroundtime([1,2,2,4], 4, [1, 8, 5, 4]) [2, 10, 7, 8] >>> calculate_turnaroundtime([0,0,0], 3, [12, 0, 2]) [12, 0, 2] """ turn_around_time = [0] * no_of_processes for i in range(no_of_processes): turn_around_time[i] = burst_time[i] + waiting_time[i] return turn_around_time
cac09f266599293abfd2f90d22523444556c3204
25,992
def append_gw_teams(df, df_match): """ Count the number of teams playing in a single gameweek :param df: Input DataFrame (must contain 'player_id', 'gw' columns) :param df: Match DataFrame :returns: Input DataFrame with 'gw_teams_ft' column appended """ df = df.copy() df_teams = (df_match.groupby('gw')['team_id'].nunique() .reset_index() .rename(columns={'team_id':'gw_teams_ft'})) return df.merge(df_teams, on='gw').sort_values(['player_id', 'gw'])
1ff1798d51268d1aa111ebf6763725abd674bc44
25,998
def convert_weight_pounds(weight) -> float: """ Converts kilograms into pounds """ pounds = round((weight * 2.20462), 5) return pounds
edc50cbc4bc8f6a40b354376b3d42fedc8097698
26,000
from typing import Any from typing import TypeGuard from typing import Callable from typing import Coroutine import asyncio def acallable(arg: Any) -> TypeGuard[Callable[..., Coroutine[Any, Any, Any]]]: """Type guard for coroutine (async) functions""" return callable(arg) and asyncio.iscoroutinefunction(arg)
c81ed04cc07f20148356be60eddb89fad9bf061e
26,001
def fraction_edge(r): """Calculate fraction of coins that landed on edge""" total = r['Edge'] + r['Tails'] + r['Heads'] fraction_edge = r['Edge'] / total return fraction_edge
0153fc983bc9c2ae3f3d7b3cd1940c11671c71ca
26,004
def convert_to_minutes(num_hours): """ (int) -> int Return the number of minutes there are in num_hours hours. >>> convert_to_minutes(2) 120 """ result = num_hours * 60 return result
a959182939cd3a2e6bd4076a706c4993086daa4a
26,006
def _is_cast(**kwargs) -> bool: """ Does this spec require casting """ return 'cast' in kwargs
97a12699408a23a9f2021532ae36071c2254cc99
26,007
from datetime import datetime def generate_upload_file_path(form_fields): """ Use validated form fields to create the key for S3 """ now = datetime.now().strftime("%Y%m%d-%H%M%S") file_path_to_upload = "{}/{}_{}.{}".format( form_fields["file_location"], now, form_fields["file_name"], form_fields["file_ext"], ) return file_path_to_upload
b981bd059130c953b93fca61fa43b2608f84b07c
26,010
def make_doublebang_pulse_fun(parameters, tf): """Return double-bang pulse function. y0, t1, y1 = parameters For 0 <= t <= t1, constant height y0. For t >= t1, constant height y1. """ y0, t1, y1 = parameters def fun(t, *args): if 0 <= t <= t1: return y0 elif t1 < t <= tf: return y1 else: return 200. return fun
619cdef2117237f683b8d8eea996867cf82c473f
26,011
def df2node_attr(df): """Convert dataframe to dict keyed by index which can be used to set node attributes with networkx""" # NOTE: df.index has to be the nodes return df.T.to_dict()
718506480f5da95c9c1ab90798d06d9696cacd5b
26,020
def sigmoid_across_unit_interval(p, k=1.2): """ Sigmoid transformation for a unit interval. Returns: a value [0,1] associated with a proportion [0,1] Parameters: p -- proportion across the unit interval [0,1] k -- shape of the simoid transformation Special return values based on k: if k== 0 then always return 0 if 0 < k < 1 then always return 1 For all continuous values for k >= 1.0 1.0 step function with a sharp docs from 0 to 1 in the middle at p = 0.5 1.1 very steep transition in the middle at p = 0.5 1.2 transition looks much like a default logistic transition 1.3 transition flattens, becoming more linear as k increases ... 2.0 transition is almost linear by k = 2.0 Source: Function inspired by suggestions made here: https://dinodini.wordpress.com/2010/04/05/normalized-tunable-sigmoid-functions/ """ assert p >= 0, 'Custom sigmoid function has a domain of [0,1]' assert p <= 1, 'Custom sigmoid function has a domain of [0,1]' assert k >= 0, 'Shaping parameter must always be > = 0' k = float(k) if k < 0.0000000001 : return 0 # special case if k < 1.0 : return 1 # special case p = (p * 2) - 1 if not p: return 0.5 # undefined at inflection point if p < 0: return 0.5 + ((-k * p) / (-k + p + 1)) * .5 else: return 0.5 + ((-k * p) / (-k - p + 1)) * .5
e8f1fb1472b3341ee32833bf7c91ebcdaf27fa11
26,027
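A minimal usage sketch for sigmoid_across_unit_interval above (illustrative only; the p and k values are arbitrary samples chosen to show the shapes described in the docstring): for k in (1.0, 1.2, 2.0):
    values = [round(sigmoid_across_unit_interval(p, k), 3) for p in (0.0, 0.25, 0.5, 0.75, 1.0)]
    print(k, values)  # k=1.0 behaves as a step at p=0.5; larger k is closer to linear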
from typing import Any def delete_none_keys(dict_: dict[Any, Any]) -> dict[Any, Any]: """Remove any keys from `dict_` whose values are None.""" new_dict = {} for key, value in dict_.items(): if value is not None: new_dict[key] = value return new_dict
6a3ab5457203457044207a985c1588ef0fddcccf
26,030
def is_not_set(check, bits): """Return True if all bits are not set.""" return check & bits == 0
7b60d6ed9d12402240b2a7f7048ab9fd11a4b777
26,036
import random def generate_random_dict(keys_count=10, depth=1): """ Generate a dictionary with fixed random values. """ result = {} keys = list(range(0, keys_count)) random.shuffle(keys) current = {} result = current for index in range(0, depth): current["depth_%s" % (index)] = {} current = current["depth_%s" % (index)] for key in keys: current["key_%s" % (key)] = "value_%s" % (key) return result
f85fef810ee32705a0c1687ed1740c3f59bc04c5
26,038
def get_local_node_mapping(tree, last_tree, spr): """ Determine the mapping between nodes in local trees across ARG. A maps across local trees until it is broken (parent of recomb node). This method assumes tree and last_tree share the same node naming and do not contain intermediary nodes (i.e. single lineages). """ if last_tree is None: # no mapping if last_tree is None return None else: (rname, rtime), (cname, ctime) = spr # assert ARG is SMC-style (no bubbles) assert rname != cname # recomb_parent is broken and does not map to anyone recomb_parent = last_tree[rname].parents[0] mapping = dict((node.name, node.name) for node in last_tree) mapping[recomb_parent.name] = None return mapping
b30a95bb0c23fc00005de9474da19e781bc0485e
26,043
def int2ap(num): """Convert integer to A-P string representation.""" val = '' ap = 'ABCDEFGHIJKLMNOP' num = int(abs(num)) while num: num, mod = divmod(num, 16) val += ap[mod:mod + 1] return val
6f9b0f84485461c5b73525c48838e157bf6136c3
26,044
def heatCapacity_f1(T, hCP): """ heatCapacity_form1(T, hCP) slop vapor & solid phases: T in K, heat capacity in J/mol/K; heat capacity correlation heat capacity = A + B*T + C/T^2 Parameters T, temperature hCP, A=hCP[0], B=hCP[1], C=hCP[2] A, B, and C are regression coefficients Returns heat capacity at T """ return hCP[0] + hCP[1]*T + hCP[2]/T**2
5f90c998bb913bd617b959396bc7e5d67a0cd24f
26,045
import torch def generate_spatial_noise(size, device, *args, **kwargs): """ Generates a noise tensor. Currently uses torch.randn. """ # noise = generate_noise([size[0], *size[2:]], *args, **kwargs) # return noise.expand(size) return torch.randn(size, device=device)
5c9116fdc132d59e4a0ccc6c5674042f70533efe
26,046
def get_file_contents(filename: str) -> str: """ Receive a filename and return its contents """ with open(filename, "rt", encoding="utf-8") as infile: text = infile.read().rstrip() return text
b16f14a65120ba79799ca97186fd119f4bb1b040
26,050
def my_sqrt(x: int) -> int: """Implement int sqrt(int x) https://leetcode.com/problems/sqrtx/ Compute and return the square root of x, where x is guaranteed to be a non-negative integer. Since the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned. """ if x == 0: return 0 if x < 4: return 1 left = 2 right = x while left <= right: mid = left + (right - left) // 2 if mid * mid == x: return mid elif mid ** 2 < x: left = mid + 1 else: right = mid - 1 return left - 1
c7193b646223b8b89bdd20fd1beaf8c8e0acc003
26,057
import logging def extract_clsy(the_sidd): """ Extract the ownerProducer string from a SIDD as appropriate for NITF Security tags CLSY attribute. Parameters ---------- the_sidd : SIDDType|SIDDType1 Returns ------- str """ owner = the_sidd.ProductCreation.Classification.ownerProducer if owner is None: return '' owner = owner.upper() if owner in ('USA', 'CAN', 'AUS', 'NZL'): return owner[:2] elif owner == 'GBR': return 'UK' elif owner == 'NATO': return 'XN' else: logging.warning('Got owner {}, and the CLSY will be truncated ' 'to two characters.'.format(owner)) return owner[:2]
8f5a3f0cd59dfcc9822341fc0effd0a515f23681
26,059
def scale_serpent_to_origen(serpent_volume): """ Function for performing the volume conversion to make Serpent results comparable to ORIGEN ones. ORIGEN scales the results to 1 ton of U, so the serpent volume must be scaled to match. """ fuel_density = 10.41 #10.41 g/cm^3 as default fuel_uranium_density = fuel_density * 0.88 #default 88% of the mass is U in UO2 standard_volume = 1000000/fuel_uranium_density #ORIGEN results are often normalized to 1 ton of U, Serpent material results are conventration per cm^3 #serpent_volume = 0.508958 #My example data has a pin radius of 0.41 which means a pin cell volume of pi * 0.41^2 * 1 = 0.508 cm^3. scale = standard_volume / serpent_volume return scale
efd5b9847386e3f512a8a096ba8d12682e028334
26,062
import glob def generate_files(original_pattern: str, mask_pattern: str): """generator for original and mask img path Args: original_pattern (str): original path pattern mask_pattern (str): mask path pattern """ def real_generator(): original_img_paths = sorted(glob.glob(original_pattern)) mask_img_paths = sorted(glob.glob(mask_pattern)) for o, m in zip(original_img_paths, mask_img_paths): yield o, m return real_generator
bc95ea5cd3ec40c43b875e56c782e1324133826d
26,063
def eval(predict, groundtruth): """Compute the precision, recall and F1 of the predictions Args: predict (list): predicted results groundtruth (list): ground-truth results Returns: tuple(precision, recall, f1): precision, recall, f1 """ assert len(predict) == len(groundtruth) tp, fp, tn, fn = 0, 0, 0, 0 for i in range(len(predict)): right = len([j for j in predict[i] if j in groundtruth[i]]) tp += right fn += len(groundtruth[i]) - right fp += len(predict[i]) - right precision = tp / (tp + fp) recall = tp / (tp + fn) f1 = 2 * precision * recall / (precision + recall) return precision, recall, f1
e75a35156f9bb72b4ec2fd5099bc6b399c923533
26,071
def expect( obj, attr, value, nullable=False, types=None, not_types=None, convert_to=None ): """ Check, whether the value satisfies expectations :param obj: an object, which will set the value to its attribute. It is used to make error messages more specific. :param str attr: name of an attribute of the object. It is used to make error messages more specific. :param value: checked value itself. :param bool nullable: accept ``None`` as a valid value. Default: ``False`` — does not accept ``None``. :param types: define acceptable types of the value. Default: ``None`` — accept any type. :type types: None, type or tuple :param not_types: define implicitly unacceptable types of the value. Default: ``None`` — accept any type. :type types: None, type or tuple :param type convert_to: convert the value to specified type. Default: ``None`` — does not convert the value. :raises TypeError: * if ``types is not None`` and ``not isinstance(value, types)``; * if ``not_types is not None`` and ``isinstance(value, not_types)``. """ if nullable and value is None: return value if types is not None and not isinstance(value, types): raise TypeError( "%s.%s.%s should be of type %r" % (obj.__class__.__module__, obj.__class__.__name__, attr, types) ) if not_types is not None and isinstance(value, not_types): raise TypeError( "%s.%s.%s should not be of type %r" % (obj.__class__.__module__, obj.__class__.__name__, attr, not_types) ) if convert_to is not None and not isinstance(value, convert_to): value = convert_to(value) return value
7d1559dac92ebe8b5ba646a98a6b1a5021751c4a
26,076
def get_channels(color: str) -> tuple[int, int, int]: """Convert a 24-bit hex color string into an RGB color. :param color: A 24-bit hexadecimal color string, like is commonly used in HTML and CSS. :return: A :class:tuple object. :rtype: tuple """ r = int(color[:2], 16) g = int(color[2:4], 16) b = int(color[4:], 16) return (r, g, b)
6f3bc0e366957f3426d7c909ae9a72080bd5def0
26,077
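A minimal usage sketch for get_channels above (illustrative only; the colour string is an arbitrary sample without a leading '#'): print(get_channels("ff8800"))  # (255, 136, 0)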
def get_order_by_parts(order_by): """ Extract the order by clause information. In future this could be used to update multiple column ordering. :param order_by: the order by clause, for example: ' ORDER BY column_name ASC/DESC' :return: order by field name, order by order. For example: column_name, ASC/DESC """ order_by_parts = order_by.replace(' ORDER BY ', '').strip().split() return order_by_parts[0], order_by_parts[1]
db400dd1aa4f47ffda58b8ccc6e101b7f395537a
26,081
def db_get_idents(conn): """ Returns the idents table as dict of dicts (the ids in the first dimension are always smaller than those of the second. Args: conn: Sqlite3 connection object. Returns: Idents table as dictionary. """ cur = conn.cursor() result = {} for id_i, id_j, ident in cur.execute('SELECT id_i, id_j, ident from idents'): result.setdefault(id_i, {})[id_j] = ident return result
11d5888ca979425417b4e70808477925c436cf0e
26,082
def toslice(text=None, length=None): """Parses a string into a slice. Input strings can be e.g. '5:10', ':10', '1:'. Negative limits are allowed only if the data length is given. In such a case, input strings can be e.g. '1:-10'. Last, an integer can be given alone such as '1' to select only the 1st element. If neither text nor length is given, the default slice is slice(None). Arguments --------- text: optional, None, str The input text to parse. Default is None. length: None, int The data length. This is not mandatory if no slice limit is negative. Default is None. Returns ------- slice The parsed slice object. """ # For default input. if text is None: return slice(None) # Getting slice limits and storing it into lim. lim = text.split(':') if len(lim) == 2: for cnt in range(2): lim[cnt] = None if lim[cnt] == '' else eval(lim[cnt]) # Let us check that length is given in case limits are negative if ((lim[0] is not None and lim[0] < 0) or (lim[1] is not None and lim[1] < 0)) and length is None: raise ValueError( 'Please give the length argument to handle negative limits.') # The non-None limits are transformed if length is given to # avoid negative or 'greater than length' limits. for cnt in range(2): if lim[cnt] is not None and lim[cnt] < 0 and length is not None: lim[cnt] = lim[cnt] % length # Last check before output: if limits are a:b, is b really # greater than a ? if None not in lim and lim[0] >= lim[1]: raise ValueError( 'The slice lower bound is greater or equal to the ' 'slice upper bound.') return slice(*lim) elif len(lim) == 1: lim = eval(lim[0]) if length is not None: lim = lim % length return slice(lim, lim+1) else: raise ValueError(f'Invalid input slice {text}.')
b81345e678e681931c6a3dae287566cd0578bebb
26,084
def address_to_vec(addr): """ Convert an address into a list of numbers. """ # Strip of first and last char, then create list of nums. # -2 to also ignore trailing dash. numbers = addr[1:-2].split('-') v = [] for e in numbers: v.append(int(e)) return v
0ee27bb945ba1c736500cef1772a12890a9cee03
26,085
def z_to_y(mdrrt, z): """convert z into y according to target mdrrt. This method achieve the conversion of y into z. This is (7) in the paper. Args: mdrrt (MDRRT): target MDRRT z (pyqubo.array.Array): decision variable z Returns: list<list<pyqubo.Binary>>: decision variable y """ n = mdrrt.n S = mdrrt.num_slots y = [] for t in range(2 * n): row = [] for s in range(S): # index (t, s) is on row k and line j. (3) in the paper. k, j = mdrrt.unpack_ts_dict[t, s] # the conversion of z to y. (7) in the paper if j in [0, 3]: row.append(z[k]) else: row.append(1 - z[k]) y.append(row) return y
8eb456c7972d5005b74cd07a67fd23032082cc2a
26,090
def normalize(name): """ Normalizes text from a Wikipedia title/segment by capitalizing the first letter, replacing underscores with spaces, and collapsing all spaces to one space. :Parameters: name : string Namespace or title portion of a Wikipedia page name. :Return: string Normalized text """ return name.capitalize().replace("_", " ").strip()
daa0349fbaa21c5edeb5ea8ca65f6b6c7fa8da91
26,093
import re def contentfilter(fsname, pattern): """ Filter files which contain the given expression :arg fsname: Filename to scan for lines matching a pattern :arg pattern: Pattern to look for inside of line :rtype: bool :returns: True if one of the lines in fsname matches the pattern. Otherwise False """ if pattern is None: return True prog = re.compile(pattern) try: with open(fsname) as f: for line in f: if prog.match(line): return True except Exception: pass return False
ea30d6af9df3adc0986d9eaed9913f1160bd24d3
26,094
def con_per_dec(e): """convert e (a percentage) into a decimal""" return e / 100
62febf24b2ca8b38bc2b1f33d12d2b083a32fa5a
26,097
def is_defined_string(value): # pylint: disable=unused-variable """ Returns true if the specified value is a non-zero length String """ if isinstance(value, str) and value: return True return False
e5796f95dc46df17e874db04e0b79f94e35fb539
26,099
import csv def read_csv(file): """ Read a CSV file and return it as a list """ with open(file, 'r') as csvfile: reader = csv.reader(csvfile, delimiter=',') return list(reader)
31fc6f489cbebdb5e83a05bbb8ea3e09c41df6f1
26,104
def isurl(value): """ Determine if the parsed string is a url """ url = False if isinstance(value, str): if value[:4] == 'http' or value[:3] == 'www': url = True return url
58ef164aca40c3b6dd379ea20b798759dfd5e0e0
26,110
def ueGas(ub, umf, emf, delta, fw): """ This function calculates the velocity of the gas in the emulsion phase with K/L eqs. 6.39-6.40. Parameters ---------- ub : float Bubble rise velocity [m/s] umf : float Minimum fluidization velocity [m/s] emf : float Void fraction at minimum fluidization [-] delta : float Fraction of bed volume in bubbles [-] fw : float Ratio of wake volume to bubble volume [-] Returns ------- ue : float Emulsion gas velocity [m/s] """ ue = umf / emf - fw * delta * ub / (1 - delta - fw * delta) return ue
c3756c353a7f317cf249db74f8e8f32503b00ea6
26,113
def track_objs(frame, frame_id, objects): """Perform tracking on every object in the frame Parameters ---------- frame : numpy array The input image to track the objects within frame_id : int The order of the frame in the video objects : list list of the current traffic objects under tracking, to check where they moved in the frame. Returns ------- list a list of the traffic object classes with updated positions according to the tracking result. """ # update each object in place using the new frame for obj in objects: obj.track(frame, frame_id) return objects
1b65e8b50efbcd6d31b81208cf7da0f84b588bba
26,115
def delete_invalid_values(dct): """ Deletes entries that are dictionaries or sets """ for k, v in list(dct.items()): if isinstance(v, dict) or isinstance(v, set): del dct[k] return dct
eeadaeea809e7e38fcac7837981c3710b26817ae
26,116
def get_unique(list_): """Return a list where each value occurs only once. """ return list(set(list_))
b1380522fa407157d03b4d5b87daed73eed70b8d
26,117
def make_function_listener(events): """ Return a simple non-method extension point listener. The listener appends events to the ``events`` list. """ def listener(registry, event): events.append(event) return listener
b385d25eb98ee50f90d428b59cacb356144b2bb1
26,126
def generate_allocation_piechart(risk_prof): """ Queries for the ticker names and weights based on the user's risk profile and the profile's allocation. Returns a dictionary of dictionaries with "Stocks" and "Bonds" as the key with a value of ticker name as key and weight as value. """ chart_ticker_data = {} stock_data = {} bond_data = {} # Risk_prof.allocation is list of # <Risk Profile ID=2 Ticker ID=1 Ticker Weight=25>. for prof_ticker in risk_prof.allocation: # Ticker_description is using the ticker relationship # to get the object <Ticker ID=6 Symbol=VWO # Name=Vanguard FTSE Emerging Markets ETF (VWO)>. ticker_description = prof_ticker.ticker ticker_name = ticker_description.name ticker_weight = prof_ticker.ticker_weight_percent # Creates a stocks and bonds dictionary within the # chart_ticker_data dictionary. if ticker_description.category == "Stocks": stock_data[ticker_name] = ticker_weight chart_ticker_data["Stocks"] = stock_data else: bond_data[ticker_name] = ticker_weight chart_ticker_data["Bonds"] = bond_data return chart_ticker_data
bbbb5b9dff82683fee508e5ac46fbd7e8971f426
26,131
import requests def get_target_chemblid(target_upid): """Get ChEMBL ID from UniProt upid Parameters ---------- target_upid : str Returns ------- target_chembl_id : str """ url = 'https://www.ebi.ac.uk/chembl/api/data/target.json' params = {'target_components__accession': target_upid} r = requests.get(url, params=params) r.raise_for_status() js = r.json() target_chemblid = js['targets'][0]['target_chembl_id'] return target_chemblid
5f1283ea1814e9c4be5c57af98e10c6cf238414e
26,132
def lcore_core_ids(lcore_mask_host): """ Convert CPU ID mask from argument 'lcore_mask' to a list of CPU IDs """ lcore_cores = [] binary_mask = bin(int(lcore_mask_host, 16))[2:] for i, val in enumerate(binary_mask[::-1]): if val == "1": lcore_cores.append(i) return lcore_cores
fe16d49b04a4a08fe6c9a0aacca91f5191738aca
26,135
def nd_normalize(x, mu, sigma): """Re-center and scale `x` given the mean (mu) and standard deviation (sigma) of the distribution of `x`. :param x: :param mu: :param sigma: """ return (x - mu) / sigma
8e664b1bbd976a353bb10cddd22d040bb7c03672
26,136
def get_item_by_id(test_item, item_id): """Return the requested test item by its identifier. Goes over the test item's sub tests recursively and returns the one that matches the requested identifier. The search algorithm assumes that the identifiers assignment was the default one (use of default indexer), which identifies the tests in a DFS recursion. The algorithm continues the search in the sub-branch which root has the highest identifier that is still smaller or equal to the searched identifier. Args: test_item (object): test instance object. item_id (number): requested test identifier. Returns: TestCase / TestSuite. test item object. """ if test_item.identifier == item_id: return test_item if test_item.IS_COMPLEX: sub_test = max([sub_test for sub_test in test_item if sub_test.identifier <= item_id], key=lambda test: test.identifier) return get_item_by_id(sub_test, item_id)
6c27b735cae3ae2f70650a14e9a8483fef7bd05d
26,137
import base64 def _encode(data): """Safely encode data for consumption of the gateway.""" return base64.b64encode(data).decode("ascii")
1b0403a46f65ac90e536c5fd700dfa26fb54865e
26,139
def __make_threephase_node_name(node_name_prefix, phase): """ Returns a node name with a "phase" suffix, padded with trailing whitespace to 6 characters. """ new_name = node_name_prefix + phase whitespaces = 6 - len(new_name) new_name = new_name + (" " * whitespaces) return new_name
580c838dace828c4fe2d35d148ddf2da6aa57a0a
26,143
def _compare_trigrams(trig1: set, trig2: set) -> float: """ Checks how many trigrams from the first set are present in the second and returns that value divided by the length of the second set. """ count = 0 for i in trig1: if i in trig2: count += 1 return count / len(trig2)
f6887c1288abb22ca3caac444f20bbc4e51d4541
26,146
def get_epoch_from_header(sig_header: str)-> str: """Extracts epoch timestamp from the X-Telnyx-Signature header value""" sig_key_value = dict(param.split("=", 1) for param in sig_header.split(",")) epoch = sig_key_value["t"] return epoch
1829807121802d149547ad90c4427a8550e46d85
26,147
def test_agent(env, agent, num_rollouts=20): """ This function runs `num_rollouts` using the current agent's policy. :param env: environment to test the agent in (gym.Env) :param agent: Agent to predict actions (DQNAgent) :param num_rollouts: number of episodes to play (int) :return: average_reward: average reward from all the roll-outs """ total_reward = 0.0 for i in range(num_rollouts): state = env.reset() game_reward = 0.0 while True: action, _ = agent([state]) state, reward, done, _ = env.step(action) total_reward += reward game_reward += reward if done: # print("dqn-game ", i, "reward: ", game_reward) break return 1.0 * total_reward / num_rollouts
ba2f65d6220ac225df5189c1d10d55dbe9d608b6
26,148
import yaml def get_dependency_graph(path="configs/dependency-graph.yaml"): """Load dependency_graph file Parameters ---------- path : str, optional dependency-graph.yaml path, by default 'configs/dependency-graph.yaml' Returns ------- dict variables from dependency-graph.yaml """ config = yaml.load(open(path), Loader=yaml.Loader) return config
e1bac88e50e0d7bdf24e2a64d7ae8d2e9d2bd830
26,151
def stats_to_list(stats_dict): """ Parse the output of ``SESConnection.get_send_statistics()`` in to an ordered list of 15-minute summaries. """ result = stats_dict['GetSendStatisticsResponse']['GetSendStatisticsResult'] datapoints = [dp for dp in result['SendDataPoints']] datapoints.sort(key=lambda x: x['Timestamp']) return datapoints
9a0b1709eedec66edca722d60578a2ba2385cc9d
26,153
def indexToSymbol(index): """ returns the nucleotide symbol for an index in the alphabetic order """ if index == 0: return 'A' elif index == 1: return 'C' elif index == 2: return 'G' elif index == 3: return 'T' else: print('number should be 0 to 3, it is ' + str(index)) return 'X'
80f693372919abf3090d54c99d394b05afc5351e
26,156
def to_byte_string(value, count=2, signed=False, byteorder='little'): """Take an integer and return a space-separated string of its byte values. Example: to_byte_string(123456, count=4) = '64 226 1 0' """ byte_value = value.to_bytes(count, byteorder=byteorder, signed=signed) return ' '.join([str(x) for x in byte_value])
d1b2cc12000958a3858f73271b300ebe15480a43
26,157
def search_error_for_adams(adams_results: list, adams_results_less: list, m: int) -> list: """ Function for calculating errors for Adams method :param adams_results: results from this method :param adams_results_less: results from this method with divided step :param m: value for the fault :return: list with errors """ errors = [] for index in range(len(adams_results)): error = (adams_results[index][1] - adams_results_less[index * 2][1]) / (2 ** m - 1) errors.append(error) return errors
d63cffa19c840ca8bf3fe3d7b236e999b214317c
26,158
def count_increases(depths: list): """ Count the number of times a value is greater than the previous value. :param depths: list of depths :return: number of times depth increased :rtype: int """ increases = 0 previous = None for depth in depths: depth = int(depth) if previous and depth > previous: increases += 1 previous = depth return increases
43dc82d403747aaa36f2e0bd794e1d3f840bc5da
26,159
def get_ha1_file_htdigest(filename): """Returns a get_ha1 function which obtains a HA1 password hash from a flat file with lines of the same format as that produced by the Apache htdigest utility. For example, for realm 'wonderland', username 'alice', and password '4x5istwelve', the htdigest line would be:: alice:wonderland:3238cdfe91a8b2ed8e39646921a02d4c If you want to use an Apache htdigest file as the credentials store, then use get_ha1_file_htdigest(my_htdigest_file) as the value for the get_ha1 argument to digest_auth(). It is recommended that the filename argument be an absolute path, to avoid problems. """ def get_ha1(realm, username): result = None f = open(filename, 'r') for line in f: u, r, ha1 = line.rstrip().split(':') if u == username and r == realm: result = ha1 break f.close() return result return get_ha1
bd79458b89556b570338f3365d0a4ee6582f8aa0
26,160
import six def ExtractSnapshotTtlDuration(args): """Extract the Duration string for the Snapshot ttl. Args: args: The command line arguments. Returns: A duration string for the snapshot ttl. """ return six.text_type(args.snapshot_ttl) + 's'
0816e2e24ccdeceeeb3154c55288dc6733c042ff
26,163
def shuffle(n_cards, actions): """ Return (a, b) such that for any card, shuffled_position = (a * initial_position + b) % n_cards """ a, b = 1, 0 for action in actions: if action.startswith("deal with increment"): n = int(action.split()[-1]) a *= n b *= n elif action == "deal into new stack": a *= -1 b = -b - 1 elif action.startswith("cut"): n = int(action.split()[-1]) b -= n else: raise ValueError("Unknown action:", action) # Simplify a and b a %= n_cards b %= n_cards return a, b
2e3e159ac44b32742015914360d0d6774fc4fddb
26,164
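A minimal usage sketch for shuffle above (illustrative only; the deck size and action list are arbitrary samples), showing how the returned affine coefficients give each card's final position: a, b = shuffle(10, ["deal with increment 3", "cut 2", "deal into new stack"])
print([(a * p + b) % 10 for p in range(10)])  # [1, 8, 5, 2, 9, 6, 3, 0, 7, 4]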