Dataset columns: content (string, 39 to 14.9k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
def split_str_avoiding_square_brackets(s: str) -> list:
    """
    Splits a string by comma, but skips commas inside square brackets.

    :param s: string to split
    :return: list of strings split by comma
    """
    res = list()
    skipping = 0
    last_idx = 0
    for i, c in enumerate(s):
        if c == '[':
            skipping += 1
        elif c == ']':
            skipping -= 1
        elif c == ',' and skipping == 0:
            res.append(s[last_idx:i])
            last_idx = i + 1
    res.append(s[last_idx:])
    return res
29dad952d9b8151bb2bad0f5c7338436251792b7
16,686
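A quick usage sketch of split_str_avoiding_square_brackets, assuming the function above is in scope; the inputs are made up for illustration:

# Top-level commas split; commas inside brackets are kept together.
assert split_str_avoiding_square_brackets("a,[b,c],d") == ["a", "[b,c]", "d"]
assert split_str_avoiding_square_brackets("x") == ["x"]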
def sizeToTeam(size):
    """Given a size in kilobytes, returns the 512kb.club team
    (green/orange/blue), or "N/A" if size is too big for 512kb.club"""
    if size < 100:
        return "green"
    elif size < 250:
        return "orange"
    elif size <= 512:
        return "blue"
    else:
        return "N/A"
a61f6a28f8f00cd05271684f715345b7fac4ed54
16,689
def product_consumption_rate(total_items, total_orders):
    """Returns the average number of units per order.

    Args:
        total_items (int): Total number of items of a SKU sold during a period.
        total_orders (int): Total number of orders during a period.

    Returns:
        Average number of units per order.
    """
    # The original multiplied by 100, which contradicts the documented
    # "units per order" contract; the plain ratio matches the docstring.
    return total_items / total_orders
e5645a4703d0d9335abc6832da24b8a6b53c0c17
16,693
import json


def read_cjson(path):
    """
    Read a json file with #-comment lines
    """
    with open(path) as f:
        lines = [line for line in f if not line.strip().startswith('#')]
    data = json.loads('\n'.join(lines))
    return data
f0585820e56c5fa8ccbf9894a70b780b4ce00018
16,694
def getSelectRedirect(params):
    """Returns the pick redirect for the specified entity.
    """
    if params.get('args'):
        return '/%(url_name)s/pick?%(args)s' % params
    else:
        return '/%(url_name)s/pick' % params
1eae1150f180986b74ac2ec9bc31c3d3d1f566e1
16,698
def wrap(char, wrapper):
    """Wrap a sequence in a custom string."""
    return wrapper.format(char=char)
a86bbfb5f1b4ea373eb0a23b365c63ceed106159
16,708
def x2bool(s):
    """Helper function to convert strings from the config to bool"""
    if isinstance(s, bool):
        return s
    elif isinstance(s, str):
        return s.lower() in ["1", "true"]
    raise ValueError()
2850c3ab0421619a087d88181f3b6e2c6ffa9e9a
16,718
def round_down(rounded, divider):
    """Round down an integer to a multiple of divider."""
    return int(rounded) // divider * divider
cf9ea0a437d3246776bd80e03ed19f06827f68ce
16,719
import json
import base64
import binascii


def validate(arg):
    """
    Validate input parameters for `left` and `right`

    :param arg: input from request
    :return: dictionary of {'data': base64encoded_string} if valid or False if not valid
    """
    if not arg:
        return False
    if isinstance(arg, (str, bytes)):
        try:
            arg = json.loads(arg)
        except Exception as e:
            print(e)
            return False
    if not arg.get("data"):
        return False
    try:
        base64.b64decode(arg["data"])
    except binascii.Error:
        return False
    return arg
c10634020e402ffd7b39f657c405e6bcc7283031
16,721
import json


def read_json_config(cfg):
    """Read a JSON configuration.

    First attempt to read as a JSON string. If that fails, assume that it
    is a JSON file and attempt to read contents from the file.

    Args:
        cfg: a config string or file path

    Returns:
        dict of config options
    """
    try:
        cfg = json.loads(cfg)
    except ValueError:
        with open(cfg, 'r') as cfg_file:
            cfg = json.load(cfg_file)
    return cfg
2268297273dbfb468e0a8391981b4795702ba0b7
16,723
import string


def idx_to_label(n: int) -> str:
    """Convert a number to a corresponding letter in the alphabet.

    In case the number is higher than the number of letters in the English
    alphabet, a second character is appended.

    >>> idx_to_label(0)
    'a'
    >>> idx_to_label(25)
    'z'
    >>> idx_to_label(26)
    'aa'

    This function was inspired by:
    https://stackoverflow.com/questions/2267362/how-to-convert-an-integer-to-a-string-in-any-base

    :param n: the input number
    :return: the corresponding string
    """
    alphabet_size = 26
    digits = []
    n += 1
    while n:
        # Bijective base-26: shift by one before dividing so that a zero
        # remainder maps to the last letter ('z') instead of leaking an
        # extra leading digit (the original returned 'az' for 25).
        n, remainder = divmod(n - 1, alphabet_size)
        digits.append(remainder)
    digits.reverse()
    return ''.join([string.ascii_lowercase[i] for i in digits])
67e4bba437016dddb6d90f286e1d65f2bfec8caf
16,728
def dict_contains(subdict, maindict):
    """
    Return True if subdict is present with the same values in maindict.

    Can be recursive. If maindict contains some key not in subdict, that is
    fine; but if subdict has a key not in maindict, or the values differ,
    it is a failure.

    >>> dict_contains(dict(a=1, b=2), dict(a=1, c=2, b=2))
    True
    >>> dict_contains(dict(a=dict(aa=1)), dict(a=dict(aa=1, bb=2), b=2))
    True
    >>> dict_contains(dict(a=dict(aa=1, bb=2)), dict(a=dict(aa=2)))
    False
    >>> dict_contains(dict(a=dict(aa=1)), dict(a=[]))
    False
    >>> dict_contains(dict(a=1), dict())
    False
    >>> dict_contains(dict(a=[1, 2, 3]), dict(a=[1, 2, 3]))
    True
    >>> dict_contains(dict(a=[1, 3, 2]), dict(a=[1, 2, 3]))
    False
    >>> dict_contains(dict(a=[1, 3]), dict(a=[1, 2, 3]))
    False
    >>> dict_contains(dict(a=[1, 3, 2]), dict(a={1, 2, 3}))
    True

    :param subdict: the smaller dict that should be present in the big one
    :param maindict: the dict
    :return: True if subdict is included in maindict
    :rtype: bool
    """
    try:
        for k, v in subdict.items():
            mainv = maindict[k]
            if isinstance(mainv, dict) and isinstance(v, dict):
                if not dict_contains(v, mainv):
                    return False
            elif isinstance(mainv, (set, frozenset)):
                # Compare as sets, but keep checking the remaining keys
                # instead of returning early as the original did.
                if set(v) != mainv:
                    return False
            elif mainv != v:
                return False
    except KeyError:
        return False
    return True
3c02145c7572f4c2d815213527effbfd7df93496
16,729
from typing import Any


def cast_numeric_greater_than_zero(
    value: Any, value_name: str, required_type: type
) -> Any:
    """
    Checks that `value` is greater than zero and casts it to `required_type`.

    Raises an exception if `value` is not greater than zero.

    Args:
        value: numeric value to check
        value_name: name to be included in the error message
        required_type: target type of the value

    Returns:
        value as required type
    """
    if not isinstance(value, required_type):
        value = required_type(value)

    if value <= 0:
        raise ValueError(f"Value {value_name} must be greater than zero.")

    return value
27f3b26d824863f1a94d4efe8c902cb84bc26c59
16,730
from typing import Iterable


def filter_duplicate_key(
    line: bytes,
    line_number: int,
    marked_line_numbers: Iterable[int],
) -> bytes:
    """Return '' if first occurrence of the key otherwise return `line`."""
    if marked_line_numbers and line_number == sorted(marked_line_numbers)[0]:
        return b""

    return line
d592e718832f6d0c4718989d1a0fb96783c1508c
16,732
import re


def clean_text(text, *, replace='_'):
    """
    Ensure input contains ONLY ASCII characters valid in filenames.

    Any other character will be replaced with 'replace'.

    Args:
        text: The text to clean.
        replace: The replacement character to use.

    Returns:
        The cleaned text.
    """
    text = re.sub(r'[^a-zA-Z0-9]', replace, text)
    # Collapse runs of the replacement character into a single one.
    text = re.sub(r'{r}{r}+'.format(r=replace), replace, text)
    return text
92d305e78d4883cea2e0a1c3da218284ad783b72
16,741
from pathlib import Path


def label_malformed(path: Path) -> Path:
    """
    Renames the file at the given location to
    <original_filename>_MALFORMED_CONTENTS. If such a file already exists,
    an incremented number is appended to the name until it can be created.
    The new file name is returned.

    Raises: `OSError`
    """
    assert path.is_file()
    base_parts = list(path.parts)
    base_parts[-1] += "_MALFORMED_CONTENTS"
    malformed_file = Path(*base_parts)
    # Avoid naming collision: rebuild the candidate from the base name each
    # time so the counters do not accumulate (the original produced "...12").
    i = 1
    while malformed_file.is_file():
        candidate_parts = list(base_parts)
        candidate_parts[-1] += str(i)
        malformed_file = Path(*candidate_parts)
        i += 1
    path.rename(malformed_file)
    return malformed_file
12ca5ec240803127dd8aed7d13502c71a12f08ae
16,749
def get_child_parents(edges):
    """Puts each non-parent node together with its parents

    Parameters
    ----------
    edges : list
        A list of tuples corresponding to the Bayesian network structure as
        described in the input file

    Returns
    -------
    child_parents
        A dictionary with non-parent nodes as keys and their parents as values
    """
    child_parents = {}
    for e in edges:
        if e[1] in child_parents:
            child_parents[e[1]].append(e[0])
        else:
            child_parents[e[1]] = [e[0]]
    return child_parents
4b01a264ee1e2498c37f1fa0695f9430c207f04d
16,750
import random


def get_random_from_distribution(minimum_value, distribution, increment=1):
    """Returns an integer from minimum_value to len(distribution)*increment,
    where the probability of any specific integer is determined by the
    probability distribution.
    """
    x = random.random()
    result = minimum_value - increment
    for limits in distribution:
        if x > limits[1]:
            result = result + increment
        else:
            break
    return result
0d3cde30c86ea8e230e3740123810d1e2d73d7ee
16,751
def get_divisable(row):
    """Get numbers from row where one divides another without rest."""
    for index, num in enumerate(row[:-1]):
        for other_num in row[index + 1:]:
            if num % other_num == 0 or other_num % num == 0:
                return sorted([num, other_num], reverse=True)
e7e6cb9936cd54df7cd0594168e53b8821cfbae6
16,755
def dms(degrees):
    """
    Calculate degrees, minutes, seconds representation from decimal degrees.

    Parameters
    ----------
    degrees : float

    Returns
    -------
    (int, int, float)
    """
    degrees_int = int(abs(degrees))                 # integer degrees
    degrees_frac = abs(degrees) - degrees_int       # fractional degrees, used to compute minutes
    minutes_int = float(int(degrees_frac * 60))     # integer minutes
    minutes_frac = degrees_frac - minutes_int / 60  # fractional minutes, used to compute seconds
    seconds = minutes_frac * 3600                   # decimal seconds

    # Handle sign. Degrees portion will contain the sign of the coordinate.
    # Minutes and seconds will always be positive.
    # sign function returns -1, 0, +1 for x < 0, x == 0, x > 0, respectively
    if degrees < 0:
        degrees_int *= -1

    return degrees_int, minutes_int, seconds
ef062ddc4d313c0e8376096952d0011c01d27825
16,756
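A worked example of dms, assuming the function above is in scope (the coordinate is illustrative):

# -73.9857 degrees is roughly 73 deg 59' 8.52"; the sign stays on the degrees part.
d, m, s = dms(-73.9857)
print(d, m, round(s, 2))  # -73 59.0 8.52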
def parenthesise(s, parens=True):
    """
    Put parentheses around a string if requested.
    """
    if parens:
        return '(%s)' % s
    else:
        return s
37a1abbb4a511eff9b9c63a79fafcc60331bee67
16,762
def delta_sr(processor_sr, aeronet_sr):
    """
    Convention in ACIX I paper

    :param processor_sr: surface reflectance of the processor
    :param aeronet_sr: surface reflectance of the reference (aeronet based)
    :return: the signed difference processor_sr - aeronet_sr
    """
    return processor_sr - aeronet_sr
60b3a69fd986044126cf95b8d87425334e472abe
16,763
from typing import Tuple
import math


def uniform_divide(total: float, mpp: float) -> Tuple[int, float]:
    """Return the minimum number of partitions and the quantity per partition
    that uniformly divide a given quantity

    :param total: The total quantity to divide
    :param mpp: Maximum quantity per partition
    :returns: The minimum number of partitions and the quantity in each partition"""
    n = int(math.ceil(total / mpp))
    p = total / n
    return n, p
2cccebd710975b34ab7f3538516d8f4c60b18c87
16,764
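A quick usage sketch of uniform_divide, assuming the function above is in scope (numbers chosen for illustration):

# 10 units with at most 3 per partition -> 4 partitions of 2.5 each.
print(uniform_divide(10, 3))  # (4, 2.5)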
def matrix_wrapper(input_tuple):
    """Parallel wrapper for matrix formation.

    This wrapper is used whenever a pmap/map-type function is used to make
    matrices for each cell in parallel.

    Parameters
    ----------
    input_tuple : Tuple
        Index 0 is the chain (depletion_chain.DepletionChain), index 1 is the
        reaction rate array (reaction_rates.ReactionRates), index 2 is the
        cell_id.

    Returns
    -------
    scipy.sparse.csr_matrix
        The matrix for this reaction rate.
    """
    return input_tuple[0].form_matrix(input_tuple[1], input_tuple[2])
4a594e6fda9b4916f644a422d1b51969b86fb44e
16,771
def short_msg(msg, chars=75):
    """
    Truncates the message to {chars} characters and adds three dots at the end
    """
    # Use a three-dot ellipsis to match the documented behaviour.
    return (str(msg)[:chars] + '...') if len(str(msg)) > chars else str(msg)
f807c4e2a032bb05ba5736e955af7c03653bcf80
16,773
import torch


def accumarray(I, V, size=None, default_value=0):
    """
    Returns a Tensor by accumulating elements of tensor V using the subscripts I.
    The output tensor number of dimensions is/should be equal to the number of
    subscripts rows plus the values tensor number of dimensions minus one.

    Parameters
    ----------
    I : LongTensor
        the (N,) subscripts tensor
    V : Tensor
        the (M,F,) values tensor
    size : tuple (optional)
        the size of the output tensor. If None it will be automatically
        inferred (default is None)
    default_value : float (optional)
        the default value of the output tensor (default is 0)

    Returns
    -------
    Tensor
        the accumulated tensor
    """
    if size is None:
        size = list(V.size())
        size[0] = torch.max(I).item() + 1
    return default_value + torch.zeros(
        size, dtype=V.dtype, device=V.device
    ).scatter_add_(0, I.view(-1, 1).expand_as(V), V)
5e5acb0490f305a4498825260d5852a2fd15ea90
16,776
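A small usage sketch of accumarray, assuming the function above is in scope; the tensors are made up for illustration:

import torch

I = torch.tensor([0, 0, 1])
V = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
# Rows 0 and 1 of V are summed into output row 0; row 2 goes to output row 1.
print(accumarray(I, V))  # tensor([[4., 6.], [5., 6.]])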
def _AsInt(x):
    """Converts text to int.

    Args:
        x: input value.

    Returns:
        Integer value of x, or None.
    """
    try:
        i = int(x)
        return i
    except (ValueError, TypeError):
        return None
393b88a0cc317b34fee1c0748e508ddac9ce5534
16,778
def coerce_entity_dict(v):
    """Coerce entity ID strings to a dictionary with key "entity"."""
    if isinstance(v, str):
        return {"entity": v}
    return v
6f8265d08bde871b9379fb693cfb385bff0783ce
16,779
from typing import List


def cut_on_stop(text: str, stop: List[str]) -> str:
    """Cuts a text to the first stop sequences.

    :param text: Text to cut.
    :type text: str
    :param stop: List of stop sequences.
    :type stop: List[str]
    :return: Cut text.
    :rtype: str
    """
    items = [text]
    for _stop in stop:
        _items = []
        for item in items:
            _items.extend(item.split(_stop))
        items = _items
    return items[0]
b25d4c4172b171ea126dfaa77203691c935001ac
16,789
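A quick usage sketch of cut_on_stop, assuming the function above is in scope (the text and stop sequences are illustrative):

# The text is cut at the earliest occurrence of any stop sequence.
print(cut_on_stop("hello\nworld. bye", ["\n", "."]))  # 'hello'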
def datetime_to_str(time):
    """convert python datetime object to a
    {hour}:{min}:{second}:{millisecond} string format
    """
    return '{hour}:{min}:{second}:{millisecond}'.format(
        hour=time.hour,
        min=time.minute,
        second=time.second,
        millisecond=str(int(round(time.microsecond / 1000.0))),
    )
8d7c5a7b08c32718cb5e284b30ee9cd57b3c2e2e
16,790
def extract_header_spki_hash(cert):
    """
    Extract the sha256 hash of the public key in the header, for
    cross-checking.
    """
    line = [ll for ll in cert.splitlines()
            if ll.startswith('# SHA256 Fingerprint: ')][0]
    return line.replace('# SHA256 Fingerprint: ', '').replace(':', '').lower()
7cd9f38855be4877e14761ab87994df79d251e4e
16,792
def backend_listener_url(testconfig):
    """
    Returns the url of the backend listener
    """
    return (
        f'{testconfig["threescale"]["backend_internal_api"]["route"]["spec"]["port"]["targetPort"]}'
        f'://{testconfig["threescale"]["backend_internal_api"]["route"]["spec"]["host"]}'
    )
98da2a6ea0d421d3a904858573d5d3d4656db9f9
16,793
import json
import ast


def flatten_json_1_level(event, field_name, field_name_underscore, dump_to_string):
    """
    Flattens a JSON field 1 level. This function is used in flatten JSON.

    :param event: A dictionary
    :param field_name: The field name to flatten
    :param field_name_underscore: The field name with an underscore appended
    :param dump_to_string: If true any remaining dictionaries will be
        converted to a string with json.dumps
    :return: An event with the field flattened

    Examples:

    .. code-block:: python

        # Example #1
        event = {'my_field': '{"a": null, "b": 2}'}
        event = flatten_json_1_level(event=event,
                                     field_name='my_field',
                                     field_name_underscore='my_field_',
                                     dump_to_string=True)
        # event == {'my_field_a': None, 'my_field_b': 2}
    """
    # Load strings to JSON when possible, otherwise return the event
    if type(event[field_name]) is not dict:
        try:
            event[field_name] = json.loads(event[field_name])
        except Exception:
            try:
                event[field_name] = ast.literal_eval(event[field_name])
            except Exception:
                return event

    # Iterate through the dictionary and flatten a single level
    try:
        for k, v in event[field_name].items():
            if type(v) is dict and dump_to_string:
                event[field_name_underscore + k.lower()] = json.dumps(v)
            else:
                event[field_name_underscore + k.lower()] = v
        del event[field_name]
    except Exception:
        return event

    return event
348d8b9b8fbd34577b0e1aa2537fd6887f48bd47
16,796
def clean_intent_labels(labels):
    """Get rid of `None` intents. sklearn metrics do not support them."""
    return [l if l is not None else "" for l in labels]
d1681ac88f3454c33887511aa100bc50a48c8ca2
16,800
import shlex


def shlex_quote(string: str) -> str:
    """Simple wrapper for shlex.quote"""
    return shlex.quote(string)
d067d5aaaa351a4345d2fb0f63503f0b6ec46860
16,801
def should_keep_road(road, road_shp, record_buffers_index):
    """Returns true if road should be considered for segmentation

    :param road: Dictionary representation of the road (with properties)
    :param road_shp: Shapely representation of the road
    :param record_buffers_index: RTree index of the record_buffers
    """
    # If the road has no nearby records, then we can discard it early on.
    # This provides a major optimization since the majority of roads don't
    # have recorded accidents.
    if not len(list(record_buffers_index.intersection(road_shp.bounds))):
        return False

    if ('highway' in road['properties']
            and road['properties']['highway'] is not None
            and road['properties']['highway'] != 'path'
            and road['properties']['highway'] != 'footway'):
        return True

    # We're only interested in non-bridge, non-tunnel highways.
    # 'class' is optional, so only consider it when it's available.
    if ('class' not in road['properties']
            or (road['properties']['class'] == 'highway'
                and road['properties']['bridge'] == 0
                and road['properties']['tunnel'] == 0)):
        return True

    return False
3deffa4c4f52759fbe38afa6597faf75a8a7284e
16,802
def _is_call(call, func):
    """
    Return whether the first argument is a function call of the second.
    """
    return call.startswith(func + "(") and call.endswith(")")
9c60b3f5ba29e41c1ea91e2d35a08c49a76444ea
16,803
def EnsureEnabledFalseIsShown(cv_config):
  """Ensures that "enabled" is shown when printing ContinuousValidationConfig.

  Explicitly sets ContinuousValidationConfig.enforcementPolicyConfig.enabled
  to False when it's unset, so the field is printed as "enabled: false",
  instead of omitting the "enabled" key when CV is not enabled.

  Args:
    cv_config: A ContinuousValidationConfig.

  Returns:
    The modified cv_config.
  """
  if (not cv_config.enforcementPolicyConfig
      or not cv_config.enforcementPolicyConfig.enabled):
    cv_config.enforcementPolicyConfig.enabled = False
  return cv_config
dc6d535b074621e06cb8d36a9a6a03cb81765a16
16,807
def transposition_num(num):
    """
    transposition axis(y) number.
    0 => 8, 1 => 7, ..., 8 => 0
    """
    return (4 - num) + 4
fa7118f655026d4773cea1d5f387789019a72f75
16,809
import itertools


def xor(one: bytes, two: bytes) -> bytes:
    """XOR, re-cycling two if len(one) > len(two)."""
    assert len(one) >= len(two)
    return bytes([
        a ^ b
        for a, b in zip(one, itertools.cycle(two))
    ])
c308fe0fea62def18fcce7a00b082cd2a959bf38
16,815
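A quick round-trip sketch of xor, assuming the function above is in scope (the key and message are illustrative):

key = b"ab"
cipher = xor(b"hello", key)
# Applying the same cycled key a second time recovers the plaintext.
assert xor(cipher, key) == b"hello"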
def find_node_name(model, name):
    """
    Finds a node by its name.

    :param model: onnx graph
    :param name: node name
    :return: node pointer
    """
    if not hasattr(model, "graph"):
        raise TypeError(  # pragma: no cover
            "Parameter model is not an ONNX model but "
            "{}".format(type(model)))
    for node in model.graph.node:
        if node.name == name:
            return node
    return None
9dc3a308f5134236b12bf79bc76b0d09fc41458d
16,817
import difflib


def text_compare(text1, text2, output_file):
    """
    Compares two strings and if they match returns True
    else writes the difference to the output_file.
    """
    if text1 == text2:
        return True
    diff = list(difflib.Differ().compare(text1.split(), text2.split()))
    with open(output_file, 'w') as te:
        for line in diff:
            te.write(line + "\n")
    return False
8cad72b9fcf7f213cdd9c337d86c89c664fdc606
16,821
import jinja2
import yaml


def parse(filename):
    """Parse a configuration file.

    Parameters
    ----------
    filename : str
        The config file to parse. Should be YAML formatted.

    Returns
    -------
    config: dict
        The raw config file as a dictionary.
    """
    with open(filename, "r") as fp:
        config_str = jinja2.Template(fp.read()).render()
    config = yaml.load(config_str, Loader=yaml.Loader)
    return config
055bcca59e2c9d3adad2ca9bdc527fc849ef2290
16,822
def tag_sibling_ordinal(tag):
    """
    Given a BeautifulSoup tag, count the same tags in its siblings to get a
    sibling "local" ordinal value. This is useful in counting child figures
    within a fig-group, for example.
    """
    return len(tag.find_previous_siblings(tag.name)) + 1
ca3f764c7046ac65e99f6a074145b6afc11d2b2d
16,826
import copy


def n_max_elements(list1, N):
    """Function to compute the N highest numbers of a list"""
    n_list1 = copy.deepcopy(list1)
    final_list = []
    for i in range(0, N):
        # Using max() instead of a manual scan seeded with 0 avoids missing
        # values when the list contains only non-positive numbers.
        max1 = max(n_list1)
        n_list1.remove(max1)
        final_list.append(max1)
    return final_list
8361994794efe5a3f34723f667c3b83fe75a388e
16,829
from typing import Any


def make_safe(value: Any) -> str:
    """
    Transform an arbitrary value into a string

    Parameters
    ----------
    value: Any
        Value to make safe

    Returns
    -------
    str
        Safe value
    """
    if isinstance(value, bool):
        return str(value).lower()

    return str(value)
4b342105d26458ddffd20712c777c5bc8e221c81
16,831
def smooth_vectors(vectors, strength, iterations):
    """Smooths the vectors iteratively, with the given number of iterations
    and strength per iteration

    Parameters
    ----------
    vectors: list, :class: 'compas.geometry.Vector'
    strength: float
    iterations: int

    Returns
    ----------
    list, :class: 'compas.geometry.Vector'
        The smoothened vectors
    """
    for _ in range(iterations):
        for i, n in enumerate(vectors):
            if 0 < i < len(vectors) - 1:
                neighbors_average = (vectors[i - 1] + vectors[i + 1]) * 0.5
            else:
                neighbors_average = n
            vectors[i] = n * (1 - strength) + neighbors_average * strength
    return vectors
2a8dd922e5f10d67bcc1285eddbb6e53f43177c9
16,832
from ast import literal_eval
from pathlib import Path


def scrape_names(path: Path) -> list[str]:
    """Scrape names into a list and sort them."""
    with path.open() as h:
        # literal_eval safely parses the list literal on the first line,
        # unlike the bare eval() used originally.
        return sorted(literal_eval(next(h)))
d1b001a911abf7b81602b61b4b8a185ad257fe78
16,837
def twiddle(objFunction, args, init=0.5, tolerance=0.00001,
            domain=(float("-inf"), float("inf"))):
    """
    Optimize a single parameter given an objective function.

    This is a local hill-climbing algorithm. Here is a simple description of
    it: https://www.youtube.com/watch?v=2uQ2BSzDvXs

    @param objFunction (function) Objective function used to quantify how
        good a particular parameter choice is.
    @param args (tuple) Arguments necessary for the objective function.
    @param init (float) Initial value of the parameter.
    @param tolerance (float) Number used to determine when optimization has
        converged to a sufficiently good score.
    @param domain (tuple) Domain of parameter values, as (min, max).

    @return (dict) Contains:
        "parameter" (float) Threshold that returns the largest score from the
            objective function.
        "score" (float) The score from the objective function given the
            threshold.
    """
    pastCalls = {}
    x = init
    delta = 0.1
    bestScore = objFunction(x, args)

    pastCalls[x] = bestScore

    while delta > tolerance:
        # Keep x within bounds
        if x + delta > domain[1]:
            delta = abs(domain[1] - x) / 2
        x += delta

        if x not in pastCalls:
            score = objFunction(x, args)
            pastCalls[x] = score

        score = pastCalls[x]

        if score > bestScore:
            bestScore = score
            delta *= 2
        else:
            # Keep x within bounds
            if x - delta < domain[0]:
                delta = abs(domain[0] - x) / 2
            x -= 2 * delta

            if x not in pastCalls:
                score = objFunction(x, args)
                pastCalls[x] = score

            score = pastCalls[x]

            if score > bestScore:
                bestScore = score
                delta *= 2
            else:
                x += delta
                delta *= 0.5

    print("Parameter:", x)
    print("Best score:", bestScore)
    print("Step size:", delta)
    print()

    return {"parameter": x, "score": bestScore}
8991c328b2fd45b77bb6fbea5bc2b0f8a5d705ce
16,839
def pypi_link(pkg_filename):
    """
    Given the filename, including md5 fragment, construct the
    dependency link for PyPI.
    """
    root = 'https://files.pythonhosted.org/packages/source'
    name, sep, rest = pkg_filename.partition('-')
    parts = root, name[0], name, pkg_filename
    return '/'.join(parts)
1f71b2c6c34b52a60c2ead14b40e98ef0c89a8cf
16,844
def create_vector_dictionary(vector_file, multiword=False):
    """
    This function creates a dictionary with vector values from affixoids

    Args:
        vector_file (file): File with vector values from FastText
        multiword (bool): Set to True if the word in vector file has multiple parts

    Returns:
        Dictionary with vector values as list

    Example:
        >>> create_vector_dictionary('doctests/vectors.txt')
        {'Bilderbuchabsturz': [-0.25007, -0.16484, -0.34915, 0.44351, 0.17918, 0.17356, 0.32336, 0.19306, 0.40586, 0.58886, -0.55027, 0.15365, -0.28948, -0.096226, 0.91019, 0.24468, -0.20271, 0.5475, 0.36233, 0.20612, -0.17727, 0.054958, 0.16082, -0.1237, -0.057176, 0.18833, 0.11853, 0.19447, -0.13197, -0.18862, -0.17965, -0.13153, 0.27431, -0.68191, -0.35592, -0.13321, 0.16669, -0.42519, 0.11905, 0.15686, 0.26408, -0.35616, -0.26065, -0.0021858, 0.34352, -0.39887, 0.59549, -0.35665, -0.60043, -0.16136, -0.19603, -0.57132, 0.11918, -0.22356, 0.1499, -0.22458, -0.081269, 0.0058904, 0.16639, 0.36866, -0.3294, -0.21821, 0.87304, -0.042374, -0.42687, -0.41224, -0.73979, 0.37495, 0.34696, 0.6927, -0.24696, 0.23713, 0.0004817, -0.67652, 0.36679, 0.52095, -0.059838, 0.3779, -0.15106, -0.31892, -0.084559, -0.067978, 0.45779, 0.45037, -0.19661, -0.14229, 0.097991, 0.26219, 0.41556, 0.43363, 0.060991, 0.15759, 0.055367, -0.10719, -0.38255, -0.3, -0.032207, -0.50483, 0.18746, -0.6391]}
    """
    dictionary = {}

    with open(vector_file, 'r', encoding='utf-8') as f:
        for line in f:
            if multiword:
                word = line.rstrip().split('\t')
            else:
                word = line.strip().split()
            dict_key = word[0]
            dict_value = list(word[1:])
            dict_value_float = [float(x) for x in dict_value]
            dictionary.update({dict_key: dict_value_float})

    return dictionary
2da775668193b0cd545137f90f416dca4fb166be
16,845
def add_linebreaks(text, max_len=80):
    """
    Add linebreaks on whitespace such that no line is longer than `max_len`,
    unless it contains a single word that's longer.

    There are probably way faster methods, but this is simple and works.
    """
    br_text = ''
    len_cnt = 0
    for word in text.split(' '):
        len_cnt += len(word) + 1
        if len_cnt > max_len:
            len_cnt = len(word)
            br_text += '\n' + word
        else:
            br_text += ' ' + word
    return br_text[1:]
3d09572d34b67da9b639466478ce2d62d6d54116
16,846
def strftime(date, fmt):
    """
    Apply strftime to `date` object with `fmt` parameter.
    Returns '' if either is non truthy
    """
    try:
        if not date or not fmt:
            return ''
        return date.strftime(fmt)
    except Exception:
        return ''
702688a4c7b4c5bef1ee64ba1804335b617baa2f
16,848
import re


def normalize(string):
    """Normalize whitespace."""
    string = string.strip()
    string = re.sub(r'\s+', ' ', string)
    return string
a677092aa0deaed5a87958f35e63fbe5538e04f3
16,851
def kb(units):
    """Boltzmann constant

    Parameters
    ----------
    units : str
        Units for kb. Supported units

        ======  =========================  ==============
        Unit    Description                Value
        ======  =========================  ==============
        J/K     Joule per kelvin           1.38064852e-23
        kJ/K    Kilojoule per kelvin       1.38064852e-26
        eV/K    Electron volt per kelvin   8.6173303e-5
        cal/K   Calorie per kelvin         3.2976230e-24
        kcal/K  Kilocalorie per kelvin     3.2976230e-27
        Eh/K    Hartree per kelvin         3.1668105e-6
        Ha/K    Hartree per kelvin         3.1668105e-6
        ======  =========================  ==============

    Returns
    -------
    kb : float
        Boltzmann constant in appropriate units

    Raises
    ------
    KeyError
        If units is not supported.
    """
    kb_dict = {
        'J/K': 1.38064852e-23,
        'kJ/K': 1.38064852e-26,
        'eV/K': 8.6173303e-5,
        'cal/K': 3.2976230e-24,
        'kcal/K': 3.2976230e-27,
        'Eh/K': 3.1668105e-06,
        'Ha/K': 3.1668105e-06,
    }
    try:
        return kb_dict[units]
    except KeyError:
        err_msg = ('Invalid unit for kb: {}. Use help(pmutt.constants.kb) for '
                   'accepted units.'.format(units))
        raise KeyError(err_msg)
4f0d4cfa10f617e1a9a0257b1a509606af2d7f18
16,857
def get_one_batch(dataloader):
    """Returns one batch from a dataloader"""
    # A DataLoader must be wrapped in an iterator before next() can be used.
    iter_dl = iter(dataloader)
    batch = next(iter_dl)
    return batch
a05bde960649791f6ea8a8a432c5f39af6137275
16,858
import math


def calc_stats(base_stats, level):
    """Calculate a Pokemon's stats based on its base stats and level"""
    stats = []
    # HP (index 0) uses a different formula from the other five stats.
    stats.append(math.floor((31 + 2 * base_stats[0] + 21) * level / 100 + 10 + level))
    for i in range(1, 6):
        stats.append(math.floor((31 + 2 * base_stats[i] + 21) * level / 100 + 5))
    return stats
a2b94a830d6d6622e8a58ce3831112af31899776
16,867
def build_edges(src_profile, dst_profiles):
    """create set of edges, compatible with NX graph format."""
    edges = set()
    for dst_profile in dst_profiles:
        edges.add((src_profile['uid'], dst_profile['uid']))
    return edges
3af864e0b847b530c0c524b28a406cf8a99b71e0
16,872
import re


def get_filename_parts(filename, default_suffix=''):
    """
    Parses a string representing a filename and returns as a 2-element string
    tuple with the filename stem and its suffix (the shortest string after a
    dot from the end of the string). If there's no suffix found a default is
    used instead.

    Args:
        filename (string): The input file name expected to contain a GraphViz
            DOT digraph.
        default_suffix (string, optional): The suffix to use if one cannot be
            found from the filename. Defaults to an empty string.

    Returns:
        (string, string): A 2 element tuple of the filename stem and its suffix.
    """
    m = re.match(r'(.*)(\..*$)', filename)
    if not m:
        return (filename, default_suffix)
    return m.group(1, 2)
a9f7451ab0da7c6dd661959000b8e9d89911d8c1
16,877
def getWalkTag(node):
    """Get Controller tag

    Arguments:
        node (dagNode): Controller object with tag

    Returns:
        tag: Controller tag
    """
    tag = node.listConnections(t="controller", et=True)
    if tag:
        return tag[0]
87ffee1216d29a23331e0b7411eccf376326ec5a
16,878
import math


def area_triangle_sss(side1, side2, side3):
    """
    Returns the area of a triangle, given the lengths of its three sides.
    """
    # Use Heron's formula
    semiperim = (side1 + side2 + side3) / 2.0
    return math.sqrt(semiperim *
                     (semiperim - side1) *
                     (semiperim - side2) *
                     (semiperim - side3))
3c6276d7b4e9f8f0282eec187964112c7b745a7d
16,879
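A quick sanity check of area_triangle_sss, assuming the function above is in scope:

# A 3-4-5 right triangle has semiperimeter 6 and area sqrt(6*3*2*1) = 6.
print(area_triangle_sss(3, 4, 5))  # 6.0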
def _FindOrAddSolution(solutions, name):
    """Find a solution of the specified name from the given list of solutions.

    If no solution with the specified name is found, a solution with the
    specified name is appended to the given list of solutions. This function
    thus always returns a solution.

    Args:
        solutions: The list of solutions to search from.
        name: The solution name to search for.

    Returns:
        The solution with the specified name.
    """
    for solution in solutions:
        if solution['name'] == name:
            return solution

    solution = {'name': name}
    solutions.append(solution)
    return solution
50d7d93a0a43062ceba2abd8677e6ca77596911e
16,880
def classify(df, w):
    """
    Classify result of linear discriminant analysis for different classifiers.

    @param df: pandas dataframe;
    @param w: dict[classifier: (list of weights, weight threshold)];
    @return: df with appended result.
    """
    # get input
    if 'state' in df.columns:
        x = df.drop('state', axis=1).to_numpy(dtype='float64')
    else:
        x = df.to_numpy(dtype='float64')

    # initialize result
    new = df.copy()

    for classifier, wi in w.items():
        # evaluate output
        y = x @ wi[0]

        # append output
        new[f'lda_{classifier}'] = y - wi[1]

        # get states
        states = classifier.split('_')

        # append output
        new[f'lda_{classifier}_class'] = [
            states[0] if i > 0 else states[1] for i in y
        ]
    return new
184d2aa61ef8cb942b8ba69f15148d2b99c22091
16,881
def id2num(s):
    """spreadsheet column name to number
    http://stackoverflow.com/questions/7261936

    :param s: str -- spreadsheet column alpha ID (i.e. A, B, ... AA, AB,...)
    :returns: int -- spreadsheet column number (zero-based index)

    >>> id2num('A')
    0
    >>> id2num('B')
    1
    >>> id2num('XFD')
    16383
    """
    n = 0
    for ch in s.upper():
        n = n * 26 + (ord(ch) - 65) + 1
    return n - 1
a1966821557324a0e95568bf0f63207d8cd3f350
16,886
def is_calibration_point_finished(message):
    """Check if calibration for a calibration marker is done"""
    return "manual_marker_calibration" in str(message[b"name"]) and "Sampled" in str(
        message[b"msg"]
    )
709479eeb552f563b690bee7140120893baa2c06
16,902
def get_ellip(q):
    """Given minor to major axis ratio (q)
    Returns ellipticity"""
    return (1 - q**2) / (1 + q**2)
56f1b2c9d5e821cc55344a3d387efafd82414224
16,903
def clean_up_tokenization(text):
    """
    Clean up a list of simple English tokenization artifacts like spaces
    before punctuations and abbreviated forms. From
    https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py#L1400
    """
    despace_substrings = [".", "?", "!", ",", "'", "n't", "'m", "'s", "'ve", "'re"]
    for s in despace_substrings:
        text = text.replace(f" {s}", f"{s}")

    replacements = {"do not": "don't"}
    # .items() is required here; iterating the dict directly yields only
    # keys and the original `for k, v in replacements:` raised a ValueError.
    for k, v in replacements.items():
        text = text.replace(f" {k}", f" {v}")

    return text
8ecb329e89ddb9a49c23ee4745b574e66359cc6e
16,904
def create_playlist_for_user(spotify_obj, spotify_username, playlist_name):
    """Method that creates a playlist with given name for given username,
    using authorized spotipy.Spotify object. Created playlist ID is returned."""
    playlist = spotify_obj.user_playlist_create(spotify_username, playlist_name)
    return playlist['id']
7a45d8250f58f58bb17ba4dfad0bb73bf9a1796a
16,907
import pickle


def get_reddit_model(fname='models/reddit_regression.pkl'):
    """Load pre-trained reddit model from pickle file"""
    with open(fname, 'rb') as fid:
        reddit_model = pickle.load(fid)
    return reddit_model
8bab0ff3811067830ad9d1a4da824ffa85bda86e
16,911
def inflate_dict(dct, sep=".", deep=-1):
    """Inflates a flattened dict.

    Will look in simple dict of string key with string values to create
    a dict containing sub dicts as values.

    Samples are better than explanation:

    >>> from pprint import pprint as pp
    >>> pp(inflate_dict({'a.x': 3, 'a.y': 2}))
    {'a': {'x': 3, 'y': 2}}

    The keyword argument ``sep`` allows to change the separator used to get
    subpart of keys:

    >>> pp(inflate_dict({'etc/group': 'geek', 'etc/user': 'bob'}, "/"))
    {'etc': {'group': 'geek', 'user': 'bob'}}

    Warning: you cannot associate a value to a section:

    >>> inflate_dict({'section.key': 3, 'section': 'bad'})
    Traceback (most recent call last):
    ...
    TypeError: 'str' object does not support item assignment

    Of course, dict containing only keys that doesn't use separator will be
    returned without changes:

    >>> inflate_dict({})
    {}
    >>> inflate_dict({'a': 1})
    {'a': 1}

    Argument ``deep``, is the level of deepness allowed to inflate dict:

    >>> pp(inflate_dict({'a.b.c': 3, 'a.d': 4}, deep=1))
    {'a': {'b.c': 3, 'd': 4}}

    Of course, a deepness of 0 won't do anychanges, whereas deepness of -1 is
    the default value and means infinite deepness:

    >>> pp(inflate_dict({'a.b.c': 3, 'a.d': 4}, deep=0))
    {'a.b.c': 3, 'a.d': 4}
    """

    def mset(dct, k, v, sep=".", deep=-1):
        if deep == 0 or sep not in k:
            dct[k] = v
        else:
            khead, ktail = k.split(sep, 1)
            if khead not in dct:
                dct[khead] = {}
            mset(dct[khead], ktail, v, sep=sep,
                 deep=-1 if deep < 0 else deep - 1)

    res = {}
    ## sorting keys ensures that colliding values if any will be string
    ## first set first so mset will crash with a TypeError Exception.
    for k in sorted(dct.keys()):
        mset(res, k, dct[k], sep, deep)
    return res
fa929cd7a1b4825fb750755a76efcfab0e3a2666
16,918
def encode_module_value(v):
    """
    For all things not in builtins, return the module name, otherwise just
    return the name.
    """
    mod = v.__module__
    v = getattr(v, "__qualname__", v.__name__)
    if mod == "builtins":
        return v
    return {"module": mod, "name": v}
497b8838f8458ff973bd9d3a30b839b328d0ab11
16,919
from typing import OrderedDict


def csvtable_to_dict(fstream):
    """
    Convert a csv file stream into an in memory dictionary.

    :param fstream: An open file stream to a csv table (with header)
    :returns: A dictionary with a key for each column header and a list of
        column values for each key.
    """
    # Read in the lines from the file stream.
    lines = fstream.readlines()
    # There are two pieces of information we need for the headers:
    # 1. The actual header titles.
    # 2. A map of index to header title
    _ = lines.pop(0).strip("\n").split(",")
    # Retain the order of the columns as they're added.
    table = OrderedDict()
    # A map of row index to the appropriate header.
    indices = {}
    i = 0
    # For each item in the header, mark its index and initialize its column.
    for item in _:
        indices[i] = item
        table[item] = []
        i += 1

    # Walk each line of the table, mapping the columns in the row to their key.
    for line in lines:
        # Split the csv row
        _ = line.split(",")
        # Walk each column and map it.
        for i in range(len(_)):
            table[indices[i]].append(_[i].strip("\n"))

    # Return the completed table
    return table
a562be13f2df806cbdd104eacb7dbca35afd2d35
16,924
def updateCharacterName(old_name: str, new_name: str) -> str:
    """Return a query to update a given character's name."""
    # Note: the values are interpolated directly into the SQL string, so the
    # caller must ensure both names are trusted (SQL injection risk otherwise).
    return (f"UPDATE game_character "
            f"SET name='{new_name}' "
            f"WHERE name='{old_name}';")
eb829e6be49393baf1c007c0331fd45a50050af5
16,930
import click


def user_callback(_ctx, param, value):
    """Testing callback that transforms a missing value to -1 and
    otherwise only accepts 42."""
    if not value:
        return -1
    if value != 42:
        raise click.BadParameter('invalid integer', param=param)
    return value
f6d2a247f68ff37626a5abb7efc3b6c5967a5202
16,932
def np_gather_ijk_index(arr, index):
    """Gather the features of given index from the feature grid.

    Args:
        arr (numpy array): h*w*d*c, feature grid.
        index (numpy array): nx*3, index of the feature grid

    Returns:
        nx*c, features at given index of the feature grid.
    """
    arr_flat = arr.reshape(-1, arr.shape[-1])
    _, j, k, _ = arr.shape
    index_transform = index[:, 0] * j * k + index[:, 1] * k + index[:, 2]
    return arr_flat[index_transform]
3d4ddadadad1fbd44b060be96829496b3ecc4888
16,937
def nested_print(this_name: str, root_dict: dict) -> str:
    """
    Get printable report of the elements of a nested dictionary.

    Parameters:
        this_name (str): nameof(root_dict), where "from varname import nameof".
        root_dict (dict): the dictionary whose elements must be printed.

    Returns:
        output (str): printable report of the elements of a nested dictionary.
    """
    output = ""
    for my_key, my_value in root_dict.items():
        if isinstance(my_key, int):
            my_key_value = f"[{my_key}]"
        elif isinstance(my_key, str):
            my_key_value = f'["{my_key}"]'
        else:
            raise NotImplementedError

        if isinstance(my_value, int):
            my_value_value = f"{my_value}"
        elif isinstance(my_value, str):
            my_value_value = my_value.replace('\n', '<LF>').replace('\r', '<CR>')
            my_value_value = f'"{my_value_value}"'
        else:
            my_value_value = "<unsupported type>"

        if not isinstance(my_value, dict):
            output += f"{this_name}{my_key_value} = {my_value_value}\n"
        else:
            output += f"{this_name}{my_key_value} = dict()\n"
            output += nested_print(this_name + my_key_value, my_value)
    return output
fd409abec98f9c4f3001f17c49a987e594827c0c
16,943
def example_encoded_image(example):
    """Gets image field from example as a string."""
    return example.features.feature['image/encoded'].bytes_list.value[0]
42da58881e1e55533206cfa147ff4a9e3e68fa23
16,946
def default_formatter(item):
    """
    Default formatter (%s)

    :param item: The item to save to file
    :return: The item to be saved to file with a newline appended
    """
    return '%s\n' % item
47603ec796f686a36562520492a591918c1d3041
16,947
def sort_orbitals(element_pdos):
    """Sort the orbitals of an element's projected density of states.

    Sorts the orbitals based on a standard format. E.g. s < p < d.
    Will also sort lm decomposed orbitals. This is useful for plotting/saving.

    Args:
        element_pdos (dict): An element's pdos. Should be formatted as a
            :obj:`dict` of ``{orbital: dos}``. Where dos is a
            :obj:`~pymatgen.electronic_structure.dos.Dos` object. For example::

                {'s': dos, 'px': dos}

    Returns:
        list: The sorted orbitals.
    """
    sorted_orbitals = [
        "s", "p", "py", "pz", "px",
        "d", "dxy", "dyz", "dz2", "dxz", "dx2",
        "f", "f_3", "f_2", "f_1", "f0", "f1", "f2", "f3",
    ]
    unsorted_keys = element_pdos.keys()

    sorted_keys = []
    for key in sorted_orbitals:
        if key in unsorted_keys:
            sorted_keys.append(key)

    return sorted_keys
de9b607895bad3c09709dcf9c9f1692fb07d5f63
16,950
def cached_and_cgi(name, template_func, render):
    """Return 2 functions for testing template in cached and cgi modes."""
    _template = template_func()

    def test_cached():
        # reuse early created template
        render(_template)
    test_cached.__doc__ = "test_%s" % name

    def test_cgi():
        # create new template on each call
        render(template_func())
    test_cgi.__doc__ = "test_%s_cgi" % name

    return test_cached, test_cgi
da616817b7a45cfa0c340f7cbde970e009c35f73
16,957
def get_global_address(address, bank):
    """
    Return the rom address of a local address and bank.

    This accounts for a quirk in mbc3 where 0:4000-7fff resolves to
    1:4000-7fff.
    """
    if address < 0x8000:
        if address >= 0x4000 and bank > 0:
            return address + (bank - 1) * 0x4000
        return address
d72c9022f6c913d9d25f54c48539ea8f68f43b19
16,959
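A quick sketch of the banking arithmetic, assuming get_global_address above is in scope:

# Bank 0 maps straight through (the mbc3 quirk); higher banks shift in
# 0x4000 steps.
assert get_global_address(0x4000, 0) == 0x4000
assert get_global_address(0x4000, 1) == 0x4000
assert get_global_address(0x4000, 2) == 0x8000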
def minibatch(x, batchsize):
    """Group the rows of x into minibatches of length batchsize"""
    return [x[i:(i + batchsize)] for i in range(0, len(x), batchsize)]
026774d1a17454aebe714788eb1bed0ff4d45e3f
16,960
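A quick usage sketch of minibatch, assuming the function above is in scope:

# The final batch is shorter when the length is not a multiple of batchsize.
print(minibatch([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]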
def ni_to_hr(ni, f):
    """Calculate heart rate in beat/min from estimated interval length

    Args:
        ni (int): estimated inter-beat interval length
        f (float): in Hz; sampling rate of input signal

    Returns:
        float: heart rate in beat/min
    """
    if ni == -1:
        return -1
    return 60. * f / ni
541e6f31d9df2fb4645f7b7c501ad123ad1d9660
16,964
def insertion(N, M, i, j):
    """
    Insert M into N at bit positions i..j.

    example input:  N = 0b10000000000, M = 0b10011, i = 2, j = 6
    example output: 0b10001001100
    """
    M_shifted = M << i
    right_mask = (1 << i) - 1   # produces 0...011
    left_mask = -1 << (j + 1)   # produces 1...1000000
    full_mask = right_mask | left_mask
    N_cleared = N & full_mask

    return N_cleared | M_shifted
41e8d80239cbe42c383078d215339438984622f5
16,969
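A quick check of the documented example, assuming insertion above is in scope:

# M lands in bits 2..6 of N; the surrounding bits of N are preserved.
assert bin(insertion(0b10000000000, 0b10011, 2, 6)) == '0b10001001100'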
def l_out(l_in: int, padding: int, dilation: int, kernel: int, stride: int) -> int:
    """
    Determine the L_out of a 1d-CNN model given parameters for the 1D CNN

    :param l_in: length of input
    :param padding: number of units to pad
    :param dilation: dilation for CNN
    :param kernel: kernel size for CNN
    :param stride: stride size for CNN
    :return: length of the output
    """
    return (l_in + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1
85e5d94dbcfdce2c7671674b3ddb7dd77f69728e
16,971
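A quick sanity check of l_out, assuming the function above is in scope (parameters chosen for illustration):

# A kernel of 3 with no padding, stride 1 and dilation 1 trims 2 positions.
assert l_out(l_in=100, padding=0, dilation=1, kernel=3, stride=1) == 98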
def reshape_array(array, new_len):
    """
    array: shape = [M, N]
    new_len: the length of the new array; the reshaped shape will be
        [M // new_len, new_len, N]
    """
    M, N = array.shape
    m = M // new_len
    return array[: m * new_len, :].reshape([m, new_len, N])
38eefb3ec7caa97c15775b06dcef5a8d0bf7b42a
16,972
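A quick usage sketch of reshape_array with numpy, assuming the function above is in scope (the array is illustrative):

import numpy as np

a = np.arange(14).reshape(7, 2)  # M=7, N=2
# Rows beyond the largest multiple of new_len (here the 7th row) are dropped.
print(reshape_array(a, 3).shape)  # (2, 3, 2)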
def change_list_to_dict(list_to_convert, key):
    """
    Changes a list into a dictionary using the 'key' parameter as the
    dictionary key.

    Assumes the key is in each dictionary.
    Assumes the key is unique.
    """
    dict_to_export = {}
    for each_obj_in_list in list_to_convert:
        # Check whether this object's key *value* already exists in the
        # dictionary (the original checked the key name itself, which never
        # detected duplicates).
        if each_obj_in_list[key] in dict_to_export:
            raise Exception("This key name is not unique.")
        dict_to_export[each_obj_in_list[key]] = each_obj_in_list
    return dict_to_export
7f624fe85469b0cfddf7723cdb561f1d6c2049ef
16,977
def get_shape_from_value_info(value):
    """Get shape from a value info.

    :param value: the value_info proto
    :return: list of the shape
    """
    return [d.dim_value for d in value.type.tensor_type.shape.dim]
77c3216cffd93900b50bb85ad6dfb43dda31b460
16,978
def zero(x):
    """return zero."""
    return 0
d01d0d47730e2fbf800c37fcbe835ca3702216e7
16,980
def decode(encoded_digits: list[str], mapping: dict) -> int:
    """decode a number.

    Use the mapping to decode the encoded digits and combine them to a number.

    Args:
        encoded_digits (list[str]): encoded digits
        mapping (dict): mapping that decodes each segment

    Returns:
        (int) decoded number

    Examples:
        >>> decode(["cf", "fc", "acf"], {"a":"a", "c":"c", "f":"f"})
        117
        >>> decode(["cb", "bc", "acb"], {"a":"a", "c":"c", "b":"f"})
        117
        >>> decode(["fcdb", "bc", "acb"], {"a":"a", "b":"f", "c":"c", "d":"d", "f":"b"})
        417
    """
    digits = {
        "abcefg": "0",
        "cf": "1",
        "acdeg": "2",
        "acdfg": "3",
        "bcdf": "4",
        "abdfg": "5",
        "abdefg": "6",
        "acf": "7",
        "abcdefg": "8",
        "abcdfg": "9",
    }
    result = ""
    for digit in encoded_digits:
        decoded_segments = ""
        for segment in digit:
            decoded_segments += mapping[segment]
        decoded_segments = "".join(sorted(decoded_segments))
        result += digits[decoded_segments]
    return int(result)
be4cbd2fbc8be31b1126676a3a115b345b598c8a
16,984
def _is_float(string_inp: str) -> bool:
    """Method to check if the given string input can be parsed as a float"""
    try:
        float(string_inp)
        return True
    except ValueError:
        return False
c9a798b551b5ef0e9460b0d16f48757510237b73
16,985
def str2bool(x):
    """Converts a string to boolean type.

    If the string is any of ['no', 'false', 'f', '0'], or any capitalization,
    e.g. 'fAlSe', then returns False. All other strings are True.
    """
    if x is None or x.lower() in ['no', 'false', 'f', '0']:
        return False
    else:
        return True
b3360b999370137ed5b74e3a1a7d8ddaf17be03f
16,992
def allsame(iterable):
    """Return whether all elements of an iterable are the same.

    The test uses `!=` to compare, and short-circuits at the first item
    that is different.

    If `iterable` is empty, the return value is `True` (like for `all`).
    If `iterable` has just one element, the return value is `True`.

    **CAUTION**: Consumes consumable iterables.
    """
    it = iter(iterable)
    try:
        x0 = next(it)
    except StopIteration:
        return True  # like all(()) is True
    for x in it:
        if x != x0:
            return False
    return True
7859dcc19a0385978f7f55ae4b35151a85a303a6
16,994
import re


def isfilepath(value):
    """
    Return whether or not given value is a Win or Unix file path and
    returns its type.

    If the value is a Win or Unix file path, this function returns
    ``(True, Type)``, otherwise ``(False, Type)``.

    Examples::

        >>> isfilepath('c:\\path\\file (x86)\\bar')
        (True, 'Win')

        >>> isfilepath('/path')
        (True, 'Unix')

        >>> isfilepath('c:/path/file/')
        (False, 'Unknown')

    :param value: string to validate file path
    """
    win_path = re.compile(r'^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$')
    nix_path = re.compile(r'^(/[^/\x00]*)+/?$')

    if win_path.match(value):
        # check windows path limit see:
        # http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
        if len(value[3:]) > 32767:
            return False, 'Win'
        return True, 'Win'
    elif nix_path.match(value):
        return True, 'Unix'
    return False, 'Unknown'
56a423a3b27df5ad0e66291db0bd2698fef5a8b5
16,996
def is_there_a_global(name):
    """
    Simple utility to interrogate the global context and see if something is
    defined yet.

    :param name: Name to check for global definition in this module.

    :returns: Whether the target ``Name`` is defined in module globals and is
        not falsy.
    """
    gl = globals()
    # The original returned `not gl[name]`, which inverted the documented
    # behaviour; truthiness matches the docstring.
    return name in gl and bool(gl[name])
3c7c90dbb20894171162f14b1a4441e072dfa2c2
16,999
def zero_x_encoding(t):
    """0x encoding method.

    >>> zero_x_encoding("A")
    '0x41'
    >>> zero_x_encoding("ABC")
    '0x414243'
    """
    return "0x" + "".join(hex(ord(c))[2:] for c in t)
b3659f372fee1515584a147dec50fb74bb04db94
17,000
import torch


def train_model(train_loader, model, optimizer, criterion, device):
    """
    Note: train_loss and train_acc are accurate only if drop_last=False is
    set in the loader.

    :param train_loader: y: one_hot float tensor
    :param model:
    :param optimizer:
    :param criterion: set reduction='sum'
    :param device:
    :return:
    """
    model.train(mode=True)
    train_loss = 0
    correct = 0
    for batch_idx, (x, y) in enumerate(train_loader):
        x, y = x.to(device), y.to(device)
        global_prob = model(x)[0]
        if isinstance(criterion, torch.nn.CrossEntropyLoss):
            _, yi = y.max(dim=1)
            loss = criterion(global_prob, yi)
        else:
            loss = criterion(global_prob, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        with torch.no_grad():
            # get the index of the max log-probability
            pred = global_prob.max(1, keepdim=True)[1]
            _, y_idx = y.max(dim=1)
            correct += pred.eq(y_idx.view_as(pred)).sum().item()

    train_loss /= len(train_loader.dataset)
    train_acc = correct / len(train_loader.dataset)
    return {'loss': train_loss, 'acc': train_acc}
43621d0a6a0285960ffb2dad8f19ca3a4eebf29d
17,001
import torch
import math


def softmax(scores: torch.Tensor, base: float = math.e, axis: int = -1) -> torch.Tensor:
    """Returns softmax array for array of scores.

    Converts a set of raw scores from a model (logits) into a probability
    distribution via softmax. The probability distribution will be a set of
    real numbers such that each is in the range 0-1.0 and the sum is 1.0.

    Assumes input is a pytorch tensor: tensor([1.0, 4.0, 2.0, 3.0])

    Args:
        scores (pytorch tensor): a pytorch tensor of any positive/negative
            real numbers.
        base (float): the base for the exponential (default e)
        axis (int): the dimension along which to normalize (default -1)
    """
    exps = base ** scores.float()  # exponential for each value in array
    sum_exps = torch.sum(exps, dim=axis, keepdim=True)  # sum of all exponentials
    prob_dist = exps / sum_exps  # normalize exponentials
    return prob_dist
f2719094a7de73e362a944e28491e34fc56e67d9
17,004
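A quick usage sketch of softmax, assuming the function above is in scope (scores chosen so the result is exact):

import math
import torch

scores = torch.tensor([0.0, math.log(3.0)])
# exp(0) = 1 and exp(log 3) = 3, so the distribution is [1/4, 3/4].
print(softmax(scores))  # tensor([0.2500, 0.7500])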
from typing import Optional
import json


def read_json_file(path: str, silent: bool = True) -> Optional[dict]:
    """
    Convenience function to read a json file with catching exceptions

    :param path: Path to the json file
    :param silent: Whether to ignore exceptions or not
    :return: Optional[dict]
    """
    content = None
    exception = None
    try:
        with open(path, 'r') as json_file:
            content = json.load(json_file)
    except FileNotFoundError as file_not_found_error:
        exception = file_not_found_error
    except json.decoder.JSONDecodeError as json_decode_error:
        exception = json_decode_error

    if not silent and exception:
        raise exception

    return content
0793ebd769a40a64ecfd8d65e0f51100f52c51c9
17,006
def summarize_cag_taxa(cag_id, cag_tax_df, taxa_rank):
    """Helper function to summarize the top hit at a given rank."""
    # If there are no hits at this level, return a placeholder summary.
    if cag_tax_df is None:
        return {
            "CAG": cag_id,
            "name": 'none',
            "label": "No genes assigned at this level"
        }

    # Return the top hit
    return {
        "CAG": cag_id,
        "name": cag_tax_df["name"].values[0],
        "label": "{}<br>{:,} genes assigned".format(
            cag_tax_df["name"].values[0],
            int(cag_tax_df["count"].values[0])
        )
    }
e3b2736bf223490c5f3b9957b1734eeeffa2de07
17,008