content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def split_string(string: str, indices: list) -> list:
    """Split *string* at consecutive index pairs.

    Notes:
        Characters before the first index and after the last index are
        dropped; nothing in the middle can be skipped.

    Examples:
        >>> split_string('abcde', [1, 2, 4])
        ['b', 'cd']
    """
    pieces = []
    for start, stop in zip(indices, indices[1:]):
        pieces.append(string[start:stop])
    return pieces
0850c4d0f18b70cbd75790e34580ea0567ddea05
18,308
def fact_iter(n):
    """Compute n! iteratively.

    1. number of times around loop is n
    2. number of operations inside loop is a constant
    3. overall just O(n)

    >>> fact_iter(5)
    120
    >>> fact_iter(10)
    3628800
    """
    result = 1
    i = 1
    while i <= n:
        result *= i
        i += 1
    return result
063abee825c8407b276f839543eb6985a90ac653
18,316
def execute(command, document):
    """Repeatedly call *command* on *document* until the result is no longer callable."""
    result = command
    while callable(result):
        result = result(document)
    return result
46f34e8eb04a21aee303f3b13088d7bc4be1c16e
18,322
def _model_field_values(model_instance) -> dict: """Return model fields values (with the proper type) as a dictionary.""" return model_instance.schema().dump(model_instance)
aeffb9cfc9304dec5672be6169c0909f2afbb3cb
18,324
import re


def is_in_file(path, search_string):
    """Return True if any line of the file at *path* matches *search_string*.

    Works like ``grep``: *search_string* is treated as a regular expression
    and matched line by line.

    Parameters
    ----------
    path : str
        The path to a file.
    search_string : str
        The regular expression to be located in the file.
    """
    pattern = re.compile(search_string)
    with open(path, 'r') as handle:
        return any(pattern.search(line) for line in handle)
9912db6a81551e6b930bbf9b3c11168a34b91fe5
18,325
def rc_one_hot_encoding(encoded_seq):
    """Reverse complement a one-hot encoded sequence.

    Reversing the first axis flips the sequence order; reversing the
    second swaps each base channel into its complement.
    """
    reversed_both = encoded_seq[::-1, ::-1]
    return reversed_both
7dbd65c384cdd89dbb76698fe792fa85b0ff4206
18,327
import uuid


def format_jsonrpc_msg(method, params=None, *, notification=False):
    """Build a JSON RPC 2.0 message dictionary.

    Parameters
    ----------
    method: str
        Method name.
    params: dict or list, optional
        List of args or dictionary of kwargs.
    notification: boolean
        If the message is a notification, no response will be expected
        (no ``id`` field is attached).
    """
    message = {"jsonrpc": "2.0", "method": method}
    if params is not None:
        message["params"] = params
    if not notification:
        message["id"] = str(uuid.uuid4())
    return message
23797940fb64efd7cc39da6871a377268ace57a7
18,331
import gzip
import base64


def decompress(data):
    """
    Base64-decode then gzip-decompress *data*.

    :param data: Base64 (bytes or str) data to be decoded
    :return: Decompressed and decoded bytes
    :raises RuntimeError: if *data* is neither str nor bytes
    """
    if isinstance(data, str):
        raw = data.encode('utf-8')
    elif isinstance(data, bytes):
        raw = data
    else:
        raise RuntimeError("Compression is only supported for strings and bytes")
    return gzip.decompress(base64.b64decode(raw))
30dff2aad4facbece190a2b2719fb96b840fd6ee
18,334
from typing import Dict


def largest_valued_key(dic: Dict[str, set]) -> str:
    """Return the key whose set value has the greatest length (first wins on ties)."""
    best_key, best_len = None, -1
    for candidate, members in dic.items():
        size = len(members)
        if size > best_len:
            best_key, best_len = candidate, size
    assert isinstance(best_key, str)
    return best_key
2d4312217b93560514fb717251028baaeee7a6fe
18,335
def validate_entangler_map(entangler_map, num_qubits, allow_double_entanglement=False):
    """Validate a user supplied entangler map and convert entries to ints.

    Args:
        entangler_map (list[list]) : An entangler map, keys are source qubit
            index (int), value is array of target qubit index(es) (int)
        num_qubits (int) : Number of qubits
        allow_double_entanglement (bool): Whether two qubits may be entangled
            with each other in both directions

    Returns:
        list: Validated/converted map

    Raises:
        TypeError: entangler map is not list type or list of list
        ValueError: the index of entangler map is out of range
        ValueError: the qubits are cross-entangled.
    """
    if isinstance(entangler_map, dict):
        raise TypeError("The type of entangler map is changed to list of list.")
    if not isinstance(entangler_map, list):
        raise TypeError("Entangler map type 'list' expected")
    for src_to_targ in entangler_map:
        if not isinstance(src_to_targ, list):
            raise TypeError('Entangle index list expected but got {}'.format(type(src_to_targ)))
    # Removed dead `ret_map = []` assignment that was immediately overwritten.
    ret_map = [[int(src), int(targ)] for src, targ in entangler_map]
    for src, targ in ret_map:
        if src < 0 or src >= num_qubits:
            raise ValueError(
                'Qubit entangle source value {} invalid for {} qubits'.format(src, num_qubits))
        if targ < 0 or targ >= num_qubits:
            raise ValueError(
                'Qubit entangle target value {} invalid for {} qubits'.format(targ, num_qubits))
        if not allow_double_entanglement and [targ, src] in ret_map:
            raise ValueError('Qubit {} and {} cross-entangled.'.format(src, targ))
    return ret_map
5c329eadd1aa1775e0df403ad5c0fe728e8b2750
18,336
def keyName(*args):
    """Sort integer values ascending and join with hyphens — used as a unique key.

    Accepts any values convertible to int (ints or numeric strings).
    """
    # Single pass: coerce to int, sort numerically, render as strings.
    # (The old code built two intermediate lists and unpacked *args redundantly.)
    return '-'.join(str(i) for i in sorted(int(i) for i in args))
4eea1461cc308e8e4491f9c970ee23473e48c5a7
18,339
def one_quarter_right_rotation_escalators(escalator):
    """
    Return the escalator's two endpoints rotated a quarter turn right.

    Assumes the escalator is defined on a 4×4 board, so a point (r, c)
    maps to (c, 3 - r).
    """
    (row_a, col_a), (row_b, col_b) = escalator
    return ((col_a, 3 - row_a), (col_b, 3 - row_b))
22e4dfe3fc63f3cc1e81450b921e5c64c00e841e
18,343
def _decorate_tree(t, series):
    """
    Attaches some default values on the tree for plotting.

    Parameters
    ----------
    t: skbio.TreeNode
        Input tree
    series: pd.Series
        Input pandas series
    """
    for i, n in enumerate(t.postorder()):
        n.size = 30
        if n.is_root():
            n.size = 50  # root drawn larger than other nodes
        elif n.name == n.parent.children[0].name:
            n.color = '#00FF00'  # left child is green
        else:
            n.color = '#FF0000'  # right child is red
        if not n.is_tip():
            # NOTE(review): this assigns to the ROOT's ``length`` for every
            # internal node, overwriting it each iteration — presumably
            # ``n.length = series.loc[n.name]`` was intended; confirm with callers.
            t.length = series.loc[n.name]
    return t
2e7a14c19882938a6b6132c8a09764ab641f2be2
18,346
def format_response(resp, body):
    """Format an http.client.HTTPResponse (status line, headers, body) for logging."""
    lines = [f'{resp.status} {resp.reason}']
    lines.extend(f'{name}: {value}' for name, value in resp.getheaders())
    return '\n'.join(lines) + '\n\n' + body
5866723df307a66a710bb105dc31ed9662780d78
18,350
from typing import Tuple


def _should_include(key: str, split_range: Tuple[float, float]) -> bool:
    """
    Hashes key to decimal between 0 and 1 and returns whether it falls
    within the supplied range.
    """
    # NOTE(review): ``hash()`` on strings is salted per interpreter run
    # (PYTHONHASHSEED), so the same key can land in different splits across
    # runs — confirm whether a stable hash (e.g. hashlib) was intended.
    max_precision_order = 10000
    # Bucket the hash into one of 10000 evenly spaced decimals in [0, 1).
    decimal_hash = (hash(key) % max_precision_order) / max_precision_order
    # Half-open on the left: a hash exactly at split_range[0] is excluded.
    return split_range[0] < decimal_hash <= split_range[1]
28cb494f5ca4681d04d3568b0c5e74a1e4d28ee3
18,355
def extendedEuclideanAlgorimth(a, b):
    """
    Compute the coefficients of Bezout's identity.

    Bezout's identity: for integers a and b there exist integers x and y
    such that a * x + b * y = gcd(a, b).

    Parameters
    ----------
    a: int
        One of the numbers used to compute the coefficients.
    b: int
        One of the numbers used to compute the coefficients.

    Returns
    -------
    out: tuple
        The coefficients (x, y) of Bezout's identity.
    """
    old_r, r = a, b
    old_s, s = 1, 0
    old_t, t = 0, 1
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    return old_s, old_t
37f5cd45254e358bc0c0c267d2b1d77351d04711
18,356
def make_vlan_name(parent, vlan_id):
    """
    Create a VLAN name of the form ``parent.vlan_id``.

    Parameters
    ----------
    parent : str
        The parent interface.
    vlan_id :
        The vlan id.

    Returns
    -------
    str
        The VLAN name.
    """
    return f'{parent}.{vlan_id}'
52793977737792726066de674d3da854bd3cf129
18,358
def index_to_coord(index):
    """Return relative chunk coordinates (x, y, z) for a chunk *index*.

    The index encodes y*256 + z*16 + x.

    Args:
        index (int): Index of a chunk location.
    """
    y, remainder = divmod(index, 256)
    z, x = divmod(remainder, 16)
    return x, y, z
35c3aa7efdd9d820c1e8ca1e269d74a4b7d082ca
18,359
import torch


def orthogonal_random_matrix_(rows, columns, device):
    """Generate a random matrix whose columns are orthogonal to each other
    (in groups of size `rows`) and whose norms are drawn from the chi
    distribution with `rows` degrees of freedom (namely the norm of a
    `rows`-dimensional vector distributed as N(0, I)).

    Parameters
    ----------
    rows : int
        Number of rows; also the size of each orthogonal column group.
    columns : int
        Number of columns of the returned matrix.
    device : torch.device or str
        Device on which to allocate the matrix.
    """
    w = torch.zeros([rows, columns], device=device)
    start = 0
    while start < columns:
        end = min(start + rows, columns)
        block = torch.randn(rows, rows, device=device)
        # Row norms of the Gaussian block: chi-distributed with `rows` dof.
        norms = torch.sqrt(torch.einsum("ab,ab->a", block, block))
        # torch.qr is deprecated and removed in recent torch releases;
        # torch.linalg.qr (default mode='reduced') is the drop-in replacement.
        Q, _ = torch.linalg.qr(block)  # Q is orthonormal
        w[:, start:end] = (
            Q[:, :end - start] * norms[None, :end - start]
        )
        start += rows
    return w
bfd40861fb78ecebe7cfceeaf5ab32044cf1a8dc
18,361
def parse_pairs(pairs):
    """Parse lines like 'X=5,Y=56,' (note trailing comma) into a dict.

    # e.g. ENST00000002501=0.1028...,ENST00000006053=0.1684...,
    # the last element after splitting on ',' is always "".
    """
    result = {}
    for chunk in pairs.split(",")[:-1]:
        parts = chunk.split("=")
        result[parts[0]] = parts[1]
    return result
f39c329e74ec0e749754386cbffbea186ba43a3c
18,363
def drop_peaks(dataframe, data, cutoff):
    """
    Filter out peaks larger than a cut-off value in a data series.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Dataframe from which the peaks need to be removed.
    data : str
        Name of the column to use for the removal of peak values.
    cutoff : int
        Rows whose absolute value in *data* exceeds this are dropped.

    Returns
    -------
    pd.DataFrame
        Dataframe with the peaks removed.
    """
    peak_index = dataframe[dataframe[data].abs() > cutoff].index
    return dataframe.drop(peak_index)
d645b8519a7da9d4dff588ccc1bbf50ecc801d34
18,366
def decap(df, neck, end_func=None):
    """Separate the head (first *neck* rows) from the body of a dataframe.

    *end_func(head, body)* decides how many body rows to keep; the default
    keeps all but the last row.
    """
    def _default_end(head, body):
        return len(body) - 1

    if end_func is None:
        end_func = _default_end
    head = df.iloc[:neck]
    body = df.iloc[neck:]
    body = body[:end_func(head, body)]
    return head, body
06da0736ce37308a2794722ddc502789fd9a0a5e
18,368
def get_maximal_alignment(address):
    """
    Return the largest power-of-two alignment (capped at 256) that evenly
    divides *address*.

    Bug fix: the previous version doubled once too often, e.g. reporting
    8 for an address that is only 4-byte aligned and 2 for odd addresses.
    """
    alignment = 1
    # Only advance while the NEXT power of two still divides the address.
    while alignment < 256 and address % (alignment * 2) == 0:
        alignment *= 2
    return alignment
efd24a7030b968d9f7630390077b3a0642fd30a2
18,374
def intersection(iterableA, iterableB, key=lambda x: x):
    """Return the intersection of two iterables with respect to a `key` function.

    Each result entry pairs the grouped items from A and B sharing a key.
    Useful e.g. to compare two sets of strings case-insensitively.
    """
    def group_by_key(iterable):
        grouped = {}
        for element in iterable:
            grouped.setdefault(key(element), []).append(element)
        return grouped

    groups_a = group_by_key(iterableA)
    groups_b = group_by_key(iterableB)
    return [(groups_a[k], groups_b[k]) for k in groups_a if k in groups_b]
1e38162dc7508e5621618b089d64be8ae4b57669
18,378
def get_location_from_action_name(action):
    """Return the location encoded in an action name, or None.

    Recognized prefixes (checked in order): 'at_', 'go_to_', 'look_at_'.
    """
    for prefix in ('at_', 'go_to_', 'look_at_'):
        if action.startswith(prefix):
            return action[len(prefix):]
    return None
c918630b03c4b233d6b976ebb31fd295a94dee2b
18,380
import itertools


def subsequences(iterable, seq=2):
    """Return an iterator of consecutive subsequences of *iterable*.

    Each yielded element is a tuple of `seq` elements that appeared
    consecutively in the iterable.
    """
    copies = itertools.tee(iterable, seq)
    for offset, it in enumerate(copies):
        # Advance the i-th copy by i elements so zip pairs neighbours.
        for _ in range(offset):
            next(it, None)
    return zip(*copies)
da436028d37f74729a2b5c2dfb74716da61efb2d
18,382
def sign(x):
    """
    Return 1 if x is positive, -1 if it's negative, and 0 if it's zero.
    """
    if x == 0:
        return 0
    return 1 if x > 0 else -1
7625903a16419c8914b92c2c1273c34bd646d9d2
18,383
def copy_metadata(nb_data):
    """Copy notebook-level metadata from loaded jupyter notebook JSON.

    Args:
        nb_data (JSON): json data loaded from a jupyter notebook

    Returns:
        dict: metadata copied from nb_data
    """
    keys = ("metadata", "nbformat", "nbformat_minor")
    return {key: nb_data[key] for key in keys}
bfd0c0e53097b4a47150b5a2d7a35fabbcc03098
18,387
def get_html_name(form, name):
    """Return the name used in the HTML form for the given form instance
    and field name (delegates to the form's own prefixing)."""
    prefixed = form.add_prefix(name)
    return prefixed
8ae42f5abbcf9e8131b0edb6868414e1af5a29d8
18,388
def to_lower_camel_case(string):
    """
    Convert a snake_case string to lowerCamelCase.

    Args:
        string (str): input string in any case

    Returns:
        str: string converted to lower camel case

    Example:
        >>> to_lower_camel_case('snake_case_string')
        'snakeCaseString'
    """
    first, *rest = string.split('_')
    if first:
        # Lower-case only the first letter of the first component.
        first = first[0].lower() + first[1:]
    return first + ''.join(part.title() for part in rest)
6d8ba39e1de7fdc0453712d6bbc0221685163ad5
18,389
def ip_key(ip):
    """
    Return a dotted-quad IP address as a tuple of ints.

    Used as a sort key so IP addresses order numerically, not lexically.
    """
    return tuple(map(int, ip.split('.')))
69082fe54aae5b060cbc95b5290d73fdb2bf275b
18,392
def df_move_column(df, column_name, new_location):
    """Return a copy of *df* with *column_name* moved to the integer
    position *new_location*."""
    order = df.columns.tolist()
    order.remove(column_name)
    order.insert(new_location, column_name)
    result = df.copy()
    return result[order]
2b978af20f9cc8d89c91450e136e46947028f741
18,400
def beta2_mu(four_mass2_over_q2_):
    """Calculate β_μ² = 1 − 4m²/q² from the precomputed ratio argument."""
    beta_squared = 1.0 - four_mass2_over_q2_
    return beta_squared
64770e7e023fe6fff7a3391a8d6385f0cddd6dde
18,402
import hashlib


def md5_for_file(filename, block_size=2**20):
    """
    Calculate the MD5 of the given file, reading it in chunks.

    :param filename: The file to read in
    :param block_size: How much of the file to read in at once (bytes).
        Defaults to 1 MB
    :returns: The md5 hash object (call ``.hexdigest()`` for the digest)
    """
    md5 = hashlib.md5()
    # ``with`` guarantees the handle is closed (the old code leaked it).
    with open(filename, 'rb') as f:
        while True:
            data = f.read(block_size)
            if not data:
                break
            md5.update(data)
    return md5
a972b43c7fc15e92c897101b9bff23e0392152be
18,406
from pathlib import Path


def find_root_folder(start_file: Path):
    """
    Find the root package folder from a file within the package.

    Raises
    ------
    ValueError
        If *start_file* is not inside a package (no ``__init__.py``).
    """
    # Start from the containing directory when given a file.
    current = start_file if start_file.is_dir() else start_file.parent
    if not (current / "__init__.py").exists():
        raise ValueError("File not part of a package")
    # Climb while the parent directory is still a package.
    while (current.parent / "__init__.py").exists():
        current = current.parent
    return current
c3e7e2af6d7ec40359ca30443b0323044314fe32
18,409
import importlib


def get_simulator_api(api, reload=False):
    """Get the BioSimulators API for a simulator.

    Args:
        api (:obj:`str`): module which implements the API for the simulator
        reload (:obj:`bool`, optional): whether to reload the API

    Returns:
        :obj:`types.ModuleType`
    """
    mod = importlib.import_module(api)
    # importlib.reload returns the (same) module object.
    return importlib.reload(mod) if reload else mod
088b129ce31d246af4d85800d0192ee3cf44092e
18,419
def list_intersection(lst1, lst2):
    """
    Intersection of two lists.

    From: https://stackoverflow.com/questions/3697432/how-to-find-list-intersection
    Uses a list comprehension for short lists (preserving lst1 order and
    duplicates) and built-in set intersection once either list has more
    than 10 elements.

    Parameters
    ----------
    lst1, lst2 : list
        Python lists.

    Returns
    -------
    list
        Common elements of both input lists.

    Examples
    --------
    >>> list_intersection([4, 9, 1, 17, 11], [9, 9, 74, 21, 45, 11, 63])
    [9, 11]
    """
    if len(lst1) > 10 or len(lst2) > 10:
        return list(set(lst1).intersection(lst2))
    return [item for item in lst1 if item in lst2]
bc9a416dd4fb4d143308c95407ef7f8b5ce52fc0
18,427
def longest_in_list(l: list) -> str:
    """
    Return the item with the longest string representation.

    Bug fix: every comparison now uses ``len(str(...))``; the old code
    compared ``len(longest)`` directly and crashed with TypeError on
    mixed-type lists (e.g. an int already stored as the running longest).
    Returns '' for an empty list.
    """
    longest = ''
    for item in l:
        if len(str(item)) > len(str(longest)):
            longest = item
    return longest
43de1bd7a237b336cdb5cb90eb7ecaa6663be2de
18,433
import logging


def add_console_handler(formatter, level=logging.INFO):
    """Create and return a StreamHandler that logs to the console at the
    specified level with the given formatter."""
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(formatter)
    return handler
2bfdfe427ea32ed206f36f78a105437b98014f67
18,434
from typing import Dict


def create_query_string(query_dict: Dict[str, str]):
    """Build a '?k1=v1&k2=v2' query string from a dict; empty dict yields ''."""
    pairs = ['{}={}'.format(key, value) for key, value in query_dict.items()]
    if not pairs:
        return ''
    return '?' + '&'.join(pairs)
a83ded8a5cc516ea79547c161e84fc4caec4fe71
18,435
def pytest_ignore_collect(path, config):
    """
    Only load tests from feature definition (.toml) files; ignore the rest.
    """
    return path.ext != ".toml"
80d193ff28a7f2f903ec5d4dd09d13973e066dcf
18,446
import math


def circle_touching_line(center, radius, start, end):
    """
    Return True if the circle intersects the given line SEGMENT (not the
    infinite line).

    Solves |start + u*(end-start) - center|² = radius² for u and checks
    whether a root lies within [0, 1].

    :param center: Center of the circle (has .x/.y).
    :type center: Vector
    :param radius: Radius of the circle.
    :type radius: float
    :param start: The first end of the line segment.
    :type start: Vector
    :param end: The second end of the line segment.
    :type end: Vector
    """
    dx = end.x - start.x
    dy = end.y - start.y
    a = dx**2 + dy**2
    b = 2 * dx * (start.x - center.x) + 2 * dy * (start.y - center.y)
    c = (center.x**2 + center.y**2 + start.x**2 + start.y**2
         - 2 * (center.x * start.x + center.y * start.y) - radius**2)

    discriminant = b**2 - 4 * a * c
    if discriminant < 0:
        return False
    if discriminant == 0:
        u = v = -b / float(2 * a)
    else:
        root = math.sqrt(discriminant)
        u = (-b + root) / float(2 * a)
        v = (-b - root) / float(2 * a)

    # Both roots before the segment start, or both past its end: no hit.
    if u < 0 and v < 0:
        return False
    if u > 1 and v > 1:
        return False
    return True
51dd2c4d9f07bb68e326a7ea1d2c25e65fe93513
18,453
import functools
import operator


def prod(collection):
    """Product of all elements in the collection.

    Returns 1 for an empty collection (the multiplicative identity) —
    the old call without an initializer raised TypeError on empty input.
    """
    return functools.reduce(operator.mul, collection, 1)
225c9d437e1ade873de26bb6ef6b157daa3545a0
18,457
def limit(self, start_or_stop=None, stop=None, step=None):
    """
    Create a new table with fewer rows.

    See also: Python's builtin :func:`slice`.

    :param start_or_stop:
        If the only argument, then how many rows to include, otherwise,
        the index of the first row to include.
    :param stop:
        The index of the last row to include.
    :param step:
        The size of the jump between rows to include. (`step=2` will return
        every other row.)
    :returns:
        A new :class:`.Table`.
    """
    # NOTE(review): truthiness means stop=0 or step=0 is treated as "not
    # given" and falls through to the single-argument slice — presumably
    # acceptable since such slices are empty/invalid anyway; confirm.
    if stop or step:
        s = slice(start_or_stop, stop, step)
    else:
        s = slice(start_or_stop)
    rows = self._rows[s]
    # Keep row names aligned with the sliced rows, if this table has them.
    if self._row_names is not None:
        row_names = self._row_names[s]
    else:
        row_names = None
    # _fork builds a new Table sharing this table's column metadata.
    return self._fork(rows, row_names=row_names)
b101ed9eba1b5771b7acbd555ae41c4365cea1d3
18,471
def format_datetime_iso(obj):
    """Return *obj* formatted as ISO 8601 (YYYY-MM-DDTHH:MM:SS, second
    precision, no timezone suffix)."""
    iso_format = '%Y-%m-%dT%H:%M:%S'
    return obj.strftime(iso_format)
88d87a81d387f7dab906b2a9775208a1b82b3fce
18,475
import json
import random
import time
from typing import Union
import pathlib


def save_json_file(json_file: Union[str, pathlib.Path], dictionary_to_save: dict, retries: int = 3) -> None:
    """
    Writes a new JSON file to disk. If the file exists, it will be overwritten.

    :param json_file: JSON file to write into
    :param dictionary_to_save: dictionary serialized to JSON
    :param retries: If file is locked for any reason, retry writing this number of times
    :return: None
    """
    assert isinstance(retries, int), "Retries parameter must be an integer"
    assert retries >= 0, "Retries must be a positive integer"
    assert isinstance(json_file, (str, pathlib.Path)), "json_file must be a pathlib.Path() or a string path"
    file_path = pathlib.Path(json_file)
    while retries >= 0:
        retries -= 1
        try:
            # Bug fix: explicit UTF-8 — ensure_ascii=False writes raw
            # non-ASCII characters, which fails under platform default
            # encodings such as cp1252 on Windows.
            with file_path.open("w", encoding="utf-8") as file:
                return json.dump(dictionary_to_save, file, ensure_ascii=False)
        except PermissionError:
            # Back off for a random sub-second interval before retrying.
            wait_time = random.random()
            time.sleep(wait_time)
    raise PermissionError(f"Permission issue while writing JSON: {file_path}")
6145c3b8d68bcdeaa5db9ec7771eebcdb65461ab
18,485
def convert_empty_value_to_none(event, key_name):
    """
    Change an empty string ("" or " "), the literal strings "{}"/"[]",
    or any falsy value (empty list/dict, None, 0) under *key_name* to
    None so it becomes NULL in the database.

    :param event: A dictionary
    :param key_name: The key for which to check for empty values
    :return: The (mutated) dictionary

    Examples:

    .. code-block:: python

        convert_empty_value_to_none({'a_field': ' '}, 'a_field')   # -> {'a_field': None}
        convert_empty_value_to_none({'a_field': '{}'}, 'a_field')  # -> {'a_field': None}
        convert_empty_value_to_none({'a_field': {}}, 'a_field')    # -> {'a_field': None}
    """
    if key_name in event:
        value = event[key_name]
        # isinstance over type(...) == str: idiomatic and covers str subclasses.
        if isinstance(value, str) and (value.strip() == '' or value in ('{}', '[]')):
            event[key_name] = None
        elif not value:
            # Falsy non-strings: empty list/dict, None, 0, False.
            event[key_name] = None
    return event
075b6fb14f22e201392539623454e0166b4c7448
18,490
def build_env_file(conf):
    """
    Construct newline-separated key=val lines from a mapping.

    Parameters
    ----------
    conf : dict
        The key/value pairs.

    Returns
    -------
    str
        The key=val strings joined by newlines.
    """
    lines = [f'{key}={value}' for key, value in conf.items()]
    return "\n".join(lines)
aa6dca869becec055392fef010e504559f276bb7
18,492
import hashlib


def get_md5(str_):
    """
    Hash function — MD5.

    :param str_: origin str (encoded as UTF-8 before hashing)
    :return: hex digest
    """
    return hashlib.md5(str_.encode('utf-8')).hexdigest()
fb905d673ac7407fcaa3f70a822620dd38dbb5e6
18,495
def stringify_value(value):
    """Convert any value to a string.

    None becomes '' and objects with an ``isoformat`` method (dates,
    datetimes) are rendered via it.
    """
    if value is None:
        return ''
    iso = getattr(value, 'isoformat', None)
    if iso is not None:
        value = iso()
    return str(value)
47c3939f06a667eb8e5f8951be827ea1dff325b7
18,497
def make_linear_function(p1, p2):
    """
    Return the linear function defined by two points, p1 and p2.

    For example ``make_linear_function((1, 3), (2, 5))`` returns the
    function f(x) = 2x + 1.
    """
    x1, y1 = p1
    x2, y2 = p2
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - x1 * slope

    def linear(x):
        return x * slope + intercept

    return linear
a358e1757cfa1cd8e7e0a729027659349ec22985
18,502
def time2mins(time_taken):
    """Convert a duration in seconds to minutes.

    Parameters
    ----------
    time_taken : float
        Time in seconds.

    Returns
    -------
    float
        Minutes.
    """
    seconds_per_minute = 60.
    return time_taken / seconds_per_minute
11877ff009e010f6fa633abf0aff87aabbd44ce0
18,509
import sympy
import six


def _coprime_density(value):
    """Return float > 0; asymptotic density of integers coprime to `value`.

    Density = product over distinct prime factors p of (1 - 1/p).
    """
    density = 1.0
    # factorint returns {prime: multiplicity}; iterating yields the primes.
    for prime in sympy.factorint(value):
        density *= 1 - 1 / prime
    return density
1017464c175a68e1ae510a8edf3d2f4ee4b74ba5
18,511
def get_binary_representation(n, num_digits):
    """
    Return *n* as a zero-padded binary string of width *num_digits*.

    Helper for building powersets: each bit marks whether an item is
    included in a subset.

    Parameters: n and num_digits are non-negative ints
    Returns: a num_digits str that is a binary representation of n
    Raises: ValueError if n needs more than num_digits binary digits.
    """
    bits = format(n, 'b') if n > 0 else ''
    if len(bits) > num_digits:
        raise ValueError('not enough digits')
    return bits.zfill(num_digits)
2b2d1e8bc4f964d805e48a8dd0525ee19aa7ab4e
18,514
def merge(list1, list2):
    """
    Merge two sorted lists.

    Returns a new sorted list containing those elements that are in
    either list1 or list2. Iterative; uses index cursors instead of the
    old repeated ``pop(0)``, turning the accidental O(n²) into O(n).
    Ties are taken from list2 first, matching the original ordering.
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list1[i] >= list2[j]:
            merged.append(list2[j])
            j += 1
        else:
            merged.append(list1[i])
            i += 1
    # Append whatever remains of the list that didn't run out.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
c4398faf890337d10400f9c71b49f2de7376f826
18,521
def string_address(address):
    """Make a string representation of a 6-byte address plus type flag.

    *address* holds six octets followed by a flag at index 6 selecting
    'random' vs 'public'. Returns None if fewer than 7 entries.
    """
    if len(address) < 7:
        return None
    octets = ':'.join(format(address[i], '02x') for i in range(6))
    kind = ' random ' if address[6] else ' public '
    return octets + ' ' + kind
23995bca8ce57ae341113eb9273ab2fcca7fbe96
18,525
from typing import Callable


def combine_predicates(*predicates: Callable[..., bool]) -> Callable[..., bool]:
    """
    Combine multiple predicates into a single one.

    The result is true only if all of the predicates are satisfied.
    """
    def conjunction(*args, **kwargs) -> bool:
        return all(p(*args, **kwargs) for p in predicates)

    return conjunction
5805c4bb884dc7c2797353d258bf29c35104b95d
18,527
def getLineAndColumnFromSyntaxItem(syntaxItem):
    """
    Returns a tupel of the line and the column of a tree node.

    Walks up the parent chain until a node carries line/column info or
    the root is passed.
    """
    # ``False`` doubles as the "not found yet" sentinel so a stored value
    # of 0 still counts as found.
    line = False
    column = False
    # NOTE(review): the loop stops as soon as EITHER value is found — a
    # node providing only ``line`` ends the search with column unset.
    # Presumably both attributes always appear together; confirm in the
    # tree model before restructuring.
    while line is False and column is False and syntaxItem:
        line = syntaxItem.get("line", False)
        column = syntaxItem.get("column", False)
        if syntaxItem.hasParent():
            syntaxItem = syntaxItem.parent
        else:
            syntaxItem = None
    # Map the "never found" sentinel to None for callers.
    line = None if line is False else line
    column = None if column is False else column
    return line, column
9094b11865b7d8a477df7b5f480673c06c1981c7
18,530
import fnmatch


def fnmatch_mult(name, patterns):
    """Return True if *name* matches at least one shell-style pattern."""
    for pattern in patterns:
        if fnmatch.fnmatch(name, pattern):
            return True
    return False
d2edf50c42405c4231d075f232b4b9ac7d3a180a
18,533
def prepend_non_digit(string):
    """
    Prefix strings that start with a digit so they become valid HTML IDs.

    Useful in combination with slugify when building IDs from titles,
    since HTML IDs cannot begin with digits.
    """
    starts_with_digit = string[:1].isdigit()
    return "go-to-{0}".format(string) if starts_with_digit else string
34594a7839a40477a986d284986b2f1eb1e1d994
18,534
def patch(attrs, updates):
    """Apply (attr, value) pairs to *attrs* in place; return the replaced
    original values keyed by attribute name."""
    originals = {}
    for name, new_value in updates:
        originals[name] = attrs[name]
        attrs[name] = new_value
    return originals
ec8b8e4862afdc556512a848882a933214a747b4
18,535
def ge(x, y):
    """Implement `ge`: True when x is greater than or equal to y."""
    result = x >= y
    return result
e1aa97783f3f4cc64c0f833fec053c85c506e1e1
18,536
def _sanitize_feature_name(feature_name: str) -> str: """Returns a sanitized feature name.""" return feature_name.replace('"', '')
7f232680502819d5054ee6852ca4a824565839cc
18,546
def sanitise_db_creds(creds):
    """Clean up credential values so the DB driver isn't confused:
    coerce 'port' to int and rename 'password' to 'passwd'."""
    cleaned = {}
    for key, val in creds.items():
        if key == 'port':
            cleaned['port'] = int(val)
        elif key == 'password':
            cleaned['passwd'] = val
        else:
            cleaned[key] = val
    return cleaned
a5f3e8d4aab2f5959a8a03833f7c3be653234126
18,547
import pickle


def save_model(model, filepath="models/"):
    """Save a trained model to filepath (e.g. 'model/filename').

    Args:
        model (var): variable-held trained model (e.g. Linear_Regression)
        filepath (str): path to save model (excluding file extension)

    Returns:
        msg (str): confirmation message
    """
    # ``with`` closes the file even if pickling fails (old code leaked it).
    with open(f"{filepath}.sav", "wb") as fh:
        pickle.dump(model, fh)
    return f"model saved to: {filepath}.sav"
3de1495e4e207998f251a1977e8e21d0af1b0402
18,561
def django_testdir_initial(django_testdir):
    """A django_testdir fixture which provides initial_data."""
    # The generated app's migrations would conflict with loading raw
    # fixture data, so drop them from the test project.
    django_testdir.project_root.join("tpkg/app/migrations").remove()
    # Write a Django JSON fixture with a single app.item row that the
    # test project will auto-load as initial data.
    django_testdir.makefile(
        ".json",
        initial_data="""
    [{
        "pk": 1,
        "model": "app.item",
        "fields": { "name": "mark_initial_data" }
    }]""",
    )
    return django_testdir
1b99e811945bb10a3d74e3a1b2cfe5d52fb2a27b
18,563
def find_brackets(smiles):
    """
    Find the indexes of the first matching pair of parentheses.

    Does not validate that all brackets are complete.

    Parameters
    ----------
    smiles : str

    Returns
    -------
    list
        Index of the first opening bracket and its matching closer
        (may be shorter if no complete pair exists).
    """
    indexes = []
    depth = 0
    for position, character in enumerate(smiles):
        if character == "(":
            if depth == 0:
                indexes.append(position)
            depth += 1
        elif character == ")":
            depth -= 1
            if depth == 0:
                indexes.append(position)
                break
    return indexes
b1b8d40e6f04d7a903b55b85db98a35fb8eab10c
18,568
def get_node_name_parts(obj_name):
    """
    Break a Maya node name into its parts and return them:
        - objectName: a:a:grpA|a:a:grpB|a:b:pSphere1
        - long_prefix: a:a:grpA|a:a:grpB
        - namespace: a:b
        - basename: pSphere1

    :param obj_name: str, name of Maya node
    :return: tuple(str, str, str), tuple with long_prefix, namespace and basename
    """
    if '|' in obj_name:
        obj_name = str(obj_name)
        long_name_parts = obj_name.split('|')
        # Bug fix: join with '|' so the DAG-path prefix keeps its
        # separators (the old ''.join produced 'a:a:grpAa:a:grpB').
        long_prefix = '|'.join(long_name_parts[:-1])
        short_name = long_name_parts[-1]
    else:
        short_name = obj_name
        long_prefix = ''
    if ':' in short_name:
        namespace_parts = short_name.split(':')
        base_name = namespace_parts[-1]
        namespace = ':'.join(namespace_parts[:-1])
    else:
        base_name = short_name
        namespace = ''
    return long_prefix, namespace, base_name
c3c0d47ff7ef791616b93bb0456cb503e4c80140
18,572
def invert_image(image):
    """
    Invert a binary image.

    Args:
        image: a binary image (black and white only)

    Returns:
        An inverted version of the image (each pixel becomes 255 - value).
    """
    inverted = 255 - image
    return inverted
eb466971c77fae2a57ad86a3b555884865ed404a
18,574
def with_api(func):
    """Decorate a method to run inside the instance's client() context manager,
    passing the yielded api object as the second argument."""
    def wrapper(*args):
        instance = args[0]
        with instance.client() as api:
            return func(instance, api, *args[1:])
    return wrapper
9ecbebc9c0599d83d178820ed88d0f6fa5c34ee1
18,576
import torch


def gelu_quick(x):
    """
    Quick approximation of GELU: x * sigmoid(1.702 * x).

    Examples:
        >>> inputs = torch.rand(3, 2)
        >>> assert torch.allclose(gelu_quick(inputs), F.gelu(inputs), atol=1e-2)

    References:
        https://arxiv.org/pdf/1606.08415.pdf
    """
    gate = torch.sigmoid(x * 1.702)
    return gate * x
1fc27f052ae9958cab53f499d906c612ef24f3a8
18,584
from pathlib import Path


def input_custom_variables(string: str, dmvio_folder: str):
    """
    Replace custom variables in the given string:
      ${EVALPATH}   -> path to the evaltools (the folder where this file lives)
      ${DMVIO_PATH} -> path to DM-VIO (*dmvio_folder*)
    """
    evalpath = str(Path(__file__).parent.parent.resolve())
    result = string.replace('${EVALPATH}', evalpath)
    return result.replace('${DMVIO_PATH}', dmvio_folder)
0766874154192a885e49f50f14b5ab9038788ced
18,588
def _GetLines(line_strings): """Parses the start and end lines from a line string like 'start-end'. Arguments: line_strings: (array of string) A list of strings representing a line range like 'start-end'. Returns: A list of tuples of the start and end line numbers. Raises: ValueError: If the line string failed to parse or was an invalid line range. """ lines = [] for line_string in line_strings: # The 'list' here is needed by Python 3. line = list(map(int, line_string.split('-', 1))) if line[0] < 1: raise ValueError('invalid start of line range: %r' % line) if line[0] > line[1]: raise ValueError('end comes before start in line range: %r', line) lines.append(tuple(line)) return lines
d59fc282ef5f7dca251de8b3015eaebd18230f9f
18,590
def match_includes_reaction_center(train_mode, match, atoms_core):
    """
    Determine whether a substructure match includes the full reaction center.

    Parameters
    ----------
    train_mode: Literal["single_reactant", "transition_state"]
        Mode in which diagram was constructed.
    match: tuple
        Indices of substructure match.
    atoms_core: List[int]
        Atom indices belonging to the reaction center.

    Returns
    -------
    includes_rc: bool
        Whether match includes the reaction center (always True outside
        transition_state mode).
    """
    if train_mode == "transition_state":
        return all(core_atom in match for core_atom in atoms_core)
    return True
1da9d3c7304280d24918046ecc8e88ece078040f
18,591
def _get_num_slices(op_slice_sizes): """Returns the number of slices in a list of OpSlice sizes. Args: op_slice_sizes: List of list of slice sizes, where the outer list has a list per op and the inner list is the slice sizes of the op. Returns: Integer max number of slices in the list of ops. """ return max([len(slices) for slices in op_slice_sizes])
55d7170d4e1318fdd72e8c4e6ab1da30d42640e9
18,592
def _explode_lines(shape): """ Return a list of LineStrings which make up the shape. """ if shape.geom_type == 'LineString': return [shape] elif shape.geom_type == 'MultiLineString': return shape.geoms elif shape.geom_type == 'GeometryCollection': lines = [] for geom in shape.geoms: lines.extend(_explode_lines(geom)) return lines return []
689deed3c3674fdc7d0cb12917004bbe9eca2227
18,593
import random


def packet_loss(P):
    """Simulate uniform packet loss with probability P.

    Returns True when the packet should be dropped.
    """
    sample = random.uniform(0, 1)
    return sample < P
a5a7e3ce2a7b23937a4c23ce498cdd1aa5561841
18,594
def default_narrative(
        end_yr,
        value_by,
        value_ey,
        diffusion_choice='linear',
        sig_midpoint=0,
        sig_steepness=1,
        base_yr=2015,
        regional_specific=True,
        fueltype_replace=0,
        fueltype_new=0,
    ):
    """Create a default single-timestep narrative.

    E.g. from value 0.2 in 2015 to value 0.5 in 2050.

    Arguments
    ----------
    end_yr : int
        End year of narrative
    value_by : float
        Value of start year of narrative
    value_ey : float
        Value at end year of narrative
    diffusion_choice : str, default='linear'
        Whether linear or sigmoid
    sig_midpoint : float, default=0
        Sigmoid midpoint
    sig_steepness : float, default=1
        Sigmoid steepness
    base_yr : int
        Base year
    regional_specific : bool
        If regional specific or not

    Returns
    -------
    list
        Single-entry list holding the narrative dict.
    """
    narrative = dict(
        base_yr=base_yr,
        end_yr=end_yr,
        value_by=value_by,
        value_ey=value_ey,
        diffusion_choice=diffusion_choice,
        sig_midpoint=sig_midpoint,
        sig_steepness=sig_steepness,
        regional_specific=regional_specific,
        fueltype_replace=fueltype_replace,
        fueltype_new=fueltype_new,
    )
    return [narrative]
7076e19af13337d52241c4cd35a6ec3392678d3c
18,596
def reshape_bboxes(bboxes):
    """
    Reorder bbox columns from [x1, y1, x2, y2] to [y1, x1, y2, x2].

    bboxes : [num_bboxes, 4] array supporting fancy column indexing.
    Returns a single-element list holding the reordered array.
    """
    column_order = [1, 0, 3, 2]
    return [bboxes[:, column_order]]
23e1b59e77d282d0d9f8a67519bd0933e21c1998
18,598
import time


def datetime_to_millis(dt):
    """
    Convert a ``datetime`` object to milliseconds since epoch.
    ``time.mktime`` interprets the timetuple in local time.
    """
    whole_seconds = int(time.mktime(dt.timetuple()))
    return whole_seconds * 1000
91091c6a84a0001d1ee6847b9912b2590f1cc57f
18,607
def ord_prio(prio):
    """Compute the ordinal number of a text priority

    :param prio: string
    :rtype: integer (unknown priorities map to 5, same as 'low')
    """
    ranks = {
        'urgmust': 1,
        'must': 2,
        'high': 3,
        'medium': 4,
        'low': 5,
    }
    return ranks.get(prio, 5)
fb84a9c7d244bd3c2664bb97cb56f5ec23517671
18,612
def convert_string_to_tuple(creds_string):
    """Recreate a MAAS API credentials tuple from a colon-separated string.

    Raises ValueError unless exactly three colon-separated items are found.
    """
    parts = creds_string.split(':')
    if len(parts) != 3:
        raise ValueError(
            "Malformed credentials string. Expected 3 colon-separated items, "
            "got %r." % creds_string)
    return tuple(parts)
a0f0553553733340d276bbb0f01d44d4ff842008
18,613
def _split_host_and_port(servers): """Convert python-memcached based server strings to pymemcache's one. - python-memcached: ['127.0.0.1:11211', ...] or ['127.0.0.1', ...] - pymemcache: [('127.0.0.1', 11211), ...] """ _host_and_port_list = [] for server in servers: connection_info = server.split(':') if len(connection_info) == 1: _host_and_port_list.append( (connection_info[0], 11211)) elif len(connection_info) == 2: _host_and_port_list.append( (connection_info[0], int(connection_info[1]))) return _host_and_port_list
2f4544566bb00684b99cbbb796ca4a0246891f08
18,617
def get_lomb_signif_ratio(lomb_model, i):
    """
    Get the ratio of the significances (in sigmas) of the ith and first
    frequencies from a fitted Lomb-Scargle model.
    """
    fits = lomb_model['freq_fits']
    return fits[i - 1]['signif'] / fits[0]['signif']
e3f4d8db9a08926be49725c2a10696ede4e6d1b0
18,618
def validate(raw):
    """
    Checks the content of the data provided by the user.

    Users provide tickers to the application by writing them into a file that
    is loaded through the console interface with the <load filename> command.
    We expect the file to be filled with coma separated tickers
    :class:`string`.

    Parameters:
        - `raw` : :class:`string` content of the user provided file.

    The function strips the raw data from spaces, carrier returns and split
    the content around comas. It will also check if there are trailing comas
    or if the user mistakenly put two comas instead of one between tickers.

    Returns a :class:`list` of sanitized, upper-cased tickers.
    """
    tickers = []
    raw = raw.replace(' ', '')   # remove spaces
    raw = raw.replace('\n', '')  # remove carriage returns
    for item in raw.split(','):  # comma split
        # BUG FIX: the original used `item is not ''`, an identity comparison
        # on a string literal. Its result is implementation-dependent and it
        # raises SyntaxWarning on Python >= 3.8; truthiness is the correct
        # empty-string test. The redundant str() wrapper is also dropped.
        if item:
            tickers.append(item.upper())
    return tickers
c69b5b4177e11fabc3f70c0388e3b50f56a201b7
18,620
def expand_qgrams_word_list(wlist, qsize, output, sep='~'):
    """Expands a list of words into a list of q-grams.

    Consecutive windows of *qsize* words are joined with *sep* and appended
    to *output*, which is also returned.
    """
    last_start = len(wlist) - qsize
    for start in range(last_start + 1):
        window = wlist[start:start + qsize]
        output.append(sep.join(window))
    return output
0937f53fa12dded031dec21deb32282c85c904ac
18,621
def convert_thrift_header(thrift_header):
    """returns a dictionary representation of a thrift transaction header"""
    field_names = (
        "actor",
        "block_id",
        "business_unit",
        "create_ts",
        "creator_id",
        "entity",
        "family_of_business",
        "line_of_business",
        "owner",
        "status",
        "transaction_id",
        "transaction_ts",
        "transaction_type",
    )
    return {name: getattr(thrift_header, name) for name in field_names}
f0554da0c10c464633d19a001ca32a0180c42dd0
18,628
def _find_exclude_idx(ch_names, exclude): """Find the index of all channels to exclude. If there are several channels called "A" and we want to exclude "A", then add (the index of) all "A" channels to the exclusion list. """ return [idx for idx, ch in enumerate(ch_names) if ch in exclude]
db754d5e92af59563d6ee2004e5470bfe08a0fc1
18,629
import torch


def compute_jacobian_on_surface(u, v, forward_transform, eps=0.01):
    """Compute the surface Jacobian by central differences.

    ``forward_transform(u, v)`` maps the orthogonal surface coordinates
    (u, v) to 3D Cartesian coordinates (X, Y, Z). The partial derivatives
    [dX/du, dY/du, dZ/du] and [dX/dv, dY/dv, dZ/dv] are estimated with a
    symmetric step of ``eps``.

    Args:
        u, v: coordinate tensors of identical shape.
        forward_transform: callable returning an (x, y, z) tuple of tensors.
        eps: half-step of the central difference.

    Returns:
        Tensor of shape (*u.shape, 3, 2): [..., :, 0] holds the derivatives
        with respect to u and [..., :, 1] those with respect to v.
        (DOC FIX: the previous docstring claimed (u.shape[0], u.shape[1],
        2, 2), which did not match the stacked 3-component output.)
    """
    def central_diff(f_minus, f_plus):
        # Symmetric difference quotient: (f(t + eps) - f(t - eps)) / (2 eps).
        return (f_plus - f_minus) / (2 * eps)

    # Derivatives with respect to u.
    x0, y0, z0 = forward_transform(u - eps, v)
    x1, y1, z1 = forward_transform(u + eps, v)
    d_du = torch.stack(
        (central_diff(x0, x1), central_diff(y0, y1), central_diff(z0, z1)), -1)

    # Derivatives with respect to v.
    x2, y2, z2 = forward_transform(u, v - eps)
    x3, y3, z3 = forward_transform(u, v + eps)
    d_dv = torch.stack(
        (central_diff(x2, x3), central_diff(y2, y3), central_diff(z2, z3)), -1)

    return torch.stack((d_du, d_dv), -1)
3d0a7a749abeba834fe800365dac1c208e16a87a
18,631
def compute_factorial(n: int) -> int:
    """
    Compute n-factorial.

    :param n: Number to compute factorial for
    :return: n-factorial
    :raises ValueError: if n is not a non-negative integer
    """
    if (not isinstance(n, int)) or (n < 0):
        raise ValueError("compute_factorial() only accepts non-negative integer values.")
    result = 1
    multiplier = 2
    while multiplier <= n:
        result *= multiplier
        multiplier += 1
    return result
75061c245376f09ec01e6bcf018d04e938f419c1
18,632
def linear_annealing(n, total, p_initial, p_final):
    """Linearly interpolates a probability between p_initial and p_final.

    Current probability is based on the current step, n. Used to linearly
    anneal the exploration probability of the RLTuner.

    Args:
      n: The current step.
      total: The total number of steps that will be taken (usually the length
        of the exploration period).
      p_initial: The initial probability.
      p_final: The final probability.

    Returns:
      The current probability (between p_initial and p_final).
    """
    if n >= total:
        # Annealing finished: clamp at the final probability.
        return p_final
    return p_initial - (n * (p_initial - p_final)) / total
2f79b56efd11477a1f649e9b374891ff09632c7f
18,633
def get_charging_status(battery_id):
    """
    Check if the battery is currently charging by reading sysfs.

    :param battery_id: Battery ID/Number e.g. BAT0
    :return: bool, True if the battery status reports 'Charging'
    """
    status_path = f'/sys/class/power_supply/{battery_id}/status'
    with open(status_path) as status_file:
        return 'Charging' in status_file.read()
18fa1cc07a4338ec526954342383f346f9cd057c
18,634
def _convertToElementList(elements_list): """ Take a list of element node indexes deliminated by -1 and convert it into a list element node indexes list. """ elements = [] current_element = [] for node_index in elements_list: if node_index == -1: elements.append(current_element) current_element = [] else: # We also add one to the indexes to suit Zinc node indexing current_element.append(node_index + 1) return elements
750a7a7780dc901b7e00cd8a36fdfd3638005322
18,637
def get_common_introduced(db_entry, arches):
    """Returns the common introduction API level or None.

    If the symbol was introduced in the same API level for all architectures,
    return that API level. If the symbol is not present in all architectures
    or was introduced to them at different times, return None.
    """
    common_level = None
    for arch in arches:
        tag = 'introduced-' + arch
        try:
            level = db_entry[tag]
        except KeyError:
            # Symbol missing for this arch: no common level exists.
            return None
        if common_level is None:
            common_level = level
        elif level != common_level:
            return None
    return common_level
f71d5f91faa5a8553cd85cdbd248dea8052b2fab
18,641
def kf_derivative_wrt_density(kf, n):
    """Computes the derivative of kf with respect to density, kf / (3 * n).

    Parameters
    ----------
    kf : array-like
        The fermi momentum in fm^-1
    n : array-like
        The density in fm^-3

    Returns
    -------
    d(kf)/d(n) : array-like
        In units of fm^2
    """
    denominator = 3 * n
    return kf / denominator
76dd7581f0248a0f7c5f01706cf6f23fcf27f079
18,643
def extract_phone_number(num, replacement):
    """Strip a phone-number string down to its digits.

    Returns the digit string when exactly ten digits are found. Otherwise
    returns *replacement* when it equals "--blank--", else the original
    *num* unchanged.
    """
    digits = "".join(ch for ch in num if ch.isdigit())
    if len(digits) == 10:
        return digits
    return replacement if replacement == "--blank--" else num
cf49aa0f2cea5974feb487385d76349430e3b5f7
18,644
def get_region_string(point, size=0.1):
    """
    Construct a string of coordinates that create a box around the specified
    point.

    Parameters
    ----------
    point : list of float
        Latitude and longitude of the specified point.
    size : float, optional
        Side length of the output square.

    Returns
    ----------
    str
        A string with coordinates for each corner of the box. Can be passed
        to Earth Engine.
    """
    half = size / 2
    x, y = point[0], point[1]
    corners = [
        [x - half, y + half],  # top-left
        [x + half, y + half],  # top-right
        [x + half, y - half],  # bottom-right
        [x - half, y - half],  # bottom-left
    ]
    return str(corners)
d39da98ebc14224817d4b093875e3fafce143441
18,648
def _nbits(n, correction = { '0': 4, '1': 3, '2': 2, '3': 2, '4': 1, '5': 1, '6': 1, '7': 1, '8': 0, '9': 0, 'a': 0, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 0}): """Number of bits in binary representation of the positive integer n, or 0 if n == 0. """ if n < 0: raise ValueError("The argument to _nbits should be nonnegative.") hex_n = "%x" % n return 4*len(hex_n) - correction[hex_n[0]]
11d6367e41273037108680634c63958de6e72730
18,649
def incrementFilename(filename, increment):
    """Insert *increment* into *filename* just before its extension.

    'photo.png', '3' -> 'photo_3.png'.

    BUG FIX: the original split on the FIRST dot, so names with multiple
    dots silently lost everything after the second ('a.tar.gz' -> 'a_3.tar')
    and extension-less names raised IndexError. Splitting on the last dot
    preserves the full stem, and names without a dot get the counter
    appended.
    """
    stem, dot, extension = filename.rpartition('.')
    if not dot:
        # No extension present; just append the counter.
        return filename + '_' + increment
    return stem + '_' + increment + dot + extension
972c6434a3746b801aff70190ec82c2fd3de1c20
18,652
def get_keys(dict):
    """ extract the keys from a dictionary

    Returns the mapping's ``keys()`` view (not a list).

    NOTE(review): the parameter shadows the builtin ``dict``; renaming it
    would change the keyword-argument interface, so it is kept but flagged
    for a coordinated rename.
    """
    return dict.keys()
7c12a9717a4ed57aec53366f60a15f3aa04f672d
18,655