Columns: content (string, length 39 – 14.9k) · sha1 (string, length 40) · id (int64, 0 – 710k)
from datetime import datetime
import itertools

def satisfies(query, **kwargs):
    """Check whether a given datetime object satisfies day and time predicates

    Keyword Args:
        month: The month predicate (January, February, ...)
        day: The day of month predicate [1 31)
        weekday: The day of week predicate (Sunday, Monday, ...)
        hour: The hour of day predicate [0 24)
        minute: The minute of hour predicate [0 60)
    """
    formatters = {
        'month': lambda: datetime.strftime(query, '%B'),
        'weekday': lambda: datetime.strftime(query, '%A'),
        'day': lambda: query.day,
        'hour': lambda: query.hour,
        'minute': lambda: query.minute
    }
    attributes = kwargs.keys()
    predicates = itertools.product(*kwargs.values())
    for values in predicates:
        if all(formatters[attr]() == value for attr, value in zip(attributes, values)):
            return True
    return False
e36ff908a31e71eab242b9ff6cf4df6721a1fbf7
11,604
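A quick usage sketch for the satisfies() helper above, assuming the function is in scope; the date and predicate values are chosen purely for illustration (each keyword takes an iterable of acceptable values):

from datetime import datetime

query = datetime(2021, 3, 5, 14, 30)
# month/weekday/hour are each lists of acceptable values; all given predicates must match.
print(satisfies(query, month=['March'], weekday=['Friday'], hour=[14]))  # True
print(satisfies(query, weekday=['Saturday', 'Sunday']))                  # False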
import requests
import json

def duckduckgo(search, appName=""):
    """Gives instant answers from DuckDuckGo (https://duckduckgo.com/).

    Keyword arguments:
    search -- <str>; what you are searching for (case sensitive)
    appName -- <str>; the name of your app

    Return value:
    {
        "AbstractText": <str>; topic summary,
        "AbstractSource": <str>; name of <AbstractText> source,
        "Heading": <str>; name of topic that goes with <AbstractText>,
        "Answer": <str>; instant answer,
        "Definition": <str>; dictionary definition (may differ from <AbstractText>),
        "DefinitionSource": <str>; name of <Definition> source,
        "DefinitionURL": <str>; deep link to expanded definition page in <DefinitionSource>,
        "URL": <str>; URL associated with <AbstractText>,
        "URLText": <str>; text from <FirstURL>
    }
    """
    url = "http://api.duckduckgo.com/?q={}&format=json&t={}"
    url = url.format(search, appName)
    data = requests.get(url).text
    data = json.loads(data)
    items = {"AbstractText": data["AbstractText"],
             "AbstractSource": data["AbstractSource"],
             "Heading": data["Heading"],
             "Answer": data["Answer"],
             "Definition": data["Definition"],
             "DefinitionSource": data["DefinitionSource"],
             "DefinitionURL": data["DefinitionURL"]}
    exists = data["Results"]
    items["URL"] = data["Results"][0]["FirstURL"] if exists else ""
    items["URLText"] = data["Results"][0]["Text"] if exists else ""
    return items
7bb2958986b0e1b0219e7d1a63e475ff840f0136
11,607
def binary_to_decimal(number):
    """
    Calculates the decimal of the given binary number.

    :param number: binary number in string or integer format
    :return: integer of the equivalent decimal number
    """
    decimal = []
    number = list(str(number)[::-1])
    for i in range(len(number)):
        decimal.append(int(number[i]) * (2 ** i))
    return sum(decimal)
6d615b9bc5a50cc9d2a970fa77c989fa95d0d77e
11,608
def get_dom_attr_value(dom, tag_name, attr_name):
    """
    Return value of a tag's attribute from dom (XML file).

    Arguments:
    tag_name -- name of dom tag in which the attribute is found.
    attr_name -- name of dom attribute for which the value should be returned.
    """
    tag = dom.getElementsByTagName(tag_name)
    value = tag[0].attributes[attr_name].value
    return value
26f31fd7db526bb5503b7ce156b19a99705041d1
11,609
import torch

def word_to_one_hot(word, word2idx):
    """Return the one hot encoding of the word given to this function.

    Args:
        word (str): The word for which one hot representation is required.
        word2idx (Dict): The dictionary mapping from word to indices.

    Returns:
        x (torch.Tensor): The one hot representation for the word.
    """
    # Create a vector of zeros equal to the length of the vocab
    x = torch.zeros(len(word2idx)).float()
    # Set the value corresponding to the index of the word to 1
    x[word2idx[word]] = 1.0
    return x
6628f25695cacb202dd51be070ff03f9a594654d
11,618
def rev_comp(seq: str) -> str:
    """Generates the reverse complement of a sequence."""
    comp = {
        "A": "T", "C": "G", "G": "C", "T": "A",
        "B": "N", "N": "N", "R": "N", "M": "N",
        "Y": "N", "S": "N", "W": "N", "K": "N",
        "a": "t", "c": "g", "g": "c", "t": "a",
        "n": "n", " ": "",
    }
    rev_seq = "".join(comp.get(base, base) for base in reversed(seq))
    return rev_seq
cb6b95d2d3f15910ff3ad793d99bb56de898026e
11,619
def codeblock(text):
    """
    Returns text in a Markdown-style code block

    :param text: Str
    :return: Str
    """
    return "```\n" + text + "\n```"
fb8fd7314273b47ace577a72c705742d6646aa0d
11,626
def generate_ensemble(num_layers, *activations):
    """Given a set of string names and a number of target layers, generate
    a list of ensemble architectures with those activations

    Args:
        num_layers: int
            the number of hidden layers in the neural network, and also
            the number of activation functions
        activations: list of str
            a list of strings that indicates the candidates for activation
            functions at every layer

    Returns:
        ensemble: list of list of str
            a list of architectures, where an architecture is given by a
            list of activation function names
    """
    if num_layers == 0:
        return []
    if num_layers == 1:
        return [[act] for act in activations]
    return [[act, *o]
            for act in activations
            for o in generate_ensemble(num_layers - 1, *activations)]
42bce2fc861122938d2e5da54e4538b0e6a285bc
11,627
def match_server_args_factory(tick_rate: int, realtime: bool, observations_only: bool, env_config_string: str):
    """Helper factory to make an argument dictionary for servers with varying ports"""
    def match_server_args(port):
        arg_dict = {
            "tick_rate": tick_rate,
            "port": port,
            "realtime": realtime,
            "observations_only": observations_only,
            "config": env_config_string
        }
        return arg_dict
    return match_server_args
5059d0a4224067f485455a54f4e6d1f83ba68531
11,633
from typing import Iterable
from typing import Any

def parametrize(arg_names: Iterable[str], arg_values: Iterable[Iterable[Any]]):
    """
    Decorator to create parameterized tests.

    # Parameters

    arg_names : `Iterable[str]`, required.
        Argument names to pass to the test function.
    arg_values : `Iterable[Iterable[Any]]`, required.
        Iterable of values to pass to each of the args.
        The decorated test will be run for each inner iterable.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            for arg_value in arg_values:
                kwargs_extra = {name: value for name, value in zip(arg_names, arg_value)}
                func(*args, **kwargs, **kwargs_extra)
        return wrapper
    return decorator
e385d2449a2572d89a055d7cbdcc2475b00b2942
11,634
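A minimal usage sketch for the parametrize decorator above; check_sum and its argument values are hypothetical:

@parametrize(["a", "b"], [[1, 2], [3, 4]])
def check_sum(a, b):
    assert a + b > 0

check_sum()  # runs the body twice: once with a=1, b=2 and once with a=3, b=4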
import math

def get_distance(lat_1, lng_1, lat_2, lng_2):
    """calculates the distance between two coordinates

    Args:
        lat_1 (float): start latitude
        lng_1 (float): start longitude
        lat_2 (float): end latitude
        lng_2 (float): end longitude

    Returns:
        float: distance in meters
    """
    # transform coordinates to radians
    lat_1, lng_1, lat_2, lng_2 = map(math.radians, [lat_1, lng_1, lat_2, lng_2])

    # calculate the distance (haversine formula)
    d_lat = lat_2 - lat_1
    d_lng = lng_2 - lng_1
    temp = (
        math.sin(d_lat / 2) ** 2
        + math.cos(lat_1) * math.cos(lat_2) * math.sin(d_lng / 2) ** 2
    )
    return 6373.0 * 1000 * (2 * math.atan2(math.sqrt(temp), math.sqrt(1 - temp)))
3e09826a4b556e897f2c39d792ed2f1d8da6109c
11,639
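A usage sketch for get_distance() above; the coordinates are approximate values for Berlin and Paris, and the printed figure is only meant to show the order of magnitude:

d = get_distance(52.5200, 13.4050, 48.8566, 2.3522)
print(round(d / 1000))  # roughly 880 (km), since the function returns metres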
import math

def spherical_to_cartesian(r, theta, phi):
    """
    :param r: Radius.
    :param theta: in radians.
    :param phi: azimuth angle in radians
    :return: x=[x1, x2, x3] coordinates.
    """
    return [r * math.sin(phi) * math.cos(theta),
            r * math.sin(phi) * math.sin(theta),
            r * math.cos(phi)]
08d0748d5acd9dab4e74a19879b2f8ac7858bb31
11,640
import copy

def flat_ingredients_list_DFS(product):
    """
    Recursive function to search the ingredients graph by doing a Depth First
    Search and return it as a flat list of all nodes.
    Sub ingredients are placed right after their parents.

    Args:
        product (dict): Dict corresponding to a product or a compound ingredient.

    Returns:
        list: List containing all the ingredients graph nodes.
    """
    if 'ingredients' in product:
        product_without_ingredients = copy.deepcopy(product)
        del product_without_ingredients['ingredients']
        if '_id' in product:
            # It is a product and not a compound ingredient:
            return [y for x in product['ingredients'] for y in flat_ingredients_list_DFS(x)]
        else:
            return [product_without_ingredients] + \
                   [y for x in product['ingredients'] for y in flat_ingredients_list_DFS(x)]
    else:
        return [product]
d2b3ada88963d3967fa8fffc4aa294bed4b1fe31
11,641
def parse_raw(output):
    """Just return `output` as a single string assigned to dict key '_'
    for reference in assertion expressions.

    Returns {'_': output}
    """
    return dict(_=output)
7fe463b997687bedad6d77d4bca4718037f18069
11,651
import re

def http_header_link_regex(relation_type):
    """Create a regex matching the http header links of the given type."""
    return re.compile(r'.*;+\s*rel="{}"\s*(;.*)?'.format(
        re.escape(relation_type)))
4085e9258c0f6d5d1de33f82318aa9006fbe40bc
11,652
def read_txt(path):
    """Read a mass spectrum from a text file.

    Args:
        path (str): Path to the spectrum.

    Returns:
        tuple: Lists with m/z and intensities.
    """
    mz = []
    i = []
    with open(path) as f:
        for line in f:
            line = line.split()
            mz.append(float(line[0]))
            i.append(float(line[1]))
    return mz, i
e0efe8549596ec2bf312967ecf265d8eb8b3372c
11,655
def find_parent(child: str, parent_of: dict, recursive=False):
    """
    Find the parent or great-great-...-great-parent of a child

    Required Parameters
    -------------------
    child: str
        If this is already the greatest-parent, will return itself;
        otherwise, raise KeyError
    parent_of: dict
        dictionary with key = child and value = parent, eg:
            parent_of = {}
            parent_of["child"] = "parent"

    Other Parameters
    ----------------
    recursive: bool (default: False)
        if True, look for the greatest-parent of a child.

    Returns
    -------
    itself, the parent or the greatest-parent
    """
    try:
        parent = parent_of[child]
    except KeyError:
        if child in parent_of.values():
            return child
        raise
    if recursive:
        # propagate the flag so the lookup climbs all the way to the greatest-parent
        return find_parent(parent, parent_of, recursive=True)
    else:
        return parent
429f91160f6abc8710ced71b07e5e2e5c0a97cfc
11,656
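A usage sketch for find_parent() above with a made-up three-level hierarchy:

parent_of = {"leaf": "branch", "branch": "trunk", "trunk": "root"}
print(find_parent("leaf", parent_of))                  # 'branch'
print(find_parent("leaf", parent_of, recursive=True))  # 'root'
print(find_parent("root", parent_of))                  # 'root' (already the greatest-parent)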
def _TimeToString(datetime):
    """Converts a datetime object into a string
    adapted from M-LOOP: mloop.utilities

    Args:
        datetime (datetime): datetime object (e.g. datetime.datetime.now())

    Returns:
        str: date time as 'yyyy-mm-dd_hh-mm'
    """
    return datetime.strftime("%Y-%m-%d_%H-%M")
3f778b8b15f19bdfe5238799cb248316772380b4
11,659
def array(string):
    """Converts string to a list, split on whitespace

    :param: string(str): The string to split
    :return string.split()(list): The string split into a list on the white space."""
    return string.split()
c48c35d321848f3e997931c5ab9f9357842b9228
11,661
def label_mapper(raw_labels, new_labels):
    """Map some raw labels into new labels.

    When dealing with GEO DataSets it is very common that each GSM sample has
    a different phenotype (e.g. 'Brain - 001', 'Brain - 002', ...). This
    function maps these raw labels into new homogeneous labels.

    Parameters
    -----------
    raw_labels : list of strings
        list of unpreprocessed labels
    new_labels : list of strings
        list of labels to map

    Returns
    -----------
    y : list of strings, length : n_samples
        the modified label vector

    Examples
    -----------
    >>> raw_labels = ['Brain - 001', 'Brain - 002', 'Muscle - 001', 'Muscle - 002']
    >>> label_mapper(raw_labels, ['Brain', 'Muscle'])
    ['Brain', 'Brain', 'Muscle', 'Muscle']
    """
    y = []
    for rl in raw_labels:
        for nl in new_labels:
            if nl in rl:
                y.append(nl)
                break
        else:
            y.append(rl)
            # print('No mapping rule for %s', rl)
    return y
67aaa329374169f61414032b0949344437d16022
11,663
from typing import Optional

def validate_self_discharge(self_discharge: Optional[float]) -> Optional[float]:
    """
    Validates the self discharge of an object.
    Self discharge is always optional.

    :param self_discharge: The self discharge of the object.
    :return: The validated self discharge.
    """
    if self_discharge is None:
        return None
    if self_discharge < 0 or self_discharge > 1:
        raise ValueError("Self discharge must be between 0 and 1.")
    return self_discharge
4d1d6c3ddb6530fc6fc6e03809a4320b0b50033f
11,664
import pickle
import base64

def unbp(data):
    """Un-(Base64-Pickle)."""
    return pickle.loads(base64.b64decode(data))
d1338e2d877ac84c78cd67e2b5b425c173011331
11,665
def l_min(s, m):
    """Minimum allowed value of l for a given s, m.
    The formula is l_min = max(\|m\|, \|s\|).

    Parameters
    ----------
    s: int
        Spin-weight of interest
    m: int
        Magnetic quantum number

    Returns
    -------
    int
        l_min
    """
    return max(abs(s), abs(m))
9eec996df3b8e026c8b58649fddbbbcc05c38372
11,672
def threshold_means(df, thresh_name, thresholds, comp_df=None, error_fac=1.0, use_percents=True):
    """Computes the means (and standard deviations) along a set of threshold values.

    This is handy for doing the Threshold v.s. Robustness plots when in comparison to DREA.

    Args:
        df (DataFrame): DataFrame of the data we want to plot. Often, this needs
            to be filtered to be only one algorithm.
        thresh_name (str): String representing the column name for thresholds
        comp_df (DataFrame, optional): Data frame to compare to, percent wise.
        error_fac (float, optional): Multiply error sizes by this number.
            Particularly useful if we want to use confidence intervals. Default is 1.0.
        use_percents (float, optional): Return results in percents.

    Returns:
        Returns an object with properties:
            robs -> returns a list of robustness means
            robs_err -> returns list of robustness errors.
            sends -> returns a list of send frequencies.
            sends_err -> returns a list of errors of send frequencies.
            res -> returns a list of reschedule frequencies
            res_err -> returns a list of reschedule frequencies errors
            runtimes -> returns a list of runtimes.
    """
    if comp_df is not None:
        comp_rob = comp_df["robustness"].mean()
        comp_res = comp_df["reschedule_freq"].mean()
        comp_run = comp_df["runtime"].mean()
        comp_send = comp_df["send_freq"].mean()
    else:
        comp_rob = 1.0
        comp_res = 1.0
        comp_run = 1.0
        comp_send = 1.0

    rob_means = []
    stderrs = []
    sends = []
    sends_err = []
    reschedules = []
    reschedules_err = []
    runtimes = []
    runtimes_err = []

    if use_percents:
        p = 100
    else:
        p = 1

    for t in thresholds:
        point = df.loc[df[thresh_name] == t]
        mean = point["robustness"].mean() / comp_rob * p
        rob_means.append(mean)
        se = point["robustness"].sem() * p
        stderrs.append(se * error_fac)

        send_dat = point["send_freq"].mean() / comp_send * p
        sends.append(send_dat)
        send_err = point["send_freq"].sem() * p
        sends_err.append(send_err * error_fac)

        res = point["reschedule_freq"].mean() / comp_res * p
        reschedules.append(res)
        res_err = point["reschedule_freq"].sem() * p
        reschedules_err.append(res_err * error_fac)

        runtime = point["runtime"].mean() / comp_run * p
        runtimes.append(runtime)
        runtime_err = point["runtime"].sem() * p
        runtimes_err.append(runtime_err * error_fac)

    class ThreshResponse(object):
        def __init__(self, robs, robs_err, sends, sends_err, res, res_err, runtimes):
            self.robs = robs
            self.robs_err = robs_err
            self.sends = sends
            self.sends_err = sends_err
            self.res = res
            self.res_err = res_err
            self.runtimes = runtimes
            self.runtimes_err = runtimes_err

    return ThreshResponse(rob_means, stderrs, sends, sends_err, reschedules,
                          reschedules_err, runtimes)
99f7855f457a2aec71a1454ac08fb656745f84d1
11,674
from typing import Iterable
from typing import Any
from functools import reduce

def concat(*it: Iterable[Any]) -> Any:
    """
    Concatenation of iterable objects

    Args:
        it: Iterable object

    Examples:
        >>> fpsm.concat([1, 2, 3], [4, 5, 6])
        [1, 2, 3, 4, 5, 6]
    """
    return reduce(lambda x, y: x + y, map(list, it))
e2b9d1604630198486fa0649d62784078547031a
11,675
def plot(domain, solution=None, solution_exact=None, **kwargs):
    """Plots a solution (and optionally a reference solution) onto the given
    DiscreteDomain instance. **kwargs are the same as the ones in the
    DiscreteDomain's visualize() method.

    Parameters
    ----------
    domain : DiscreteDomain
        Discrete domain instance to plot the solution onto. The solution must
        have as many values as the number of vertices in the domain.
    solution : numpy.ndarray, optional
        Values to plot at each vertex of the domain. If None, then the domain
        is plot as is with the z-component of each vertex as value.
    solution_exact : numpy.ndarray, optional
        Value references to plot at each vertex of the domain. If None, only
        the graph of the computed solution is shown; else two graphs are
        created to show the computed and exact solutions side by side.
    """
    if solution is None:
        return domain.visualize(**kwargs)
    else:
        return domain.visualize(z=solution, z_ex=solution_exact, **kwargs)
003f55c54a19145e847b3baf97266f301b09ddaf
11,676
import shutil

def check_java() -> bool:
    """Check if Java is installed on the system."""
    return shutil.which('java') is not None
44c19db5c9b72c6904e01b09797009cd8f2079b0
11,691
def data_ref_type_str(dref_enum):
    """Translate an ``enum DataRefTypes`` value into a string representation."""
    if dref_enum == 0x9000:
        return 'unknown'
    elif dref_enum == 0x9001:
        return 'integer'
    elif dref_enum == 0x9002:
        return 'fp'
    else:
        return 'INVALID'
28f3c493c1f927e8286be5d00b695ecb399210c5
11,692
def takewhile(pred, *args):
    """Produce a sequence with the same elements as the input sequence
    until pred returns false for some element; that element and all those
    following are discarded. The filter predicate is passed an element of
    the input sequence plus extra arguments, if provided.
    """
    def taker(input):
        for elt in input:
            if pred(elt, *args):
                yield elt
            else:
                break
    return taker
74503e290b31d263964e532851a90a74d09f3568
11,694
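A usage sketch for the takewhile() helper above; the extra argument (here a hypothetical limit of 10) is forwarded to the predicate on every element:

below_limit = takewhile(lambda x, limit: x < limit, 10)
print(list(below_limit([1, 5, 12, 3])))  # [1, 5] -- stops at 12 even though 3 < 10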
def escape_html(message):
    """Escapes HTML characters in the given message."""
    # Only the following characters need to be escaped
    # (https://wiki.ubuntu.com/NotificationDevelopmentGuidelines).
    message = message.replace('&', '&amp;')
    message = message.replace('<', '&lt;')
    message = message.replace('>', '&gt;')
    return message
1fdbfa9b972ad6057e97967c61644b1c2e0994d0
11,702
def crop_image(img, ymin, ymax, xmin, xmax):
    """Crop image with given size

    Args:
        img: image as numpy array
        ymin: start cropping position along height in pixels
        ymax: end cropping position along height in pixels
        xmin: start cropping position along width in pixels
        xmax: end cropping position along width in pixels

    Returns:
        Image as numpy array
    """
    return img[int(ymin):int(ymax), int(xmin):int(xmax), :]
929be89de22b5aa129f17459d53f34b335803813
11,706
def rect_overlap(r1, r2):
    """Return the area of the intersection of two rectangles.

    Args:
        r1: an object with attributes left, top, width, height
        r2: an object with attributes left, top, width, height

    Returns:
        float
    """
    left = float(max(r1.left, r2.left))
    right = float(min(r1.left + r1.width, r2.left + r2.width))
    top = float(max(r1.top, r2.top))
    bottom = float(min(r1.top + r1.height, r2.top + r2.height))
    if left >= right or top >= bottom:
        return 0.
    return (right - left) * (bottom - top)
cba6c109fe7e9cdc3532781ebb9abbae7088754d
11,710
def is_title_case(line):
    """Determine if a line is title-case (i.e. the first letter of every
    word is upper-case). More readable than the equivalent all([]) form."""
    for word in line.split(u' '):
        if len(word) > 3 and word[0] != word[0].upper():
            return False
    return True
e769d589d0f84030768c901a5b5b2285788bdc97
11,718
import random

def random_hex_seeded(length, seed):
    """Get a random hex string of a given length with a specific seed."""
    random.seed(seed)
    return bytearray(random.getrandbits(8) for _ in range(length)).hex()
5737057a33063cd9c62bc6071f0ef98552001117
11,721
import re

def parse_define(line):
    """Check if the specified line contains an #define directive"""
    pattern = r'#(?:\s)*define\s*([\w_]+)(?:\s)*["\']?(.*)["\']'
    match = re.match(pattern, line, re.IGNORECASE)
    if match:
        return (match.group(1), match.group(2))
5fc15e792e1f457c7466d9ff0a97241cbdf0873f
11,723
def format_iter(body: list) -> str:
    """Formats an iterable into a multi-line bulleted string of its values."""
    return "\n".join(sorted([f" - {getattr(v, 'value', v)}" for v in body]))
0f55b06276c45ef652e89df3dfd24d1fe9a4e844
11,724
import click

def n_protocols_option(required=True, **kwargs):
    """Get option for number of protocols."""
    def custom_n_protocols_option(func):
        def callback(ctx, param, value):
            value = abs(value)
            ctx.meta["protocols_number"] = value
            return value
        return click.option(
            "-pn",
            "--protocols-number",
            type=click.INT,
            show_default=True,
            required=required,
            help="The number of protocols of cross validation.",
            callback=callback,
            **kwargs
        )(func)
    return custom_n_protocols_option
bcdd204ea9b9cd7d09e159ba45ea54b349c648b6
11,729
def load_trace(logfile, root_dir, api, blacklist):
    """Loads a trace file and returns the Results instance.

    Arguments:
    - logfile: File to load.
    - root_dir: Root directory to use to determine if a file is relevant to the
                trace or not.
    - api: A tracing api instance.
    - blacklist: Optional blacklist function to filter out unimportant files.
    """
    data = api.parse_log(logfile, (blacklist or (lambda _: False)))
    assert len(data) == 1, 'More than one trace was detected!'
    if 'exception' in data[0]:
        # It got an exception, raise it.
        raise data[0]['exception']
    results = data[0]['results']
    if root_dir:
        results = results.strip_root(root_dir)
    return results
e51ad3e61ee4206e74800f1c24b14fd20f51e477
11,732
def splitUIAElementAttribs(attribsString):
    """Split an UIA Element attributes string into a dict of attribute keys and values.
    An invalid attributes string does not cause an error, but strange results may be returned.

    @param attribsString: The UIA Element attributes string to convert.
    @type attribsString: str
    @return: A dict of the attribute keys and values, where values are strings
    @rtype: {str: str}
    """
    attribsDict = {}
    tmp = ""
    key = ""
    inEscape = False
    for char in attribsString:
        if inEscape:
            tmp += char
            inEscape = False
        elif char == "\\":
            inEscape = True
        elif char == "=":
            # We're about to move on to the value, so save the key and clear tmp.
            key = tmp
            tmp = ""
        elif char == ";":
            # We're about to move on to a new attribute.
            if key:
                # Add this key/value pair to the dict.
                attribsDict[key] = tmp
            key = ""
            tmp = ""
        else:
            tmp += char
    # If there was no trailing semi-colon, we need to handle the last attribute.
    if key:
        # Add this key/value pair to the dict.
        attribsDict[key] = tmp
    return attribsDict
db472d90b2dacdda4606b39e2e1f1d959ae056ca
11,736
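A usage sketch for splitUIAElementAttribs() above with a made-up attribute string; the backslash escapes the next character, so the embedded semicolon survives:

print(splitUIAElementAttribs(r"name=main;value=a\;b;"))
# {'name': 'main', 'value': 'a;b'}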
def apply_filter_include_exclude(filename, include_filters, exclude_filters):
    """Apply inclusion/exclusion filters to filename

    The include_filters are tested against the given (relative) filename.
    The exclude_filters are tested against the stripped, given (relative),
    and absolute filenames.

    filename (str): the file path to match, should be relative
    include_filters (list of regex): ANY of these filters must match
    exclude_filters (list of regex): NONE of these filters must match

    returns: (filtered, excluded)
        filtered (bool): True when filename failed the include_filter
        excluded (bool): True when filename failed the exclude_filters
    """
    filtered = not any(f.match(filename) for f in include_filters)
    excluded = False
    if filtered:
        return filtered, excluded
    excluded = any(f.match(filename) for f in exclude_filters)
    return filtered, excluded
12d1f56436bfbee606d5a75f8aab5ad34e930981
11,737
def filter_none(data, split_by_client=False):
    """This function filters out ``None`` values from the given list
    (or list of lists, when ``split_by_client`` is enabled)."""
    if split_by_client:
        # filter out missing files and empty clients
        existing_data = [
            [d for d in client_data if d is not None] for client_data in data
        ]
        existing_data = [client_data for client_data in existing_data if client_data]
    else:
        # filter out missing files
        existing_data = [d for d in data if d is not None]
    return existing_data
7cc88ecdf7aba245f56598ee1094fed7c1f9f4f7
11,746
from bs4 import BeautifulSoup
import re
import logging

def skip_team(soup: BeautifulSoup) -> bool:
    """Skip team if no players meet the minimum

    :param soup: BeautifulSoup object for a team
    :returns: True if the team should be skipped, False otherwise
    """
    pattern = re.compile("No players meet the minimum")
    skip = len(soup.find_all(string=pattern)) > 0
    if skip:
        logging.warning("No players meet the minimum. Skipping team")
    return skip
023a0a0076f8751f5447acd30e09ac239736e751
11,751
def sieve(limit):
    """Returns a list of prime numbers up to but not including the given number."""
    array = [True] * limit
    array[0] = array[1] = False
    primes = []
    for (number, prime) in enumerate(array):
        if prime:
            primes.append(number)
            for index in range(number * number, limit, number):
                array[index] = False
    return primes
2b31d824cf8058044fa58de62d88cadf2f473c17
11,753
from typing import List
from typing import Tuple

def key_exists(key: str, list_keys: List) -> Tuple[int, bool]:
    """Finds a dict which has `key` defined from a list `list_keys`

    Args:
        key: the key to find
        list_keys: list of dicts

    Returns:
        index of key if found, whether the key was found
    """
    for i, x in enumerate(list_keys):
        if key in x:
            return i, True
    return -1, False
65d0dbb35be5773a6435d639ab218ebe05638c7b
11,754
import random

def death_with_chance(p_death: float) -> bool:
    """
    Takes a float between 0 and 1 and returns a boolean
    if the player has survived (based on random chance)

    Returns True if death, False if survived
    """
    return p_death > random.random()
ad2a88727369e703cee6c345882c873db2104827
11,757
def makeReturn(items: dict) -> dict:
    """Format output for alfred"""
    out = {'items': items}
    return out
114866d9fb142e072f77bb8bb1a7417a58fc76b6
11,760
import json

def read_json(file):
    """Read JSON file"""
    try:
        with open(file, 'r') as f:
            data = json.load(f)
    except FileNotFoundError:
        data = None
    return data
92842612769ae122a46fe90179c58d830534ff84
11,761
import pickle
import codecs

def pickle_string_to_obj(string: str):
    """Pickle string to object

    Arguments:
        string {str} -- base64-encoded pickle string to decode

    Returns:
        the object decoded from the base64-encoded pickle string
    """
    unmarshal = pickle.loads(codecs.decode(string.encode(), "base64"))
    return unmarshal
79b547dd8981a67786dc38adeab96a95f07fc911
11,763
def pad_number(number, bits):
    """Pad integer number to bits after converting to binary."""
    return f'{bin(number)[2:]:0>{bits}}'
73353469bad9bfb26b9e9c41f37d88f92e09aa6a
11,765
from datetime import datetime

def time_string_one(float_time=None, fmt=None):
    """
    Transform a single float daytime value to string.

    Parameters
    ----------
    float_time : float, optional
        Input time. The default is None, which returns the time now.
    fmt : str, optional
        Time format. The default is None, which uses '%Y-%m-%d %H:%M:%S.%f'.

    Returns
    -------
    str
        Datetime as string.
    """
    if fmt is None:
        fmt = '%Y-%m-%d %H:%M:%S.%f'
    if float_time is None:
        str_time = datetime.now().strftime(fmt)
    else:
        str_time = datetime.utcfromtimestamp(float_time).strftime(fmt)
    return str_time
aa74d63300100ae6a6d1225c36269d4029b5b26d
11,770
def _indent(level):
    """Returns leading whitespace corresponding to the given indentation `level`."""
    indent_per_level = 4
    return ' ' * (indent_per_level * level)
4391c308b59db321ef3f810c73b66e35d44566fa
11,771
def get_hover_data(df):
    """
    Creates and formats the hover string over the map data points

    :param df: Pandas dataframe
    :return: list: A string of dataframe row information formatted for hover.
    """
    details_labels = ["needothers", "detailmed", "detailrescue"]
    hover_string_list = []
    for index, row in df.iterrows():
        info_string = row['location'] + "<br>" + "Phone:" + row['requestee_phone'] + "<br>"
        details_string_list = []
        for i in details_labels:
            if row[i]:
                details_string_list.append(i + ":" + str(row[i]).strip())
        details_string = "<br>".join(details_string_list)
        hover_string_list.append(info_string + details_string)
    return hover_string_list
cab8f2a08a3b16e254c8ebfa76196fcdcff06170
11,781
def find_stim_channel(raw):
    """Finds the appropriate stim channel from raw.

    Heuristically just looks for STI101 or STI014.

    Parameters
    ----------
    raw : mne.io.Raw
        The raw object

    Returns
    -------
    str
        Channel name of the stimulus channel.
    """
    channels = raw.info.get('ch_names')
    if 'STI101' in channels:
        return 'STI101'
    elif 'STI 101' in channels:
        return 'STI 101'
    elif 'STI 014' in channels:
        return 'STI 014'
    elif 'STI014' in channels:
        return 'STI014'
1a4014a09ef7050e90f29151b1ff56ac4d5abe89
11,784
def parse_chunk_header_file_range(file_range):
    """Parses a chunk header file range.

    Diff chunk headers have the form:
        @@ -<file-range> +<file-range> @@
    File ranges have the form:
        <start line number>,<number of lines changed>

    Args:
        file_range: A chunk header file range.

    Returns:
        A tuple (range_start, range_end). The endpoints are adjusted such that
        iterating over [range_start, range_end) will give the changed indices.
    """
    if ',' in file_range:
        file_range_parts = file_range.split(',')
        start = int(file_range_parts[0])
        amount = int(file_range_parts[1])
        if amount == 0:
            return (start, start)
        return (start - 1, start + amount - 1)
    else:
        return (int(file_range) - 1, int(file_range))
1af8c8750707c29171373ff4ed03de155d8a1cf1
11,785
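A usage sketch for parse_chunk_header_file_range() above, taking the old-file side of a hypothetical header `@@ -10,3 +12,4 @@`:

print(parse_chunk_header_file_range("10,3"))  # (9, 12) -- lines 10-12 as a 0-based [start, end) range
print(parse_chunk_header_file_range("7"))     # (6, 7)  -- single-line range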
import re

def remove_puncatuation(review_str: str) -> str:
    """remove punctuation of a string"""
    return re.sub(r'[^\w\s]', '', review_str)
99381b5b6573f0a20466c5de21d7a3b3c65f6ef8
11,792
from typing import Dict
from typing import Any
from typing import Optional
from typing import Callable

def try_get_from_dict(data: Dict[str, Any], key: str, original_value: Any,
                      conversion: Optional[Callable[[Any], Any]] = None) -> Any:
    """Try to get value from dict, otherwise set default value"""
    if key not in data:
        return None
    value = data[key]
    if value is None:
        return original_value
    if conversion is None:
        return value
    return conversion(value)
354be2365af69dd5169aa3d74bf45cf8d095ed4d
11,794
def as_float(s):
    """Returns a float from a string"""
    if not s:
        return 0.0
    return float(s)
de5e2074b19f723b36c676f20904fa53a4878aa5
11,801
def get_handler_filename(handler):
    """Shortcut to get the filename from the handler string.

    :param str handler: A dot delimited string representing the
        `<module>.<function name>`.
    """
    module_name, _ = handler.split('.')
    return '{0}.py'.format(module_name)
4e6d464e83e4a6557d03ebabbb4f275fc4784d8f
11,814
def parseEPSGCode(string, parsers):
    """parse EPSG code using provided sequence of EPSG parsers"""
    for parser in parsers:
        epsg = parser(string)
        if epsg is not None:
            return epsg
    return None
b85e2d69952cc16d7f5f3b9e22009d14432a014f
11,816
import re

def get_value_for_key(text_buffer, key):
    """Parsing value from a line with key=value"""
    for match in re.finditer("%s=(?P<value>.*)" % key, text_buffer):
        return match.group('value')
    return ""
86865f806adc3fa5f78671d8a686389cc5f0f353
11,817
import pathlib

def _is_valid_doc_version(folder: pathlib.Path) -> bool:
    """
    Test if a version folder contains valid documentation.

    A version folder contains documentation if:
    - is a directory
    - contains an `index.html` file
    """
    if not folder.is_dir():
        return False
    if not (folder / "index.html").exists():
        return False
    return True
5dadea657b717373e8e84360f1f3a9e8b8adc1f8
11,819
def strip_comments(l: str) -> str:
    """
    Strip any ``#`` comments from a line.

    :param str l: A string line, which may contain ``#`` comments
    :return str clean_line: The line ``l`` - stripped of any comments and excess whitespace.
    """
    return l.split('#', 1)[0].strip()
0587cb5e2a986c9d0bb4c610c289186b8c184942
11,821
def shengbte_code(aiida_local_code_factory):
    """Get a shengbte code."""
    shengbte_code = aiida_local_code_factory(executable='diff', entry_point='shengbte')
    return shengbte_code
de9f6bba763c4bfe7f6a1153927f73542a83dc6a
11,822
import mimetypes

def get_content_type(content, name):
    """
    Checks if the content_type is already set.
    Otherwise uses the mimetypes library to guess.
    """
    if hasattr(content.file, "content_type"):
        return content.file.content_type
    else:
        mime_type, encoding = mimetypes.guess_type(name)
        return mime_type
838d11c2660e0d76c813961498ab0ebcc25c1f3c
11,823
def in_or_none(x, L):
    """Check if item is in list or list is None."""
    return (L is None) or (x in L)
bc3e4ef5a8daf7669e7430940e361d4c7ec1a240
11,825
def apply(R, point):
    """Applies the rotation to a point"""
    return (R[0]*point[0] + R[3]*point[1] + R[6]*point[2],
            R[1]*point[0] + R[4]*point[1] + R[7]*point[2],
            R[2]*point[0] + R[5]*point[1] + R[8]*point[2])
f6f4e5431e92dac9711f80da9ca0dc75bae793fb
11,829
def has_new_triggerword(predictions, chunk_duration, feed_duration, threshold=0.5):
    """
    Function to detect a new trigger word in the latest chunk of input audio.
    It looks for the rising edge of the predictions data belonging to the
    last/latest chunk.

    Arguments:
        predictions -- predicted labels from model
        chunk_duration -- time in seconds of a chunk
        feed_duration -- time in seconds of the input to the model
        threshold -- probability threshold above which a prediction is considered positive

    Returns:
        True if a new trigger word is detected in the latest chunk
    """
    predictions = predictions > threshold
    chunk_predictions_samples = int(len(predictions) * chunk_duration / feed_duration)
    chunk_predictions = predictions[-chunk_predictions_samples:]
    level = chunk_predictions[0]
    for pred in chunk_predictions:
        if pred > level:
            return True
        else:
            level = pred
    return False
4a3d7b82e94fac7ee26b18dd848da4ef79378d30
11,834
def _password_repr(val: str) -> str:
    """Change representation of password to hide its content."""
    del val
    return "'********'"
f7e5d653e874b023888d2f13502fc2ff9d894161
11,835
def format_cpus(n):
    """Formats n as a number of CPUs"""
    return '{:.1f}'.format(n)
f28f8d24826341aab70347679920dc68f0b394b0
11,839
def _intersect_items(baselist, comparelist):
    """Return matching items in both lists."""
    return list(set(baselist) & set(comparelist))
393e31d13dc63656167505068bcb6fee199d4c06
11,843
def bin_to_int(bin_list, inverse=True):
    """
    Given arbitrary length list of binary values, calculate integer value.

    Parameters
    ----------
    bin_list : list of int
        List of binary values for calculating integer value.
    inverse : bool, optional
        If true (default) calculate integer from bin_list assuming first element
        is LSB. If false, integer will be calculated from bin_list assuming first
        element is MSB.

    Returns
    -------
    int
        Integer value calculated from bin_list.
    """
    if inverse:
        step = -1
    else:
        step = 1
    bin_list = bin_list[::step]
    bin_int = 0
    for bit in bin_list:
        bin_int = (bin_int << 1) | bit
    return int(bin_int)
0ab74bafe4d045081732f10733704d7bbeb4a9ea
11,847
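A usage sketch for bin_to_int() above; the same bit list is read LSB-first by default and MSB-first with inverse=False:

print(bin_to_int([0, 1, 1]))                 # 6  (0b110)
print(bin_to_int([0, 1, 1], inverse=False))  # 3  (0b011)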
def get_chem_names(particles):
    """
    Create a list of chemical names for the dispersed phase particles

    Reads the composition attribute of each particle in a `particles` list
    and compiles a unique list of particle names.

    Parameters
    ----------
    particles : list of `Particle` objects
        List of `SingleParticle`, `PlumeParticle`, or
        `bent_plume_model.Particle` objects describing each dispersed phase
        in the simulation

    Returns
    -------
    chem_names : str list
        List of the chemical composition of particles undergoing dissolution
        in the `particles` list
    """
    # Initialize a list to store the names
    chem_names = []

    # Add the chemicals that are part of the particle composition
    for i in range(len(particles)):
        if particles[i].particle.issoluble:
            chem_names += [chem for chem in particles[i].composition
                           if chem not in chem_names]

    # Return the list of chemical names
    return chem_names
7b1a462732b6bdd389fb0d9c2b80aab5a5f385d5
11,853
def rmask_byte(num_mask_bits, value):
    """
    This function applies a right-justified mask of a specified number of bits
    to an unsigned integer representing a single byte value.

    >>> rmask_byte(3, list(b'\\xff')[0])
    7

    >>> bin(7)
    '0b111'
    """
    return ((1 << (num_mask_bits)) - 1) & value
156fe1797590c5418a28db13b1064dfdb4e7bc4c
11,861
from io import StringIO

def GetLines(data, strip=False):
    """Returns a list of all lines in data.

    Args:
        strip: If True, each line is stripped.
    """
    ret = StringIO(data).readlines()
    if strip:
        ret = [x.strip() for x in ret]
    return ret
8ce3a3e829eed590ba0978a659ff1d3b063dc50d
11,866
def has_an_update(page_data: dict, tracker_data: dict) -> bool:
    """Checks if there was an update comparing two story mappings of the same story.

    Arguments:
        page_data {dict} -- Requested story mapping, from `get_story_data`.
        tracker_data {dict} -- Story mapping from the tracked list.

    Returns:
        bool -- Whether or not there was an update.
    """
    for key in ("words", "chapter-amt", "last-update-timestamp"):
        if page_data[key] > tracker_data[key]:
            return True
    return False
a7374a167c368f85bdab6ec68dbea044c801ef90
11,869
from typing import Callable
from typing import Any
from typing import OrderedDict

def multimap(f: Callable, *xs: Any) -> Any:
    """
    Each x in xs is a tree of the same structure.
    Apply f at each leaf of the tree with len(xs) arguments.
    Return a tree of the same structure, where each leaf contains f's return value.
    """
    first = xs[0]
    if isinstance(first, dict) or isinstance(first, OrderedDict):
        assert all(isinstance(x, dict) or isinstance(x, OrderedDict) for x in xs)
        assert all(x.keys() == first.keys() for x in xs)
        return {k: multimap(f, *(x[k] for x in xs)) for k in sorted(first.keys())}
    else:
        return f(*xs)
7c81deab0875b2396a2127b504a4ce8773ab356c
11,873
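A usage sketch for multimap() above, applying a leaf-wise sum across two dicts with identical keys:

print(multimap(lambda a, b: a + b, {"x": 1, "y": 2}, {"x": 10, "y": 20}))
# {'x': 11, 'y': 22}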
def inside_string(i, j, string_ranges):
    """Returns true if the range described by i and j is contained within
    a range in the list string_ranges"""
    for s, e in string_ranges:
        if i >= s and j <= e:
            return True
    return False
aea2ac8a777914c2e86aea40cf43f4d01b2ef8b0
11,877
def cast_ext(ext: str) -> str:
    """Convert ext to a unified form."""
    ext = ext.lower()
    if ext == 'jpeg':
        ext = 'jpg'
    return ext
7b4894e035a3b785b017c9794e494ff7b78e7c1b
11,879
def get_reference_output_files(reference_files_dict: dict, file_type: str) -> list:
    """Returns list of files matching a file_type from reference files

    Args:
        reference_files_dict: A validated dict model from reference
        file_type: a file type string, e.g. vcf, fasta

    Returns:
        ref_vcf_list: list of file_type files that are found in reference_files_dict
    """
    ref_vcf_list = []
    for reference_key, reference_item in reference_files_dict.items():
        if reference_item['file_type'] == file_type:
            ref_vcf_list.append(reference_item['output_file'])
    return ref_vcf_list
7050c39a8116f8874dbc09bcf7ff2908dcd13ff8
11,886
import hashlib

def testfunc(res, i):
    """Return a hash of the index plus the other results."""
    m = hashlib.md5()
    m.update(str(i).encode('utf-8'))
    for r in res:
        m.update(r.encode('utf-8'))
    return m.hexdigest()
f19a38d99bf50c33614134dec0d8184f35b27d60
11,889
def clasificar(a1: float, a2: float, a3: float) -> str:
    """
    Returns 'Equilatero' if the triangle is equilateral, 'Isóceles' if it is
    isosceles and 'Escaleno' if it is scalene.
    """
    if a1 == a2 and a1 == a3 and a2 == a3:
        retorno = "Equilatero"
    elif a1 == a2 or a1 == a3 or a2 == a3:
        retorno = "Isóceles"
    else:
        retorno = "Escaleno"
    return retorno
e44104b58ab5fa24ab37831920e4d1c855eb25f3
11,890
def pretty_print_prime_rules(primes):
    """Prints a pyboolnet primes dictionary as Boolean rules

    The output format is of the form:
        A* = B & C | !D, for example.

    Parameters
    ----------
    primes : pyboolnet primes dictionary
        Update rules to print.
    """
    if primes is None:
        return ""
    for k, v in primes.items():
        s = k + "* = "
        sl = []
        for c in v[1]:
            sll = []
            for kk, vv in c.items():
                if vv:
                    sli = kk
                else:
                    sli = '!' + kk
                sll.append(sli)
            if len(sll) > 0:
                sl.append(' & '.join(sll))
        if len(sl) > 0:
            s += ' | '.join(sl)
        if v[1] == []:
            s = k + "* = 0"
        if v[1] == [{}]:
            s = k + "* = 1"
        print(s)
f72f5b9a2c9b3c90c6682e253dffd1d22f78fa0c
11,891
def cut_list(list, length):
    """
    Cut a list into parts of a certain length.
    Returns a new list which takes less memory and contains, at each index,
    a part of the initial list.

    Args:
        list : list of the images path of the whole database
        length (int): the length of the parts

    Returns:
        list containing the images path cut by parts of <length>
    """
    listing_parts = []
    intervalle_0 = 0
    intervalle_1 = length
    # use a strict comparison so an empty trailing part is not appended
    while intervalle_0 < len(list):
        listing_parts.append(list[intervalle_0:intervalle_1])
        intervalle_0 = intervalle_1
        intervalle_1 = intervalle_1 + length
    return listing_parts
210a1ad7db3058396ad32493a91b5c70176fb77c
11,896
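A usage sketch for cut_list() above on a short, made-up list of image paths:

paths = ["a.png", "b.png", "c.png", "d.png", "e.png"]
print(cut_list(paths, 2))  # [['a.png', 'b.png'], ['c.png', 'd.png'], ['e.png']]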
import json

def _convert_vars_to_json(env_dict):
    """Converts 'variables' in the given environment dict into string."""
    if ('variables' in env_dict and
            isinstance(env_dict.get('variables'), dict)):
        env_dict['variables'] = json.dumps(env_dict['variables'])
    return env_dict
8dbf0456fd29833ff42570e011a45b368aa2ac0c
11,897
def reverse_key_value(orig_dict):
    """
    DESCRIPTION
    -----------
    Reverse the key value pairs of a dictionary object.

    PARAMETERS
    ----------
    orig_dict : dict
        A dictionary object.

    RETURNS
    -------
    rev_dict : dict
        A dictionary with the values of the original dictionary stored as keys
        and the keys of the original dictionary stored as values.

    MODIFICATIONS
    -------------
    Created : 4/24/19
    """
    rev_dict = {}
    for j, k in orig_dict.items():
        rev_dict[k] = j
    return rev_dict
92599452e511193ce34c4421b13029023fb9c762
11,910
def PyTmHMSXtoS(h, m, s, x):
    """
    Convert hours-minutes-seconds-milliseconds to seconds as float

    Parameters
    ----------
    h: int, hours
    m: int, minutes
    s: int, seconds
    x: float, milliseconds

    Returns
    -------
    float, seconds
    """
    return h * 3600.0 + m * 60.0 + s + x
550362fe48d4e6c8c94b0c885611b607c8e39e63
11,913
def baby_names_collapsed_from_list(a_baby_names_list):
    """
    a_baby_names_list is a list of lists, each element [name, rank]
    Collapse list element to a string
    """
    print('baby_names_collapsed_from_list')
    baby_names_collapsed = []
    for baby_element in a_baby_names_list:
        baby_names_collapsed.append('{} {}'.format(baby_element[0], baby_element[1]))
    # print(baby_names_collapsed)
    return baby_names_collapsed
f874c1fb205e0a86e46db8a3e2c0002712db82cb
11,916
import re

def commit_message_contains_query(message, query_terms):
    """
    Check if the commit message contains the query terms

    @param message: The commit message
    @param query_terms: The terms that we look for in the message
    @return:
    """
    tester = r'\b(' + '|'.join(query_terms) + ')'
    has_refactor_string = r'\b(refactor)'
    return bool(re.search(tester, message, re.IGNORECASE)) and not bool(
        re.search(has_refactor_string, message, re.IGNORECASE))
b701dac00971658be13a6b7207f8685c3388609e
11,925
def get_event_role(bot, guild):
    """Return the event role, if it exists"""
    result = bot.db.get_event_role_id(guild.id)
    if result:
        for role in guild.roles:
            if role.id == result.get('event_role_id'):
                return role
85656ee6b65896762197008108c7b09830a5a4a8
11,926
from typing import Optional
from typing import Dict
from typing import Any

def commit_draft(
    access_key: str,
    url: str,
    owner: str,
    dataset: str,
    *,
    draft_number: int,
    title: str,
    description: Optional[str] = None,
) -> Dict[str, str]:
    """Execute the OpenAPI `POST /v2/datasets/{owner}/{dataset}/commits`.

    Arguments:
        access_key: User's access key.
        url: The URL of the graviti website.
        owner: The owner of the dataset.
        dataset: Name of the dataset, unique for a user.
        draft_number: The draft number.
        title: The draft title.
        description: The draft description.

    Returns:
        The response of OpenAPI.

    Examples:
        >>> commit_draft(
        ...     "ACCESSKEY-********",
        ...     "https://api.graviti.com",
        ...     "czhual",
        ...     "MNIST",
        ...     draft_number=2,
        ...     title="commit-2",
        ... )
        {
           "commit_id": "85c57a7f03804ccc906632248dc8c359",
           "parent_commit_id": "784ba0d3bf0a41f6a7bfd771d8c00fcb",
           "title": "upload data",
           "description": "",
           "committer": "czhual",
           "committed_at": "2021-03-03T18:58:10Z"
        }
    """
    url = f"{url}/v2/datasets/{owner}/{dataset}/commits"
    post_data: Dict[str, Any] = {"draft_number": draft_number, "title": title}
    if description:
        post_data["description"] = description
    return open_api_do(  # type: ignore[no-any-return]
        "POST", access_key, url, json=post_data
    ).json()
395070863b678892ca1004de20ac1b163d6e2da8
11,929
import random

def capnp_id() -> str:
    """
    Generates a valid id for a capnp schema.

    Returns:
        str -- capnp id
    """
    # the bitwise is for validating the id check capnp/parser.c++
    return hex(random.randint(0, 2 ** 64) | 1 << 63)
55ae4e112c3ba223168627d7d06fab327a0d4f82
11,934
def linspace(start, stop, np):
    """Emulate Matlab linspace"""
    return [start + (stop - start) * i / (np - 1) for i in range(np)]
b1be58298ff9983f6e2f6c5156cb4497ef8668d9
11,935
def generate_wiki_redirect_text(redirect_name: str) -> str:
    """Generate wikitext for redirect."""
    return f'#REDIRECT [[{redirect_name}]]'
f6e55fa20004d836ea601a1d3966d070273df237
11,938
import re

def read_html(htmlfile):
    """Reads the HTML file to a string.

    Removes some potential trouble makers.
    """
    with open(htmlfile, "r") as infile:
        html = infile.read()

    # Clean out some unnecessary stuff
    # (re.S must be passed as flags, not as the positional count argument)
    html = re.sub("<div class=\"wp-about-author.*?</div>", "", html, flags=re.S)
    html = re.sub("<h4>Sobre la autora</h4>", "", html)
    html = re.sub("More Posts</a>", "", html)
    html = re.sub("Follow Me:", "", html)
    html = re.sub("<em>", "", html)
    html = re.sub("</em>", "", html)
    html = re.sub("<i>", "", html)
    html = re.sub("</i>", "", html)
    return html
bb0f724792cd817464a8720199ce7c7035e6b0f1
11,941
def WENOReconstruct(u_stencil, eps, p):
    """WENO reconstruction. This reconstructs u_{i+1/2} given cell averages
    \\bar{u}_i at each neighboring location.

    See `High Order Weighted Essentially Nonoscillatory Schemes for Convection
    Dominated Problems'.

    Input
    -----
    u_stencil : stencil describing current solution
    eps : regularization parameter
    p : power parameter

    Returns
    -------
    uiphf : u_{i+1/2} after performing the full reconstruction procedure.
    """
    uim2, uim1, ui, uip1, uip2 = u_stencil
    return uim2/30 - (13*uim1)/60 + 47*(ui/60) + 9*(uip1/20) - uip2/20
aa49be7b069f09c90c9b350d86575376eb5c9fbb
11,949
from datetime import datetime

def chart_grouping_as_date(value):
    """Transforms a string YYYYMM or YYYY in a date object"""
    value = str(value)
    for format in ('%Y', '%Y%m'):
        try:
            return datetime.strptime(str(value), format).date()
        except ValueError:
            pass
92f80be0baea30944ebd90e1377dfa953f5fb7fa
11,954
def get_existing_symptoms(journal):
    """Given a journal w/ proper format, aggregates all the symptoms

    args:
        journal (dict)

    returns:
        [str]: array of symptom names
    """
    symptoms = []
    for log in journal['journal']:
        symptoms.extend([
            symptom['name'] for symptom in log['symptoms']
            if symptom["name"] not in symptoms
        ])
    return symptoms
e8f90ff3344318b53ae91e79d29f572110103959
11,958
import re

def clean(s):
    """Strip leading & trailing space, remove newlines, compress space.
    Also expand '{NL}' to a literal newline.

    :param str s:
    :rtype: str
    """
    s = s.strip()
    s = re.sub(' +', ' ', s)
    s = s.replace('\n', '')
    s = s.replace('\r', '')
    s = s.replace('{NL}', '\n')
    return s
f2c07984de1766b7a1d0258b29ad8030e87896e8
11,971
from bs4 import BeautifulSoup

def wrap_with_tag(html: str, document_wrapper_class: str) -> str:
    """
    Wraps a string of HTML with a div using a given wrapper class

    Args:
        html (str): The HTML to be wrapped
        document_wrapper_class (str): The class with which to wrap the HTML

    Returns:
        str: Newly wrapped HTML
    """
    soup = BeautifulSoup(html, 'html.parser')
    new_div = soup.new_tag('div')
    new_div['class'] = document_wrapper_class
    # copy the element list first: appending to new_div re-parents each element,
    # which would otherwise mutate the tree while it is being iterated
    for element in list(soup.children):
        new_div.append(element)
    return new_div.prettify()
075cf4ef818eb38f2b0a8a16c76fcc5cc11cdec9
11,983
def slave_entry(slave, programs, filesystems):
    """
    Template tag {% slave_entry slave programs %} is used to display a single slave.

    Arguments
    ---------
        slave: Slave object
        programs: Array of programs
        filesystems: Array of filesystems

    Returns
    -------
        A context which maps the slave object to slave, the array of programs
        to programs and the array of filesystems to filesystems.
    """
    return {
        'slave': slave,
        'programs': programs,
        'filesystems': filesystems,
    }
7449eec9d906bfe74245cbdec2a76fd7a2fc8157
11,984