content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def yes_or_no(question, default="no"):
    """Prompt until the user answers yes or no; return True for yes.

    An empty answer falls back to *default* ("no" unless overridden),
    which is highlighted in brackets in the prompt.
    """
    answers = "yes|[no]" if default == "no" else "[yes]|no"
    prompt = "{} {}: ".format(question, answers)
    while True:
        reply = input(prompt).lower() or default
        if reply in ('yes', 'y'):
            return True
        if reply in ('no', 'n'):
            return False
496137bcd3d99a3f0bcc5bb87ab3dc090f8fc414
706,965
def decode(encoded: list):
    """Problem 12: expand a run-length encoded list.

    Entries that are themselves lists are treated as ``[count, value]``
    pairs and expanded to ``count`` copies of ``value``; any other entry
    is copied through unchanged.

    Parameters
    ----------
    encoded : list
        The encoded input list

    Returns
    -------
    list
        The decoded list

    Raises
    ------
    TypeError
        If the given argument is not of `list` type
    """
    if not isinstance(encoded, list):
        raise TypeError('The argument given is not of `list` type.')
    result = []
    for item in encoded:
        if isinstance(item, list):
            result += item[0] * [item[1]]
        else:
            result.append(item)
    return result
8fb273140509f5a550074c6d85e485d2dc1c79d0
706,966
def geom_cooling(temp, k, alpha = 0.95):
    """Geometric cooling schedule: shrink the temperature by factor *alpha*.

    ``k`` (the step index) is unused by the geometric schedule but kept
    for signature parity with other cooling functions.
    """
    return alpha * temp
4263e4cc8a5de21d94bc560e8ff364d8c07f97fd
706,971
import re


def bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False):
    """
    Take bytes and return a safe string that can be displayed to the user.

    By default single quotes are UN-escaped (so ``"'" + result + "'"`` stays a
    valid Python string literal) and double quotes are never escaped.

    Args:
        data: the bytes to render.
        keep_spacing: If True, tabs and newlines will not be escaped.
        escape_single_quotes: If True, leave ``\\'`` escapes in place.

    Raises:
        ValueError: if *data* is not bytes.
    """
    if not isinstance(data, bytes):
        raise ValueError("data must be bytes, but is {}".format(data.__class__.__name__))
    # We always insert a double-quote here so that repr() emits a single-quoted
    # string back (repr picks quoting based on content); the leading b'" and
    # trailing ' are then sliced off.
    # https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their
    ret = repr(b'"' + data).lstrip("b")[2:-1]
    if not escape_single_quotes:
        # Undo \' escapes that are preceded by an EVEN number of backslashes
        # (i.e. the backslash really belongs to the quote, not to data).
        ret = re.sub(r"(?<!\\)(\\\\)*\\'", lambda m: (m.group(1) or "") + "'", ret)
    if keep_spacing:
        # Same even-backslash trick: turn literal \n / \r / \t escapes back
        # into real whitespace characters.
        ret = re.sub(
            r"(?<!\\)(\\\\)*\\([nrt])",
            lambda m: (m.group(1) or "") + dict(n="\n", r="\r", t="\t")[m.group(2)],
            ret
        )
    return ret
fe8aa0ed3a8e3f2c7a2cf1aaeebc555b7281bde7
706,972
from datetime import datetime


def datetime_to_timestamp(dt, epoch=datetime(1970, 1, 1)):
    """Convert a naive datetime to a Unix timestamp (seconds since *epoch*).

    This is a non-timezone-aware function.

    :param dt: datetime to convert to timestamp
    :param epoch: datetime, optional start of epoch [default: 1/1/1970]
    :return: timestamp in seconds (float; fractional part carries microseconds)
    """
    td = dt - epoch
    # BUG FIX: the original returned microseconds + seconds + days*86400,
    # adding the microsecond count as whole seconds (off by a factor of 1e6).
    # total_seconds() performs the correct days/seconds/microseconds scaling.
    return td.total_seconds()
2fbd5b3d6a56bc04066f7aaa8d4bef7c87a42632
706,982
def connectivity_dict_builder(edge_list, as_edges=False):
    """Build a node-connectivity dictionary from an edge list.

    Args:
        edge_list (list): tuples ``(edge_name, node_a, node_b)``,
            e.g. ``[('E7', 'N3', 'N6'), ('E2', 'N9', 'N4'), ...]``
        as_edges (bool): when True, map each node to the set of incident
            edge names instead of neighbouring nodes.

    Returns:
        dict: node -> set of connected nodes (or edges),
        e.g. ``{'N3': {'N6', 'N11', 'N7'}, 'N9': {'N4'}, ...}``
    """
    connectivity = {}
    for edge, node_a, node_b in edge_list:
        connectivity.setdefault(node_a, set()).add(edge if as_edges else node_b)
        connectivity.setdefault(node_b, set()).add(edge if as_edges else node_a)
    return connectivity
58f24c6465fa1aaccca92df4d06662b0ce1e1e77
706,983
def calc_recall(TP, FN):
    """Return recall TP / (TP + FN); defined as 0 when TP + FN == 0."""
    denominator = TP + FN
    if denominator == 0:
        return 0
    return TP / denominator
8f3513e11f8adad111eee32740c271aad31fbe28
706,992
def make_segment(segment, discontinuity=False):
    """Build the playlist lines for one HLS segment.

    A ``#EXT-X-DISCONTINUITY`` tag is prepended when *discontinuity* is set.
    """
    lines = ["#EXT-X-DISCONTINUITY"] if discontinuity else []
    lines.append("#EXTINF:10.0000,")
    lines.append(f"./segment/{segment}.m4s")
    return "\n".join(lines)
8419b100409934f902c751734c396bc72d8a6917
706,993
from typing import Any


def from_dicts(key: str, *dicts, default: Any = None):
    """Return the value of *key* from the first dict that contains it.

    Falls back to *default* when no dict has the key.

    Return:
        Any
    """
    matches = (d[key] for d in dicts if key in d)
    return next(matches, default)
508febc48fd22d3a23dc0500b0aa3824c99fdbc3
706,994
def time_in_words(h, m):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/the-time-in-words/problem

    Convert a clock time to English words, e.g.::

        5:00 -> five o' clock
        5:01 -> one minute past five
        5:15 -> quarter past five
        5:30 -> half past five
        5:45 -> quarter to six
        5:47 -> thirteen minutes to six

    At minutes == 0 use "o' clock"; for 1 <= minutes <= 30 use "past";
    for minutes > 30 use "to" the next hour.

    Args:
        h (int): hour of the day (1-12)
        m (int): minutes after the hour (0-59)

    Returns:
        str: string representation of the time
    """
    time = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten",
            "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen",
            "eighteen", "nineteen", "twenty", "twenty one", "twenty two", "twenty three",
            "twenty four", "twenty five", "twenty six", "twenty seven", "twenty eight",
            "twenty nine"]
    hour = time[h - 1]
    # BUG FIX: the original used time[h] for the next hour, which yields
    # "thirteen" after twelve (e.g. 12:45 -> "quarter to thirteen").
    # Wrap with modulo so hour 12 rolls over to "one".
    next_hour = time[h % 12]
    # On the hour
    if m == 0:
        return "{0} o' clock".format(hour)
    # Singular "minute"
    if m == 1:
        return "one minute past {0}".format(hour)
    if m == 15:
        return "quarter past {0}".format(hour)
    if m == 30:
        return "half past {0}".format(hour)
    if m == 45:
        return "quarter to {0}".format(next_hour)
    # Generic "past" up to the half hour
    if m < 30:
        return "{0} minutes past {1}".format(time[m - 1], hour)
    # Past the half hour: count minutes remaining to the next hour
    return "{0} minutes to {1}".format(time[59 - m], next_hour)
85f2247f01df36ef499105a9940be63eee189100
706,995
def concat_files(*files):
    """Concatenate source files into the last-named destination file.

    Args:
        *files: src1, src2, ..., srcN, dst.

    Returns:
        out: string (always empty; kept for parity with shell commands)
        err: string (always empty; kept for parity with shell commands)
    """
    out = ''
    err = ''
    *sources, dst_name = files
    with open(dst_name, 'w') as dst:
        for src_name in sources:
            with open(src_name, 'r') as src:
                dst.writelines(src)
    return out, err
101c37e5b3955c153c8c2210e7575a62341c768a
706,998
def midi_to_chroma(pitch):
    """Map a MIDI pitch (e.g. 60 == C) to its chroma class.

    A == 0, A# == 1, ..., G# == 11.
    """
    # Shift by 3 so that A (pitch class 9) lands on chroma 0.
    return (pitch + 3) % 12
25ef72f78269c3f494ca7431f1291891ddea594a
707,005
import re def _snippet_items(snippet): """Return all markdown items in the snippet text. For this we expect it the snippet to contain *nothing* but a markdown list. We do not support "indented" list style, only one item per linebreak. Raises SyntaxError if snippet not in proper format (e.g. contains anything other than a markdown list). """ unformatted = snippet.text and snippet.text.strip() # treat null text value as empty list if not unformatted: return [] # parse out all markdown list items items = re.findall(r'^[-*+] +(.*)$', unformatted, re.MULTILINE) # if there were any lines that didn't yield an item, assume there was # something we didn't parse. since we never want to lose existing data # for a user, this is an error condition. if len(items) < len(unformatted.splitlines()): raise SyntaxError('unparsed lines in user snippet: %s' % unformatted) return items
bdeb5b5c5e97ef3a8082b7131d46990de02a59af
707,006
import itertools
import re


def parse_cluster_file(filename):
    """Parse CD-HIT ``.clstr`` output into a dictionary of clusters.

    Returns ``{cluster_name: [[seq_id, is_reference], ...]}`` where
    *is_reference* is True for the line CD-HIT marks with ``*``.

    Adapted from a small wrapper script
    (https://github.com/Y-Lammers/CD-HIT-Filter/blob/master/CD-HIT-Filter.py,
    author: Youri Lammers).
    """
    # open the cluster file and set the output dictionary
    # NOTE(review): the file handle is never explicitly closed here — it is
    # only released when garbage-collected; confirm that is acceptable.
    cluster_file, cluster_dic = open(filename), {}

    # Group lines by whether they start with ">": groups alternate between
    # a single ">Cluster N" header line and the member-sequence lines.
    cluster_groups = (x[1] for x in itertools.groupby(cluster_file, key=lambda line: line[0] == '>'))

    # Each loop iteration consumes TWO groups: `cluster` (the header group,
    # taken by the for) and the member group pulled via next(cluster_groups).
    for cluster in cluster_groups:
        # next(cluster) retrieves the header line (">cluster name")
        name = next(cluster).strip()
        name = re.sub(' ', '_', name[1:])
        # next(cluster_groups) advances to the member-line group; each line
        # looks like "0\t141nt, >seq_id... *" or "...at +/97%".
        seqs = [seq.split('>')[1].split('...') for seq in next(cluster_groups)]
        # True when the trailing field is exactly " *\n" (the reference seq).
        seqs = [[seq[0], (True if seq[1] == ' *\n' else False)] for seq in seqs]
        cluster_dic[name] = seqs

    # return the cluster dictionary
    return cluster_dic
d50eaeb926be3a7b8d1139c82142e4a1b595c1a0
707,011
import base64


def decode_password(base64_string: str) -> str:
    """Decode a base64-encoded ASCII string.

    Args:
        base64_string: str
            The base64 encoded string.

    Returns:
        str: the decoded plain text.
    """
    raw_bytes = base64.b64decode(base64_string.encode("ascii"))
    return raw_bytes.decode("ascii")
0f04617c239fbc740a9b4c9c2d1ae867a52e0c74
707,015
from typing import Iterable
from typing import Any
from typing import List


def drop(n: int, it: Iterable[Any]) -> List[Any]:
    """Return the elements of *it* as a list with the first *n* removed.

    Args:
        n: Number of leading elements to drop
        it: Iterable object

    Examples:
        >>> drop(3, [1, 2, 3, 4, 5])
        [4, 5]
    """
    materialized = list(it)
    return materialized[n:]
0732bd560f0da0a43f65ee3b5ed46fd3a05e26f5
707,017
def csv_args(value):
    """Parse a CSV string into a Python list of strings.

    Used in command line parsing.

    :param value: comma-separated string, e.g. ``"a,b,c"``.
    :return: list of the comma-separated fields.
    """
    # BUG FIX: the original returned a lazy, single-use ``map(str, ...)``
    # iterator even though the docstring promises a list; the str() calls
    # were no-ops on split()'s string output anyway.
    return value.split(",")
b2596180054f835bfe70e3f900caa5b56a7856a6
707,018
def reverse(sequence):
    """Return a reversed copy of any sliceable sequence."""
    reversed_copy = sequence[::-1]
    return reversed_copy
f08ae428844347e52d8dbf1cd8ad07cfbf4ef597
707,020
def kwargs_to_flags(**kwargs):
    """Render keyword arguments as CLI flags.

    True booleans become bare ``--key`` flags, False booleans are omitted,
    and every other value becomes ``--key=value``.
    """
    flags = []
    for key, val in kwargs.items():
        if not isinstance(val, bool):
            flags.append(f"--{key}={val}")
        elif val:
            flags.append(f"--{key}")
    return " ".join(flags)
aa672fe26c81e7aaf8a6e7c38354d1649495b8df
707,025
def _groupby_clause(uuid=None, owner=None, human_name=None, processing_name=None): """ Build the groupby clause. Simply detect which fields are set, and group by those. Args: uuid: owner: human_name: processing_name: Returns: (str): "field, ..., field" """ gbc = '' clauses = [] if uuid is not None: clauses.append('uuid') if owner is not None: clauses.append('owner') if human_name is not None: clauses.append('human_name') if processing_name is not None: clauses.append('processing_name') if len(clauses) > 0: gbc = ','.join(clauses) return gbc
21546efa19e841661ed3a7ad8a84cf9a9a76d416
707,027
def has_numbers(input_str: str):
    """Return True if *input_str* contains at least one digit character."""
    for ch in input_str:
        if ch.isdigit():
            return True
    return False
5038cb737cdcfbad3a7bd6ac89f435559b67cebc
707,028
import collections


def get_gradients_through_compute_gradients(optimizer, loss, activations):
    """Compute gradients of *loss* w.r.t. each activation, keyed by feature.

    Args:
        optimizer: a subclass of optimizer.Optimizer (usually
            CrossShardOptimizer); its ``compute_gradients()`` is called.
        loss: a Tensor passed to ``optimizer.compute_gradients()``.
        activations: an OrderedDict mapping feature_name to activation Tensors.

    Returns:
        An OrderedDict mapping feature name Strings to Tensors of gradients
        of the loss w.r.t. that feature's activation.
    """
    grads_and_vars = optimizer.compute_gradients(loss, activations.values())
    gradients = (grad for grad, _ in grads_and_vars)
    return collections.OrderedDict(zip(activations.keys(), gradients))
2a2ebca1e6024e11f541e3ccaf1fee4acd7ab745
707,035
def AdditionalMedicareTax(e00200, MARS, AMEDT_ec, sey, AMEDT_rt,
                          FICA_mc_trt, FICA_ss_trt, ptax_amc, payrolltax):
    """Compute Additional Medicare Tax (Form 8959) included in payroll taxes.

    Notes
    -----
    Tax Law Parameters:
        AMEDT_ec : Additional Medicare Tax earnings exclusion (by MARS)
        AMEDT_rt : Additional Medicare Tax rate
        FICA_ss_trt : FICA Social Security tax rate
        FICA_mc_trt : FICA Medicare tax rate
    Taxpayer Characteristics:
        e00200 : Wages and salaries
        sey : Self-employment income

    Returns
    -------
    (ptax_amc, payrolltax):
        ptax_amc : Additional Medicare Tax
        payrolltax : payroll tax augmented by Additional Medicare Tax
    """
    # Self-employment income net of the employer-equivalent FICA share.
    sey_net = max(0., sey) * (1. - 0.5 * (FICA_mc_trt + FICA_ss_trt))
    exclusion = AMEDT_ec[MARS - 1]
    # Portion of the exclusion not already used up by wages.
    unused_exclusion = max(0., exclusion - e00200)
    ptax_amc = AMEDT_rt * (max(0., e00200 - exclusion) +
                           max(0., sey_net - unused_exclusion))
    payrolltax += ptax_amc
    return (ptax_amc, payrolltax)
de0e35fbe5c7c09de384e1302cba082149ea5930
707,036
def get_parameter(dbutils, parameter_name: str, default_value='') -> str:
    """Register a text widget and return its current value.

    When the notebook is run from ADF, the widget value is supplied
    externally and overrides *default_value*.
    """
    widgets = dbutils.widgets
    widgets.text(parameter_name, default_value)
    return widgets.get(parameter_name)
cf8359e6acea68ea26e24cc656847e5560019bd1
707,039
def aic(llf, nobs, df_modelwc):
    """Akaike information criterion: ``-2*llf + 2*k``.

    Parameters
    ----------
    llf : {float, array_like}
        value of the loglikelihood
    nobs : int
        number of observations (accepted for signature parity; plain AIC
        does not use it)
    df_modelwc : int
        number of parameters including constant

    Returns
    -------
    aic : float
        information criterion

    References
    ----------
    https://en.wikipedia.org/wiki/Akaike_information_criterion
    """
    penalty = 2.0 * df_modelwc
    return -2.0 * llf + penalty
3940c1c86325630248fdf4a50c2aa19b4f4df623
707,040
def packpeeklist1(n1, n2, n3, n4, n5):
    """Pack the five arguments into a list and return it."""
    return [n1, n2, n3, n4, n5]
4b781ff3e8eb4a1bd51f8e834fab5462371a85c5
707,041
def gen_gap(Pn, T, Q):
    """Run the generalization-gap test.

    Compares the likelihood the model assigns to the training set against
    the likelihood assigned to a held-out test set.

    Inputs:
        Pn: (n X d) np array containing the held-out test sample
        T: (l X d) np array containing the training sample
        Q: trained model exposing ``score()`` (e.g. KernelDensity)

    Outputs:
        scalar: log-likelihood of T minus log-likelihood of Pn
    """
    train_loglik = Q.score(T)
    heldout_loglik = Q.score(Pn)
    return train_loglik - heldout_loglik
d57d16c06d05cea86e6f6ea89484574f20500170
707,043
def create_slice_obj(start, end, step):
    """Build and return a ``slice(start, end, step)`` object."""
    slice_obj = slice(start, end, step)
    return slice_obj
88a5c5a9e0d3b714b4316d8744fcdd1a34f347a7
707,047
def scalar(typename):
    """Strip an array suffix from a ROS message data type.

    E.g. "uint8[100]" -> "uint8". Already-scalar names pass through unchanged.
    """
    bracket = typename.find("[")
    if bracket < 0:
        return typename
    return typename[:bracket]
729fb68bced11e190b3d32d03bbadd921f191bee
707,048
from typing import Dict
from typing import Type


def remap_shared_output_descriptions(output_descriptions: Dict[str, str], outputs: Dict[str, Type]) -> Dict[str, str]:
    """Share a single return-value description across all outputs.

    Deals with mixed styles of return-value descriptions in docstrings: if
    *output_descriptions* holds exactly one entry, that description is
    applied to every output variable; otherwise the mapping is returned
    unchanged.

    :param output_descriptions: Dict of output variable names mapping to output description
    :param outputs: Interface outputs
    :return: Dict of output variable names mapping to (possibly shared) description
    """
    if len(output_descriptions) != 1:
        # nothing to share
        return output_descriptions
    (_, shared), = output_descriptions.items()
    return dict.fromkeys(outputs, shared)
06d589016a747230f88aa3507bd751fd30095222
707,050
def fitarg_rename(fitarg, ren):
    """Rename parameter names in *fitarg* with a rename function or prefix.

    ``ren`` may be a callable mapping old name -> new name, or a plain
    string used as a prefix (``'p'`` renames ``'x'`` to ``'p_x'``). Keys of
    the form ``limit_x`` / ``fix_x`` / ``error_x`` keep their marker prefix
    while only the bare parameter name is renamed. ::

        # simple renaming
        fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
                      lambda pname: 'y' if pname=='x' else pname)
        # {'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1}

        # prefixing
        fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1}, 'prefix')
        # {'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
    """
    if isinstance(ren, str):
        prefix_str = ren
        ren = lambda name: prefix_str + '_' + name
    markers = ('limit_', 'fix_', 'error_')
    renamed = {}
    for key, value in fitarg.items():
        marker = ''
        base = key
        for m in markers:
            if key.startswith(m):
                marker = m
                base = key[len(m):]
        renamed[marker + ren(base)] = value
    return renamed
151233d0f18eaea564afbc6d600d576407504b35
707,051
def _get_matching_stream(smap, itag): """ Return the url and signature for a stream matching itag in smap. """ for x in smap: if x['itag'] == itag and x.get("s"): return x['url'], x['s'] raise IOError("Sorry this video is not currently supported by pafy")
dc83fd3207d5ab4e1c85eb719f5f7d023131565e
707,053
def linear_search(alist, key):
    """Return the index of the first occurrence of *key* in *alist*,
    or -1 when the key is absent."""
    for index, value in enumerate(alist):
        if value == key:
            return index
    return -1
ab4c0517f9103a43509b0ba511c75fe03ea6e043
707,058
def extract_sigma_var_names(filename_nam):
    """Parse a 'sigma.nam' file and return the list of variable names.

    Each line holds one name; names containing a semicolon (vector
    components) are truncated at the first semicolon.
    """
    names = []
    with open(filename_nam, 'r') as fh:
        for raw_line in fh:
            # split(';')[0] also handles lines without a semicolon
            names.append(raw_line.strip().split(';')[0])
    return names
930e855d47c4303cac28e9973982392489fb577d
707,060
from typing import Any


def all_tasks_stopped(tasks_state: Any) -> bool:
    """Check whether every task has stopped.

    Parameters
    ----------
    tasks_state: Any
        Task state dictionary object (expects a "tasks" list whose items
        carry a "lastStatus" field).

    Returns
    -------
    bool
        True if no task is PENDING or RUNNING.
    """
    return not any(
        task["lastStatus"] in ("PENDING", "RUNNING")
        for task in tasks_state["tasks"]
    )
98edffe71052cc114a7dda37a17b3a346ef59ef8
707,062
def dataclass_fields(dc):
    """Return a dict mapping each dataclass field name to its current value."""
    values = {}
    for name in dc.__dataclass_fields__:
        values[name] = getattr(dc, name)
    return values
4b82af3bfbc02f7bbfcf1aecb6f6501ef10d86e1
707,063
import pathlib def _suffix_directory(key: pathlib.Path): """Converts '/folder/.../folder/folder/folder' into 'folder/folder'""" key = pathlib.Path(key) shapenet_folder = key.parent.parent key = key.relative_to(shapenet_folder) return key
147539065c3d21ee351b23f2d563c662fe55f04a
707,078
def text_to_string(filename, useEncoding):
    """Read a text file with the given encoding (ignoring decode errors)
    and return its contents as one string."""
    with open(filename, encoding=useEncoding, errors='ignore') as fh:
        contents = fh.read()
    return contents
f879bb747699496204820b74944fd563658a7117
707,080
def iscomment(s):
    """Return True when *s* is a comment line in MontePython chain files
    (i.e. it starts with '#')."""
    flagged = s.startswith('#')
    return flagged
ab3a9d240e423c562c9e83cdd9599fddf144b7c3
707,081
def CommaSeparatedFloats(sFloatsCSV):
    """Parse comma-separated floats from a string; spaces are ignored.

    [sFloatsCSV]: string, contains comma-separated floats.
    <retval>: list, floats parsed from string.
    """
    cleaned = sFloatsCSV.replace(" ", "")
    return [float(token) for token in cleaned.split(",")]
1aa12ca7297aa3bd809f6d2ffaf155233a826b49
707,089
def topological_sort(g):
    """Return the vertices of directed acyclic graph *g* in topological order.

    Kahn's algorithm: repeatedly emit a vertex with no remaining incoming
    edges, then relax the outgoing edges of the emitted vertex.
    """
    topo = []
    # Remaining incoming-edge count per vertex.
    in_count = {v: g.degree(v, outgoing=False) for v in g.vertices()}
    # Vertices with no constraints (no incoming edges) are ready to emit.
    ready = [v for v, count in in_count.items() if count == 0]
    while ready:
        u = ready.pop()
        topo.append(u)
        for e in g.incident_edges(u):
            v = e.opposite(u)
            in_count[v] -= 1  # u no longer constrains v
            if in_count[v] == 0:
                ready.append(v)
    return topo
5ac6261bf1b6fa92280abdc3fc95679ad9294e80
707,092
from typing import List
from typing import Tuple


def getElementByClass(className: str, fileName: str) -> List[Tuple[int, str]]:
    """Return the first matching tag from an HTML/XML document.

    Scans *fileName* line by line for ``class="className"`` (double or
    single quotes) and returns ``[(line_number, line)]`` for the first hit
    (1-based line numbers, trailing newline stripped), or ``[]`` if no
    line matches.
    """
    pattern: str = f'class="{className}"'
    patternAlt: str = f"class='{className}'"
    matches: List[Tuple[int, str]] = []
    # Open read-only: the original used "r+" (write access) for a pure read.
    with open(fileName, "r") as f:
        for lineNo, rawLine in enumerate(f, start=1):
            line = rawLine.replace("\n", "")
            if pattern in line or patternAlt in line:
                # BUG FIX: the original computed the line number with
                # list.index(line), which returns the position of the FIRST
                # identical line — wrong whenever a duplicate line precedes
                # the match. enumerate() tracks the true line number.
                matches.append((lineNo, line))
                break
    return matches
969e4070e16dec2e10e26e97cbaaab9d95e7b904
707,095
import math


def func (x):
    """Unnormalized sinc: sin(x)/x, with the removable singularity
    func(0) == 1.0 handled explicitly."""
    if x != 0:
        return math.sin (x) / x
    return 1.0
c91242e360547107f7767e442f40f4bf3f2b53e8
707,100
import copy


def db_entry_trim_empty_fields(entry):
    """Return a deep copy of an internal-format entry dict with empty
    'url' / 'title' / 'extended' fields removed.

    A field counts as empty when it is None or a zero-length str.
    The input dict is left untouched.
    """
    trimmed = copy.deepcopy(entry)
    for field in ('url', 'title', 'extended'):
        if field not in entry:
            continue
        value = entry[field]
        if value is None or (type(value) is str and len(value) == 0):
            del trimmed[field]
    return trimmed
d5b31c823f4e8091872f64445ab603bcbf6a2bef
707,102
def _is_install_requirement(requirement): """ return True iff setup should install requirement :param requirement: (str) line of requirements.txt file :return: (bool) """ return not (requirement.startswith('-e') or 'git+' in requirement)
339f6a8a573f33157a46193216e90d62475d2dea
707,104
def move_to_next_pixel(fdr, row, col):
    """Follow a D8 flow-direction grid one step downstream.

    Given fdr (flow direction array) and the current (row, col), return
    the (row, col) of the downstream neighbour.

    D8 flow direction codes (ArcGIS convention):
        | 32 | 64 | 128 |
        | 16 |  X |  1  |
        |  8 |  4 |  2  |
    Any other value (indeterminate direction / sink) leaves the position
    unchanged.

    See How Flow Direction works:
    http://desktop.arcgis.com/en/arcmap/latest/tools/spatial-analyst-toolbox/how-flow-direction-works.htm
    """
    # D8 code -> (row delta, col delta)
    offsets = {
        1: (0, 1), 2: (1, 1), 4: (1, 0), 8: (1, -1),
        16: (0, -1), 32: (-1, -1), 64: (-1, 0), 128: (-1, 1),
    }
    d_row, d_col = offsets.get(fdr[row, col], (0, 0))
    return (row + d_row, col + d_col)
d134bb35ed4962945c86c0ac2c6af1aff5acd06b
707,105
from typing import Union


def addition(a:Union[int, float], b:Union[int, float]) -> Union[int, float]:
    """Return the sum of `a` and `b`."""
    return a + b
b9adaf3bea178e23bd4c02bdda3f286b6ca8f3ab
707,107
import contextlib
import sqlite3


def run_sql_command(query: str, database_file_path:str, unique_items=False) -> list:
    """Run *query* against the SQLite database at *database_file_path*.

    Parameters:
        query (str): An SQL query
        database_file_path (str): absolute path of the SQLite database file
        unique_items (bool): when True, unwrap single-column rows and return
            a flat list of values instead of 1-tuples

    Returns:
        records (list): the fetched rows
    """
    with contextlib.closing(sqlite3.connect(database_file_path)) as conn:
        with conn:  # transaction scope: commit on success, rollback on error
            with contextlib.closing(conn.cursor()) as cursor:
                cursor.execute(query)
                records = cursor.fetchall()
    if unique_items:
        records = [row[0] for row in records]
    return records
705584db31fd270d4127e7d1b371a24a8a9dd22e
707,110
def compact_float(n, max_decimals=None):
    """Reduce a float to a more compact value.

    Args:
        n: Floating point number.
        max_decimals: Maximum decimals to keep; defaults to None.

    Returns:
        An int when *n* is essentially integral; otherwise a string with
        *max_decimals* digits after the point when that is given; otherwise
        *n* unchanged.
    """
    if float(n).is_integer():
        return int(n)
    if max_decimals is not None:
        return "{0:.{1}f}".format(n, max_decimals)
    return n
827e49e05aaca31d497f84c2a8c8dd52cfad73d9
707,112
def arrToDict(arr):
    """Map every element of *arr* to 1 — a cheap membership-testing dict."""
    return {item: 1 for item in arr}
3202aac9a6c091d7c98fd492489dbcf2300d3a02
707,118
def convert_to_dtype(data, dtype):
    """Cast xarray, pandas, or NumPy data to the given dtype.

    Parameters
    ----------
    data: xarray.Dataset, xarray.DataArray, pandas.Series, pandas.DataFrame,
          or numpy.ndarray
    dtype: str or numpy.dtype
        A Python datatype name (e.g. int, float) or a NumPy dtype
        (e.g. np.int16, np.float32). ``None`` means "no conversion".
    """
    if dtype is None:
        return data
    return data.astype(dtype)
ec3130311fe9c136707d5afb8f564b4f89067f4e
707,120
import re


def match(text: str, pattern: str) -> bool:
    """Match a text against a regular expression (anchored at the start).

    :param text: string to examine.
    :param pattern: regular expression.
    :returns: ``True`` if *pattern* matches at the beginning of *text*.
    """
    result = re.match(pattern, text)
    return result is not None
a59d71283766c5079e8151e8be49501246218001
707,125
from typing import Tuple
import math


def euler_to_quaternion(roll: float = 0, pitch: float = 0, yaw: float = 0) -> Tuple[float, float, float, float]:
    """Convert Euler angles to a quaternion.

    Args:
        roll (float): roll angle in radians (x-axis)
        pitch (float): pitch angle in radians (y-axis)
        yaw (float): yaw angle in radians (z-axis)

    Returns:
        Tuple[float, float, float, float]: x, y, z, w
    """
    # Half-angle cosines/sines for each axis.
    cy, sy = math.cos(yaw * 0.5), math.sin(yaw * 0.5)
    cp, sp = math.cos(pitch * 0.5), math.sin(pitch * 0.5)
    cr, sr = math.cos(roll * 0.5), math.sin(roll * 0.5)

    # Standard Euler -> quaternion composition.
    w = cr * cp * cy + sr * sp * sy
    x = sr * cp * cy - cr * sp * sy
    y = cr * sp * cy + sr * cp * sy
    z = cr * cp * sy - sr * sp * cy
    return x, y, z, w
e8346172f07510c377e14827842eb18f1631402e
707,128
def table_exists(conn, table_name, schema=False):
    """Check if a table exists via information_schema.

    Parameters
    ----------
    conn
        A Psycopg2 connection.
    table_name : str
        The table name.
    schema : str
        Optional schema the table must belong to.

    Returns
    -------
    bool
        True when at least one matching row exists.
    """
    sql = ('select * from information_schema.tables '
           f'where table_name={table_name!r}')
    if schema:
        sql += f' and table_schema={schema!r}'
    cur = conn.cursor()
    cur.execute(sql)
    return bool(cur.rowcount)
c9b698afbe795a6a73ddfb87b2725c3c4205f35e
707,132
def aggregate_by_player_id(statistics, playerid, fields):
    """Aggregate selected stat fields per player.

    Inputs:
        statistics - list of batting-statistics dictionaries
        playerid   - player ID field name
        fields     - list of fields to aggregate

    Output:
        Nested dict keyed by player ID; each value is a dict containing the
        player ID plus the integer sums of the requested fields.
    """
    aggregated = {}
    for row in statistics:
        pid = row[playerid]
        entry = aggregated.get(pid)
        if entry is None:
            # first time we see this player: seed id and zeroed counters
            entry = {playerid: pid}
            for field in fields:
                entry[field] = 0
            aggregated[pid] = entry
        for field in fields:
            entry[field] += int(row[field])
    return aggregated
c137fc8820f8898ebc63c54de03be5b919fed97a
707,133
import pickle


def loadStatesFromFile(filename):
    """Load a pickled list of states from *filename*.

    Best-effort: returns [] when the file is missing, unreadable, or does
    not contain a valid pickle.
    """
    try:
        with open(filename, 'rb') as inputfile:
            return pickle.load(inputfile)
    except Exception:
        # FIX: narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt and SystemExit; those now propagate.
        return []
cc2f64a977ff030ec6af94d3601c094e14f5b584
707,134
import re


def is_mismatch_before_n_flank_of_read(md, n):
    """Check an MD tag for a mismatch within *n* nucleotides of either read end.

    :param md: string (SAM MD tag, e.g. "2A30")
    :param n: int
    :return is_mismatch: boolean — True when the leading or trailing match
        length is shorter than *n*.
    """
    # Leading digits = matched bases before the first mismatch;
    # trailing digits = matched bases after the last mismatch.
    hits = re.findall(r"^(\d+).*[ACGT](\d+)$", md)
    if not hits:
        return False
    leading, trailing = hits[0]
    if trailing and int(trailing) < n:
        return True
    if leading and int(leading) < n:
        return True
    return False
1e41c67e29687d93855ed212e2d9f683ef8a88d7
707,135
def _read_txt(file_path: str) -> str: """ Read specified file path's text. Parameters ---------- file_path : str Target file path to read. Returns ------- txt : str Read txt. """ with open(file_path) as f: txt: str = f.read() return txt
5f0657ee223ca9f8d96bb612e35304a405d2339e
707,137
def load_data(data_map,config,log):
    """Write each transformed DataFrame as a single CSV under the output path.

    :param data_map: transform DataFrame map (name -> Spark DataFrame)
    :param config: configurations dict (uses config["output"])
    :param log: logger object (unused here)
    :return: None
    """
    for name, frame in data_map.items():
        writer = frame.coalesce(1).write
        writer.csv(f'{config["output"]}/{name}', mode='overwrite', header=True)
    return None
2b690c4f5970df7f9e98ce22970ce3eb892f15bc
707,139
def extract_first_value_in_quotes(line, quote_mark):
    """Extract the first value wrapped in *quote_mark* (' or ") from a string.

    The line is left-stripped of whitespace before extraction.

    :param line: string
    :param quote_mark: type of quotation mark: ' or "
    :return: Dict:
        'value': extracted value ('' on failure);
        'remainder': text after the closing quote (stripped line on failure);
        'error': '' on success, 'syntax' otherwise.
    """
    line = line.lstrip()
    failure = {'value': '', 'remainder': line, 'error': 'syntax'}
    if len(line) < 2 or line[0] != quote_mark:
        return failure
    closing = line.find(quote_mark, 1)
    if closing == -1:
        return failure
    return {'value': line[1:closing], 'remainder': line[closing + 1:], 'error': ''}
4f614cbbb3a1a04ece0b4da63ea18afb32c1c86b
707,141
def format_dependency(dependency: str) -> str:
    """Format the dependency name for the table as "[name]".

    FIX: the original special-cased "coverage", but both branches of its
    conditional produced the identical string ("[coverage]"), so the
    condition was dead logic; a single f-string covers every input.
    """
    return f"[{dependency}]"
981a38074dbfb1f332cc49bce2c6d408aad3e9e2
707,143
def _unpickle_injected_object(base_class, mixin_class, class_name=None):
    """
    Callable for the pickler to unpickle objects of a dynamically created
    class based on the InjectableMixin. It creates the base object from the
    original base class and re-injects the mixin class when unpickling.

    :param type base_class: The base class of the pickled object before
        adding the mixin via injection.
    :param type mixin_class: The :class:`InjectableMixin` subclass that was
        injected into the pickled object.
    :param str class_name: The class name of the pickled object's
        dynamically created class.
    :return: The initial unpickled object (before the pickler restores the
        object's state).
    """
    # NOTE(review): ``object.__new__`` rejects extra positional arguments in
    # Python 3 when neither __new__ nor __init__ is overridden, so the ``()``
    # argument here relies on base_class providing a compatible __new__ —
    # confirm this holds for all classes that reach this path.
    obj = base_class.__new__(base_class, ())
    return mixin_class.inject_into_object(obj, class_name)
1821509506ad31dcdb21f07a2b83c544ff3c3eb3
707,148
import colorsys


def hsl_to_rgb(hsl):
    """Convert HSL colorspace values to 8-bit RGB.

    :param hsl: (hue 0-359, saturation 0-100, lightness 0-100) triple.
    :return: (r, g, b) tuple of ints in 0-255.
    """
    # Normalize to the 0-1 ranges colorsys expects.
    # NOTE(review): hue is divided by 359 (not the conventional 360) —
    # kept as-is for compatibility with existing callers.
    h = hsl[0] / 359.
    s = hsl[1] / 100.
    l = hsl[2] / 100.

    # BUG FIX: colorsys.hls_to_rgb takes (h, l, s) — hue, LIGHTNESS,
    # saturation. The original passed (h, s, l), feeding saturation in as
    # lightness and producing wrong colors (e.g. pure red came out white).
    tmp = colorsys.hls_to_rgb(h, l, s)

    # Scale the 0-1 floats up to 0-255 ints.
    r = int(round(tmp[0] * 255))
    g = int(round(tmp[1] * 255))
    b = int(round(tmp[2] * 255))
    return (r, g, b)
4417ce8468e71b7139b57fe270809c7030b2c3df
707,151
def strip(val):
    """Strip *val*, which may be a str or an iterable of str.

    For str input, returns the stripped string. For iterable input, returns
    a list of recursively stripped values with empty (after strip) results
    dropped. Non-string, non-iterable input is returned unchanged.
    """
    # FIX: dropped the third-party ``six`` dependency — in Python 3 ``str``
    # is the only text type, so isinstance(val, str) covers six.string_types.
    if isinstance(val, str):
        return val.strip()
    try:
        return list(filter(None, map(strip, val)))
    except TypeError:
        return val
893986e69f6d64167f45daf30dacb72f4b7f2bff
707,153
import math


def tau_polinomyal_coefficients(z):
    """
    Return the z-dependent coefficients [a0, a1, a2] for the log(tau)
    formula from Raiteri C.M., Villata M. & Navarro J.F., 1996,
    A&A 315, 105-115.
    """
    lz = math.log10(z)
    lz2 = lz ** 2
    return [
        10.13 + 0.07547 * lz - 0.008084 * lz2,
        -4.424 - 0.7939 * lz - 0.1187 * lz2,
        1.262 + 0.3385 * lz + 0.05417 * lz2,
    ]
ebef7d773eeb400ef87553fc5838ee2cb97d0669
707,154
def get_all_playlist_items(playlist_id, yt_client):
    """
    Return the video items currently in the given playlist.

    :param playlist_id: id of the playlist to query
    :param yt_client: client exposing ``get_playlist_items(playlist_id)``
    :return: whatever the client returns (a list of video ids)
    """
    items = yt_client.get_playlist_items(playlist_id)
    return items
c7a8cc806b552b1853eba1d8223aa00225d5539e
707,155
def get_library_isotopes(acelib_path):
    """
    Return the isotope identifiers listed in a cross-section library file.

    Parameters
    ----------
    acelib_path : str
        Path to the cross section library
        (i.e. '/home/luke/xsdata/endfb7/sss_endfb7u.xsdata')

    Returns
    -------
    list of str
        Isotopes in the cross section library (first whitespace-separated
        token of each non-blank line).
    """
    lib_isos_list = []
    with open(acelib_path, 'r') as f:
        # Stream line by line instead of readlines() to avoid loading
        # the whole file into memory.
        for line in f:
            tokens = line.split()
            # BUG FIX: a blank line previously raised IndexError on [0].
            if tokens:
                lib_isos_list.append(tokens[0])
    return lib_isos_list
d93d319b84c02b8156c5bad0998f5943a5bbe8ae
707,156
import json


def odict_to_json(odict):
    """Serialize an (ordered) dict into a JSON string, preserving key order."""
    return json.dumps(odict)
d18a4e0f0d11a2c529edb395671052f15ad8071d
707,157
def encode_data(data):
    """
    Coerce *data* to :class:`bytes`.

    :class:`str` input is encoded as UTF-8; :class:`bytes` passes through
    unchanged; any other type raises :class:`TypeError`.
    """
    if isinstance(data, bytes):
        return data
    if isinstance(data, str):
        return data.encode('utf-8')
    raise TypeError("data must be bytes or str")
3cd54389719439e8f18cf02b110af07799c946b5
707,158
from typing import Any


def safe_string(value: Any) -> str:
    """
    Consistently convert a value to a string, decoding bytes first so the
    result never contains a ``b'...'`` repr.

    :param value: The value to stringify.
    """
    return value.decode() if isinstance(value, bytes) else str(value)
0ba8dcfe028ac6c45e0c17f9ba02014c2f746c4d
707,163
from typing import List


def count_short_tail_keywords(keywords: List[str]) -> int:
    """
    Returns the count of short tail keywords in a list of keywords.

    NOTE(review): the condition below only matches keywords of exactly
    2 whitespace-separated words; 1-word keywords are NOT counted even
    though short-tail usually includes them -- confirm intended behavior.

    Parameters:
        keywords (List[str]): list with all keywords as strings.

    Returns:
        total (int): count of keywords consisting of exactly 2 words.
    """
    total = 0
    for keyword in keywords:
        keyword_list = keyword.split()
        # True only for exactly 2 words: len > 1 and len < 3.
        if len(keyword_list) > 1 and len(keyword_list) < 3:
            total += 1
    return total
1af42d71be75d9279584a8c3edc090a39ec6cf77
707,165
def is_odd(number):
    """Return True if *number* is odd, False otherwise.

    :param number: integer to test.
    """
    # Return the comparison directly instead of an if/else on a boolean.
    return number % 2 != 0
4efe5114f2e25431808492c768abc0f750e63225
707,166
def fmt_quil_str(raw_str):
    """Format a raw Quil program string.

    Args:
        raw_str (str): Quil program typed in by user.

    Returns:
        str: The Quil program with leading/trailing whitespace trimmed
        from every line.
    """
    # Strip each line and rejoin. The original wrapped the join argument
    # in a redundant identity comprehension.
    return '\n'.join(line.strip() for line in str(raw_str).split('\n'))
e95c26f3de32702d6e44dc09ebbd707da702d964
707,167
from typing import Optional from typing import Callable from typing import Literal def _not_json_encodable(message: str, failure_callback: Optional[Callable[[str], None]]) -> Literal[False]: """ Utility message to fail (return `False`) by first calling an optional failure callback. """ if failure_callback: failure_callback(message) return False
6979261a5f14a32c1ae34d01bad346344f38ed14
707,168
def bitwise_dot(x, y):
    """Dot product of two integers bit by bit: the parity (0 or 1) of the
    number of set bits in ``x & y``."""
    return bin(x & y).count("1") % 2
074b09a92e3e697eb08b8aaefa6ffd05d58698f4
707,169
def validate_mash(seq_list, metadata_reports, expected_species):
    """
    Takes a species name as a string (i.e. 'Salmonella enterica') and creates
    a dictionary with keys for each Seq ID and boolean values indicating
    whether the value pulled from MASH_ReferenceGenome matches the string.

    :param seq_list: List of OLC Seq IDs
    :param metadata_reports: Dictionary retrieved from get_combined_metadata()
    :param expected_species: String containing expected species
    :return: Dictionary with Seq IDs as keys and True/False as values
    """
    seq_status = {}
    for seqid in seq_list:
        print('Validating MASH reference genome for {} '.format(seqid))
        df = metadata_reports[seqid]
        observed_species = df.loc[df['SeqID'] == seqid]['MASH_ReferenceGenome'].values[0]
        # Assign the comparison result directly instead of an if/else
        # that sets True/False.
        seq_status[seqid] = observed_species == expected_species
    return seq_status
9eb4fd6e1f156a4fed3cc0be0c5b7153a05b038b
707,171
def redirect_to_url(url):
    """Build a bcm command dictionary instructing a redirect to *url*."""
    return dict(mode='redirect', url=url)
01e4deb80bbd8f8e119c99d64001866c6cd644d9
707,172
import math


def sqrt(number):
    """
    Calculate the floored square root of a number.

    Args:
        number(int): Number to find the floored squared root

    Returns:
        (int): Floored Square Root
    """
    assert number >= 0, 'Only square root of positive numbers are valid'
    # math.isqrt computes the floored integer square root in C, replacing
    # the hand-rolled binary search with identical results.
    return math.isqrt(number)
7ed4d547e0dbabebff7ffdf1e368817a415cbb9e
707,173
def sum_fn(xnum, ynum):
    """Return the sum of the two given numbers."""
    total = xnum + ynum
    return total
61a1ae2e4b54348b9e3839f7f2779edd03f181df
707,176
def matlabize(s):
    """Make string *s* suitable for use as a MATLAB function/script name.

    Spaces, dots and hyphens are all replaced with underscores.

    :param s: candidate name.
    :return: sanitized name.
    """
    # One C-level translate pass instead of three chained str.replace calls.
    s = s.translate(str.maketrans(' .-', '___'))
    # MATLAB limits function/script names to 63 characters.
    assert len(s) <= 63
    return s
5dccb9497a3ee28dae5fb7de6e15a1fa02f144cf
707,177
def get_spec_res(z=2.2, spec_res=2.06, pix_size=1.8):
    """
    Convert the MOCK SPECTRA pixel size and spectral resolution from
    Angstrom to km/s.

    :param z: redshift.
    :param spec_res: spectral resolution in Angstrom.
    :param pix_size: pixel size in Angstrom.
    :return: tuple (pixel_size, spec_res) in km/s.
    """
    # Angstrom -> km/s conversion factor at redshift z.
    angstrom_to_kms = 3e5 * 0.000823 / (1 + z)
    return (pix_size * angstrom_to_kms, spec_res * angstrom_to_kms)
597db8ce00c071624b0877fe211ab9b01ec889de
707,178
import platform


def get_default_command() -> str:
    """Return the command that executes the default compiler output.

    This is ``./a.out`` everywhere except Windows, where it is ``.\\a.exe``.
    The return type is deliberately `str`, not `pathlib.Path`, because
    `./a.out` and `a.out` are the same path but different commands.
    """
    return r'.\a.exe' if platform.system() == 'Windows' else './a.out'
d06abdefab189f9c69cba70d9dab25ce83bebc75
707,182
def object_type(r_name):
    """
    Derives an object type (i.e. ``user``) from a resource name
    (i.e. ``users``).

    :param r_name: Resource name, i.e. would be ``users`` for the resource
        index URL ``https://api.pagerduty.com/users``
    :returns: The object type name; usually the ``type`` property of an
        instance of the given resource.
    :rtype: str
    """
    # English pluralization: "...ies" -> "...y"; otherwise drop trailing "s".
    if r_name.endswith('ies'):
        return r_name[:-3] + 'y'
    return r_name.rstrip('s')
b74e373691edf8a8b78c2a3ff5d7b9666504330a
707,183
def convert_to_roman_numeral(number_to_convert):
    """
    Converts Hindi/Arabic (decimal) integers to Roman Numerals.

    Args:
        number_to_convert: Hindi/Arabic (decimal) integer.

    Returns:
        Roman Numeral string, or an empty string for zero.
    """
    value_symbol_pairs = (
        (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
        (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
        (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
    )
    pieces = []
    for value, symbol in value_symbol_pairs:
        # How many whole copies of this value fit in the remainder.
        count = int(number_to_convert / value)
        pieces.append(symbol * count)
        number_to_convert -= value * count
    return ''.join(pieces)
f970517a7c2d1ceb13ec025d6d446499ce5c21ff
707,184
import re


def wikify(value):
    """Convert *value* to a wikipedia-"style" URL fragment: drop characters
    that are neither word characters, whitespace nor hyphens, then collapse
    runs of whitespace/hyphens into single underscores. Case is preserved.
    """
    cleaned = re.sub(r'[^\w\s-]', '', value).strip()
    return re.sub(r'[-\s]+', '_', cleaned)
dc4504ea6eb7905b5e18a1d1f473a4f337697b26
707,192
def _tolist(arg): """ Assure that *arg* is a list, e.g. if string or None are given. Parameters ---------- arg : Argument to make list Returns ------- list list(arg) Examples -------- >>> _tolist('string') ['string'] >>> _tolist([1,2,3]) [1, 2, 3] >>> _tolist(None) [None] """ if isinstance(arg, str): return [arg] try: return list(arg) except TypeError: return [arg]
e4293991eeb6d15470511281680af44353232c37
707,193
def ConvertToFloat(line, colnam_list):
    """
    Convert the named columns of *line* to floats rounded to 3 decimals.

    :param line: a dictionary from DictReader (mutated in place).
    :param colnam_list: names of the float columns to convert.
    :return: the same dictionary, for convenience.
    """
    for column in colnam_list:
        line[column] = round(float(line[column]), 3)
    return line
e95fd6cfa9bb57060fdd835eea139fd9c67bc211
707,194
from typing import List
import json


def transform_application_assigned_users(json_app_data: str) -> List[str]:
    """
    Transform application users data for graph consumption.

    :param json_app_data: raw JSON array of user objects, each with an "id"
    :return: list of individual user ids, in input order
    """
    # List comprehension replaces the manual append loop.
    return [user["id"] for user in json.loads(json_app_data)]
625c8f662b364bb3fe63bb26b06eaca57ae8be79
707,195
def get_day_suffix(day):
    """
    Return the ordinal suffix of a day of the month (1st, 2nd, 3rd, ...).

    :param day: day of the month (1-31).
    :return: 'st', 'nd', 'rd' or 'th'.
    """
    if day in (1, 21, 31):
        return 'st'
    # BUG FIX: 12 was previously listed here, yielding "12nd"; like the
    # 11th and 13th, the 12th takes the "th" suffix.
    elif day in (2, 22):
        return 'nd'
    elif day in (3, 23):
        return 'rd'
    else:
        return 'th'
7d9277303357de5405b3f6894cda24726d60ad47
707,196
def power_law_at_2500(x, amp, slope, z):
    """
    Power law model anchored at 2500 AA.

    This model is defined for a spectral dispersion axis in Angstroem.

    :param x: Dispersion of the power law
    :type x: np.ndarray
    :param amp: Amplitude of the power law (at 2500 A)
    :type amp: float
    :param slope: Slope of the power law
    :type slope: float
    :param z: Redshift
    :type z: float
    :return: Power law model
    :rtype: np.ndarray
    """
    # Observed-frame anchor wavelength.
    pivot = 2500. * (z + 1.)
    return amp * (x / pivot) ** slope
508227f332f652d00c785074c20f9acefbce9258
707,202
def extract_vuln_id(input_string):
    """Extract a vulnerability ID from a message.

    Returns the last whitespace token when the message mentions 'fp'
    (case-insensitive); None otherwise.
    """
    if 'fp' not in input_string.lower():
        return None
    # The vulnerability id is assumed to be the final word of the message.
    return input_string.split()[-1]
06673f2b401472185c8a3e6fc373d39c171791db
707,203
from typing import Any def _element( html_element: str, html_class: str, value: Any, is_visible: bool, **kwargs, ) -> dict: """ Template to return container with information for a <td></td> or <th></th> element. """ if "display_value" not in kwargs: kwargs["display_value"] = value return { "type": html_element, "value": value, "class": html_class, "is_visible": is_visible, **kwargs, }
4ce4d2ff9f547470d4a875508c40d3ae2a927ba0
707,205
def get_gene_summary(gene):
    """Build a summary dict for a model's gene, keyed by the gene id."""
    reactions = [{rxn.id: rxn.name} for rxn in gene.reactions]
    summary = {
        "name": gene.name,
        "is_functional": gene.functional,
        "reactions": reactions,
        "annotation": gene.annotation,
        "notes": gene.notes,
    }
    return {gene.id: summary}
dd9cb3f8e9841a558898c67a16a02da1b39479d2
707,206
def tle_fmt_float(num, width=10):
    """Return a left-aligned signed float string with no leading zero to the
    left of the decimal point (TLE style), e.g. 0.5 -> " .50000000".

    :param num: value to format.
    :param width: total field width used to derive the precision.
    :return: formatted string.
    """
    digits = (width - 2)
    ret = "{:<.{DIGITS}f}".format(num, DIGITS=digits)
    if ret.startswith("0."):
        return " " + ret[1:]
    if ret.startswith("-0."):
        return "-" + ret[2:]
    # BUG FIX: values with |num| >= 1 previously fell through and returned
    # None implicitly; return the formatted string unchanged.
    return ret
686cb4061e5cf2ad620b85b0e66b96a8cd1c3abf
707,207
def is_vulgar(words, sentence):
    """Return 1 if *sentence* contains any entry of *words* as a substring,
    else 0.

    :param words: iterable of bad-word strings.
    :param sentence: text to scan (substring match, not whole-word match).
    """
    # any() short-circuits on the first hit, just like the original loop.
    return int(any(word in sentence for word in words))
f8ff64f1d29313c145ebbff8fef01961e14cfd1f
707,209
import re


def matchNoSpaces(value):
    """Return True if *value* contains no whitespace characters."""
    # Raw string fixes the invalid '\s' escape warning; returning the
    # negated search result replaces the if/else on a boolean.
    return re.search(r'\s', value) is None
6b33c6b500f78664c04ef8c507e9b25fa19c760d
707,211
def get_number(line, position):
    """Scan *line* starting at *position* and return the number found there.

    Args:
        line (str): The line in which the number was found.
        position (int): The starting position of the number.

    Returns:
        str: The digits (and dots) read.
        int: The position just after the number.
    """
    number = ""
    for offset, char in enumerate(line[position:]):
        if not (char.isdigit() or char == "."):
            # First non-number character ends the scan.
            return number, position + offset
        number += char
    # The number ran to the end of the line.
    return number, len(line)
df41a1b53953b912e5ce5d6d9b3d69c4133460f1
707,213
def levelize_smooth_or_improve_candidates(to_levelize, max_levels):
    """Turn parameter in to a list per level.

    Helper function to preprocess the smooth and improve_candidates
    parameters passed to smoothed_aggregation_solver and rootnode_solver.

    Parameters
    ----------
    to_levelize : {string, tuple, list}
        Parameter to preprocess, i.e., levelize and convert to a
        level-by-level list such that entry i specifies the parameter at
        level i
    max_levels : int
        Defines the maximum number of levels considered

    Returns
    -------
    to_levelize : list
        The parameter list such that entry i specifies the parameter choice
        at level i.

    Notes
    -----
    A string or flat tuple is replicated across all levels. A list shorter
    than max_levels is padded (in place) by repeating its last entry. None
    becomes [(None, {})] per level.

    Examples
    --------
    >>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
    >>> improve_candidates = ['gauss_seidel', None]
    >>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
    ['gauss_seidel', None, None, None]
    """
    # The mutable-default form (('block_gauss_seidel', {...}), None) arrives
    # as a tuple of tuples -- convert it to a list so it is levelized below.
    if isinstance(to_levelize, tuple) and isinstance(to_levelize[0], tuple):
        to_levelize = list(to_levelize)

    if isinstance(to_levelize, (str, tuple)):
        # Same setting at every level (shared reference, as before).
        return [to_levelize for _ in range(max_levels)]
    if isinstance(to_levelize, list):
        shortfall = max_levels - len(to_levelize)
        if shortfall > 0:
            # Pad in place by repeating the last entry.
            to_levelize.extend(to_levelize[-1] for _ in range(shortfall))
        return to_levelize
    if to_levelize is None:
        # Distinct dicts per level, so later mutation stays per-level.
        return [(None, {}) for _ in range(max_levels)]
    return to_levelize
8b302b8cae04adae010607c394c2e5059aa46eeb
707,214
def get_max_num_context_features(model_config):
    """Returns maximum number of context features from a given config.

    Args:
        model_config: A model config file.

    Returns:
        An integer specifying the max number of context features if the
        model config contains context_config, None otherwise
    """
    arch_name = model_config.WhichOneof("model")
    arch_config = getattr(model_config, arch_name)
    if not hasattr(arch_config, "context_config"):
        # No context configuration present in this meta-architecture.
        return None
    return arch_config.context_config.max_num_context_features
1df5d220e30cfa5b440c0063149e2ebaf896352a
707,215
def parse_encoding_header(header):
    """
    Break up the `HTTP_ACCEPT_ENCODING` header into a dict of the form
    {'encoding-name': qvalue}.

    'identity' is always present with qvalue 1.0. Encoding tokens are
    stripped of surrounding whitespace (headers are normally sent as
    "gzip, deflate"); a missing or empty q parameter defaults to 1.
    """
    encodings = {'identity': 1.0}
    for part in header.split(","):
        if ";" in part:
            # maxsplit guards against a malformed extra ';' in the token.
            encoding, qvalue = part.split(";", 1)
            encoding = encoding.strip()
            qvalue = qvalue.split('=', 1)[1]
            encodings[encoding] = float(qvalue) if qvalue != "" else 1
        else:
            # BUG FIX: plain tokens were not stripped, so "gzip, deflate"
            # produced a ' deflate' key with a leading space.
            encodings[part.strip()] = 1
    return encodings
0d423ad51ff14589b5858681cf32a0f318e6dbfa
707,217