content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def __api_reverse_suffix(path): """Return the normalized suffix of a url without any api information so that the correct version can be added.""" if path.startswith('/api/'): components = path.split('/', 3) if len(components) >= 4 and components[2].isdigit(): return '/' + components[3] else: return path[4:] else: return path
57534f79a8162769a6a236d5b4df307021d3b573
29,019
def GenerateBlock(group): """group is a list of (name, value) pairs in which all values have the same Most Significant Byte. Prefix the list with, and insert dummy (name, value) pairs so that value % 256 = position in the list for all values. Example input: [ ("GL_FOO", 0xcc02), ("GL_BAR", 0xcc03), ("GL_BAZ", 0xcc05) ] And the result: [ ("", 0xcc00), ("", 0xcc01), ("GL_FOO", 0xcc02), ("GL_BAR", 0xcc03), ("", 0xcc004), ("GL_BAZ", 0xcc05) ] """ i = 0 block = [] for (name, value) in group: while i < value % 256: block.append(("", value)) i += 1 assert i == value % 256 block.append((name, value)) i += 1 return block
d22e6223ede2c60f16f9716aefe1fbc9ac928478
29,026
def get_fprimer_percent_aln(fprimer, percent_alignment): """ Gets the len of the fprimer and calculates minimum percent alignment based on user input of maximum alignment between 2 primers allowed. See percent alignment help for info. Args: fprimer (str): the forward primer Returns: fp_len (int): user defined total number of bases to allowed to match. """ fp_len = [] for fseq in fprimer: f_len = len(fseq) min_dimer_alignment = int(f_len*(percent_alignment/100)) fp_len.append(min_dimer_alignment) return fp_len
07316dabe0cfc12606bf6caddb8cee58a26534f1
29,028
def linearEOS(S,S0=1.0271,beta=1.0,RHO0=1000.0): """ Linear equation of state Returns density from salinity and/or temperature """ return RHO0 * ( beta * (S-S0) )
51b23beff235905f0aa107e148121860c70a5de8
29,029
from typing import Tuple def get_report_format_types() -> Tuple: """ Get all graph summary report formats supported by KGX. Returns ------- Tuple A tuple of supported file formats """ return 'yaml', 'json'
fd3626a21551734179eaeef5423267c302c31013
29,030
def dictdict_to_listdict(dictgraph): """Transforms a dict-dict graph representation into a adjacency dictionary representation (list-dict) :param dictgraph: dictionary mapping vertices to dictionary such that dictgraph[u][v] is weight of arc (u,v) :complexity: linear :returns: tuple with graph (listdict), name_to_node (dict), node_to_name (list) """ n = len(dictgraph) # vertices node_to_name = [name for name in dictgraph] # bijection indices <-> names node_to_name.sort() # to make it more readable name_to_node = {} for i in range(n): name_to_node[node_to_name[i]] = i sparse = [{} for _ in range(n)] # build sparse graph for u in dictgraph: for v in dictgraph[u]: sparse[name_to_node[u]][name_to_node[v]] = dictgraph[u][v] return sparse, name_to_node, node_to_name
4dbd8f57230ba1a94a3eb861dddb3b9f35b2a80e
29,032
import json def get_host_osd_map(cls): """ Method to get the OSDs deployed in each of the hosts Args: cls: cephadm instance object Returns: Dictionary with host names as keys and osds deployed as value list """ out, _ = cls.shell(args=["ceph", "osd", "tree", "-f", "json"]) osd_obj = json.loads(out) osd_dict = {} for obj in osd_obj["nodes"]: if obj["type"] == "host": osd_dict[obj["name"]] = obj["children"] return osd_dict
2ce441001814b1fbdd00b32255f3365ade8a5ad5
29,034
def bool_formatter(attr): """Format a boolean into a more readable format Args: attr (bool): a boolean attribute Returns: str: the string "True" or "False", depending of the boolean value of the given attribute. """ return str(attr)
b263320919a13d3f870b7474fbf1f371ef375fe2
29,037
import yaml def read_cfg(cfg_file): """ Read configurations from yaml file Args: cfg_file (.yaml): path to cfg yaml Returns: (dict): configuration in dict """ with open(cfg_file, 'r') as rf: cfg = yaml.safe_load(rf) return cfg
ee44f0f6310240ea5e5d56ebe2ce68fabeb9dcec
29,038
def intersection(bb1, bb2): """ Calculates the Intersection of two aabb's """ min_w = min(bb1[2], bb2[2]) min_h = min(bb1[3], bb2[3]) if bb1[0] < bb2[0]: leftbb, rightbb = bb1, bb2 else: leftbb, rightbb = bb2, bb1 if bb1[1] < bb2[1]: topbb, bottombb = bb1, bb2 else: topbb, bottombb = bb2, bb1 w = min(min_w, max(leftbb[0] + leftbb[2] - rightbb[0], 0)) h = min(min_h, max(topbb[1] + topbb[3] - bottombb[1], 0)) return w * h
368177cc00fcfff507198f39be3184fc2eed1855
29,040
def prds_worked_weekly_idx_rule(M): """ Index is (window, tour type, week) if more than one shift length in tour type. :param M: Model :return: Constraint index rule """ return [(i, t, w) for (i, t) in M.okTourType for w in M.WEEKS if len(M.tt_length_x[t]) > 1]
478182485e9bd199550172cf54856d5d867ad010
29,048
def ngramname(n): """Return the name of the nth n-gram""" ngrams = ['bigrams', 'trigrams'] return ngrams[n]
b8ecbb832319824ef85f49528f59791fa8ecec40
29,054
def cm2nm(E_cm): """Converts photon energy from absolute cm-1 to wavelength Parameters ---------- E_cm: float photon energy in cm-1 Returns ------- float Photon energy in nm Examples -------- >>> cm2nm(1e5) 100 """ return 1 / (E_cm * 1e-7)
ff7034356a42f01e1c876d0983e581b8c6860dd3
29,065
def render_chart(word_list): """ Renders a bar chart to the console. Each row of the chart contains the frequency of each letter in the word list. Returns: A dictionary whose keys are the letters and values are the freqency (N) of the letter. The value is a string containing the key repeated N times. For example in the string 'apple' the result would be like this: {"A": "a"}, {"E": "e"}, {"L": "l"}, {"P": "pp"} Although not shown above all keys are returned even if the frequency is zero. """ chart = {chr(n): "" for n in range(ord('A'), ord('Z') + 1)} for word in word_list: for letter in word: try: chart[letter.upper()] += letter.upper() except KeyError: continue return chart
7d004d7a3ca61151e9b58b68aa769fdb8c3ab98e
29,066
def clean_popest(table): """Cleans the population estimate data""" return table[["Code", "All ages"]].rename( columns={"Code": "geo_code", "All ages": "pop_2020"} )
ef0b7bb8d9a61709f03889833baba6e3b0ef7a00
29,067
def has_chr(s): """Returns whether the ``str`` starts with ``"chr"``.""" return s.startswith("chr")
43b6e00a3deefe9d88cb7c74413eeb94a3ec6856
29,068
def event_type(play_description): """ Returns the event type (ex: a SHOT or a GOAL...etc) given the event description :param play_description: description of play :return: event """ events = {'GOAL SCORED': 'GOAL', 'SHOT ON GOAL': 'SHOT', 'SHOT MISSED': 'MISS', 'SHOT BLOCKED': 'BLOCK', 'PENALTY': 'PENL', 'FACEOFF': 'FAC', 'HIT': 'HIT', 'TAKEAWAY': 'TAKE', 'GIVEAWAY': 'GIVE'} event = [events[e] for e in events.keys() if e in play_description] return event[0] if event else None
cefd67ae82a3e22a8f8598218887eb9c6a5ea06c
29,069
def cmr_filter_json(search_results, request_type="application/x-hdfeos"): """ Filter the CMR json response for desired data files Arguments --------- search_results: json response from CMR query Keyword arguments ----------------- request_type: data type for reducing CMR query Returns ------- producer_granule_ids: list of ICESat-2 granules granule_urls: list of ICESat-2 granule urls from NSIDC """ #-- output list of granule ids and urls producer_granule_ids = [] granule_urls = [] #-- check that there are urls for request if ('feed' not in search_results) or ('entry' not in search_results['feed']): return (producer_granule_ids,granule_urls) #-- iterate over references and get cmr location for entry in search_results['feed']['entry']: producer_granule_ids.append(entry['producer_granule_id']) for link in entry['links']: if (link['type'] == request_type): granule_urls.append(link['href']) break #-- return the list of urls and granule ids return (producer_granule_ids,granule_urls)
757953aabe2a83040f8e2e206396b80076288242
29,070
def rectAt(cen, size): """Returns a rectangle of the given `size` centered at the given location. The coordinates are inclusive of borders.""" x, y = cen[:2] w, h = size[:2] return [x-w//2, y-h//2, x-w//2+w-1, y-h//2+h-1]
98fff599be25853eeb488dac58a22ec2f7caaa66
29,074
import re def parse_show_snmp_system(raw_result): """ Parse the 'show snmp system' command raw output. :param str raw_result: vtysh raw result string. :rtype: dict :return: The parsed result of the show snmp system\ command in a dictionary of the form :: { 'System description' : 'OpenSwitchsystem 'System location' : 'Bangalore' 'System contact' : '[email protected]' } """ snmp_system_re = ( r'\s*SNMP\ssystem\sinformation\s*' r'\s*-*\s*' r'\s*System\sdescription\s\:\s*(?P<system_description>.+)' r'\s*System\slocation\s\:\s*(?P<system_location>.+)' r'\s*System\scontact\s\:\s*(?P<system_contact>.+)' ) re_result = re.match(snmp_system_re, raw_result) if re_result is None: return re_result result = re_result.groupdict() return result
3132ab88965bc04198a7d9c6deb290d4ab035280
29,076
def required_columns(row_to_add, wanted_keys): """ :param row_to_add: Contains the rows from the input file. :param wanted_keys: List of required column header names to be displayed at the end. :return: Dict of keys,values formatted data. """ required_keys = dict((k, row_to_add[k]) for k in wanted_keys if k in row_to_add) return required_keys
8643a592662939cf8b00f009c4dc3f87d1df4e6c
29,080
def _parse_line(s): """Parses a line of a requirements.txt file.""" requirement, *_ = s.split("#") return requirement.strip()
5c0c96898c288a7c358bf978a4415c17c7fb19c4
29,081
def get_cpgs_orig_methylated(df, cells_to_use): """ Get a list of cpg which where methylated to begin with using a specific set of cells :param df: The df to work on :type df: pd.DataFrame :param cells_to_use: A list of cells to use, should match the rows names of the df :return: The indexes of CpG to use """ df_orig = df.filter(items=cells_to_use, axis=0) mean_values = df_orig.mean() return mean_values
569b0dcf2befd521a5328108ec725f35fdb60c7a
29,084
import torch from typing import Tuple def calculate_gae( values: torch.Tensor, rewards: torch.Tensor, dones: torch.Tensor, next_values: torch.Tensor, gamma: float, lambd: float ) -> Tuple[torch.Tensor, torch.Tensor]: """ Calculate generalized advantage estimator Parameters ---------- values: torch.Tensor values of the states rewards: torch.Tensor rewards given by the reward function dones: torch.Tensor if this state is the end of the episode next_values: torch.Tensor values of the next states gamma: float discount factor lambd: float lambd factor Returns ------- advantages: torch.Tensor advantages gaes: torch.Tensor normalized gae """ # calculate TD errors deltas = rewards + gamma * next_values * (1 - dones) - values # initialize gae gaes = torch.empty_like(rewards) # calculate gae recursively from behind gaes[-1] = deltas[-1] for t in reversed(range(rewards.shape[0] - 1)): gaes[t] = deltas[t] + gamma * lambd * (1 - dones[t]) * gaes[t + 1] return gaes + values, (gaes - gaes.mean()) / (gaes.std() + 1e-8)
be8deb444fdc8c708deab9e638ac9a223f85aba6
29,085
def enclose_string(text): """enclose text with either double-quote or triple double-quote Parameters ---------- text (str): a text Returns ------- str: a new string with enclosed double-quote or triple double-quote """ text = str(text) fmt = '"""{}"""' if len(text.splitlines()) > 1 else '"{}"' enclosed_txt = fmt.format(text.replace('"', r'\"')) return enclosed_txt
c0ce87633da75b019f2b59fa561172e851283fa8
29,089
def summarise_app(app): """Return a string the summarises the important information about the app""" state = app["state"] if state != "Installed": state = state.upper() pin_status = "Pinned" if app["is_pinned"] else "NOT PINNED" known_status = "UNKNOWN" if app["application_version"]["is_unknown"] else "Known" return f"{state}, {pin_status}, {known_status}"
547cf213843e1aa635def247c23e6d353d1ceaaf
29,091
import math def getExpectationValue(bitScore: float, searchSequenceLength: int, foundSequenceLength: int, numberOfSequencesInDatabase: int) -> float: """ Returns the E-value of a single query in a sequence database. Parameters ---------- bitScore : float Comparison score, normalised to base 2 (bits). searchSequenceLength : int Length of the sequence that was the input of the query. foundSequenceLength : int Length of the sequence that was the result of the query. If you have several results, run this function for each of them individually. numberOfSequencesInDatabase : int Count of all sequences that could have potentially be found. For example, a search in all genes of eco would mean the count of all sequenced genes for eco -> numberOfSequencesInDatabase = 4,498. A search in all organisms, however, would mean the count of all sequenced genes in KEGG -> numberOfSequencesInDatabase = 25,632,969. Returns ------- float Statistical E-value (expectation value) for the occurence of a match of the same confidence with a totally unrelated, e.g. random, sequence. """ return numberOfSequencesInDatabase * searchSequenceLength * foundSequenceLength * math.pow(2, -bitScore)
01c8440c4be67ada93daf4ebbb519e9a550ae34a
29,095
from typing import Any def get_type_name(t: Any) -> str: """Find the name of a type passed. It will look in sequence for "__name__" and "_name" and if both fail it will take the str(t) Args: t: Any object Returns: String: string with the name of the type """ return getattr(t, '__name__', getattr(t, '_name', str(t)))
bfd81cc4cb93dc636e93c44f7cd94706724d64c0
29,096
def mi2km(mi): """ Converts to miles to kilometers. """ if mi == None: return None return mi * 1.60934
2000ad884b375c525da5d11a9b948354b170d59d
29,097
def construct_path(relation, start, end): """ Constructs a path between two actors using a dictionary of child-parent relationships. Returns a list with actor IDs. """ path = [start] while end != start: path.append(relation[start]) start = relation[start] path.reverse() return path
f1cfb7cd1544406a32c2934956020a4ab768a7e0
29,099
def strfdelta(time_delta, fmt): """ A way to convert time deltas to string formats easily. :param time_delta: timedelta object to convert :param fmt: string format to output :return: String form of timedelta """ d = {"d": time_delta.days} d["h"], rem = divmod(time_delta.seconds, 3600) d["m"], d["s"] = divmod(rem, 60) return fmt.format(**d)
e9b2711dc09e4f0b6087938e9790d11adb5908cb
29,100
def _sleep_time(iter): """Return the time-to-sleep for the n'th iteration of a retry loop. This implementation increases exponentially. :param iter: iteration number :returns: number of seconds to sleep """ if iter <= 1: return 1 return iter ** 2
6abd614bbabc872758049ea35d9ee0ebafd0f2ba
29,102
from typing import OrderedDict def tag_pairs(tags, index=False): """ Return an OrderedDict whose keys are pairs of tags in the format "tag1:tag2" and whose values are a tuple of the two tags used to construct each key, or a tuple of the indices of the two tags in the original tag list, if ``index`` is True. If ``index`` is a list, then it should be a list the same length as ``tags``, and the tuple is populated by indexing into ``index`` using the two indices of the tags in the original tag list. Arguments --------- tags : list of strings Map tags from which to construct cross-spectrum keys like "tag1:tag2". index : bool If True, make values in dictionary the indices of the map tags, rather than the tags themselves. Returns ------- pairs : OrderedDict Dictionary whose keys are pairs of tags in the format "tag1:tag2" and whose values are a tuple of the two tags used to construct the key, or their indices, if index=True. Example ------- >>> tags = ['a', 'b'] >>> tag_pairs(tags) OrderedDict([('a:a', ('a', 'a')), ('a:b', ('a', 'b')), ('b:b', ('b', 'b'))]) >>> tag_pairs(tags, index=True) OrderedDict([('a:a', (0, 0)), ('a:b', (0, 1)), ('b:b', (1, 1))]) >>> tag_pairs(tags, index=['c', 'd']) OrderedDict([('a:a', ('c', 'c')), ('a:b', ('c', 'd')), ('b:b', ('d', 'd'))]) """ pairs = OrderedDict() for it0, t0 in enumerate(tags): for it1, t1 in zip(range(it0, len(tags)), tags[it0:]): xname = "{}:{}".format(t0, t1) if isinstance(index, list): pair = (index[it0], index[it1]) elif index is True: pair = (it0, it1) else: pair = (t0, t1) pairs[xname] = pair return pairs
97ce0a2815b5542275eb5b15c6b3ff434c456a6e
29,103
def lily(the_note): """Sets Lilypond accidentals.""" s = the_note.note.lower() if the_note.accidental == '+': s += 'qs' elif the_note.accidental == '#': s += 's' elif the_note.accidental == '++': s += 'tqs' elif the_note.accidental == '-': s += 'qf' elif the_note.accidental == 'b': s += 'b' elif the_note.accidental == '--': s += 'tqf' else: pass return s
e04c14bb13d91ccec5a83de36436aec7d55a7e30
29,113
import ast def get_test_functions(filename): """ Returns a list of test functions. ie. [{'id': 'test_empty_array', 'line': 1}, ...] """ with open(filename) as f: read_data = f.read() module_ast = ast.parse(read_data) funcs = [] for stmt in module_ast.body: if isinstance(stmt, ast.ClassDef): for base in stmt.bases: if isinstance(base, ast.Attribute) and base.attr == 'TestCase' and isinstance(base.value, ast.Name) and (base.value.id == 'unittest' or base.value.id == 'asynctest'): for inner_stmt in stmt.body: if (isinstance(inner_stmt, ast.FunctionDef) or isinstance(inner_stmt, ast.AsyncFunctionDef)) and inner_stmt.name.startswith('test'): funcs.append({ 'id': inner_stmt.name, 'line': inner_stmt.lineno, }) return funcs
2936e3a1e6759837a3a543f62727fb26303cb758
29,114
def pretty_ti_txt(line): """Make given TI TXT line pretty by adding colors to it. """ if line.startswith('@'): line = '\033[0;33m' + line + '\033[0m (segment address)' elif line == 'q': line = '\033[0;35m' + line + '\033[0m (end of file)' else: line += ' (data)' return line
91fc1cfb5dba9467ca58da3154e7343d9845f44a
29,116
import networkx def draw_graph(g: networkx.Graph, fname: str): """Draw a graph using pygraphviz and return the AGraph object. Parameters ---------- g : A graph to draw. fname : The name of the file to write the graph to. Returns ------- : A graphviz graph object. """ g = g.copy() for node in g.nodes: if "\\" in g.nodes[node]["label"]: g.nodes[node]["label"] = g.nodes[node]["label"].replace("\\", "[backslash]") ag = networkx.nx_agraph.to_agraph(g) # Add some visual styles to the graph ag.node_attr["shape"] = "plaintext" ag.graph_attr["splines"] = True ag.graph_attr["rankdir"] = "TD" ag.draw(fname, prog="dot") return ag
761eeb2d44e41ecf704f4aa09eb5e30fbe665030
29,117
def sfc_sw(swup_sfc, swdn_sfc): """All-sky surface upward shortwave radiative flux.""" return swup_sfc - swdn_sfc
de51cf2b3ad410788e041117df01e19e959e3abe
29,119
def read_runlist(filepath): """Read a list of runs from a txt file Parameters ---------- filepath : `str` The input file with the list of runs. Each line should contain raft and run number, e.g., RTM-004-Dev 6106D Returns ------- outlist : `list` A list of tuples with (raft, run) """ fin = open(filepath) lin = fin.readline() outlist = [] while lin: tokens = lin.split() if len(tokens) == 2: outlist.append(tokens) lin = fin.readline() return outlist
de106013ef1cb1de32ed7120b2c5aeab2bf1aafb
29,120
import pathlib def is_pardir(pardir, subdir): """ Helper function to check if the given path is a parent of another. """ return pathlib.Path(pardir) in pathlib.Path(subdir).parents
c7b14ef578f24f2565ae2e88aff248eea3abf963
29,121
def energy(layer, data): """ Finds total E deposited in a given layer for each event. """ return data[layer].sum(axis=(1, 2))
d8f73ac1b9fb9a03fdb89420f434a03306872d8a
29,127
import six def _get_query_dict(**kwargs): """Returns query dict by kwargs. Skip None-values, but keeps False etc. >>> res = _get_query_dict(url=None, test='a', page=True, full=False) >>> res == {'test': 'a', 'page': True, 'full': False} True """ def __get_quey_dict(**kwargs): for k, v in six.iteritems(kwargs): if v is not None: yield k, v return dict(__get_quey_dict(**kwargs))
6d4ba22bb82853356765498f45bd7fda82000921
29,130
import time def time_strptime(*args, **kwargs): """Version of time.strptime that always uses the C locale. This is because date strings are used internally in the database, and should not be localized. """ return time.strptime(*args, **kwargs)
5410a4b4154471834f70818bf2a0a2356bdc25dd
29,135
def prepare_text(text): """Remove unnecessary spaces and lowercase the provided string""" return text.strip().lower()
337859656670fa9bcd7b4296318ea9a3c4b730f1
29,139
def z_minus_its_reciprocal(z: complex) -> complex: """ The function z - 1/z. I define this function explicitly because it plays an important role in the model. Furthermore, we also want to plot it and test it. Note: I do not handle the case of zero (or infinite) argument here. Args: z (complex): a nonzero complex number Returns: complex: The value of z - 1/z. """ return z - (1 / z)
46de97ee9c628faf5fe626108af801827e2dfc8f
29,145
from typing import List def split_str(data_str: str) -> List[str]: """ Split a string into a list of strings. The string will be split at ',' or '\n' with '\n' taking precedence. :param data_str: A string to be split. :return: List[str] """ if '\n' in data_str: return list(str(data_str).splitlines()) elif ',' in data_str: return list(str(data_str).split(',')) else: i = list() i.append(data_str) return i
450b301f2b696e2240f9bb8fe49def815c59478d
29,147
def tokenization(tweet): """ DESCRIPTION: Tokenizes a tweet into words INPUT: tweet: a tweet as a python string OUTPUT: list of tweet's tokens (words) """ return list(tweet.split())
60c59fdbe775ea8178b3f2c4de6657e3fedd776a
29,155
import re def _clean_multirc_inputs(dataset_name, text): """Removes HTML markup from Multi-RC task input text.""" if dataset_name == "super_glue/multirc": # Remove HTML markup. text = re.sub(r"<br>", " ", text.decode("utf-8")) text = re.sub(r"<(/)?b>", " ", text) return text
71d0999ddb9a3942e6d53473c534d55924a0aaa1
29,165
def is_complex_parsing_required(value): """ Determine if the string being parsed requires complex parsing. Currently, this is solely determined by the presence of a colon (:). Args: value (str): A string that will be parsed. Returns: bool: Flag value to indicate whether the string requires complex parsing. """ return ":" in value
b5b13eb6f8a28d69a2a069fa8228ace0e842873b
29,166
import inspect def ismethod(func): """ Is func a method? Note that this has to work as the method is defined but before the class is defined. At this stage methods look like functions. """ signature = inspect.signature(func) return signature.parameters.get('self', None) is not None
bbc971ae9ccde0c44e12e89027cd7180bfeac178
29,171
def rolling_mean(ts, window): """Calculate rolling mean of time series. Uses pandas.DataFrame.rolling() to calculate rolling mean of a given window size. If more than one column of data in ts, returns rolling mean using given window size for each column of data. Returns nans for times before first window. :param ts: Time series data as a pandas DataFrame. :param window: Window size over which to calculate mean (int). :return ts_std: DataFrame with same columns as ts but with rolling mean in place of data column. """ ts_mean = ts.rolling(window).mean() ts_mean["time"] = ts["time"] # don't want mean of time! return ts_mean
8cd2933b1a9c285666a62a5edacae8477dec1d3d
29,173
def X1X2_to_Xs(X1, X2): """Convert dimensionless spins X1, X2 to symmetric spin Xs""" return (X1+X2)/2.
9938f188d766d7895986b8796bb8eeeafe3b5b7d
29,174
def repeat(l, n): """ Repeat all items in list n times repeat([1,2,3], 2) => [1,1,2,2,3,3] http://stackoverflow.com/questions/24225072/repeating-elements-of-a-list-n-times """ return [x for x in l for i in range(n)]
0a27596da9ef804a8a5badc0a0111b56c937aa35
29,178
from typing import IO import requests def download_file(url: str) -> IO: """Download a remote file. Parameters ---------- url: string Request URL. Returns ------- io.BytesIO """ r = requests.get(url, stream=True) r.raise_for_status() return r.raw
85c1e885573bee2619a473a2d9954781d46d3f9d
29,182
def get_filename(name): """Return filename for astrocats event.""" return name.replace('/', '_') + '.json'
e12598b6ca16299fd939bd3cfb619882bb50145c
29,194
def read_hex_digit(char: str) -> int: """Read a hexadecimal character and returns its positive integer value (0-15). '0' becomes 0, '9' becomes 9 'A' becomes 10, 'F' becomes 15 'a' becomes 10, 'f' becomes 15 Returns -1 if the provided character code was not a valid hexadecimal digit. """ if "0" <= char <= "9": return ord(char) - 48 elif "A" <= char <= "F": return ord(char) - 55 elif "a" <= char <= "f": return ord(char) - 87 return -1
071445b3c0ec7a5392a7b5b0b354b4d3c59a3687
29,195
import base64 def rest_md5_to_proto(md5): """Convert the REST representation of MD5 hashes to the proto representation.""" return base64.b64decode(md5)
64889241492ea5265c50b4ef95d55e864544d904
29,197
from pathlib import Path def get_timestep(path): """ Get the timestep from a filename. """ path = Path(path) fname = path.stem tstep = fname.split('.')[-1] return int(tstep)
f5eb746e06a6411008e609333cef8feb3250077c
29,199
import itertools def largest_group(iterable, key): """ Find a group of largest elements (according to ``key``). >>> s = [-4, 3, 5, 7, 4, -7] >>> largest_group(s, abs) [7, -7] """ it1, it2 = itertools.tee(iterable) max_key = max(map(key, it1)) return [el for el in it2 if key(el) == max_key]
3b3ead0361f3c1cc94bad8e8fdf3bddd73c36bb7
29,204
import jinja2 def render_template(env: jinja2.Environment, template: str, **context): """Render the given template with an additional context being made available in it.""" return env.get_template(template).render(**context)
99c71c306ee088e70a5168b4237709c4c71e8e80
29,205
def remove_non_ascii(text: str) -> str: """ Removes non ascii characters :param text: Text to be cleaned :return: Clean text """ return ''.join(char for char in text if ord(char) < 128)
94a003856809eb740b5c85af094095acb2e07dad
29,211
def group_by_keys(dict_list, keys): """ >>> data = [ ... {'a': 1, 'b': 2}, ... {'a': 1, 'b': 3} ... ] >>> group_by_keys(data, ['a', 'b']) {(1, 2): [{'a': 1, 'b': 2}], (1, 3): [{'a': 1, 'b': 3}]} """ groups = {} for d in dict_list: value = tuple((d[k] for k in keys)) groups.setdefault(value, []) groups[value].append(d) return groups
425d223eff828e24ebdab4900c0c461868228eb4
29,213
import functools import ctypes def hash_code(text: str) -> int: """Implements Java's hashCode in Python. Ref: https://stackoverflow.com/a/8831937 """ return functools.reduce(lambda code, char: ctypes.c_int32(31 * code + ord(char)).value, list(text), 0)
eadda940e3b2d63b8ff83f74816024160334288f
29,220
def ask_yes_no(prompt="[y/n] :", default="y", valid=["y", "n"]): """Display a yes/no question and loop until a valid answer is entered Keyword Arguments: prompt {str} -- the question message (default: {'[y/n] :'}) default {str} -- the default answer if there is no answer (default: {"y"}) valid {list} -- the list of appropriate answers (default: {["y", "n"]}) Returns: str -- the answer """ answer = input(prompt) if answer in valid: return answer elif answer == "": return default else: return ask_yes_no(prompt, default, valid)
f46dcd6ed7fefcb38c4bc5307b49cf59e0c813b4
29,221
def _process_line(request: dict, cost_price_delta: int): """The function that builds out the report line by line""" asset = request["asset"] created = request["created"] try: qty = int(asset["items"][0]["quantity"]) # [0] to filter out irrelevant skus except IndexError: # to handle some older requests without items qty = 0 return (asset["id"], asset["tiers"]["customer"]["id"], qty, cost_price_delta * qty, created)
f650dd5cc7592f700e9b3aa9824b8d79679e4bbb
29,222
import socket def iptoint(ip_v4: str) -> int: """ Convert an ip address to an integer. Adopted from http://goo.gl/AnSFV :param ip_v4: IPv4 address :returns: int of IPv4 hex """ return int(socket.inet_aton(ip_v4).hex(), 16)
bcdad9f575ee4bb33bb4716f95eb8a4124824fe7
29,223
import struct def long_long_int(value): """Decode a long-long integer value :param bytes value: Value to decode :return tuple: bytes used, int :raises: ValueError """ try: return 8, struct.unpack('>q', value[0:8])[0] except TypeError: raise ValueError('Could not unpack data')
76e36535f382eb6e143b0d1f2f17102a8a9b886f
29,225
import re def regex(pattern): """ Compile a case-insensitive pattern. """ return re.compile(pattern, re.I)
b30c594fc9d4134bf464d010d085bf8eb59157fc
29,226
import ast def make_attr_call(attr1, attr2, args=None): """ flor._attr1_._attr2_(arg) """ if args is None: return ast.Call( func=ast.Attribute( value=ast.Attribute( value=ast.Name('flor', ast.Load()), attr=attr1, ctx=ast.Load() ), attr=attr2, ctx=ast.Load() ), args=[], # arg is None keywords=[] ) else: return ast.Call( func=ast.Attribute( value=ast.Attribute( value=ast.Name('flor', ast.Load()), attr=attr1, ctx=ast.Load() ), attr=attr2, ctx=ast.Load() ), args=args, # arg is not None keywords=[] )
1b2b839ab8e76730d33405782283b2fbc6e326d3
29,229
def formation_temperature(surface_temperature, gradient, depth): """ Calculates formation temperature based on a gradient. Parameters ---------- surface_temperature : float Surface temperature (deg F or deg C) gradient : float Temperature gradient (degF/ft or degC/m) depth : float Depth at which temperature is required (ft or m) Returns ------- float Returns formation temperature at a entered depth """ form_temp = surface_temperature + gradient * depth return form_temp
13b55f67810775cbdd531036bb40780a4138af0a
29,232
def fn_url_p(fn): """check if fn is a url""" url_sw = ['http://', 'https://', 'ftp://', 'ftps://'] for u in url_sw: try: if fn.startswith(u): return(True) except: return(False) return(False)
0d56366d055b985bb819e0516c63acd117f371fd
29,234
import pickle import base64 def ObjectFromBase64EncodedString(EncodedObject): """Generate Python object from a bas64 encoded and pickled object string. Arguments: str: Base64 encoded and pickled object string. Returns: object : Python object or None. """ return None if EncodedObject is None else pickle.loads(base64.b64decode(EncodedObject))
ff82b5e3a130e563a11ed7ccfa4ce80b257b4ada
29,235
def insertion_sort(A): """Sort list of comparable elements into nondecreasing order.""" for i in range(1, len(A)): value = A[i] hole = i while hole > 0 and A[hole-1] > value: A[hole] = A[hole-1] hole -= 1 A[hole] = value return A
467f22572c775472d018f234be51d6ffb8effde2
29,237
def all_awards_are_reviewed(request): """ checks if all tender awards are reviewed """ return all([award.status != "pending" for award in request.validated["tender"].awards])
325fb138db00b8696fa424b2a4c95e1378ddd667
29,241
def _knapsack(weights, capacity): """ Binary knapsack solver with identical profits of weight 1. Args: weights (list) : list of integers capacity (int) : maximum capacity Returns: (int) : maximum number of objects """ n = len(weights) # sol : [items, remaining capacity] sol = [[0] * (capacity + 1) for i in range(n)] added = [[False] * (capacity + 1) for i in range(n)] for i in range(n): for j in range(capacity + 1): if weights[i] > j: sol[i][j] = sol[i - 1][j] else: sol_add = 1 + sol[i - 1][j - weights[i]] if sol_add > sol[i - 1][j]: sol[i][j] = sol_add added[i][j] = True else: sol[i][j] = sol[i - 1][j] return sol[n - 1][capacity]
158e96376bc3e7a60bbb24111b4b26636aaa86d5
29,242
def get_layer_save_path(layer): """Get custom HoudiniLayerInfo->HoudiniSavePath from SdfLayer. Args: layer (pxr.Sdf.Layer): The Layer to retrieve the save pah data from. Returns: str or None: Path to save to when data exists. """ hou_layer_info = layer.rootPrims.get("HoudiniLayerInfo") if not hou_layer_info: return save_path = hou_layer_info.customData.get("HoudiniSavePath", None) if save_path: # Unfortunately this doesn't actually resolve the full absolute path return layer.ComputeAbsolutePath(save_path)
16e20d0bcedf9717bb60af400548308ff05b0570
29,245
from typing import Any, Iterator, List, Tuple
import itertools


def pairwise(s: List[Any]) -> Iterator[Tuple[Any, Any]]:
    """
    Iterates over neighbors in a list.

    s -> (s0,s1), (s1,s2), (s2, s3), ...

    From https://stackoverflow.com/a/5434936
    """
    first, second = itertools.tee(s)
    # Advance the second iterator by one so the pair is (s[i], s[i+1]);
    # the default stops StopIteration on an empty input.
    next(second, None)
    return zip(first, second)
58bbd84005b8caef2535cf3477f8745d511b5a0b
29,249
def _znode_to_class_and_name(znode): """ Local helper function that takes a full znode path that returns it in the service_class/service_name format expected in this module """ znode = znode.split("/") znode.pop(0) return (znode[0], znode[1])
7039b35227d967978c073a41fa044a75c4a1670d
29,258
def meta_REstring(REstr):  # generic
    """ get meta information of the RapidEye file name

    Parameters
    ----------
    REstr : string
        filename of the rapideye data

    Returns
    -------
    REtime : string
        date "+YYYY-MM-DD"
    REtile : string
        tile code "TXXXXX"
    REsat : string
        which RapedEye satellite

    Example
    -------
    >>> REstr = '568117_2012-09-10_RE2_3A_Analytic.tif'
    >>> REtime, REtile, REsat = meta_REstring(REstr)
    >>> REtime
    '+2012-09-10'
    >>> REtile
    '568117'
    >>> REsat
    'RE2'
    """
    assert type(REstr)==str, ("please provide a string")
    parts = REstr.split('_')
    # Second underscore-delimited field holds the acquisition date.
    date_field = parts[1]
    REtime = '+' + date_field[0:4] + '-' + date_field[5:7] + '-' + date_field[8:10]
    REsat = parts[2]
    REtile = parts[0]
    return REtime, REtile, REsat
d325034dd8dc7e7033387e22796b1449ba14d4dc
29,265
def rest(s):
    """Return all elements in a sequence after the first"""
    # Works for any sliceable sequence (list, tuple, str, ...).
    return s[slice(1, None)]
268d56ff3a24b3a5c9b1e4b1d1ded557afcdce8c
29,266
def get_title_from_vuln(vuln):
    """
    Return the title of a vulnerability document.

    When the 'title' key is absent, returns the "No-Title" placeholder.
    Note: a present-but-None title is returned as-is (dict.get semantics).
    """
    if 'title' in vuln:
        return vuln['title']
    return "No-Title"
3c81cd0a873015d8e3d5a96819a7608dbfd8330f
29,272
import json
from datetime import datetime


def get_user_insert(user, event):
    """
    Gets all insertion data for a single user

    Parameters
    ----------
    user: dict
        Dictionary object of a Twitter user
    event: str
        Event name of query the user was retrieved from

    Returns
    -------
    user_insert: dict
        Dictionary of values extracted and formatted for insertion into
        a PostgreSQL database
    """
    # Description hashtags (NUL bytes stripped — PostgreSQL rejects \x00).
    try:
        tags = user['entities']['description']['hashtags']
        hashtags = [t['tag'].replace('\x00', '') for t in tags]
    except KeyError:
        hashtags = None

    # Description mentions.
    # NOTE(review): read from the 'tag' key, mirroring hashtags — confirm
    # this matches the payload schema actually being ingested.
    try:
        mentions = [m['tag'] for m in user['entities']['description']['mentions']]
    except KeyError:
        mentions = None

    # Description URLs, serialized to JSON strings.
    try:
        urls = [json.dumps(u) for u in user['entities']['description']['urls']]
    except KeyError:
        urls = None

    # Profile URL (first expanded URL, if any).
    try:
        url = user['entities']['url']['urls'][0]['expanded_url']
    except (KeyError, IndexError):
        url = None

    timestamp = datetime.now()
    metrics = user['public_metrics']
    user_insert = {
        'id': user['id'],
        'event': event,
        'inserted_at': timestamp,
        'last_updated_at': timestamp,
        'created_at': user['created_at'],
        'followers_count': metrics['followers_count'],
        'following_count': metrics['following_count'],
        'tweet_count': metrics['tweet_count'],
        'url': url,
        'profile_image_url': user['profile_image_url'],
        'description_urls': urls,
        'description_hashtags': hashtags,
        'description_mentions': mentions,
        'verified': user['verified'],
    }

    # Optional free-text fields: strip NUL bytes; missing keys become None.
    for field in ['description', 'location', 'pinned_tweet_id', 'name', 'username']:
        try:
            user_insert[field] = user[field].replace('\x00', '')
        except KeyError:
            user_insert[field] = None

    return user_insert
8c9a269fb7fd349d26899f63af5f8415b7d86210
29,273
def parse_number_input(user_input):
    """Convert a string of whitespace-separated numbers to a list of ints.

    Uses str.split() with no argument, which collapses runs of spaces/tabs
    and ignores leading/trailing whitespace — the previous split(' ') raised
    ValueError on inputs like "1  2". An empty/blank string yields [].

    Raises:
        ValueError: if any token is not a valid integer.
    """
    return [int(token) for token in user_input.split()]
5e03156c79814a7916d78203749cfd82ab6b98d5
29,275
def _is_finite(constraints): """ Return ``True`` if the dictionary ``constraints`` corresponds to a finite collection of ordered multiset partitions into sets. If either ``weight`` or ``size`` is among the constraints, then the constraints represent a finite collection of ordered multiset partitions into sets. If both are absent, one needs ``alphabet`` to be present (plus a bound on length or order) in order to have a finite collection of ordered multiset partitions into sets. EXAMPLES:: sage: from sage.combinat.multiset_partition_into_sets_ordered import _is_finite sage: W = {"weight": {1:3, 2:3, 4:1}, "length": 5} sage: S = {"size": 44, "min_length": 5} sage: AO = {"alphabet": range(44), "max_order": 5} sage: all(_is_finite(constr) for constr in (W, S, AO)) True sage: AL = {"alphabet": range(44), "min_order": 5} sage: _is_finite(AL) False """ if "weight" in constraints or "size" in constraints: return True elif "alphabet" in constraints: # Assume the alphabet is finite Bounds = set(["length", "max_length", "order", "max_order"]) return Bounds.intersection(set(constraints)) != set()
5802604f8a338b8e0c7b5a99e63661637350371f
29,277
import math


def atanh(x):
    """Return the inverse hyperbolic tangent of ``x``.

    Thin wrapper around :func:`math.atanh`; like it, raises ValueError
    for |x| >= 1.
    """
    return math.atanh(x)
b721fd642ac99dd7e790db4baa73792333d7af7c
29,278
def get_strata(creel_run):
    """Given a creel_run, return a list of tuples that represent the rows in
    the Strata table - each row contains the strata label, and foreign keys
    to the corresponding creel_run, season, space, daytype, period and
    fishing mode.

    Arguments:
    - `creel_run`: An FN011 creel_run object.
    """
    creel = creel_run.creel
    fishing_modes = creel.modes.all()
    spaces = creel.spatial_strata.all()
    rows = []
    # Nesting order (season > space > daytype > period > mode) determines
    # the order of the returned rows — keep it stable.
    for ssn in creel.seasons.all():
        day_types = ssn.daytypes.all()
        for space in spaces:
            for dtp in day_types:
                for prd in dtp.periods.all():
                    for mode in fishing_modes:
                        label = "{}_{}{}_{}_{}".format(
                            ssn.ssn, dtp.dtp, prd.prd, space.space, mode.mode
                        )
                        rows.append(
                            (
                                creel_run.id,
                                label,
                                ssn.id,
                                space.id,
                                dtp.id,
                                prd.id,
                                mode.id,
                            )
                        )
    return rows
845295eb27b1951e8cc90971419583fc41231b0f
29,279
from datetime import datetime
from dateutil.tz import tzlocal
import pytz


def timestamp_to_datetime(timestamp):
    """Convert a UNIX timestamp to a local-timezone datetime string.

    The timestamp is interpreted as UTC, converted to the machine's local
    timezone, and formatted as "YYYY-MM-DD HH:MM:SS.ffffff+zzzz".
    """
    utc_naive = datetime.utcfromtimestamp(timestamp)
    local_time = pytz.utc.localize(utc_naive).astimezone(tzlocal())
    return local_time.strftime("%Y-%m-%d %H:%M:%S.%f%z")
59f410f72beced48792fb4c40796da7393285c28
29,280
import json def _pretty_json(data): """ Pretty string of JSON data :param data: JSON data :return: Pretty string :rtype: str """ return json.dumps(data, sort_keys=True, indent=2)
5bc0bed881bbacd89caa320d47b81c71e2377211
29,282
def _get_harvester_connection_msg( farmer_id: str, harvester_id: str, ip_address: str, is_connected: bool, ) -> str: """Get the connection msg for a harvester Parameters ---------- farmer_id : str id of the farmer harvester_id : str id of the harvester ip_address : str harvester ip address is_connected: bool connection status Returns ------- msg : str harvester connection message """ # send message icon = "🟢" if is_connected else "🟠" connection_status = "connected" if is_connected else "disconnected" CONNECTION_MSG: str = "{icon} Farmer {farmer_id} {status} to Harvester {harvester_id} ({ip})." msg = CONNECTION_MSG.format( icon=icon, harvester_id=harvester_id[:10], ip=ip_address, status=connection_status, farmer_id=farmer_id, ) return msg
0e32ca3c4463d802a1c719441ae9bf6dabd5cb8d
29,287
def merge_pixel_values(meta_df, l_28_28_df, rgb_28_28_df):
    """
    Merge metadata dataframe with RGB and luminance pixel-value dataframes.

    Parameters
    ----------
    meta_df : pandas.DataFrame
        Metadata dataframe.
    l_28_28_df : pandas.DataFrame
        28 x 28 luminance dataframe (columns are renamed in place with
        a '_l_28_28' suffix).
    rgb_28_28_df : pandas.DataFrame
        28 x 28 RGB dataframe (columns are renamed in place with
        a '_rgb_28_28' suffix).

    Returns
    -------
    pandas.DataFrame
        Merged dataframe: RGB columns first, then luminance, then metadata.
    """
    import pandas as pd  # local import: module-level imports are outside this block

    # Add suffix to names to ensure they are unique after merge
    l_28_28_df.columns = [str(col) + '_l_28_28' for col in l_28_28_df.columns]
    rgb_28_28_df.columns = [str(col) + '_rgb_28_28' for col in rgb_28_28_df.columns]

    # DataFrame.append was removed in pandas 2.0; pd.concat([self, other])
    # is the drop-in replacement. Concatenating the transposes appends
    # *columns* while aligning on the shared row index (transpose back
    # restores the correct axis direction).
    merged_df_l = pd.concat([l_28_28_df.T, meta_df.T], sort=False).T
    merged_df_l_rgb = pd.concat([rgb_28_28_df.T, merged_df_l.T], sort=False).T
    return merged_df_l_rgb
1a12d6e3f84a80eeaf99125e4e144c2a8cd9e08c
29,290
def get_snapshots_list(response, is_aurora):
    """
    Simplifies list of snapshots by retaining snapshot name and creation time only

    :param response: dict
        Output from describe_db_snapshots or describe_db_cluster_snapshots
    :param is_aurora: bool
        True if output if from describe_db_cluster_snapshots, False otherwise
    :return: Dict with snapshot id as key and snapshot creation time as value
    """
    if is_aurora:
        list_key, id_key = "DBClusterSnapshots", "DBClusterSnapshotIdentifier"
    else:
        list_key, id_key = "DBSnapshots", "DBSnapshotIdentifier"
    # Only fully-available snapshots are reported.
    return {
        snap[id_key]: snap["SnapshotCreateTime"]
        for snap in response[list_key]
        if snap["Status"] == "available"
    }
349657e7face0287f5b0f5f0e02b8c5067acf53f
29,293
def assign_params(keys_to_ignore=None, values_to_ignore=None, **kwargs):
    """Creates a dictionary from given kwargs without empty values.
    empty values are: None, '', [], {}, ()
    ` Examples:
        >>> assign_params(a='1', b=True, c=None, d='')
        {'a': '1', 'b': True}

        >>> since_time = 'timestamp'
        >>> assign_params(values_to_ignore=(15, ), sinceTime=since_time, b=15)
        {'sinceTime': 'timestamp'}

        >>> item_id = '1236654'
        >>> assign_params(keys_to_ignore=['rnd'], ID=item_id, rnd=15)
        {'ID': '1236654'}

    :type keys_to_ignore: ``tuple`` or ``list``
    :param keys_to_ignore: Keys to ignore if exists

    :type values_to_ignore: ``tuple`` or ``list``
    :param values_to_ignore: Values to ignore if exists

    :type kwargs: ``kwargs``
    :param kwargs: kwargs to filter

    :return: dict without empty values
    :rtype: ``dict``

    """
    ignored_values = (None, '', [], {}, ()) if values_to_ignore is None else values_to_ignore
    ignored_keys = tuple() if keys_to_ignore is None else keys_to_ignore
    result = {}
    for key, value in kwargs.items():
        # Membership uses ==, so e.g. 0 and False are NOT filtered by default.
        if value in ignored_values or key in ignored_keys:
            continue
        result[key] = value
    return result
e6bc55c91a1670d2dc2eb5ce998a55d78f342dd2
29,294
import string


def file_name_for_term(term):
    """Return a valid filename that corresponds to an arbitrary term string."""
    # Keep only letters, digits, '-' and '_' (set gives O(1) membership).
    allowed = set('-_' + string.ascii_letters + string.digits)
    underscored = term.replace(' ', '_')
    return ''.join(ch for ch in underscored if ch in allowed) + '.txt'
0a23bfca56310810ffe1d6e414eaf9237e9a9be4
29,296
from typing import Iterable, List
import re


def split(delimiters: Iterable[str], s: str, maxsplit: int = 0) -> List[str]:
    """Split the string over an iterable of delimiters.

    Based on https://stackoverflow.com/a/13184791
    """
    # Escape each delimiter so regex metacharacters are matched literally,
    # then alternate between them.
    escaped = (re.escape(delim) for delim in delimiters)
    return re.split("|".join(escaped), s, maxsplit)
42ab4216b6c24e28fb98970b85e7492612ac1a21
29,297
def remove_user_from_group(db, user_id, group_id):
    """Removes user from user group.

    Args:
        db (object): The db object
        user_id (int): User ID
        group_id (int): Group ID

    Returns:
        A boolean value indicating whether the given user was removed from
        the given group
    """
    db.execute(
        "DELETE FROM user_group_has_user WHERE user_id = ? AND user_group_id=?",
        (user_id, group_id,),
    )
    # rows_affected == 0 means the user was not a member of the group.
    return db.rows_affected != 0
94a02c44878a7fe2f4f162b943ebf4f2b0254403
29,302
def _add_reciprocal_relations(triples_df): """Add reciprocal relations to the triples Parameters ---------- triples_df : Dataframe Dataframe of triples Returns ------- triples_df : Dataframe Dataframe of triples and their reciprocals """ # create a copy of the original triples to add reciprocal relations df_reciprocal = triples_df.copy() # swap subjects and objects cols = list(df_reciprocal.columns) cols[0], cols[2] = cols[2], cols[0] df_reciprocal.columns = cols # add reciprocal relations df_reciprocal.iloc[:, 1] = df_reciprocal.iloc[:, 1] + "_reciprocal" # append to original triples triples_df = triples_df.append(df_reciprocal) return triples_df
8ca96fc2162d80041c21db8e6b81718781784ffe
29,307
import binascii


def adjust_get_sends_results(query_result):
    """Format the memo_hex field. Try and decode the memo from a utf-8
    uncoded string. Invalid utf-8 strings return an empty memo."""
    adjusted_rows = []
    for row in list(query_result):
        try:
            memo = row['memo']
            if memo is None:
                row['memo_hex'] = None
                row['memo'] = None
            else:
                row['memo_hex'] = binascii.hexlify(memo).decode('utf8')
                row['memo'] = memo.decode('utf-8')
        except UnicodeDecodeError:
            # Keep the hex form (already set above); blank the undecodable memo.
            row['memo'] = ''
        adjusted_rows.append(row)
    return adjusted_rows
7d2e6cb1b1e5781123fbfd0d953b9c22ffea1a37
29,309
def calc_ema(smoothed, new_data, N=22):
    """Compute an exponential moving average over a pandas Series.

    INPUT:
        smoothed (Series) - Last record's smoothed EMA value (a copy);
            mutated in place and also returned
        new_data (Series) - New values, prepended with last value
        N (int) - Number of time periods for smoothing

    OUTPUT:
        Exponential Moving Average as a pandas Series (the same object
        passed in as ``smoothed``)

    NOTE(review): ``new_data[n]`` and ``smoothed[n-1]`` rely on *positional*
    integer indexing while ``smoothed[index]`` assigns by *label* — this
    only works when labels are not themselves integers, and positional
    ``Series[int]`` access is deprecated in newer pandas. Confirm behavior
    against the pandas version in use.
    """
    K = 2/(N + 1) # coefficient for incoming datum
    J = 1 - K # coefficient for fading data
    # if first smoothed, use last data value as the seed EMA
    if not len(smoothed.index):
        smoothed[new_data.index.values[0]] = new_data[0]
    # Standard EMA recurrence: ema[n] = K*x[n] + (1-K)*ema[n-1],
    # starting from the second element of new_data.
    for n,index in enumerate(new_data[1:].index, start=1):
        smoothed[index] = K*new_data[n] + J*smoothed[n-1]
    return smoothed
0bc9d61d9c982c58ab176c9506553a8458a7b7fc
29,318
def find_difference(left: dict, right: dict) -> dict:
    """Accepts two dicts with list values. Check which items are present in
    left but not in right, similar to set difference.

    Args:
        left (dict): Dict with list values
        right (dict): Dict with list values

    Returns:
        dict: Dict with list values. Contains items in left but not in right
    """
    diff = {}
    for key, values in left.items():
        if key in right:
            # Shared key: keep only the values missing from right (a set).
            diff[key] = set(values) - set(right[key])
        else:
            # Key absent from right: every left value is new (kept as-is).
            diff[key] = left[key]
    # Drop keys whose difference came out empty.
    return {k: v for k, v in diff.items() if len(v) > 0}
c8f523b0d6d8352c253e3c596e551ea4ff35b4f8
29,321
def get_gcp_zones(compute, project):
    """
    Get all zones in GCP (needs compute engine)

    Args:
        compute: authorized Compute Engine API client (assumed to expose
            zones().list(project=...).execute() — confirm against caller)
        project: GCP project name or id

    Returns:
        list[str]: names of all zones in the project (empty when none)
    """
    details = compute.zones().list(project=str(project)).execute()
    # dict.has_key() was removed in Python 3 — use .get()/`in` instead.
    return [str(item['name']) for item in details.get('items', [])]
042a67f6b8a51ca4022f435adb205b5e919b0351
29,323