Columns: content (string, 39–14.9k chars) · sha1 (string, 40 chars) · id (int64, 0–710k)
def pkcs7_pad(plaintext: bytes, block_size: int = 0x10) -> bytes:
    """
    Pad a message using the byte padding algorithm described in PKCS#7.

    This padding scheme appends n bytes with value n, with n the amount of
    padding bytes. The specification describing the padding algorithm can be
    found here: https://tools.ietf.org/html/rfc2315#section-10.3
    """
    assert 0 < block_size < 0x100
    # If the plaintext is an exact multiple of block_size,
    # we need to append a whole block.
    remainder = block_size - (len(plaintext) % block_size)
    return plaintext + bytes([remainder] * remainder)
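A quick sanity check of the padding behavior, with illustrative inputs (not part of the original snippet):

# A 12-byte message gains four 0x04 bytes; an exact 16-byte block gains a whole block.
padded = pkcs7_pad(b"hello world!")
assert padded == b"hello world!\x04\x04\x04\x04"
assert len(pkcs7_pad(b"0123456789abcdef")) == 32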
fc6e910b428622ac6a9d59b46f04b6c0f7b2a783
27,406
def get_arrival_from_pick(arrivals, pick):
    """
    Return the arrival corresponding to a pick.

    :param arrivals: list of arrivals
    :type arrivals: list of either obspy.core.event.origin.Arrival
        or microquake.core.event.origin.Arrival
    :param pick: P or S pick
    :type pick: either obspy.core.event.origin.Pick
        or microquake.core.event.origin.Pick
    :return: arrival
    :rtype: obspy.core.event.origin.Arrival or
        microquake.core.event.origin.Arrival
    """
    arrival = None
    for arr in arrivals:
        if arr.pick_id == pick.resource_id:
            arrival = arr
            break
    return arrival
4b4e1df275601aa3990a22dc471541808a1bbbef
27,410
import gzip
import json


def load_json(filename):
    """Load JSON from a text file or gzip archive."""
    opener = gzip.open if filename.endswith("gz") else open
    # use a context manager so the file handle is closed after parsing
    with opener(filename, "rt") as f:
        return json.load(f)
3601d835d394c00f79cf6c5900df810b33b2f11d
27,415
import json


def load_evaluation(filename):
    """Load a single evaluation file. Adds the filename for reference."""
    data = None
    with open(filename) as fin:
        try:
            data = json.loads(fin.read())
            data['filename'] = filename
        except json.JSONDecodeError:
            print(f"Could not JSON decode {filename}")
    return data
472392989f96185d5071d51f10d7bcaa234cdf1e
27,417
def parse_program(line):
    """Parse line to a tuple with name, weight and list of programs above."""
    program, *above = line.split(' -> ')
    name, weight = program.split()
    return name, int(weight[1:-1]), above[0].split(', ') if above else []
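Illustrative calls, assuming input lines in the "name (weight) -> children" format the parser expects:

assert parse_program("fwft (72) -> ktlj, cntj, xhth") == ("fwft", 72, ["ktlj", "cntj", "xhth"])
assert parse_program("qoyq (66)") == ("qoyq", 66, [])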
0ebcec6f32602d0bfcbfe857d49bcd46a20f528a
27,422
import click


def networkx_to_dict(ngraph, verbose=True):
    """Turn the networkx data into a dict structure that can be consumed by the dataviz

    Note
    --------
    The network data is extracted as follows

    > xx = G.nodes(data=True)
    > for x in xx: print(x)
    ('respiratory tract infections', {'frequency': 145, 'freq_normalized': 92.77215189873418, 'score_avg': 0.63237, 'score_bucket': 2, 'size': 10})
    ('acute lung injury', {'frequency': 9, 'freq_normalized': 6.69620253164557, 'score_avg': 0.62226, 'score_bucket': 2, 'size': 10})
    .....
    > yy = G.edges(data=True)
    > for y in yy: print(y)
    ('respiratory tract infections', 'MERS-CoV infection', {'weight': 2})
    ('respiratory tract infections', 'lower respiratory tract infections', {'weight': 53})
    ....

    Parameters
    ----------
    ngraph : networkx.Graph
        DSL data turned into a graph

    Returns
    ----------
    tuple
        A tuple containing two lists (nodes, edges) ready to be turned into
        visjs JSON data sources. For example

        nodes = [
            {'id': 1, 'label': 'Knowledge Graphs'},
            {'id': 2, 'label': 'RDF'},
            {'id': "3 3", 'label': 'Linked Data'}
        ]
        edges = [
            {'from': 1, 'to': "3 3"},
            {'from': 1, 'to': 2},
            {'from': "3 3", 'to': 2}
        ]
    """
    if not ngraph:
        return [], []  # nodes, edges

    NODES = []
    if verbose:
        click.secho("Creating Dict for visjs dataviz..", fg="green")
        click.secho("..nodes..", dim=True)

    def safe_id(_id):
        return _id.replace(" ", "_").strip()

    # px.colors.diverging.Temps
    TEST_COLORS = ['rgb(0, 147, 146)', 'rgb(57, 177, 133)', 'rgb(156, 203, 134)',
                   'rgb(233, 226, 156)', 'rgb(238, 180, 121)', 'rgb(232, 132, 113)',
                   'rgb(207, 89, 126)']

    for x in ngraph.nodes(data=True):
        # id and label are the same; freq_normalized drives the node size [TBC]
        _id, label = safe_id(x[0]), x[0].capitalize()
        freq, freqn = x[1]['frequency'], x[1]['freq_normalized']
        score_avg, score_bucket = x[1]['score_avg'], x[1]['score_bucket']
        temp = {'id': _id, 'label': label, 'group': 1}
        temp['value'] = int(freqn)
        temp['freq'] = int(freq)
        temp.update(x[1])  # add all other features too
        # TEST COLORS hardcoded
        temp['color'] = TEST_COLORS[3 * score_bucket]
        # HTML titles
        temp['title'] = (f"<h4>Concept: {label}</h4><hr>"
                         f"Frequency Norm: {freqn}<br>Frequency: {freq}<br>"
                         f"Score avg: {score_avg}<br>Score bucket: {score_bucket}")
        # temp['title'] = json.dumps(x[1], sort_keys=True, indent=4)  # title = original JSON contents
        NODES.append(temp)

    EDGES = []
    if verbose:
        click.secho("..edges..", dim=True)

    for x in ngraph.edges(data=True):
        temp = {'from': safe_id(x[0]), 'to': safe_id(x[1])}
        temp['value'] = int(x[2]['weight'])
        temp.update(x[2])  # add all other features too
        temp['title'] = f"Strength: {x[2]['weight']}"
        EDGES.append(temp)

    if verbose:
        click.secho("Done", dim=True)

    return NODES, EDGES
678a214c86b2810c79d3223dc5832b922a9fbfc4
27,423
def entry_check(entry):
    """
    Check if entry is a dict with an 'entry' key; anything else is wrapped
    as {'entry': entry}.

    :param entry
    :return: entry_dict
    """
    if isinstance(entry, dict) and 'entry' in entry:
        return entry
    # not a dict keyed by 'entry', so wrap it
    return {'entry': entry}
41e788a02edbf56938878fda045ebf65b7bb5df7
27,425
import re


def check_state_keys(state, keys_regex):
    """Check if keys exist in state using full python paths."""
    regex = re.compile(keys_regex)
    for key in state:
        if regex.search(key):
            return True
    return False
423bbd9a01c7240d0c73f1f7370e38164d3ae63f
27,427
def convert_fasta_to_string(filename):
    """Takes a genome FASTA and outputs a string of that genome.

    Args:
        filename: fasta file

    Returns:
        string of the genome sequence
    """
    assert filename.split('.')[-1] == 'fasta'  # assert correct file type
    with open(filename) as f:
        # split by lines, drop the header line, join the rest
        sequence = ''.join(f.read().split('\n')[1:]).lower()
    return sequence
4bb2a8228a08b2debfb5d8fab9cf08c05390f24f
27,431
def calculate_gc(read):
    """Returns the GC content of a read as a percentage."""
    return (read.lower().count("g") + read.lower().count("c")) / len(read) * 100.0
aeae6346abbfb634ccd871e9ab191a404916961f
27,436
def identity(x):
    """Identity function, useful as a semantic action for rules."""
    return x
359ef0c72e231f9b815e928a3f908d3cf5772f81
27,437
def pascal_triangle(n):
    """
    Returns a list of lists of integers representing Pascal's triangle of n.
    """
    if n <= 0:
        return []
    result = [[1]]
    while len(result) != n:
        tmp = [1]
        for i in range(len(result[-1]) - 1):
            tmp.append(result[-1][i] + result[-1][i + 1])
        tmp.append(1)
        result.append(tmp)
    return result
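An illustrative check of the first five rows:

assert pascal_triangle(5) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]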
1175816ec7b7e657543decf4be4478424bfabb79
27,443
def dsr_bc(D_eq):
    """Beard and Chuang drop shape relationship function.

    Arguments:
        D_eq: Drop volume-equivalent diameter (mm)

    Returns:
        r: The vertical-to-horizontal drop axis ratio.

    Note: the Scatterer class expects horizontal to vertical,
    so you should pass 1/dsr_bc
    """
    return 1.0048 + 5.7e-04 * D_eq - 2.628e-02 * D_eq**2 + \
        3.682e-03 * D_eq**3 - 1.677e-04 * D_eq**4
1687a0f762fa4b846563c0c327f0a93296c2a8f8
27,444
def snap(point, shape, snapRange=20):
    """Snap 'point' if within 'snapRange' from the border defined by 'shape'."""
    snapped = list(point)
    if snapped[0] < snapRange:
        snapped[0] = 0
    if snapped[0] > shape[1] - snapRange:
        snapped[0] = shape[1]
    if snapped[1] < snapRange:
        snapped[1] = 0
    if snapped[1] > shape[0] - snapRange:
        snapped[1] = shape[0]
    return tuple(snapped)
42c0b6e124944f327489767179fc4fc422cbaedc
27,451
def make_front_matter(title, weight):
    """Makes the front matter needed for Hugo."""
    return f"""---
title: {title}
linkTitle: {title}
weight: {weight+1}
type: docs
---
"""
8d51b39fae382a579fdbeedcc28fd9955368fe01
27,452
def collide_parent_tree(widget, x, y):
    """Returns whether (x, y) collide with the widget and all its parents."""
    if not widget.collide_point(x, y):
        return False

    parent = widget.parent
    while parent and hasattr(parent, 'to_parent'):
        x, y = parent.to_parent(x, y)  # transform into the parent's coordinates
        if not parent.collide_point(x, y):
            return False
        parent = parent.parent
    return True
5019692635194b9313811ed6a8caffe891965bb8
27,453
def get_median(distribution):
    """
    The median lies exactly midway in your distribution, when arranged in order.

    Parameter: a list containing the distribution of the sample or population
        (note: the list is sorted in place)
    Returns: the median of the distribution
    """
    n = len(distribution)  # distribution size
    # for the median, first sort the list
    distribution.sort()
    # next, compute the median based on `n`
    mid = int(n / 2)
    if not n % 2:
        median = (distribution[mid] + distribution[mid - 1]) / 2
    else:
        median = distribution[mid]
    return median
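Illustrative checks for odd- and even-length inputs:

assert get_median([5, 1, 3]) == 3
assert get_median([4, 1, 3, 2]) == 2.5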
06c8f3f5735d80a406e9db72aca3db73812f92aa
27,456
def get_src_path_root(src_path: str) -> str:
    """Returns the root directory of a path (represented as a string)."""
    if "\\" in src_path:
        return src_path.split("\\", 1)[0]
    elif "/" in src_path:
        return src_path.split("/", 1)[0]
    return src_path
8df673721aa505f1647871b8df25ccabd0402fd9
27,460
def clean_nan(data, data_column):
    """Takes a pandas dataframe of data and removes all rows containing NaN."""
    data.dropna(subset=[data_column, 'Quality'], inplace=True)
    return data
3b41fad6b60951f57dd309f5bc5920ca467652f1
27,464
import torch


def calc_pairwise_distance(X, Y):
    """
    Computes the pairwise distance between each element.

    Args:
        X: [N,D]
        Y: [M,D]

    Returns:
        dist: [N,M] matrix of euclidean distances
    """
    rx = X.pow(2).sum(dim=1).reshape((-1, 1))
    ry = Y.pow(2).sum(dim=1).reshape((-1, 1))
    dist = rx - 2.0 * X.matmul(Y.t()) + ry.t()
    # clamp to zero so floating point error cannot produce sqrt of a negative
    return torch.sqrt(dist.clamp(min=0))
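A small sanity check with assumed float tensors (a 3-4-5 triangle):

X = torch.tensor([[0.0, 0.0], [3.0, 4.0]])
Y = torch.tensor([[0.0, 0.0]])
print(calc_pairwise_distance(X, Y))  # tensor([[0.], [5.]])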
3a8c502163ea2788cd4fc79d44d6f2d2f04d24d8
27,465
import time


def createDateTimeString(now=None):
    """Return datetime as string (e.g. for saving results)."""
    if now is None:
        now = time.localtime()
    return str(now.tm_year) + str(now.tm_mon).zfill(2) + \
        str(now.tm_mday).zfill(2) + '-' + \
        str(now.tm_hour).zfill(2) + '.' + \
        str(now.tm_min).zfill(2)
340d4c2e044622d3602b454bf601fcf1bd12ae12
27,471
def point2str(pnt):
    """
    point2str(pnt) formats a 3d data point (list of 3 floating values) for
    output to a .scad file. Also used to do equality comparison between
    data points.

    @param pnt - list containing the x,y,z data point coordinates
    @returns '[{x}, {y}, {z}]' with coordinate values formatted by specifications
    """
    # IDEA use a command line parameter to set the precision
    # IDEA have call time precision, so internal use (for point comparison) can be higher
    return ''.join(['[', ', '.join(['%.9g' % c for c in pnt]), ']'])
8d47a8d3c29be082ec3a45250f845f1e00633fab
27,477
import hashlib


def sha384(s):
    """Compute the SHA384 of a given string."""
    return hashlib.sha384(s.encode("utf-8")).hexdigest()
e62256c30c781f85f6ed56a1086747b83637229f
27,481
def compute_center(detections):
    """Compute the center for each detection.

    Args:
        detections: A matrix of shape N x 4, recording the pixel coordinate
            of each detection bounding box (inclusive).

    Returns:
        center: A matrix of shape N x 2, representing the (x, y) coordinate
            for each input bounding box center.
    """
    center = (detections[:, [0, 1]] + detections[:, [2, 3]]) / 2.
    return center
794b64de7ef7b1327bb0bb90982ac8d67c0a0fea
27,484
def frame_as_object(frame):
    """
    Return a pandas DataFrame as NumPy `dtype` ``object``.

    Useful for casting from a categorical frame.
    """
    return frame.apply(lambda x: x.astype('object'))
0d165e571e73ab320a25245ce9ab2e76ca1f8f97
27,489
def index(m, val):
    """Return the indices of all ``val`` in m."""
    return [i for i, j in enumerate(m) if j == val]
703d1eab466a622325648935f88bac0f72f62e7a
27,493
def parse_packet3(filp):
    """Parse PKT3 commands from the given header file."""
    packets = []
    for line in filp:
        if not line.startswith('#define '):
            continue
        line = line[8:].strip()
        if line.startswith('PKT3_') and line.find('0x') != -1 and line.find('(') == -1:
            packets.append(line.split()[0])
    return packets
c2b7a1cbe94f06c8a9b822bdeabc12a2e4a10518
27,494
def get_hidden_fields(form):
    """Returns all the hidden fields of the form."""
    return form.hidden_fields()
b7d0315a232ab0199e9b575a954664e478151ab2
27,501
def lines_into_traces(lines):
    """Convert a list of split ASCII text lines into traces (a list of
    lists of floats)."""
    num_of_traces = len(lines[0])  # work out how many traces from the number of columns

    # make an empty list per trace
    traces = [[] for _ in range(num_of_traces)]

    # transpose lines into traces made from columns
    for line in lines:
        for i in range(num_of_traces):
            try:
                traces[i].append(float(line[i]))
            except (ValueError, IndexError):
                # element is empty or not a number, so skip it
                continue
    return traces
a14c1926b529b4bb198a12da7a6f7fab05373b8d
27,502
def extract_yyyy_mm_dd_hh_mm_ss_from_datetime64(dt64):
    """
    Extract separate fields for year, month, day, hour, min, sec from
    a datetime64 object.

    Parameters
    ----------
    dt64 : numpy.datetime64
        a datetime64 object

    Returns
    -------
    year, mon, day, hh, mm, ss : int
    """
    s = str(dt64)
    year = int(s[0:4])
    mon = int(s[5:7])
    day = int(s[8:10])
    hh = int(s[11:13])
    mm = int(s[14:16])
    ss = int(s[17:19])
    return year, mon, day, hh, mm, ss
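An illustrative call (requires numpy; values assumed):

import numpy as np
dt = np.datetime64("2021-03-05T07:09:42")
print(extract_yyyy_mm_dd_hh_mm_ss_from_datetime64(dt))  # (2021, 3, 5, 7, 9, 42)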
1cfca13ae4472d99df131acb07483d82aa8040c4
27,503
import platform


def extra_link_args() -> list:
    """Platform dependent extras.

    Returns:
        list: Extra link arguments
    """
    args = []
    if platform.system() == 'Darwin':
        # Something with OS X Mojave causes libstd not to be found
        args += ['-stdlib=libc++', '-mmacosx-version-min=10.12']
    return args
86a4f6615a3bd67dd6740bc84fe1f4e7b44c8ab6
27,508
def overlapping(bins):
    """
    Given a sorted list of bins, check whether any are overlapping,
    e.g. [....{;;;]----}. Touching is OK: [....]{----}
    """
    s, e = 0, 0
    for b in bins:
        if s < b[1] and b[0] < e:
            return True
        s, e = b[0], b[1]
    return False
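Illustrative checks distinguishing overlap from mere touching:

assert overlapping([(0, 5), (4, 8)]) is True    # 4 < 5, intervals overlap
assert overlapping([(0, 5), (5, 8)]) is False   # touching endpoints are fine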
a05d299dc2e25bfdbe9e50aa9dfe509b32635cbb
27,512
def _UTMLetterDesignator(Lat):
    """This routine determines the correct UTM letter designator for the
    given latitude; returns 'Z' if latitude is outside the UTM limits
    of 84N to 80S.

    Written by Chuck Gantz- [email protected]"""
    if 84 >= Lat >= 72: return 'X'
    elif 72 > Lat >= 64: return 'W'
    elif 64 > Lat >= 56: return 'V'
    elif 56 > Lat >= 48: return 'U'
    elif 48 > Lat >= 40: return 'T'
    elif 40 > Lat >= 32: return 'S'
    elif 32 > Lat >= 24: return 'R'
    elif 24 > Lat >= 16: return 'Q'
    elif 16 > Lat >= 8: return 'P'
    elif 8 > Lat >= 0: return 'N'
    elif 0 > Lat >= -8: return 'M'
    elif -8 > Lat >= -16: return 'L'
    elif -16 > Lat >= -24: return 'K'
    elif -24 > Lat >= -32: return 'J'
    elif -32 > Lat >= -40: return 'H'
    elif -40 > Lat >= -48: return 'G'
    elif -48 > Lat >= -56: return 'F'
    elif -56 > Lat >= -64: return 'E'
    elif -64 > Lat >= -72: return 'D'
    elif -72 > Lat >= -80: return 'C'
    else: return 'Z'  # if the Latitude is outside the UTM limits
c632a1af990ffa4c26c9f14cc9bfa82bd4825891
27,516
def insert_usid_policy(database, lr_dst, rl_dst, lr_nodes, rl_nodes,
                       table=None, metric=None, l_grpc_ip=None,
                       l_grpc_port=None, l_fwd_engine=None, r_grpc_ip=None,
                       r_grpc_port=None, r_fwd_engine=None, decap_sid=None,
                       locator=None):
    """
    Insert a uSID policy into the 'usid_policies' collection of an Arango
    database.

    :param database: Database where the uSID policy must be saved.
    :type database: arango.database.StandardDatabase
    :param lr_dst: Destination (IP address or network prefix) for the
        left-to-right path.
    :type lr_dst: str
    :param rl_dst: Destination (IP address or network prefix) for the
        right-to-left path.
    :type rl_dst: str
    :param lr_nodes: List of nodes (names or uN sids) making the
        left-to-right path.
    :type lr_nodes: list
    :param rl_nodes: List of nodes (names or uN sids) making the
        right-to-left path.
    :type rl_nodes: list
    :param table: FIB table where the policy must be saved.
    :type table: int, optional
    :param metric: Metric (weight) to be used for the policy.
    :type metric: int, optional
    :param l_grpc_ip: gRPC IP address of the left node, required if the
        left node is expressed numerically in the nodes list.
    :type l_grpc_ip: str, optional
    :param l_grpc_port: gRPC port of the left node, required if the
        left node is expressed numerically in the nodes list.
    :type l_grpc_port: str, optional
    :param l_fwd_engine: Forwarding engine of the left node, required if
        the left node is expressed numerically in the nodes list.
    :type l_fwd_engine: str, optional
    :param r_grpc_ip: gRPC IP address of the right node, required if the
        right node is expressed numerically in the nodes list.
    :type r_grpc_ip: str, optional
    :param r_grpc_port: gRPC port of the right node, required if the
        right node is expressed numerically in the nodes list.
    :type r_grpc_port: str, optional
    :param r_fwd_engine: Forwarding engine of the right node, required if
        the right node is expressed numerically in the nodes list.
    :type r_fwd_engine: str, optional
    :param decap_sid: uSID used for the decap behavior (End.DT6).
    :type decap_sid: str, optional
    :param locator: Locator prefix (e.g. 'fcbb:bbbb::').
    :type locator: str, optional
    :return: True.
    :rtype: bool
    :raises arango.exceptions.DocumentInsertError: If insert fails.
    """
    # Build a dict representation of the uSID policy
    policy = {
        'lr_dst': lr_dst,
        'rl_dst': rl_dst,
        'lr_nodes': lr_nodes,
        'rl_nodes': rl_nodes,
        'table': table,
        'metric': metric,
        'l_grpc_ip': l_grpc_ip,
        'l_grpc_port': l_grpc_port,
        'l_fwd_engine': l_fwd_engine,
        'r_grpc_ip': r_grpc_ip,
        'r_grpc_port': r_grpc_port,
        'r_fwd_engine': r_fwd_engine,
        'decap_sid': decap_sid,
        'locator': locator
    }
    # Get the uSID policy collection
    # This returns an API wrapper for the "usid_policies" collection
    usid_policies = database.collection(name='usid_policies')
    # Insert the policy
    # The parameter silent is set to True to avoid returning document
    # metadata; this allows us to save resources
    return usid_policies.insert(document=policy, silent=True)
c44e20e010b3c146e4eebe729a7bb8cadbda0646
27,518
def isArgumentlessJavaOption(line):
    """
    Determine whether a given line contains a command line option that does
    not take arguments.

    Parameters
    ----------
    line : str
        A line of the build output

    Returns
    -------
    bool
        True if the line contains an option that doesn't take arguments
    """
    argumentlessOptions = ["agentlib", "agentpath", "disableassertions", "D",
                           "da", "enableassertions", "ea",
                           "enablesystemassertions", "esa",
                           "disablesystemassertions", "dsa", "javaagent",
                           "jre-restrict-search", "no-jre-restrict-search",
                           "showversion", "splash", "verbose", "version", "X"]
    for a in argumentlessOptions:
        if line.startswith("-{}".format(a)):
            return True
    return False
3e967cecba22413e25022e9e36154e2a78f7ad13
27,520
def curl(vect, coord_sys):
    """
    Returns the curl of a vector field computed wrt the base scalars
    of the given coordinate system.

    Parameters
    ==========
    vect : Vector
        The vector operand
    coord_sys : CoordSysCartesian
        The coordinate system to calculate the curl in

    Examples
    ========
    >>> R = CoordSysCartesian('R')
    >>> v1 = R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k
    >>> curl(v1, R)
    0
    >>> v2 = R.x*R.y*R.z*R.i
    >>> curl(v2, R)
    R.x*R.y*R.j + (-R.x*R.z)*R.k
    """
    return coord_sys.delop.cross(vect).doit()
18af3e538e3d7a6d9970d2bc6f5c2e72b2ebb6c7
27,525
def substract(a, b):
    """Subtracts b from a and returns the result."""
    return a - b
8c8c4ba7692671ea1c26ba8faceed0b04d5c16d7
27,526
import itertools


def _grouper(n, iterable, fillvalue=None):
    """Collects data into fixed-length chunks or blocks. This private
    function is wrapped in :func:`_show_mallet_document_topics()`.

    Args:
        n (int): Length of chunks or blocks
        iterable (object): Iterable object
        fillvalue: Value used to pad the last chunk when the iterable
            cannot be divided into evenly-sized chunks.

    Returns:
        n-sized chunks
    """
    args = [iter(iterable)] * n
    return itertools.zip_longest(*args, fillvalue=fillvalue)
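Illustrative usage, grouping seven items into chunks of three:

print(list(_grouper(3, "ABCDEFG", fillvalue="x")))
# [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]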
c8747931fa430791927f5706f5380717fd4d51f8
27,531
def one_flash(lma_df, flash_id):
    """
    Given an LMA dataframe and a flash id, return the LMA dataframe with
    only the VHF sources with the specified flash id.
    """
    return lma_df[lma_df.flash_id == flash_id]
c9acea38ac399a8030b0616c74906ad3d6c1f915
27,534
def _mpas_to_netcdf_calendar(calendar):
    """Convert from MPAS calendar to NetCDF4 calendar names."""
    if calendar == 'gregorian_noleap':
        calendar = 'noleap'
    elif calendar != 'gregorian':
        raise ValueError('Unsupported calendar {}'.format(calendar))
    return calendar
bdeaf343deeb4beb8c04654ab5104425396e98be
27,535
def aslist(item):
    """aslist wraps a single value in a list, or just returns the list."""
    return item if isinstance(item, list) else [item]
e9b3a9f189f74243d713e896dfbbd002e78abada
27,536
from typing import Any


def is_operation(obj_or_type: Any) -> bool:
    """Return whether the object represents a resource operation."""
    return getattr(obj_or_type, "_fondat_operation", None) is not None
57cd493fb99d0f0d54e6d032cf27c65f18f9c031
27,539
def edges_on_ring(ring):
    """
    Get all the edges from a ring.

    :param ring: The ring from which to obtain the edges.
    :return: A set of edges.
    """
    edges = set()
    prev = ring[-1]
    for c in ring:
        edges.add(frozenset({prev, c}))
        prev = c
    return edges
ab976c485f1424e7073f93bf429cfe4efdf65813
27,540
def negate(fn):
    """negate(fn) -> not bool(fn)"""
    def wrap(*args, **kwargs):
        return not fn(*args, **kwargs)
    return wrap
532ab3e656ae4651a59be8ed735c3c35fdb447ba
27,543
def obj_label_to_kitti_fmt(obj_label):
    """Converts an object label to kitti format
    [type, truncation, occlusion, alpha(observation angle),
     (x1, y1, x2, y2), (h, w, l), (x, y, z), ry, score]
    """
    return [
        obj_label.type, -1, -1, obj_label.alpha,
        obj_label.x1, obj_label.y1, obj_label.x2, obj_label.y2,
        obj_label.h, obj_label.w, obj_label.l,
        obj_label.t[0], obj_label.t[1], obj_label.t[2],
        obj_label.ry, obj_label.score
    ]
9f9c1545cd0c098055014f52b980115fd7469d98
27,547
import six


def fix_keys(fix_func, conf):
    """Apply fix_func on every key of a dict."""
    return {fix_func(field): value for field, value in six.iteritems(conf)}
0329661b5f2ccb4e1f260a92dc8fe36d9c945d31
27,555
def check_and_get_directory(files):
    """
    Check if all provided files have the same directory and return it.

    :param files: A list of files to check and get the directory from.
    :return: Base directory of the files.
    :raise: RuntimeError if the files do not share the same base directory.
    """
    if not files:
        raise ValueError('Files must not be empty!')

    head, *tail = files
    if not tail:
        return head.parent

    tail_parent = check_and_get_directory(tail)
    if head.parent == tail_parent:
        return head.parent

    raise RuntimeError('Files do not have the same directory: {} != {}'
                       .format(head.parent, tail_parent))
2e13d63fccaf18213c9b9108869774b15bdc801a
27,561
def subsetDf(data_input):
    """
    Take a DataFrame and, if it holds 500 or fewer unique objects, simply
    return it. If it holds more than 500 unique objects, return only the
    rows belonging to the first 500.
    """
    unique_obj = data_input.index.unique()
    unique_obj_list = list(unique_obj)

    if len(unique_obj) <= 500:
        return data_input

    first500 = unique_obj_list[0:500]
    return data_input[data_input.index.isin(first500)]
e077880b35941a3032cd799f43d979225f163bde
27,562
def create_response(status_code, status_description="", body=""):
    """Configure a response JSON object."""
    response = {"isBase64Encoded": False,
                "headers": {"Content-Type": "text/html;"}}
    if not status_description:
        description = {
            200: "200 OK",
            400: "400 Bad Request",
            401: "401 Unauthorized",
            405: "405 Method Not Allowed",
            500: "500 Internal Server Error",
        }
        status_description = description.get(status_code)
    response["statusCode"] = status_code
    response["statusDescription"] = status_description
    response["body"] = body
    return response
55732ec27a46dd0d971a1f5eccde33414de7af71
27,563
def HV(in_: list):
    """Outputs H-V: computes the difference of the first 2 elements of a list."""
    return in_[0] - in_[1]
ed58cc24297fbe1b7f6a28ed98b9ffa0dcb80050
27,569
def should_attach_entry_state(current_api_id, session):
    """Returns whether or not entry state should be attached.

    :param current_api_id: Current API selected.
    :param session: Current session data.
    :return: True/False
    """
    return (
        current_api_id == 'cpa' and
        bool(session.get('editorial_features', None))
    )
c22c592c2b65f143d0df5c0735a0c21f7347ee71
27,573
def create_flanking_regions_fasta(genome, dataframe, flanking_region_size):
    """
    Makes batch processing possible: pulls down a small region of the genome
    around which to design primers, based on the chromosome and position in
    the input file. Each Fasta record will contain:

    >Sample_Gene_chr:pos__
    Seq of flanking region

    Args:
        genome (list): genome list of tuples (header, seq).
        dataframe (pandas object): dataframe with sample info.
        flanking_region_size (int): length of sequence upstream and
            downstream of the input coordinate position to pull as sequence
            to design primers around.

    Returns:
        output (list): list of tuples with (header, seq) where seq is the
            flanking region and header is the sample ID.
    """
    output = []
    for headers, seqs in genome:
        chrm = str(headers)
        seq = str(seqs)
        for gene, sample, chrom, pos in zip(dataframe.Gene, dataframe.Sample,
                                            dataframe.Chr, dataframe.Pos):
            if str(chrom) == chrm:
                header = str(sample) + "_" + str(gene) + "_" + str(chrom) + ":" + str(pos) + "__"
                flank_seq = seq[int(pos) - int(flanking_region_size):
                                int(pos) + int(flanking_region_size)]
                output.append((header, flank_seq.upper()))
    return output
82a09f847c4533e95f7de542fc80654e19a96bf6
27,575
import random


def get_random_choice(word_dist):
    """
    Given a word distribution, pick one at random based on how common it is.

    Args:
        word_dist (FreqDist) - a frequency distribution of words
    Returns:
        string - a random word from word_dist
    """
    total_samples = word_dist.N()
    # draw a sample in [1, N] so the first bin is not over-weighted
    random_sample = random.randint(1, total_samples)
    running_total = 0
    # iterate over all the possible bins
    for word_bin in word_dist.most_common(word_dist.B()):
        # add the number of incidences of the current word to the total
        running_total += word_bin[1]
        # if the random number falls into the current bucket, return this word
        if random_sample <= running_total:
            return word_bin[0]
23f72c57fb7bbac8e896621fcd4e235c2efa9008
27,578
def sum_list(list_of_list):
    """Concatenates a list of python lists."""
    final_list = []
    for l in list_of_list:
        final_list += l
    return final_list
243cc9e88f62a2b0323335703204c4e2b416a5e1
27,581
import pickle


def pickleload(pkl_file):
    """Load objects from a file with pickle."""
    with open(pkl_file, 'rb') as f:
        data = pickle.load(f)
    return data
4e4e8e97840fd3f1ab4933132c88b38a9618f8dd
27,582
def decode_to_string(obj):
    """Convert any type to string."""
    if isinstance(obj, bytes):
        return obj.decode()
    return str(obj)
a55f9e0246a5eb3a84e7363f478962e134eb386a
27,584
def create_url_with_query_parameters(base_url: str, params: dict) -> str:
    """Creates a url for the given base address with the given parameters
    as a query string."""
    parameter_string = "&".join(f"{key}={value}"
                                for key, value in params.items())
    url = f"{base_url}?{parameter_string}"
    return url
e148540316cc38e344228ed82d7e5f9769de5fe6
27,586
def get_patched_get_url(patched_urlopen, testcase=None):
    """Get the URL of the GET request made to the patched urlopen() function.

    Expects that the patched function should have been called a single time
    with the url as the only positional argument and no keyword arguments.

    :param patched_urlopen: value returned when entering the context manager
        created by patch_urlopen.
    :type patched_urlopen: unittest.mock.Mock
    :param testcase: Test case currently being run, which is used to make
        asserts
    :type testcase: unittest.TestCase
    """
    args, kwargs = patched_urlopen.call_args
    if testcase is not None:
        testcase.assertEqual(patched_urlopen.call_count, 1)
        testcase.assertEqual(len(args), 1)
        testcase.assertEqual(len(kwargs), 0)
    return args[0]
6d03cb418214d0f5d463a9cfe3fd6d8c31c7830e
27,588
import six


def _check_input(idadf, target, features, ignore_indexer=True):
    """
    Check if the input is valid, i.e. if each column in target and features
    exists in idadf.

    Parameters
    ----------
    target: str or list of str
        A column or list of columns to be used as target
    features: str or list of str
        A column or list of columns to be used as feature
    ignore_indexer: bool, default: True
        If True, remove the indexer from the features set, as long as an
        indexer is defined in idadf
    """
    if target is not None:
        if isinstance(target, six.string_types):
            if target not in idadf.columns:
                raise ValueError("Unknown target column %s" % target)
            target = [target]
        else:
            if hasattr(target, '__iter__'):
                target = list(target)
            for x in target:
                if x not in idadf.columns:
                    raise ValueError("Unknown target column %s" % x)

    if features is not None:
        if isinstance(features, six.string_types):
            if features not in idadf.columns:
                raise ValueError("Unknown feature column %s" % features)
            features = [features]
        else:
            if hasattr(features, '__iter__'):
                features = list(features)
            for x in features:
                if x not in idadf.columns:
                    raise ValueError("Unknown feature column %s" % x)
        if target is None:
            if len(features) == 1:
                raise ValueError("Cannot compute correlation coefficients of only one" +
                                 " column (%s), need at least 2" % features[0])
    else:
        if target is not None:
            if len(target) == 1:
                features = [x for x in idadf.columns if x not in target]
            else:
                features = list(idadf.columns)
        else:
            features = list(idadf.columns)

    # Remove the indexer from the feature list:
    # it is useless and expensive to compute with a primary key
    if ignore_indexer is True:
        if idadf.indexer:
            if idadf.indexer in features:
                features.remove(idadf.indexer)

    # Catch the case where users ask for the correlation between the two same columns
    if target == features:
        if len(target) == 1:
            raise ValueError("The correlation value of two same columns is always maximal")

    if target is None:
        if features is None:
            target = list(idadf.columns)
        else:
            target = features

    return target, features
1f133839bf0c10396bdcf036db30251d71c7ff3f
27,591
from typing import Iterable


def _n_name(invars: Iterable[str]) -> str:
    """Make sure that name does not exist in invars."""
    name = "n"
    while name in invars:
        name = "n" + name
    return name
fc3b5da0e762e1b212248c403ceda66c311a60f9
27,592
def _rub_str_(bins):
    """Printout for RooUniformBinning."""
    l = bins.lowBound()
    h = bins.highBound()
    n = bins.numBoundaries() - 1
    x = bins.GetName()
    if not x:
        return "RooUniformBinning(%s,%s,%d)" % (l, h, n)
    return "RooUniformBinning(%s,%s,%d,'%s')" % (l, h, n, x)
d2ffdbf12e63dcc1f038f670a92ec1cf13da3fd7
27,593
import re


def get_NT_from_str(string):
    """
    Given a string of a snippet, return the NT of that snippet.

        in:  '[1, cython_backup] <RE>'
        out: '<RE>'

    :param string: A string defining a snippet.
    :return: The NT of that snippet.
    """
    # Get the NT portion of the string (raw string avoids invalid escapes).
    index = re.findall(r"<.+>", string)
    return index[0]
3d62f7171e5a053a5f69b8dffb406ae7df215494
27,595
from typing import Any, Dict, List, Set


def get_human_readable_headers(outputs: List[Dict]) -> List[Any]:
    """
    Retrieves all of the keys whose values are not dicts, recursively.

    Args:
        outputs (List[Dict]): Input list of dictionaries.

    Returns:
        List with all of the keys that don't have inner dictionaries.
    """
    def contains_dict(entry: Any) -> bool:
        if isinstance(entry, dict):
            return True
        elif isinstance(entry, list):
            return any(contains_dict(item) for item in entry)
        return False

    human_readable_keys: List[Set] = [{k for k, v in output.items()
                                       if not contains_dict(v)}
                                      for output in outputs]
    if not human_readable_keys:
        return []
    return list(set.intersection(*human_readable_keys))
e1236b9b99dea9dd926a268a2b9210a78e8bd971
27,599
from typing import Sequence


def _count_run(li: Sequence, lo: int, hi: int) -> int:
    """Count the length of the run beginning at lo, in the slice [lo, hi).
    lo < hi is required on entry.

    "A run" is either the longest non-decreasing sequence or the longest
    strictly decreasing sequence. Note: this function is not required by
    tim_sort(), so we keep it internal.
    """
    assert lo < hi
    lo += 1
    if lo == hi:
        return 1
    n = 2  # run count
    if li[lo] < li[lo - 1]:
        # descending run
        for lo in range(lo + 1, hi):
            if li[lo] >= li[lo - 1]:
                break
            n += 1
    else:
        # non-decreasing run
        for lo in range(lo + 1, hi):
            if li[lo] < li[lo - 1]:
                break
            n += 1
    return n
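Illustrative run counts (values assumed):

assert _count_run([1, 2, 2, 5, 0], 0, 5) == 4   # 1, 2, 2, 5 is non-decreasing
assert _count_run([5, 3, 1, 2], 0, 4) == 3      # 5, 3, 1 is strictly decreasing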
bf93346487d275152f221971ab1bd4d678cdf290
27,600
def get_ith_minibatch_ixs(i, num_data, batch_size):
    """Split data into minibatches.

    @param i: integer iteration index
    @param num_data: integer number of data points
    @param batch_size: integer number of data points in a batch
    @return batch_slice: slice object
    """
    # integer division keeps the slice bounds integral
    num_minibatches = num_data // batch_size + ((num_data % batch_size) > 0)
    i = i % num_minibatches
    start = i * batch_size
    stop = start + batch_size
    return slice(start, stop)
2583cada078a0725e2dfe40c48111696bef48f8f
27,606
import base64
import tempfile


def unpack_resource(data):
    """Convert base64 encoded data into a file handle and a temporary
    file name to access the data."""
    file_handle = tempfile.NamedTemporaryFile()
    file_handle.write(base64.b64decode(data))
    file_handle.seek(0)
    return (file_handle, file_handle.name)
372d1e48e8e67e71f3f0bbdd1e15e9cbc9369973
27,611
def has_same_letter_repeated(box, times):
    """Check if box has any letter repeated the given number of times."""
    return any(
        box.count(letter) == times
        for letter in box
    )
9b0450d3ab8f276facde656dad9f9938d9fd1c20
27,612
def flatten_(structure):
    """Combine all leaves of a nested structure into a tuple.

    The nested structure can consist of any combination of tuples, lists,
    and dicts. Dictionary keys will be discarded but values will be ordered
    by the sorting of the keys.

    Args:
        structure: Nested structure.

    Returns:
        Flat tuple.
    """
    if isinstance(structure, dict):
        result = ()
        for key in sorted(list(structure.keys())):
            result += flatten_(structure[key])
        return result
    if isinstance(structure, (tuple, list)):
        result = ()
        for element in structure:
            result += flatten_(element)
        return result
    return (structure,)
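An illustrative flattening of a mixed structure; dict values come out ordered by key:

print(flatten_({'b': [2, 3], 'a': (1,), 'c': 4}))  # (1, 2, 3, 4)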
6d085c290603116d91d400a4e974956b6adc265e
27,613
def get_groups(groups_collection):
    """Returns the groups stored in the given collection.

    :param groups_collection: Mongo collection that maintains groups
    :return: List of group documents
    """
    groups = groups_collection.find()
    return list(groups)
75ff224e383eaf2f4fd9e4d345231aa4a7ea587f
27,617
def invert_angle(angle):
    """
    Inverts the steering angle for flip_img().

    :param angle:
    :return: inverted angle
    """
    return angle * -1.0
8628d1b745b32a2b4cdf94b11a9453c87b2e6c2e
27,619
def channels(channel):
    """Return a mock of channels."""
    return [channel("level", 8), channel("on_off", 6)]
fab1959c3f8f6f3f219c7f3c0b401707e9cbb5af
27,620
def OR(logical_expression, *logical_expressions):
    """
    Returns True if any of the arguments is logically true, and False if all
    of the arguments are false. Same as `any([value1, value2, ...])`.

    >>> OR(1)
    True
    >>> OR(0)
    False
    >>> OR(1, 1)
    True
    >>> OR(0, 1)
    True
    >>> OR(0, 0)
    False
    >>> OR(0, False, 0.0, "", None)
    False
    >>> OR(0, None, 3, 0)
    True
    """
    return any((logical_expression,) + logical_expressions)
4033822021dd336283b6edeebb00fd4b19e42f82
27,633
import warnings


def slab_filter(config, dask_dict):
    """
    Filters a dask bag `dask_dict` of slabs according to rules specified
    in the config yml `config`.

    Args:
        config: dictionary of the config yaml
        dask_dict: a dictionary containing slab info

    Returns:
        boolean value (True -> retain slab, False -> reject slab)
    """
    slab_filters = config["slab_filters"]
    keep = True
    for name, val in slab_filters.items():
        if val != "None":
            if name == "filter_by_object_size":
                keep = keep and (dask_dict["slab_natoms"] <= val)
            elif name == "filter_by_max_miller_index":
                keep = keep and (dask_dict["slab_max_miller_index"] <= val)
            else:
                warnings.warn("Slab filter is not implemented: " + name)
    return keep
1c0298da792cb691964626dd6987c16be9dc2255
27,635
def get_checksum_type(checksum):
    """
    Return the checksum type (ad32 or md5).

    The given checksum can either be a standard ad32 or md5 value, or a
    dictionary with the format { checksum_type: value } as defined in the
    `FileSpec` class. In case the checksum type cannot be identified, the
    function returns 'unknown'.

    :param checksum: checksum string or dictionary.
    :return: checksum type (string).
    """
    checksum_type = 'unknown'
    if isinstance(checksum, dict):
        for key in list(checksum.keys()):  # Python 2/3
            # the dictionary is assumed to only contain one key-value pair
            checksum_type = key
            break
    elif isinstance(checksum, str):
        if len(checksum) == 8:
            checksum_type = 'ad32'
        elif len(checksum) == 32:
            checksum_type = 'md5'
    return checksum_type
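Illustrative calls covering the three input shapes (values assumed):

assert get_checksum_type({'md5': 'deadbeef'}) == 'md5'
assert get_checksum_type('0a1b2c3d') == 'ad32'          # 8 characters
assert get_checksum_type('not-a-known-length') == 'unknown'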
fd907bf7020449505e989ce8cd1f40567227ae96
27,637
def get_modes(name):
    """Extract userpatch modes."""
    has_modes = name.find(': !')
    mode_string = ''
    if has_modes > -1:
        mode_string = name[has_modes + 3:]
        name = name[:has_modes]
    modes = {
        'direct_placement': 'P' in mode_string,
        'effect_quantity': 'C' in mode_string,
        'guard_state': 'G' in mode_string,
        'fixed_positions': 'F' in mode_string
    }
    return name, modes
1c6ddcf2e3205b9517962b1be79a42d0705db670
27,640
def _validate_translation_table_relations_dict(relations):
    """
    Parameters
    ----------
    relations : `dict` of (`str`, (`None`, `str`)) items
        Relations to validate.

    Returns
    -------
    validated_relations : `None`, `dict` of (`str`, `str`) items

    Raises
    ------
    TypeError
        - If an item's structure is incorrect.
    """
    validated_relations = None

    for key, value in relations.items():
        if not isinstance(key, str):
            raise TypeError(
                f'`relation` keys can be `str`, got {key.__class__.__name__}; {key!r}.'
            )

        if value is None:
            continue

        if not isinstance(value, str):
            raise TypeError(
                f'`relation` values can be `str`, got {value.__class__.__name__}; {value!r}.'
            )

        if (not key) or (not value):
            continue

        if validated_relations is None:
            validated_relations = {}

        validated_relations[key] = value

    return validated_relations
450aae70a221e9da611dd236f5440107a70639a2
27,642
def string_from_array(arr):
    """Encode an array as a string code.

    Parameters
    ----------
    arr : (N, k) array_like
        *Numpy* array.

    Returns
    -------
    str
        String code of an array.

    Examples
    --------
    >>> string_from_array(np.array([1, 0, 0]))
    '100'
    >>> string_from_array(np.array([[1, 0], [3, 4]]))
    '1034'
    """
    return ''.join(map(str, arr.flat))
3c938656b8e078e9fb1e5f6eef00790f155cd801
27,644
def get_unit_info(units):
    """Read in a split file name and return the data variable type
    (runoff, AET, etc).

    :param units: Name of file
    :type units: list
    :return: str; Name of variable
    """
    if units[0] == "q":
        data_type = "Runoff"
    elif units[0] == "avgchflow":
        data_type = "Streamflow"
    elif units[0] == "aet":
        data_type = "Actual ET"
    elif units[0] == "pet":
        data_type = "Potential ET"
    else:
        data_type = "unknown"
    return data_type
35679ba9e0f8518c32572ee7cc46b54f07693c5c
27,653
def remove_krm_group(apitools_collection_guess, krm_group):
    """Remove krm_group prefix from krm_kind."""
    if apitools_collection_guess.lower().startswith(krm_group.lower()):
        apitools_collection_guess = apitools_collection_guess[len(krm_group):]
    return apitools_collection_guess
6f69449f06d3c7c148cf9cbf909bae25b2602707
27,656
def add_boilerplate(text: str) -> str:
    """Format text with boilerplate so readers know it's a bot.

    Args:
        text (str): Text to add boilerplate to

    Returns:
        str: Text with boilerplate
    """
    if not isinstance(text, str):
        raise TypeError("text must be a str")

    source_code_on_github = (
        "[source code on GitHub](https://github.com/vogt4nick/datascience-bot)"
    )
    greeting = "_Bleep Bloop_. "
    footer = (
        "\n\n---\n\n"
        "I am a bot created by the r/datascience moderators. "
        f"I'm open source! You can review my {source_code_on_github}."
    )
    return "".join([greeting, text, footer])
c1f53650d9a22e6976d269ec458a130bbf71eea8
27,662
def format_time(t: float) -> str:
    """
    Format a time duration in a readable format.

    :param t: The duration in seconds.
    :return: A human readable string.
    """
    hours, remainder = divmod(t, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%d:%02d:%02d' % (hours, minutes, seconds)
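An illustrative call, formatting 3 h 2 min 5 s:

assert format_time(3 * 3600 + 2 * 60 + 5) == '3:02:05'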
e31b595b37172360ce6c5ea00ecca029ddb37651
27,664
def check_if_toxin(ingredients):
    """Checks if any of the ingredients are in the list of toxic ingredients."""
    toxic_ingredients = ['sodium nitrate', 'sodium benzoate', 'sodium oxide']
    return any(item in ingredients for item in toxic_ingredients)
68996807ace863edb6466e333f6f03b3e04fce12
27,666
def strip_namespace(tag_name):
    """
    Strip all namespaces or namespace prefixes if present in an XML tag name.

    For example:
    >>> tag_name = '{http://maven.apache.org/POM/4.0.0}geronimo.osgi.export.pkg'
    >>> expected = 'geronimo.osgi.export.pkg'
    >>> assert expected == strip_namespace(tag_name)
    """
    _, brace, tail = tag_name.rpartition('}')
    # if there is no closing brace, the tag has no namespace to strip
    return tail if brace else tag_name
425b41db7a75a17122e50208b64242d1aeeea5ce
27,670
def trivial_ineq(c):
    """
    Assumes c is a ZeroComparison. Determines whether c is the trivial
    inequality 0 >= 0.
    """
    return len(c.term.args) == 0 and not c.strong
65020f1ddc5686b43e21258e34fd51cbfdd0377f
27,672
def is_container(obj):
    """
    Checks whether the object is a container or not.

    A container is considered an object which includes other objects; thus a
    string is not qualified, even though it implements the iterator protocol.

    >>> is_container("text")
    False
    >>> is_container(tuple())
    True
    """
    if isinstance(obj, str):
        return False
    return hasattr(obj, '__iter__')
a6772793a24fc95f159df100c5e3ba19dce33281
27,673
def distance1(cell1, cell2):
    """Return the Manhattan distance between cells."""
    return abs(cell1[0] - cell2[0]) + abs(cell1[1] - cell2[1])
abbe94a7304854d3f1f90d06987e20b625f834ba
27,679
def flip_layers(nparray):
    """
    Flip RGB to BGR image data (numpy ndarray).
    Also accepts rgbA/bgrA and single channel images without crashing.
    """
    if nparray is None:
        return None
    if len(nparray.shape) == 3:
        if nparray.shape[2] == 4:
            # We got xyzA, make zyxA
            return nparray[..., [2, 1, 0, 3]]
        else:
            return nparray[:, :, ::-1]
    return nparray
d9d1951b968f655093c57f1827e9be27845731bd
27,680
import time


def _make_tstamp(val):
    """Converts a ``datetime`` object into a unix timestamp in milliseconds."""
    if val:
        return int(time.mktime(val.timetuple()) * 1000)
ab59736ad811b8b1d8b40119f59ccc84f16503f2
27,681
def normalize(lst, maxval=1.):
    """
    Normalizes a list of values with a specified value.

    **Parameters**

        lst: *list*
            List of values to be normalized
        maxval: *float*, optional
            The maximum value that the list will have after normalization.

    **Returns**

        normalized list: *list*
            A list of values normalized to the specified value.
    """
    listmax = max(lst)
    for ind, val in enumerate(lst):
        lst[ind] = float(val) / float(listmax) * maxval
    return lst
9ae34b5b7a81d55de88c942806f0440040873165
27,682
import _pickle


def pck(path):
    """
    Reads a python/pickle format data file.

    :param path: the path to the input pickle file
    :return: the object stored in the pickle file
    """
    # pickle files must be opened in binary mode
    with open(path, "rb") as f:
        return _pickle.load(f)
43c3c92e44745ce34b56c8f51b0af1ae5841f8fe
27,683
def get_power_plants_by_string_in_object_name(object_name_part, db_emlab_powerplants):
    """
    Return the power plants whose object name contains the given string.

    :param object_name_part: string
    :param db_emlab_powerplants: PowerPlants as queried from EMLAB SpineDB
    :return: Dict mapping object names to parameter values
    """
    return {row['object_name']: row['parameter_value']
            for row in db_emlab_powerplants
            if row['object_class_name'] == 'PowerPlants'
            and object_name_part in row['object_name']
            and row['parameter_name'] == 'ON-STREAMNL'}
cbdc87ffa7d1e70edde20509987939eeb6f7ad85
27,687
import importlib
import inspect


def load_class(cls):
    """Loads the given class from string (unless already a class)."""
    if inspect.isclass(cls):
        return cls
    module_name, class_name = cls.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name, None)
15992df96984f0aae130d4e5abd1563aca1852e0
27,694
def mock_get_provider(*args, **kwargs):
    """A mock function for the get_provider Qiskit function to record the
    arguments which it was called with."""
    return (args, kwargs)
38989084d13bfd3dd51eb17f7aa1e760fcf7de34
27,695
import torch


def one_hot(labels: torch.Tensor, num_classes: int,
            dtype: torch.dtype = torch.float, dim: int = 1) -> torch.Tensor:
    """
    For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of
    dimensions `BN[spatial_dims]` for `num_classes` N number of classes.

    Example:
        For every value v = labels[b, 1, h, w], the value in the result at
        [b, v, h, w] will be 1 and all others 0. Note that this will include
        the background label, thus a binary mask should be treated as having
        2 classes.
    """
    if labels.dim() <= 0:
        raise AssertionError("labels should have dim of 1 or more.")

    # if `dim` is bigger, add singleton dims at the end
    if labels.ndim < dim + 1:
        shape = list(labels.shape) + [1] * (dim + 1 - len(labels.shape))
        labels = torch.reshape(labels, shape)

    sh = list(labels.shape)
    if sh[dim] != 1:
        raise AssertionError("labels should have a channel with length equal to one.")
    sh[dim] = num_classes

    o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
    labels = o.scatter_(dim=dim, index=labels.long(), value=1)
    return labels
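An illustrative encoding of three labels in {0, 1, 2} along dim=1 (input values assumed):

labels = torch.tensor([[0], [2], [1]])   # shape (3, 1)
print(one_hot(labels, num_classes=3))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])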
5eb414dcd03b46252348150cc6d2516d6564ed56
27,697
from math import gcd
from random import randrange


def fermat_primality_test(n: int, k: int = 3) -> bool:
    """
    https://en.wikipedia.org/wiki/Fermat_primality_test

    >>> assert all(fermat_primality_test(i) for i in [2, 3, 5, 7, 11])
    >>> assert not all(fermat_primality_test(i) for i in [4, 6, 8, 9, 10])
    """
    for _ in range(k):
        random_num = randrange(1, n)
        if gcd(n, random_num) != 1 or pow(random_num, n - 1, n) != 1:
            return False
    return True
75011a3003c4f32d6e1b7bf78c80536ae30e30f0
27,699
def _load_entry_point(entry_point, verify_requirements):
    """Based on the version of setuptools load an entry-point correctly.

    setuptools 11.3 deprecated `require=False` in the call to
    EntryPoint.load. To load entry points correctly after that without
    requiring all dependencies be present, the proper way is to call
    EntryPoint.resolve.

    This function will provide backwards compatibility for older versions of
    setuptools while also ensuring we do the right thing for the future.
    """
    if hasattr(entry_point, 'resolve') and hasattr(entry_point, 'require'):
        if verify_requirements:
            entry_point.require()
        plugin = entry_point.resolve()
    else:
        plugin = entry_point.load(require=verify_requirements)
    return plugin
f83e00563561a80b469d397b70b278907438318e
27,703
def get_min_value(solution, field=0):
    """Get the minimum value in a field in a solution."""
    min_val = 1e38
    for state in solution.states:
        min_temp = state.q[field, :, :].min()
        if min_temp < min_val:
            min_val = min_temp
    return min_val
719d426022b3f60520cb81f69918d113501fb21a
27,705
def bubble_sort(arr):
    """
    Exchange sort (bubble sort).

    1. Compare each element with its successor; if the former is larger
       than the latter, swap them and move on to the next pair, continuing
       up to the last unsorted index. After each pass the largest remaining
       element has bubbled to the end and never moves again, i.e.
       arr[n-i:n] is sorted and stays fixed.
    2. Repeat the pass n-1 times, after which the sort is complete.

    Average Time Complexity: O(n**2)
    Space Complexity: O(1)
    Stability: Yes
    Best Time Complexity: O(n) with an early-exit flag; this version always
        performs the full O(n**2) passes
    Worst Time Complexity: O(n**2)

    :param arr:
    :return:
    """
    for i in range(len(arr) - 1):
        for j in range(len(arr) - 1 - i):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr
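An illustrative call; the list is sorted in place and also returned:

print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]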
404439098a1549f5d2f0553320dd21a210f28ab7
27,706
def percent_change(old, new):
    r"""Computes the fractional change from old to new:

    .. math:: \frac{new - old}{|old|}

    Multiply the result by 100 to express it as a percent change.
    """
    return float(new - old) / abs(old)
cff7fc4e75f082d1d3665e1feb167c6ad601fa47
27,707
def _get_bytes_from_pem_file(fpath: str) -> bytes:
    """Returns bytes from a pem file."""
    with open(fpath, "rb") as f:
        return f.read()
aff762b165f536b6c1830313bd28f73594c7ff9a
27,708