content — stringlengths 39–14.9k
sha1 — stringlengths 40–40
id — int64 0–710k
def conv_bright_ha_to_lib(brightness) -> int:
    """Convert HA brightness scale 0-255 to library scale 0-16."""
    if brightness == 255:
        # this will end up as 16 which is max
        brightness = 256
    return int(brightness / 16)
45782f53a41605b20230c71c7e2ccf713d10c6dc
700,582
def TreeImportanceArray(reg): """Get the feature importance array if `reg` is a tree-based regressor.""" return reg.feature_importances_
59f3b0bcc9a4c60af71b62163e3e087137c64f49
700,586
def get_middle_value(my_list):
    """Return the middle value from a list after sorting.

    :param list my_list: List of sortable values"""
    return sorted(my_list)[len(my_list) // 2]


# NOTE: the def line of the following helper was lost in this record; the name
# format_resolution is assumed from its docstring, which takes `window_size`.
def format_resolution(window_size):
    """Convert an integer resolution in base-pairs to a nicely formatted string.

    :param int window_size: Integer resolution in base pairs.
    :returns: Formatted resolution."""
    return utils.format_genomic_distance(window_size, precision=0)
53e8724f1709429707766db45c34ebd0db62a686
700,588
def _params_to_ints(qs): """Convert a comma-separated string of IDs to a list of integers""" return [int(str_id) for str_id in qs.split(',')]
7568eeb6cf28f1c8e696e2fab6e7a89dcbbc07fc
700,593
def compose(f, g): """ Compose two filters f and g. :param f: Outer filter function. :type f: filter function. :param g: Inner filter function. :type g: filter function. :return: lambda x: f(g(x)) :rtype: filter function. """ def filter_fn(df): df = g(df) if len(df) > 0: return f(df) else: return df return filter_fn
d90cda63eb365219ce5f454265036c7d977da216
700,595
def stop_list_to_link_list(stop_list): """ [a, b, c, d] -> [(a,b), (b,c), (c,d)] """ return list(zip(stop_list[:-1], stop_list[1:]))
49046e60664cd9c19ab55c1254684932d937531a
700,596
def segmentPlaneIntersection(s0 = "const Dim<3>::Vector&", s1 = "const Dim<3>::Vector&", point = "const Dim<3>::Vector&", normal = "const Dim<3>::Vector&", tol = ("const double", "1.0e-8")): """Intersection of a line segment with a plane. The line segment is characterized by its endpoints: seg = (s0, s1) The plane is characterized by a point in the plane and a unit normal: plane (point, normal) Return values are a tuple<char, Vector> The Vector is the intersection point (if any) The char is a code characterizing the intersection: "p" -> The segment lies in the plane (plane) "d" -> The p points do not define a unique plane (degenerate) "1" -> The segment intersects the plane properly "0" -> The segment does not intersect the plane""" return "py::tuple"
eb68974937de575702069565fc3ee34dfadf89cc
700,599
def constant_schedule_with_warmup(epoch, warmup_epochs=0, lr_start=1e-4, lr_max=1e-3): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between {lr_start} and {lr_max}. """ if epoch < warmup_epochs: lr = (lr_max - lr_start) / warmup_epochs * epoch + lr_start else: lr = lr_max return lr
e780c3946f0207f94065c3ce8333896c3513f25a
700,604
def lower_first(string): """Return a new string with the first letter lowercased.""" if len(string) > 0: return string[0].lower() + string[1:] else: return string
fc6fba78d15633f1ab21105fbd46883797444fb1
700,609
def make_default_config(project):
    """
    Return a default configuration for exhale.

    **Parameters**
        ``project`` (str)
            The name of the project that will be searched for in
            ``testing/projects/{project}``.

    **Return**
        ``dict``
            The global default testing configuration to supply to ``confoverrides``
            with ``@pytest.mark.sphinx``, these are values that would ordinarily be
            written in a ``conf.py``.
    """
    return {
        "breathe_projects": {project: "./_doxygen/xml"},
        "breathe_default_project": project,
        "exhale_args": {
            # required arguments
            "containmentFolder": "./api",
            "rootFileName": "{0}_root.rst".format(project),
            "rootFileTitle": "``{0}`` Test Project".format(project),
            "doxygenStripFromPath": "..",
            # additional arguments
            "exhaleExecutesDoxygen": True,
            "exhaleDoxygenStdin": "INPUT = ../include"
        }
    }
7b913634f0df656a870d4886cf29391727eb4b21
700,612
def to_conll_iob(annotated_sentence): """ `annotated_sentence` = list of triplets [(w1, t1, iob1), ...] Transform a pseudo-IOB notation: O, PERSON, PERSON, O, O, LOCATION, O to proper IOB notation: O, B-PERSON, I-PERSON, O, O, B-LOCATION, O """ proper_iob_tokens = [] for idx, annotated_token in enumerate(annotated_sentence): tag, word, ner = annotated_token if ner != 'O': if idx == 0: ner = "B-" + ner elif annotated_sentence[idx - 1][2] == ner: ner = "I-" + ner else: ner = "B-" + ner proper_iob_tokens.append((tag, word, ner)) return proper_iob_tokens
92fd0904782d241c9729df8a167840e38dfde605
700,615
import unicodedata import re def normalize(text): """ Normalizes text before keyword matching. Converts to lowercase, performs KD unicode normalization and replaces multiple whitespace characters with single spaces. """ return unicodedata.normalize('NFKD', re.sub(r'\s+', ' ', text.lower()))
c03a3148d39161cfd5751a306ca842362c46fb28
700,617
def getCharsSegments(rows, bounds): """ Gets the char segments of text row images according to char bounds. rows and bounds are lists of the same matching size. Parameters ---------- rows(list) : The list of segmented text row images bounds(list) : Bounds matching char image sizes in the text rows Returns ------- list : List of segmented char images in text row images """ charsSegments = [] boundsIndex = 0 for row in rows: rowCharBounds = bounds[boundsIndex] rowCharsSegments = [] for charBound in rowCharBounds: rowCharsSegments += [ row[(charBound[0]):(charBound[1]),(charBound[2]):(charBound[3])] ] boundsIndex += 1 charsSegments += [rowCharsSegments] rowCharsSegments = [] return charsSegments
d9a14ec47e5bc384cb2df1d5e69fc41613dd9d29
700,625
import json def load_cat_to_name(json_path): """Load Label Name from JSON file.""" with open(json_path, 'r') as f: cat_to_name = json.load(f) return cat_to_name
43c30e9dbe29e29d507a873e6cc37f22bc7984c4
700,626
import typing def _consts(fn: typing.Callable) -> tuple: """ Returns a tuple of the function's constants excluding the docstring. """ return tuple(x for x in fn.__code__.co_consts if x != fn.__doc__)
bd60a35cf5243fd158d6e65df6e0a16f6fd9051b
700,628
def x_y_to_name(x, y) -> str: """ Make a name from x, y coords Args: x: x coordinate y: y coordinate Returns: name made from x and y """ return f"{x},{y}"
40ae99a789fdf029407ab0290ae58e85e911833c
700,630
def _lvl_error(level): """Get the lng/lat error for the hilbert curve with the given level On every level, the error of the hilbert curve is halved, e.g. - level 0 has lng error of +-180 (only one coding point is available: (0, 0)) - on level 1, there are 4 coding points: (-90, -45), (90, -45), (-90, 45), (90, 45) hence the lng error is +-90 Parameters: level: int Level of the used hilbert curve Returns: Tuple[float, float]: (lng-error, lat-error) for the given level """ error = 1 / (1 << level) return 180 * error, 90 * error
52f8653252de1120d34c9a0377e85c07111874d6
700,632
def atmDensPoly6th(ht, dens_co):
    """ Compute the atmosphere density using a 6th order polynomial. This is used in the ablation simulation for faster execution.

    Arguments:
        ht: [float] Height above sea level (m).
        dens_co: [list] Coeffs of the 6th order polynomial.

    Return:
        atm_dens: [float] Atmosphere neutral mass density in kg/m^3.
    """

    # Compute the density
    rho_a = 1000*(10**(dens_co[0] + dens_co[1]*(ht/1000) + dens_co[2]*(ht/1000)**2 + dens_co[3]*(ht/1000)**3
        + dens_co[4]*(ht/1000)**4 + dens_co[5]*(ht/1000)**5))

    return rho_a
653b134d513c3fd9b55e72ee37a4c7116aadf8cf
700,634
from typing import Union from typing import Tuple def get_rationed_resizing( resized: Union[int, float], length: int, other_length: int ) -> Tuple[int, int]: """ Get resized lengths for `length` and `other_length` according to the ratio between `resized` and `length`. Parameters ---------- resized : int or float Already resized length. If float, it is the ratio. length : int Non-resized length related to `resized`. other_length : int Other non-resized length to resize according to the ratio. Returns ------- resized : int First resized length according to the ratio. other_resized : int Other resized length according to the ratio. """ ratio = resized if type(resized) == float else resized / length resized = resized if type(resized) == int else round(ratio * length) other_resized = round(ratio * other_length) return resized, other_resized
dde1ce579c192178090fe07c145fd3e153d92599
700,641
import collections def reorder(fields, order, key): """ Reorders `fields` list sorting its elements in order they appear in `order` list. Elements that are not defined in `order` list keep the original order. :param fields: elements to be reordered :param order: iterable that defines a new order :param key: a function of one argument that is used to extract a comparison key from each element in `fields` :return: reordered elements list """ ordered = collections.OrderedDict() for field in fields: ordered[key(field)] = field for ord in reversed(order or ()): ordered.move_to_end(ord, last=False) return ordered.values()
48eb32841f5abec38c74a8ae88615a7c39d3087c
700,643
def extCheck( extention: str ) -> str: """ Ensures a file extention includes the leading '.' This is just used to error trap the lazy programmer who wrote it. :param extention: file extention :type extention: str :return: Properly formatted file extention :rtype: str """ if extention[ 0 ] != '.': extention = '.' + extention return extention
7055c23cd8d2fa0e74dd916aa6d06d9547d49b7f
700,644
import logging def load_data(in_file, max_example=None, relabeling=True): """ load CNN / Daily Mail data from {train | dev | test}.txt relabeling: relabel the entities by their first occurence if it is True. """ documents = [] questions = [] answers = [] num_examples = 0 f = open(in_file, 'r') while True: line = f.readline() if not line: break question = line.strip().lower() answer = f.readline().strip() document = f.readline().strip().lower() if relabeling: q_words = question.split(' ') d_words = document.split(' ') assert answer in d_words entity_dict = {} entity_id = 0 for word in d_words + q_words: if (word.startswith('@entity')) and (word not in entity_dict): entity_dict[word] = '@entity' + str(entity_id) entity_id += 1 q_words = [entity_dict[w] if w in entity_dict else w for w in q_words] d_words = [entity_dict[w] if w in entity_dict else w for w in d_words] answer = entity_dict[answer] question = ' '.join(q_words) document = ' '.join(d_words) questions.append(question) answers.append(answer) documents.append(document) num_examples += 1 f.readline() if (max_example is not None) and (num_examples >= max_example): break f.close() logging.info('#Examples: %d' % len(documents)) return (documents, questions, answers)
da81dcc56469aaccee3da36d1451ac8eaeb4a2b7
700,645
import copy def delete_items(dictionary, key_list): """ This function performs a deep copy of a dictionary, checks if the specified keys are included in the copy, and deletes key-value pairs accordingly. Parameters: dictionary (dict): a dictionary key_list (list): a list of the keys to be deleted Returns: dict_copy (dict): dictionary with key-value pairs removed """ dict_copy = copy.deepcopy(dictionary) for k in key_list: if k in dict_copy.keys(): del(dict_copy[k]) return dict_copy
45fb652661387ca5d40aea443c1d4a82f74db5bd
700,646
def backends_mapping(custom_backend, private_base_url, lifecycle_hooks): """ Create 2 separate backends: - path to Backend 1: "/echo-api" - path to Backend 2: "/httpbin" """ return {"/echo-api": custom_backend("backend_one", endpoint=private_base_url("echo_api"), hooks=lifecycle_hooks), "/httpbin": custom_backend("backend_two", endpoint=private_base_url("httpbin_go"), hooks=lifecycle_hooks)}
b98fb61afc00efc902e5bd511fefcea6727a7125
700,648
def support(shape1, shape2, direction): """Find support for the Minkowski difference in the given direction. PARAMETERS ---------- shape1, shape2: Shape The inputs for Minkowski difference. `shape1` is subtracted from `shape2`. direction: Point The direction for finding the support. RETURN ------ : Point Support for Minkowski difference in the given direction. """ return shape1.support(direction) - shape2.support(-direction)
4b9292116c9447549f36099d4a6928c4c6e74e28
700,650
from pathlib import Path def is_empty(path: Path) -> bool: """Checks whether a directory is empty Args: path: The path to the directory to check Returns: bool: True if the dir is empty, False if it contains any files """ return not any(path.iterdir())
b99045eee29922c7ef2e91cd1b8b71ab54181e1e
700,652
def partition(groups, train_part=0.8, val_part=0.1, test_part=0.1): """Splits groups into training, validation, and test partitions. Args: groups (list): list of units (e.g. dicts). train_part (float): proportion in [0, 1] of units for training. val_part (float): self-explanatory. test_part (float): self-explanatory. Returns: lists of data-chunks for each. """ assert train_part + val_part + test_part == 1. total_size = len(groups) train_part_end = int(total_size * train_part) val_part_end = train_part_end + int(total_size * val_part) train_groups = groups[:train_part_end] val_groups = groups[train_part_end:val_part_end] if test_part == 0.: val_groups += groups[val_part_end:] test_groups = [] else: test_groups = groups[val_part_end:] return train_groups, val_groups, test_groups
c2cf56e54809a7c8c3a75c8b8bdbb3764aa9b988
700,655
import csv
def get_first_last_onset(csv_file):
    """Gets the first and last onset times."""
    with open(csv_file) as f:
        full_track = list(csv.reader(f))
    first_onset = float(full_track[0][0])
    if first_onset < 0:
        first_onset = abs(first_onset)  # we only store the first onset if it is
        # negative (i.e., starts in an upbeat)
    else:
        first_onset = 0
    last_onset = float(full_track[-1][0])
    return first_onset, last_onset
d567530abf15fa5e256f7f826a813ad5982e3b0e
700,656
def __check_if_alive(processes): """ Quickly check if at least one of the list of processes is alive. Returns True if at least one process is still running. """ c = set([x.exitcode for x in processes]) return None in c
22458fa4b2ca07fe8c1c21a60eac87d6546f8abd
700,657
import glob def test_data_list() -> list: """Return the list of paths to the test input data files Parameters: None Returns: list: The list of paths to the test input data files """ return glob.glob("test_data/*.rws.gz")
da3362910d0727fb21f433b34a77ad437ea0ccb1
700,658
def route_vm_logic(logicFn): """ Decorates a function to indicate the viewmodel logic that should be executed after security checks and business logic passes. :param logicFn: The viewmodel logic function to assign. :return: The decorated function. """ def decorator(fn): fn.route_vm_logic = logicFn return fn return decorator
6dcbd274bd35b34f9faffb455d121940821cfa04
700,659
def draw_turn(row, column, input_list, user): """ Draw the game board after the user types a choice. Arguments: row -- the row index. column -- the column index. input_list -- a two dimensional list for the game board. user -- the user who typed the choice Returns: input_list -- a two dimensional list for the game board after the change. """ mark_dict = {'player1':'X', 'player2':'O'} input_list[row-1][column-1] = mark_dict[user] return input_list
6f44e770a2fa04b5992ffd21cad5a799c3423de5
700,660
import socket def is_open_port(port): """ Check if a port is open (listening) or not on localhost. It returns true if the port is actually listening, false otherwise. :param port: The port to check. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = sock.connect_ex(('127.0.0.1',port)) return result == 0
4eb8f52744cc7f330dd101b613d5db4ab8d0d0fc
700,661
def median(data_list):
    """
    Finds the median in a list of numbers.
    :type data_list list
    """
    data_list = list(map(float, data_list))
    length = len(data_list)
    data_list.sort()
    # Test whether the length is odd
    if length & 1:
        # If it is, get the index simply by dividing it in half
        index = int(length / 2)
        return data_list[index]
    # If the length is even, average the two values at the center
    low_index = int(length / 2) - 1
    high_index = int(length / 2)
    average = (data_list[low_index] + data_list[high_index]) / 2
    return average
c41a1336d17e5e991101bd91c9c56cb59624dc82
700,664
import json def body_part_to_headers_and_data(part): """ convert part (of multi-part body) to headers dict and content. de-serializes json if content-type is application/json. :param part: BodyPart decoded by MultipartDecoder :return: tuple pair of headers dict and content """ if b'application/json' in part.headers[b'Content-Type'].lower(): return part.headers, json.loads(part.text) return part.headers, part.text
edd95f6dba9f4157c6a51b2dd6a6c4fb8a34e9db
700,670
def get_utm_zone(lon): """ Calculate UTM zone. Arguments: lon: float Longitude, in degrees. West: negative, East: positive. Returns: zone: int UTM zone number. """ zone = int(1+(lon+180.0)/6.0) return zone
cf8c0d596f146417ebf0d3a18cd0b70e825388aa
700,671
def get_num_params(vocab_size, num_layers, num_neurons): """Returns the number of trainable parameters of an LSTM. Args: vocab_size (int): The vocabulary size num_layers (int): The number of layers in the LSTM num_neurons (int): The number of neurons / units per layer Returns: int: The number of trainable parameters """ num_first_layer = 4 * (num_neurons * (vocab_size + num_neurons) + num_neurons) num_other_layer = 4 * (num_neurons * 2 * num_neurons + num_neurons) num_softmax = vocab_size * num_neurons + vocab_size return num_first_layer + (num_layers - 1) * num_other_layer + num_softmax
c9620e74206878cc3390895dacbf10c84da42829
700,672
def convert_to_nullable(input_val, cast_function): """For non-null input_val, apply cast_function and return result if successful; for null input_val, return None. Args: input_val (Any): The value to attempt to convert to either a None or the type specified by cast_function. The recognized null values are '.', None, '', and 'NULL' cast_function (Callable[[Any], Any]): A function to cast the input_val to some specified type; should raise an error if this cast fails. Returns: None if input is the null value. An appropriately cast value if input is not null and the cast is successful. Raises: Error: whatever error is provided by cast_function if the cast fails. """ if input_val in ['.', None, '', 'NULL']: result = None else: result = cast_function(input_val) return result
ba12f32d2bcced066257788188a2a9d91fcfec37
700,674
import math def odd_improvement(lst): """Calculates the improvement of odds compared to their base values. The higher above 0, the more the odds improved from base-value. The lower under 0, the more the odds deteriorated. Used https://en.wikipedia.org/wiki/Logit as a source for this formula. """ base_probability = lst[0] current_probability = lst[1] improvement = math.log(current_probability / (1 - current_probability)) - math.log( base_probability / (1 - base_probability)) return improvement
4f8607d452fc96b57c9573ed0b07bd5a38791876
700,678
def nearest(items, pivot): """Find nearest value in array, including datetimes Args ---- items: iterable List of values from which to find nearest value to `pivot` pivot: int or float Value to find nearest of in `items` Returns ------- nearest: int or float Value in items nearest to `pivot` """ return min(items, key=lambda x: abs(x - pivot))
0f8766e5680b3b271876a80055b99312bde8366f
700,679
def CODE(string): """ Returns the numeric Unicode map value of the first character in the string provided. Same as `ord(string[0])`. >>> CODE("A") 65 >>> CODE("!") 33 >>> CODE("!A") 33 """ return ord(string[0])
0f680fe1e45156c00d0a5839e24f1619a456773f
700,680
def format_call(__fn, *args, **kw_args): """ Formats a function call, with arguments, as a string. >>> format_call(open, "data.csv", mode="r") "open('data.csv', mode='r')" @param __fn The function to call, or its name. @rtype `str` """ try: name = __fn.__name__ except AttributeError: name = str(__fn) args = [ repr(a) for a in args ] args.extend( n + "=" + repr(v) for n, v in kw_args.items() ) return "{}({})".format(name, ", ".join(args))
0dce4bf0166f59f810063596f872b9f641f84234
700,684
def _get_value_pos(line, delim): """ Finds the first non-whitespace character after the delimiter Parameters: line: (string) Input string delim: (string) The data delimiter """ fields = line.split(delim, 1) if not len(fields) == 2: raise Exception(f"Expected a '{delim}' delimited field. Actual: {line}") return len(line) - len(fields[1].lstrip())
8337c92045f2d3ccb91479502b30c7d191e53f34
700,687
def format_size(size): """Format a size in bytes as a human-readable string. >>> format_size(10240) '10.00K' >>> format_size(1429365116108) '1.30T' >>> format_size(1429365116108000) '1.27P' """ if size < 1024: return '%sB' % size elif size < 1024 ** 2: return '%.2fK' % (float(size) / 1024) elif size < 1024 ** 3: return '%.2fM' % (float(size) / 1024 ** 2) elif size < 1024 ** 4: return '%.2fG' % (float(size) / 1024 ** 3) elif size < 1024 ** 5: return '%.2fT' % (float(size) / 1024 ** 4) else: return "%.2fP" % (float(size) / 1024 ** 5)
66aa2301350def395e32bae87dabccc18a126786
700,689
import getpass def get_username() -> str: """ Returns username lowercase >>> username = get_username() >>> assert len(username) > 1 """ _username = getpass.getuser().lower() return _username
aa7c5d2974502bd411cd1a77218ca74171d3dc71
700,691
def extract_id(source): """ Attempts to extract an ID from the argument, first by looking for an attribute and then by using dictionary access. If both fail, the argument is returned. """ try: return source.id except AttributeError: pass try: return source["id"] except (TypeError, KeyError) as err: pass return source
7ec169cfd6edf70c9d414ec61edc3bf514a80e02
700,692
import shutil def pytest_report_header(config): """Add header information for pytest execution.""" return [ "LAMMPS Executable: {}".format( shutil.which(config.getoption("lammps_exec") or "lammps") ), "LAMMPS Work Directory: {}".format( config.getoption("lammps_workdir") or "<TEMP>" ), ]
dc07ae457cc49a1fc1ac43643a193bb3b6a84399
700,693
def not_in(a, b): """Evaluates a not in b""" result = False if a in b else True return result
bf5f2fd22a48f4ba517c75de2d05c11ab585756b
700,700
def encode_imsi(imsi):
    """
    Convert an IMSI string to a uint + length.
    IMSI strings can contain two prefix zeros for test MCC and maximum fifteen digits.
    Bit 1 of the compacted uint is always 1, so that we can match on it set.
    Bits 2-3 of the compacted uint contain how many leading 0's are in the IMSI.
    For example, if the IMSI is 001010000000013, the first bit is 0b1, the second
    two bits would be 0b10 and the remaining bits would be 1010000000013 << 3

    Args:
        imsi - string representation of imsi
    Returns:
        int representation of imsi with padding amount at end
    """
    if imsi.startswith('IMSI'):
        imsi = imsi[4:]  # strip IMSI off of string
    prefix_len = len(imsi) - len(imsi.lstrip('0'))
    compacted = (int(imsi) << 2) | (prefix_len & 0x3)
    return compacted << 1 | 0x1
31d025168ba2421be2b235a049983ee331437ffd
700,701
import typing import itertools def generate_fixed_permutations( base_name: str, locations: typing.List[str], perm_length: int, ) -> typing.List[typing.List[str]]: """ Generate path permutations of a specified length which always start and end at base_name. :param base_name: Name of base to begin from. :param locations: List of locations that can be visited. :param perm_length: Length of the trip in stops. :return: List of all possible paths of specified length. """ location_perms = itertools.permutations(locations, perm_length) return [[base_name] + list(perm) + [base_name] for perm in location_perms]
61c03cb166ca4dc7691d3c09eaedf26b5ff3c288
700,705
import typing import pathlib import hashlib from pathlib import Path def md5(p: typing.Union[pathlib.Path, str], bufsize: int = 32768) -> str: """ Compute md5 sum of the content of a file. """ hash_md5 = hashlib.md5() with Path(p).open('rb') as fp: for chunk in iter(lambda: fp.read(bufsize), b''): hash_md5.update(chunk) return hash_md5.hexdigest()
2399177dca4d64de231287f7bd25346df8437dbd
700,707
def load_module(name):
    """load a module

    Args:
        name: python dotted namespace path of the module to import

    Returns:
        imported module

    Raises:
        FailedImport if importing fails
    """
    m = __import__(name)
    # __import__('foo.bar') returns foo, so...
    for bit in name.split('.')[1:]:
        m = getattr(m, bit)
    return m
6cfcb58fccbf7d0c6de22a561312aff8931b8317
700,710
def uniquify(iterable): """ Make unique list while preserving order. """ unique = [] for entry in iterable: if entry not in unique: unique.append(entry) return unique
a579c5e4cf8b38213fbc9cdcac2c122586bab97a
700,712
from typing import Dict from typing import Any def create_default_metadata() -> Dict[str, Any]: """Creates a dictionary with the default metadata.""" return { 'title': 'Default title', 'base_url': 'https://example.org', 'description': 'Default description', 'language': 'en-US', 'copyright': 'Licensed under the ' '<a href="http://creativecommons.org/licenses/by/4.0/"> ' 'Creative Commons Attribution 4.0 International License.' }
2ee6aeffaafd209cb93e33bd42291ac3c12b10d8
700,714
import re def extract_authorization_token(request): """ Get the access token using Authorization Request Header Field method. Or try getting via GET. See: http://tools.ietf.org/html/rfc6750#section-2.1 Return a string. """ auth_header = request.META.get('HTTP_AUTHORIZATION', '') if re.compile(r'^[Bb]earer\s{1}.+$').match(auth_header): access_token = auth_header.split()[1] else: access_token = request.GET.get('access_token', '') return access_token
9776df3ecd59ba3db15664259a6e65114ec61a07
700,716
from typing import List def _inv_shift_rows(s: List[List[bytes]]) -> List[List[bytes]]: """ Performs the inverted shift rows transformation as described in the standard :param s: the state matrix :return: the new state matrix with shifted rows """ s[0][1], s[1][1], s[2][1], s[3][1] = s[3][1], s[0][1], s[1][1], s[2][1] s[0][2], s[1][2], s[2][2], s[3][2] = s[2][2], s[3][2], s[0][2], s[1][2] s[0][3], s[1][3], s[2][3], s[3][3] = s[1][3], s[2][3], s[3][3], s[0][3] return s
bdb593e912275bfdf387334916123830e081ed50
700,720
def __args_to_weka_options(args): """ Function that creates a list with options (args) in a format appropriate for weka. :param args: dictionary with command line input :return: list of command line arguments """ result = [] for k,v in args.items(): if v: result.append("-" + k) result.append(v) elif v == "": result.append("-" + k) return result
1d480ffaf840ae67d805d7845684eef24d3da583
700,724
def zpadlist(values: list, inputtype: str, minval: int, maxval: int) -> list: """Return a list of zero padded strings and perform input checks. Returns a list of zero padded strings of day numbers from a list of input days. Invalid day numbers (e.g. outside of 1-31) will raise an exception. Parameters ---------- values: list(int) List of integers that will be zero-padded. inputtype: str String identifying the input data used in error messages. minval: int Minimum value that all elements in `values` are checked against. maxval: int Maximum value that all elements in `values` are checked against. Returns ------- list(str) List of zero-padded strings (e.g. ['01', '02',..., '31']). Raises ------ AssertionError If any value in the list is not within `minval<=value<=maxval`. """ returnlist = [] for value in values: assert (int(value) >= minval), ( 'invalid value specified for {}: {}'.format(inputtype, value)) assert (int(value) <= maxval), ( 'invalid value specified for {}: {}'.format(inputtype, value)) returnlist += [str(int(value)).zfill(2)] return returnlist
bcc06dfb36b93af69d031b44f64dfd3ee7d082c3
700,728
def _ExtStorageEnvironment(unique_id, ext_params, size=None, grow=None, metadata=None, name=None, uuid=None, snap_name=None, snap_size=None, exclusive=None): """Calculate the environment for an External Storage script. @type unique_id: tuple (driver, vol_name) @param unique_id: ExtStorage pool and name of the Volume @type ext_params: dict @param ext_params: the EXT parameters @type size: integer @param size: size of the Volume (in mebibytes) @type grow: integer @param grow: new size of Volume after grow (in mebibytes) @type metadata: string @param metadata: metadata info of the Volume @type name: string @param name: name of the Volume (objects.Disk.name) @type uuid: string @param uuid: uuid of the Volume (objects.Disk.uuid) @type snap_size: integer @param snap_size: the size of the snapshot @type snap_name: string @param snap_name: the name of the snapshot @type exclusive: boolean @param exclusive: Whether the Volume will be opened exclusively or not @rtype: dict @return: dict of environment variables """ vol_name = unique_id[1] result = {} result["VOL_NAME"] = vol_name # EXT params for pname, pvalue in ext_params.items(): result["EXTP_%s" % pname.upper()] = str(pvalue) if size is not None: result["VOL_SIZE"] = str(size) if grow is not None: result["VOL_NEW_SIZE"] = str(grow) if metadata is not None: result["VOL_METADATA"] = metadata if name is not None: result["VOL_CNAME"] = name if uuid is not None: result["VOL_UUID"] = uuid if snap_name is not None: result["VOL_SNAPSHOT_NAME"] = snap_name if snap_size is not None: result["VOL_SNAPSHOT_SIZE"] = str(snap_size) if exclusive is not None: result["VOL_OPEN_EXCLUSIVE"] = str(exclusive) return result
323c9fae9e6cbc1c1a107dd018bbe5f95520a8fe
700,737
import time def is_expired(epoch_time): """True if current time has passed the provided epoch_time""" return time.time() > epoch_time
b264fd1d73fe7f9c97592e6bffc27c81574d6bde
700,738
def stations_by_river(stations, river): """Takes a list of stations and returns a list of all the station names on a specific river in alphabetic order""" station_names = [] for station in stations: if station.river == river: station_names.append(station.name) station_names = sorted(station_names) return station_names
078e83affc54b90f2a58ad46cefdd895d9f8c1e6
700,740
import functools import inspect import six def map_arg(**maps): """ Apply a mapping on certain arguments before calling the original function. Args: maps (dict): {key: map_func} """ def deco(func): @functools.wraps(func) def wrapper(*args, **kwargs): argmap = inspect.getcallargs(func, *args, **kwargs) for k, map_func in six.iteritems(maps): if k in argmap: argmap[k] = map_func(argmap[k]) return func(**argmap) return wrapper return deco
4b327d3167a6c9bd4da84671a661767db04bcb6b
700,742
import hashlib def create_hash(secret: str, url: str) -> str: """Create a hash of the secret and url.""" s = f'{secret}{url}' return str(hashlib.md5(s.encode()).hexdigest()[0:8])
891fcc45fe7706a984fb9282ab17887710e6da0a
700,745
def get_nth_digit(N, n): """ return the nth digit from an N digit number >>> get_nth_digit(12345, 3) 4 >>> get_nth_digit(12345, 7) Traceback (most recent call last): ... IndexError: string index out of range """ return int(str(N)[n])
25c01c14589fb091154e8509a84f98811946938f
700,747
def _strip_version(version): """Strip trailing characters that aren't digits or '.' from version names. Some OS versions look like "9.0gm", which is not useful for select() statements. Thus, we strip the trailing "gm" part. Args: version: the version string Returns: The version with trailing letters stripped. """ result = "" for ch in str(version): if not ch.isdigit() and ch != ".": break result += ch return result
483851b67347c2e23d1c625fc7bb925664f8e1e1
700,750
from unittest.mock import patch import builtins def patch_input(**kwargs): """A helper to provide mocked cm patching input function which was renamed in PY3""" return patch.object(builtins, 'input', **kwargs)
1f70b4b3507f914c5546fa823d6de084b3be8870
700,752
def _RGB2sRGB(RGB): """ Convert the 24-bits Adobe RGB color to the standard RGB color defined in Web Content Accessibility Guidelines (WCAG) 2.0 see https://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef for more references :param RGB: The input RGB color or colors shape== ... x 3 (R, G, and B channels) :type RGB: numpy.ndarray :return: converted sRGB colors :rtype: numpy.ndarray """ sRGB = RGB / 255 return sRGB
c2c5c25d64cc7b0cb9b004846b72aecada7c5d85
700,755
def method_authorizations(*scopes: str) -> dict: """ Return method security. Contains only one OAuth2 security. :param scopes: All scope names that should be available (as string). """ return {"security": [{"oauth2": scopes}]}
4bf2f93715b9798ef20119288178d69b3907e85e
700,758
import requests import json def list_tags(image, cli=False): """ Return a list of tags of a given Docker Hub image. Example: In : list_tags('google/debian') Out: ['jessie', 'wheezy'] In : list_tags('python') Out: ['31', 'rawhide', '30', '29', 'latest' ...] """ if cli: print("The image '{}' on Docker Hub got following tag(s):".format(image)) if image.find('/') == -1: image = 'library/'+image tags = [] page = 1 while True: url = "https://registry.hub.docker.com/v2/repositories/{}/tags/?page={}".format( image, page) request = requests.get(url) if request.status_code == 200: result = json.loads(request.text) for i in range(len(result["results"])): if cli: print(result["results"][i]["name"]) else: tags.append(result["results"][i]["name"]) page += 1 else: break if cli == False: return tags
aefa058e32b72911a0b97619edeee119914440b7
700,760
def get_slurm_script_gpu(output_dir, command):
    """Returns contents of SLURM script for a gpu job."""
    return """#!/bin/bash
#SBATCH -N 1
#SBATCH --ntasks-per-node=1
#SBATCH --ntasks-per-socket=1
#SBATCH --gres=gpu:tesla_p100:1
#SBATCH --cpus-per-task=4
#SBATCH --mem=64000
#SBATCH --output={}/slurm_%j.out
#SBATCH -t 05:59:00
#module load anaconda3 cudatoolkit/10.0 cudnn/cuda-10.0/7.3.1
#source activate yumi
{}
""".format(
        output_dir, command
    )
3f9d587c2943cd821d000fab419d3591440c4d3d
700,761
import uuid import six def create_uuid3(namespace, name): """ Return new UUID based on a hash of a UUID namespace and a string. :param namespace: The namespace :param name: The string :type namespace: uuid.UUID :type name: six.text :return: :rtype: uuid.UUID """ return uuid.uuid3(namespace, six.ensure_str(name))
1a6898f80849a11f643a58798adc8a165e3b0e8d
700,763
def buildTypeTree(cls:type) -> dict: """ Return a tree of subclasses of a class Arguments: cls (type): Class from which to return descendants Returns: dict: Dict of all subclasses Example: buildTypeTree(MainClass) returns: { MainClass.SubClass1: { MainClass.SubClass1.SubClass11: {}, MainClass.SubClass1.SubClass12: {} }, MainClass.SubClass2: {} } """ typeTree = {} for subclass in cls.__subclasses__(): typeTree[subclass] = buildTypeTree(subclass) return(typeTree)
7937df4d9643f20c3e5379f84ef36a28226707ba
700,764
def isAVersionableResource(obj): """ True if an object is versionable. To qualify, the object must be persistent (have its own db record), and must not have a true attribute named '__non_versionable__'.""" if getattr(obj, '__non_versionable__', 0): return 0 return hasattr(obj, '_p_oid')
42dcd02b1f4e1c9f9ff555ec597d8011cfe64893
700,770
def fix_variable(problem, pivot, value): """ Return a new problem that is a copy of the one provided with the pivot variable set to value This function is used for branching, and prints the selection made. """ new_problem = problem.copy() new_problem['variables'] = problem['variables'].copy() new_problem['variables'][pivot] = value print(f'choosing: {pivot} {value}') return new_problem
40e7d358eff405c481aedd9d2b1505664fcd4d6e
700,771
def test_progress(arg1, arg2, kwd1, kwd2, progress): """Simple test target for submit_progress.""" return arg1, arg2, kwd1, kwd2
1d761c572b15c41e1a04aafbb53ca825792df8fe
700,772
def _get_shape_name(array_name, shape_name = None): """Either get shape name or create from array_name.""" return shape_name if shape_name else f'{array_name}/shape'
fe0065faa3e917bb6faef5189ec7ea85ed152c99
700,775
def getMinUnvisited(unvisited, dist): """ return the minimum distance vertex from the set of vertices not yet processed. Parameters: unvisited (set): the set containing all the vertex not yet processed dist (dict): a dictionary with vertex as key and the total distance from the source as value """ aux = {key: dist[key] for key in unvisited} minimum = min(aux.values()) for key in unvisited: if dist[key] == minimum: return key
5ccd7ab9e7e7b70c9aedecb56049332ae1f7b530
700,776
def _alloc_key(name): """Constructs allocation key based on app name/pattern.""" if '@' in name: key = name[name.find('@') + 1:name.find('.')] else: key = name[0:name.find('.')] return key
ca3182f52d780f94a6a18c51ad0b7d841ead20d1
700,778
def _derivative(f, a, method='central', h=0.01): """ Compute the difference formula for f'(a) with step size h. copied from: https://personal.math.ubc.ca/~pwalls/math-python/differentiation/differentiation/ Parameters ---------- f : function Vectorized function of one variable a : number Compute derivative at x = a method : string Difference formula: 'forward', 'backward' or 'central' h : number Step size in difference formula Returns ------- float Difference formula: central: f(a+h) - f(a-h))/2h forward: f(a+h) - f(a))/h backward: f(a) - f(a-h))/h """ if method == 'central': return (f(a + h) - f(a - h)) / (2 * h) elif method == 'forward': return (f(a + h) - f(a)) / h elif method == 'backward': return (f(a) - f(a - h)) / h else: raise ValueError("Method must be 'central', 'forward' or 'backward'.")
de02aaf132922c0be8aeb84bea5af5d09e850b9d
700,780
def insert_in_bst(root, node): """ Insert node in the binary search tree :param root: root node of the binary search tree :type root: TreeNode :param node: node to insert :type node: TreeNode :return: root node :rtype: TreeNode """ if root is None: root = node else: if root.val < node.val: if root.right is None: root.right = node else: insert_in_bst(root.right, node) else: if root.left is None: root.left = node else: insert_in_bst(root.left, node) return root
3c737d71c5793e7baa51d1ad75b6cc056abbda82
700,782
def format_bytes(b): """Format bytes as human-readable text.""" kb = 1024 mb = kb*1024 gb = mb*1024 if b < kb: return '%s b' % b elif b < mb: return '{0:.2f} kb'.format(float(b) / kb) elif b < gb: return '{0:.2f} mb'.format(float(b) / mb) else: return '{0:.2f} gb'.format(float(b) / gb)
4c41105449a8a07e3aca932d9ab3326176f6f1f6
700,783
def splice_before(base, search, splice, post_splice="_"):
    """Splice in a string before a given substring.

    Args:
        base: String in which to splice.
        search: Splice before this substring.
        splice: Splice in this string; falls back to a "." if not found.
        post_splice: String to add after the spliced string if found. If
            only a "." is found, ``post_splice`` will be added before
            ``splice`` instead. Defaults to "_".

    Returns:
        ``base`` with ``splice`` spliced in before ``search`` if found,
        separated by ``post_splice``, falling back to splicing before
        the first "." with ``post_splice`` placed in front of ``splice``
        instead. If neither ``search`` nor ``.`` are found, simply
        returns ``base``.
    """
    i = base.rfind(search)
    if i == -1:
        # fallback to splicing before extension
        i = base.rfind(".")
        if i == -1:
            return base
        else:
            # turn post-splice into pre-splice delimiter, assuming that the
            # absence of search string means delimiter is not before the ext
            splice = post_splice + splice
            post_splice = ""
    return base[0:i] + splice + post_splice + base[i:]
f8f5bf3c2355c38d16157836863e501cbc846d40
700,787
def text_from_doc_list(doc_list): """ extract a text from list of html elements. """ return [doc.text for doc in doc_list]
09370e07fc34c481091a5d34683e3668da12f5a4
700,789
def check_file_isvid(filename): """ checks if a file has a video extension, accepted files are: '.mp4', '.mpg', '.avi' :param filename: (str) name of the file :return: (bool) """ list_extensions = ['.mpg', '.MPG', '.mp4', '.MP4', '.AVI', '.avi'] if filename[-4:] in list_extensions: return True else: return False
5762f9020bce682b7eda948a92a41e85dedfe5c2
700,793
def get_attr_lookup(lines, attr_name): """ :arg lines: a list of :class:`TextLine` instances :arg attr_name: A string, e.g. ``"y0"``, an attribute of :class:`TextLine` :returns: A dictionary of strings mapping values of the given attribute to lists of :class:`TextLine` sharing that attribute. This function can be used to identify lines of text or rows in a table. Note that it relies on *exactly* matching coordinates. """ result = {} for l in lines: result.setdefault(getattr(l, attr_name), []).append(l) return result
3f87431edeb11e9edfe824bf58aeda93ad82d8ae
700,797
def encode_function_data(initializer=None, *args): """Encodes the function call so we can work with an initializer. Args: initializer ([brownie.network.contract.ContractTx], optional): The initializer function we want to call. args (Any, optional): The arguments to pass to the initializer function Returns: [bytes]: Return the encoded bytes. """ if not len(args): args = b"" if initializer: return initializer.encode_input(*args) return b""
303c297d8ea2b62d3ecb6ccc1e208fc54dd84e49
700,798
def subtract(x, y): """ Subtracts two number :param x: minuend :param y: subtrahend :return: difference between two numbers """ return x - y
cb9165d72a3aa0ec7b82f0ad89e53bcf8beffa3a
700,800
from typing import Dict def file_name_convention() -> Dict: """ This function returns the file name taxonomy which is used by ImageAutoOutput and Dataset class """ file_name_convention = {"CT": "image", "MR": "image", "RTDOSE_CT": "dose", "RTSTRUCT_CT": "mask_ct.seg", "RTSTRUCT_MR": "mask_mr.seg", "RTSTRUCT_PT": "mask_pt.seg", "PT_CT": "pet", "PT": "pet", "RTDOSE": "dose", "RTSTRUCT": "mask.seg"} return file_name_convention
f3c56306c8dd0f3c228064e8a72bef51b70a4d93
700,801
def mcb(l, bit, mlb, tiebreaker = "1"): """ l = list of bits, e.g. ["00100", "11110", "10110"] bit = index of the bit to consider, integer mlb = most ("1") or least ("0") bit tiebreaker = if there's an even split, default to this value. returns the most common occurrence, subject to tiebreakers, "0" or "1" as a string """ s = 0 exact_split = len(l) / 2 for i in l: if i[bit] == "1": s += 1 if s == exact_split: return tiebreaker elif s > exact_split: return mlb else: return str(1 - int(mlb))
9256db43f5564ac62f8f83a27332a408a496fc3e
700,804
import logging def get_query_for_oracle_load_full(table_name, columns, owner): """ JDBC query for full ingestion of one table """ logging.info(f"BUILDING FULL QUERY for {table_name}") select_cols = ",".join(str(x) for x in columns) return f"select {select_cols} from {owner}.{table_name}"
e91497cae2cf5804c89b063e77943694397a2d62
700,806
def _inches_to_meters(length): """Convert length from inches to meters""" return length * 2.54 / 100.0
fccd2937b87c7b1c7eba793b66b4b8573de1e472
700,808
def area(box): """Calculates area of a given bounding box.""" return float((box[1][0]-box[0][0]) * (box[1][1] - box[0][1]))
3c2fac0d92c8b9cc05dff3cba59d2a83670392e0
700,809
def insert_clause(table_name, keys): """ Create an insert clause string for SQL. Args: table_name: The table where the insertion will happen. keys: An iterator with strings specifying the fields to change. Returns: The query as a string """ fields = list(keys) fields_str = ', '.join(fields) values_str = ', '.join(['?']*len(fields)) query = 'INSERT INTO {} ({}) VALUES ({})'.format(table_name, fields_str, values_str) return query
7c57bff8dec2242ed2ba1c7efa7543296ce1242f
700,813
def choose_pivot_first(_: list[int], left: int, __: int) -> int: """Choose first element as pivot""" return left
931c7a182feda076213ec85f2d7909e7ff2e87cb
700,816
def retag_from_strings(string_tag): """ Returns only the final node tag """ valure = string_tag.rfind('+') if valure != -1: tag_recal = string_tag[valure+1:] else: tag_recal = string_tag return tag_recal
5bef884498efb19eb354bb6119450c9c25a19e1c
700,817
import pathlib def get_summit_config_path(config_dir_name=".summit"): """Returns the path to the summit config directory""" home = pathlib.Path.home() return home / config_dir_name
af89240c29b440d52e41676c07cd97fa642d288d
700,819
def merge_coco_results(existing_coco_results, new_coco_results, image_id_offset): """ Merges the two given coco result dicts into one. :param existing_coco_results: A dict describing the first coco results. :param new_coco_results: A dict describing the second coco results. :return: A dict containing the merged coco results. """ for res in new_coco_results: res['image_id'] += image_id_offset existing_coco_results += new_coco_results return existing_coco_results
78b8efe19b3f540b6b0943cacc7207a746232faf
700,820
def process_regex(regex): """ This function parses a regex string into a dictionary of fields and regexes Format: <field1> -> <regex1> <field2> -> <regex2> etc.""" res_dict = {} lines = regex.split("\n") for l in lines: tok = l.split("->") if len(tok) != 2: continue field = tok[0].strip(" \t\n\r") rg = tok[1].strip(" \t\n\r") res_dict[field] = rg return res_dict
ad8fb6cc1d2713de53442ce9c9defbe2a45da0a5
700,821
import logging def create_table(dynamodb, table_name, partition_key, sort_key={}, rcu=15, wcu=5): """ Purpose: Create an DynamoDB Table by name Args: dynamodb (DynamoDB Resource Object): DynamoDB Object owning the Table table_name (String): Name of table to return partition_key (Dict): Dict with name and type of the partition key e.g. {"name": "name_of_partition_key", "type": "S"} sort_key (Dict): Dict with name and type of the sort key e.g. {"name": "name_of_sort_key", "type": "S"} rcu (Int): Read Capacity Units for the table. Defaults to 15 wcu (Int): Write Capacity Units for the table. Defaults to 5 Return: table (DynamoDB Table Object): Created Table Object """ logging.info(f"Creating Table {table_name} with RCU={rcu} and WCU={wcu}") key_schema = [] attribute_definitions = [] key_schema.append({"AttributeName": partition_key["name"], "KeyType": "HASH"}) attribute_definitions.append( {"AttributeName": partition_key["name"], "AttributeType": partition_key["type"]} ) if sort_key: key_schema.append({"AttributeName": sort_key["name"], "KeyType": "RANGE"}) attribute_definitions.append( {"AttributeName": sort_key["name"], "AttributeType": sort_key["type"]} ) logging.info(f"Key Schema: {key_schema}") logging.info(f"Attribute Definitions: {attribute_definitions}") try: table = dynamodb.create_table( TableName=table_name, KeySchema=key_schema, AttributeDefinitions=attribute_definitions, ProvisionedThroughput={"ReadCapacityUnits": rcu, "WriteCapacityUnits": wcu}, ) except Exception as err: logging.exception(f"Exception Creating Table: {err}") raise return table
192a6a0d643d6bf6604d91517bc37a76cf61a9bd
700,824
def is_rm_textfile(filename): """Returns True if the given filename is a known remarkable-specific textfile.""" if filename.endswith('.json'): return True if filename.endswith('.content'): return True if filename.endswith('.pagedata'): return True if filename.endswith('.bookm'): return True return False
fd2d05fb1900d432c63d9b2bad0b802e5e00c601
700,825