content stringlengths 39-14.9k | sha1 stringlengths 40-40 | id int64 0-710k |
---|---|---|
def bool_tag(name, value):
"""Create a DMAP tag with boolean data."""
return name.encode('utf-8') + \
b'\x00\x00\x00\x01' + \
(b'\x01' if value else b'\x00') | dc914d262a20eed0732e477f75641daa4811fd9f | 699,812 |
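A minimal usage sketch for bool_tag; the tag name "mslr" below is just an illustrative DMAP code, not taken from the source:

# Encodes the tag name, a 4-byte length of 1, and the single payload byte.
assert bool_tag("mslr", True) == b"mslr\x00\x00\x00\x01\x01"
assert bool_tag("mslr", False) == b"mslr\x00\x00\x00\x01\x00"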
def _concat(*lists):
"""Concatenates the items in `lists`, ignoring `None` arguments."""
concatenated = []
    for lst in lists:  # avoid shadowing the built-in `list`
        if lst:
            concatenated += lst
return concatenated | a1eea1c074fe1eee1ca454899bf9dec2719a333e | 699,813 |
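A quick check of the skipping behaviour; None (and empty) arguments contribute nothing:

assert _concat([1, 2], None, [3], []) == [1, 2, 3]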
def get_least_squares_size(modelform, r, m=0, affines=None):
"""Calculate the number of columns in the operator matrix O in the Operator
Inference least-squares problem.
Parameters
    ----------
modelform : str containing 'c', 'A', 'H', 'G', and/or 'B'
The structure of the desired reduced-order model. Each character
indicates the presence of a different term in the model:
'c' : Constant term c
'A' : Linear state term Ax.
'H' : Quadratic state term H(x⊗x).
'G' : Cubic state term G(x⊗x⊗x).
'B' : Input term Bu.
For example, modelform=="AB" means f(x,u) = Ax + Bu.
r : int
The dimension of the reduced order model.
m : int
The dimension of the inputs of the model.
Must be zero unless 'B' is in `modelform`.
affines : dict(str -> list(callables))
Functions that define the structures of the affine operators.
Keys must match the modelform:
* 'c': Constant term c(µ).
* 'A': Linear state matrix A(µ).
* 'H': Quadratic state matrix H(µ).
* 'G': Cubic state matrix G(µ).
* 'B': linear Input matrix B(µ).
For example, if the constant term has the affine structure
c(µ) = θ1(µ)c1 + θ2(µ)c2 + θ3(µ)c3, then 'c' -> [θ1, θ2, θ3].
Returns
-------
ncols : int
The number of columns in the Operator Inference least-squares problem.
"""
has_inputs = 'B' in modelform
if has_inputs and m == 0:
raise ValueError(f"argument m > 0 required since 'B' in modelform")
if not has_inputs and m != 0:
raise ValueError(f"argument m={m} invalid since 'B' in modelform")
if affines is None:
affines = {}
qc = len(affines['c']) if 'c' in affines else 1 if 'c' in modelform else 0
qA = len(affines['A']) if 'A' in affines else 1 if 'A' in modelform else 0
qH = len(affines['H']) if 'H' in affines else 1 if 'H' in modelform else 0
qG = len(affines['G']) if 'G' in affines else 1 if 'G' in modelform else 0
qB = len(affines['B']) if 'B' in affines else 1 if 'B' in modelform else 0
return qc + qA*r + qH*r*(r+1)//2 + qG*r*(r+1)*(r+2)//6 + qB*m | 86cf6a0b3e4b256eaccb3f061d21f7de74dcc604 | 699,814 |
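A worked example for the non-affine case, where each term present contributes one block:

# modelform="cAB" with r=3, m=2: qc=1, qA=1, qB=1, qH=qG=0,
# so ncols = 1 + 1*3 + 1*2 = 6.
assert get_least_squares_size("cAB", r=3, m=2) == 6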
def read(rows):
"""Reads the list of rows and returns the sudoku dict.
The sudoku dict maps an index to a known value. Unknown values are not written.
Indices go from 0 to 80.
"""
sudoku = {}
i = 0
for rn, row in enumerate(rows):
if rn in (3, 7):
continue
j = 0
for cn, c in enumerate(row.rstrip()):
if cn in (3, 7):
continue
if c != ".":
sudoku[i * 9 + j] = int(c)
j += 1
i += 1
return sudoku | 1f1a06a32d1be70f3d912bd42b9cca07f7d4879d | 699,815 |
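A usage sketch with hypothetical input rows; row indices 3 and 7 and column indices 3 and 7 are treated as grid separators and skipped:

rows = ["53. .7. ...",
        "6.. 195 ..."]
s = read(rows)
assert s[0] == 5 and s[1] == 3 and s[4] == 7     # first row
assert s[12] == 1 and s[13] == 9 and s[14] == 5  # second row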
def uniquify(iterable):
"""Remove duplicates in given iterable, preserving order."""
uniq = set()
return (x for x in iterable if x not in uniq and (uniq.add(x) or True)) | 563953cc6450a0136a4996d4a5f5a0057f6ad69b | 699,817 |
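Usage; the order of first occurrence is preserved:

assert list(uniquify([3, 1, 3, 2, 1])) == [3, 1, 2]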
def badFormatting(s, charSet):
"""Tells if a character from charSet appears in a string s."""
for c in charSet:
if c in s:
return True
return False | 23baba28be306e0d0c1ccaa0df48e1a9f94bdc8c | 699,823 |
def column_to_width(df_in, column, width):
"""Pad the column header and the values in the column with whitespace to a
specific width.
"""
df = df_in.copy()
df[column] = df[column].apply(lambda x: ('{:>' + str(width) + '}').format(x))
df = df.rename(columns={column: ('{:>' + str(width) + '}').format(column)})
return df | 988f021c7ff2f296ecacd83ddbced0de6404e3fc | 699,824 |
def use_netrc(netrc, urls, patterns):
"""compute an auth dict from a parsed netrc file and a list of URLs
Args:
netrc: a netrc file already parsed to a dict, e.g., as obtained from
read_netrc
urls: a list of URLs.
patterns: optional dict of url to authorization patterns
Returns:
dict suitable as auth argument for ctx.download; more precisely, the dict
will map all URLs where the netrc file provides login and password to a
dict containing the corresponding login, password and optional authorization pattern,
as well as the mapping of "type" to "basic" or "pattern".
"""
auth = {}
for url in urls:
schemerest = url.split("://", 1)
if len(schemerest) < 2:
continue
if not (schemerest[0] in ["http", "https"]):
# For other protocols, bazel currently does not support
# authentication. So ignore them.
continue
host = schemerest[1].split("/")[0].split(":")[0]
        if host not in netrc:
continue
authforhost = netrc[host]
if host in patterns:
auth_dict = {
"type": "pattern",
"pattern": patterns[host],
}
if "login" in authforhost:
auth_dict["login"] = authforhost["login"]
if "password" in authforhost:
auth_dict["password"] = authforhost["password"]
auth[url] = auth_dict
elif "login" in authforhost and "password" in authforhost:
auth[url] = {
"type": "basic",
"login": authforhost["login"],
"password": authforhost["password"],
}
return auth | 561ee1388dbdde74614fdef1fb29b78c7ecc687b | 699,825 |
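A hedged usage sketch; this helper comes from Bazel's Starlark rules, but the body also runs as plain Python, so its behaviour can be checked directly (the hostname and credentials below are made up):

netrc = {"example.com": {"login": "alice", "password": "s3cret"}}
urls = ["https://example.com/pkg.tar.gz", "ftp://example.com/skipped"]
auth = use_netrc(netrc, urls, {})
# Only the https URL is mapped; ftp is unsupported and no pattern is configured.
assert auth == {"https://example.com/pkg.tar.gz":
                {"type": "basic", "login": "alice", "password": "s3cret"}}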
def str_cutoff(string: str, max_length: int, cut_tail: bool = False) -> str:
"""
Abbreviate a string to a given length.
The resulting string will carry an indicator if it's abbreviated,
like ``stri#``.
Parameters
----------
string : str
String which is to be cut.
max_length : int
Max resulting string length.
cut_tail : bool
``False`` for string abbreviation from the front, else ``True``.
Returns
-------
str
Resulting string
"""
if max_length < 1:
raise ValueError("max_length < 1 not allowed")
if len(string) > max_length:
pos = max_length - 1
        return (string[:pos] + "#") if cut_tail else ("#" + string[-pos:])
return string | 05fdab8700dd07710c31d4007c9bc6b3f9eb6155 | 699,826 |
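Usage, showing both abbreviation directions and the pass-through case:

assert str_cutoff("abcdefg", 5) == "#defg"                 # cut from the front
assert str_cutoff("abcdefg", 5, cut_tail=True) == "abcd#"  # cut from the tail
assert str_cutoff("abc", 5) == "abc"                       # already short enough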
def wireless(card, mode=None, apn=None):
"""Retrieve wireless modem info or customize modem behavior.
Args:
card (Notecard): The current Notecard object.
mode (string): The wireless module mode to set.
apn (string): Access Point Name (APN) when using an external SIM.
Returns:
string: The result of the Notecard request.
"""
req = {"req": "card.wireless"}
if mode:
req["mode"] = mode
if apn:
req["apn"] = apn
return card.Transaction(req) | 355256bd8123f0f749561f61a2df3be93b91db61 | 699,828 |
def filter_none(x):
"""
Recursively removes key, value pairs or items that is None.
"""
if isinstance(x, dict):
return {k: filter_none(v) for k, v in x.items() if v is not None}
elif isinstance(x, list):
        return [filter_none(i) for i in x if i is not None]
else:
return x | c1c478b2c367dd9453b5504bbfece7dfd8c05376 | 699,829 |
def _dmet_orb_list(mol, atom_list):
"""Rearrange the orbital label
Args:
mol (pyscf.gto.Mole): The molecule to simulate.
atom_list (list): Atom list for IAO assignment (int).
Returns:
newlist (list): The orbital list in new order (int).
"""
newlist = []
for i in range(mol.natm):
for j in range(mol.nao_nr()):
if (atom_list[j] == i):
newlist.append(j)
return newlist | 7998d9cec104bc02ad3daf600d2e24d9b1f5f243 | 699,830 |
def _post_processing(metric_map: dict[str, float]) -> dict[str, float]:
"""
unit conversion etc...
time:
taskTime, executorDeserializeTime, executorRunTime, jvmGcTime are milliseconds
executorDeserializeCpuTime, executorCpuTime are nanoseconds
"""
metric_map["executorDeserializeCpuTime"] = metric_map[
"executorDeserializeCpuTime"] * 1e-6
metric_map["executorCpuTime"] = metric_map["executorCpuTime"] * 1e-6
return metric_map | 23ff301d55e0dc2d2208aca5761059fb8ade3e4e | 699,831 |
from bs4 import BeautifulSoup
import requests
def get_soup(url: str) -> BeautifulSoup:
"""Get an instance of BeautifulSoup for a specific url"""
response = requests.get(url)
if response.status_code != requests.codes.ok:
print(f"url request error: response.status_code is {response.status_code}")
raise requests.ConnectionError
html = response.text
soup: BeautifulSoup = BeautifulSoup(html, "html.parser")
return soup | 34f172c2d6d2d7928d93f3a11768cc45272fc399 | 699,833 |
def intersect(hrect, r2, centroid):
"""
checks if the hyperrectangle hrect intersects with the
hypersphere defined by centroid and r2
"""
maxval = hrect[1, :]
minval = hrect[0, :]
p = centroid.copy()
idx = p < minval
p[idx] = minval[idx]
idx = p > maxval
p[idx] = maxval[idx]
return ((p - centroid) ** 2).sum() < r2 | 6050742ae4527f5baba3c6cb8a484b04d32c0b3c | 699,836 |
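A usage sketch assuming hrect is a 2-by-d numpy array with row 0 the minimum corner and row 1 the maximum corner, as the function body implies:

import numpy as np
unit_square = np.array([[0.0, 0.0], [1.0, 1.0]])
# A sphere of radius 0.5 (r2 = 0.25) centred just outside the square still overlaps it.
assert intersect(unit_square, 0.25, np.array([1.2, 0.5]))
assert not intersect(unit_square, 0.25, np.array([2.0, 0.5]))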
def is_method_of(method, obj):
"""Return True if *method* is a method of *obj*.
*method* should be a method on a class instance; *obj* should be an instance
of a class.
"""
# Check for both 'im_self' (Python < 3.0) and '__self__' (Python >= 3.0).
cls = obj.__class__
mainObj = getattr(method, "im_self", getattr(method, "__self__", None))
return isinstance(mainObj, cls) | 554ab48effb7ce996846192786ce2141abf671a4 | 699,838 |
from collections import Counter
def number_of_pairs(gloves):
"""
Given an array describing the color of each glove,
return the number of pairs you can constitute,
assuming that only gloves of the same color can form pairs.
Examples:
input = ["red", "green", "red", "blue", "blue"]
result = 2 (1 red pair + 1 blue pair)
"""
count = 0
gloves = Counter(gloves)
for x in gloves.values():
count += x // 2
return count | e499f0e924b0154684ad2ecda51d7ed0ed63d183 | 699,854 |
def Rmax_Q11(Vmax):
""" Estimation of the radius of maximum wind according to the formula proposed
by Quiring et al. (2011); Vmax and Rmax are in nautical miles.
Expression herein converted in km"""
Vm= Vmax * 0.5399568
Rmax = ((49.67 - 0.24 * Vm)) * 1.852
return Rmax | e320acfd64abc9e7ae30ca70979cf057239bae09 | 699,855 |
def get_option_value(elem):
""" Get the value attribute, or if it doesn't exist the text
content.
<option value="foo">bar</option> => "foo"
<option>bar</option> => "bar"
:param elem: a soup element
"""
value = elem.get("value")
if value is None:
value = elem.text.strip()
if value is None or value == "":
msg = u"Error parsing value from {}.".format(elem)
raise ValueError(msg)
return value | b2a549d8b5ec3c895ff2b3c2978437a25afe99b1 | 699,856 |
def EI(sections, normal=None): # {{{
"""Calculate the bending stiffnes of a cross-section.
The cross-section is composed out of rectangular nonoverlapping sections
that can have different Young's moduli.
Each section is represented by a 4-tuple (width, height, offset, E).
The offset is the distance from the top of the section to the top of the
highest section. This should always be a positive value.
E is the Young's modulus of the material of this section.
Arguments:
sections: Iterable of section properties.
normal: The Young's modulus to which the total cross-section will be
normalized. (Not used anymore, retained for compatibility.)
Returns:
Tuple of EI, top and bottom. Top and bottom are with respect to the
neutral line.
Examples:
>>> E = 210000
>>> B = 100
>>> H = 20
>>> sections = ((B, H, 0, E),)
>>> EI(sections)
(14000000000.0, 10.0, -10.0)
>>> B = 100
>>> h = 18
>>> t = 1
>>> H = h + 2 * t
>>> E = 210000
>>> sections = ((B, t, 0, E), (B, t, h+t, E))
>>> EI(sections)
(3794000000.0, 10.0, -10.0)
>>> E1, E2 = 200000, 71000
>>> t1, t2 = 1.5, 2.5
>>> H = 31
>>> B = 100
>>> sections = ((B, t1, 0, E1), (B, t2, H-t2, E2))
>>> EI(sections)
(9393560891.143106, 11.530104712041885, -19.469895287958117)
"""
normal = sections[0][-1]
normalized = tuple((w * E / normal, h, offs) for w, h, offs, E in sections)
A = sum(w * h for w, h, _ in normalized)
S = sum(w * h * (offs + h / 2) for w, h, offs in normalized)
yn = S / A
# Find any geometry that straddles yn.
to_split = tuple(g for g in sections if g[2] < yn and g[1] + g[2] > yn)
geom = tuple(g for g in sections if g not in to_split)
# split that geometry.
# The new tuple has the format (width, height, top, bottom)
new_geom = []
for w, h, offs, E in to_split:
h1 = yn - offs
h2 = h - h1
new_geom.append((w, h1, h1, 0, E))
new_geom.append((w, h2, 0, -h2, E))
# Convert the remaining geometry to reference yn.
for w, h, offs, E in geom:
new_geom.append((w, h, yn - offs, yn - offs - h, E))
EI = sum(E * w * (top ** 3 - bot ** 3) / 3 for w, h, top, bot, E in new_geom)
top = max(g[-3] for g in new_geom)
bot = min(g[-2] for g in new_geom)
return EI, top, bot | 24b5ca79f0a3f041586e2f9d7fe8d7953cd96780 | 699,859 |
def filter_labels_by_class(obj_labels, classes):
"""Filters object labels by classes.
Args:
obj_labels: List of object labels
classes: List of classes to keep, e.g. ['Car', 'Pedestrian', 'Cyclist']
Returns:
obj_labels: List of filtered labels
class_mask: Mask of labels to keep
"""
    class_mask = [(obj.type in classes) for obj in obj_labels]
    # Boolean-mask indexing only works on numpy arrays; filter with a
    # comprehension so plain Python lists work too.
    obj_labels = [obj for obj, keep in zip(obj_labels, class_mask) if keep]
    return obj_labels, class_mask | 854a32da802c794b0622a0a36895590823b7c780 | 699,860 |
def probably_reconstruction(file) -> bool:
"""Decide if a path may be a reconstruction file."""
return file.endswith("json") and "reconstruction" in file | fc5c20fe8fddc9f8ffaab0e746100a534e6a5f57 | 699,863 |
from collections import Counter
def check_author_count(counter: Counter) -> bool:
"""
Takes a set of documents and counts the number of authors. If less than
2, returns False otherwise True.
:param counter: a Counter object for author counts.
:return: a boolean indicating whether or not the document set can be
analyzed (True for yes, no for False).
"""
    if len(counter) < 2:
        return False
    return True | 31b697cc0e5a395ebb0702c40e86f5b21760021d | 699,866 |
def default_destinations(iata_code_original_destination):
"""Get three default destinations different from original destination of query."""
# Paris, London, Rome, New York
defaults = ['CDG', 'LHR', 'FCO', 'JFK']
if iata_code_original_destination in defaults:
defaults.remove(iata_code_original_destination)
else:
defaults.remove('JFK')
return defaults | 904ebb69bdb3bb893580b201bbc50060a194ed7b | 699,867 |
import json
def get_boxnote_text(filepath):
"""Convert a boxnote to plain text.
Parameters:
filepath (str): the path to the boxfile
Returns: the text of the boxfile as a string
"""
f = open(filepath, encoding="utf8")
text = json.loads(f.read())["atext"]["text"]
f.close()
return text | ba41e36d534931b9e2d1a401d7317ee4f8956f13 | 699,880 |
from typing import Dict
def filter_val_not_none(d: Dict) -> Dict:
"""
Return a new dictionary composed of all key-value pairs (k, v) in d where v is not None.
:param d: original dictionary
:return: d, without key-value pairs where value is None
>>> filter_val_not_none({"a": 5, "b": 10, "c": None}) == {"a": 5, "b": 10}
True
"""
return {k: v for k, v in d.items() if v is not None} | 21aaf90a4407a690ce76d09dfb54d93c1293c953 | 699,884 |
def get_card_ids(db) -> list:
"""
    Gets a list of all card IDs in the database.
:return: List of card IDs.
"""
return db['cards'].distinct('_id') | 48e6e1880253603233ccdc55fb38269f75375f8f | 699,885 |
import time
def datetime_to_integer(dt):
    """Convert a datetime object to an integer Unix timestamp.
    'dt' is the datetime object"""
    return int(time.mktime(dt.timetuple())) | 8d4d94fac947c3dd9e82ee3d60a1a57a6440457d | 699,886 |
def crop_to_ratio(im, desired_ratio=4 / 3):
""" Crop (either) the rows or columns of an image to match (as best as possible) the
desired ratio.
Arguments:
im (np.array): Image to be processed.
desired_ratio (float): The desired ratio of the output image expressed as
width/height so 3:2 (= 3/2) or 16:9 ( = 16/9).
Returns:
An image (np.array) with the desired ratio.
"""
height = im.shape[0]
width = im.shape[1]
if width / height < desired_ratio: # Crop rows
desired_height = int(round(width / desired_ratio))
to_crop = height - desired_height
top_crop = to_crop // 2
bottom_crop = to_crop - top_crop
cropped_image = im[top_crop:height - bottom_crop, :]
else: # Crop columns
desired_width = int(round(height * desired_ratio))
to_crop = width - desired_width
left_crop = to_crop // 2
right_crop = to_crop - left_crop
cropped_image = im[:, left_crop:width - right_crop]
return cropped_image | dd2301708aa514b2d9b87758ce38d7bd9f9d874c | 699,888 |
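Usage on a square dummy image; the rows are cropped to reach the 4:3 target:

import numpy as np
im = np.zeros((100, 100, 3))
out = crop_to_ratio(im, desired_ratio=4 / 3)
assert out.shape[:2] == (75, 100)  # round(100 / (4/3)) = 75 rows kept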
def grompp_npt(job):
"""Run GROMACS grompp for the npt step."""
npt_mdp_path = "npt.mdp"
msg = f"gmx grompp -f {npt_mdp_path} -o npt.tpr -c em.gro -p init.top --maxwarn 1"
return msg | 7afdf17586250a62106c67b2594d1aa057fef09e | 699,889 |
def uri_leaf(uri):
"""
Get the "leaf" - fragment id or last segment - of a URI. Useful e.g. for
getting a term from a "namespace like" URI.
>>> uri_leaf("http://purl.org/dc/terms/title") == 'title'
True
>>> uri_leaf("http://www.w3.org/2004/02/skos/core#Concept") == 'Concept'
True
>>> uri_leaf("http://www.w3.org/2004/02/skos/core#") # returns None
"""
for char in ('#', '/', ':'):
if uri.endswith(char):
break
if char in uri:
sep = char
leaf = uri.rsplit(char)[-1]
else:
sep = ''
leaf = uri
if sep and leaf:
return leaf | 3045806ac56124331c58b6daffb5c1b5c202c0eb | 699,890 |
import yaml
def load_yaml_config(filename):
"""Load a YAML configuration file."""
with open(filename, "rt", encoding='utf-8') as file:
config_dict = yaml.safe_load(file)
return config_dict | 771dbf8fdaca1575bc9bdb472d6aa1405c689e7a | 699,891 |
import uuid
def build_request_body(method, params):
"""Build a JSON-RPC request body based on the parameters given."""
data = {
"jsonrpc": "2.0",
"method": method,
"params": params,
"id": str(uuid.uuid4())
}
return data | 372df70bd17e78f01de5f0e537988072ac9716cc | 699,892 |
def part1(input_data):
"""
>>> part1(["939","7,13,x,x,59,x,31,19"])
295
"""
timestamp = int(input_data[0])
bus_ids = input_data[1].split(',')
# Ignore bus_ids with 'x'
bus_ids = map(int, filter(lambda bus_id: bus_id != 'x', bus_ids))
# (id, time_to_wait)
# last_busstop = timestamp % id
# time_to_wait = id - last_busstop
bus_ids = [(bus_id, bus_id - (timestamp % bus_id)) for bus_id in bus_ids]
bus_ids.sort(key=lambda x: x[1])
next_bus_id, next_bus_time_to_wait = bus_ids[0]
return next_bus_id * next_bus_time_to_wait | 99c1928c5833f3c9773a28323f2bd2a903f626f3 | 699,893 |
import torch
def fetch_optimizer(lr, wdecay, epsilon, num_steps, params):
""" Create the optimizer and learning rate scheduler """
optimizer = torch.optim.AdamW(params, lr=lr, weight_decay=wdecay, eps=epsilon)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, lr, num_steps+100,
pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
return optimizer, scheduler | e6d028f4adf58c303e1e7f0cb0b5233cf4f6026c | 699,898 |
import re
def to_num(string):
"""Convert string to number (or None) if possible"""
    if not isinstance(string, str):
        return string
    if string == "None":
        return None
    if re.match(r"\d+\.\d*$", string):
        return float(string)
    elif re.match(r"\d+$", string):
        return int(string)
else:
return string | 79a0740e298e33198dca2d7b7fcd53700f121869 | 699,901 |
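Usage, including a string that only looks numeric and is returned unchanged:

assert to_num("42") == 42
assert to_num("3.14") == 3.14
assert to_num("None") is None
assert to_num("4.2.1") == "4.2.1"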
def get_tidy_invocation(f, clang_tidy_binary, checks, build_path,
quiet, config):
"""Gets a command line for clang-tidy."""
start = [clang_tidy_binary]
# Show warnings in all in-project headers by default.
start.append('-header-filter=src/')
if checks:
start.append('-checks=' + checks)
start.append('-p=' + build_path)
if quiet:
start.append('-quiet')
if config:
start.append('-config=' + config)
start.append(f)
return start | 50e95d612f08ec5762bd2d0689a4fcbe9c699a11 | 699,903 |
def dataset_pre_0_3(client):
"""Return paths of dataset metadata for pre 0.3.4."""
project_is_pre_0_3 = int(client.project.version) < 2
if project_is_pre_0_3:
return (client.path / 'data').rglob(client.METADATA)
return [] | 892732100f46c8ad727b91d63d8563181d7a9dbb | 699,906 |
import copy
def sanitize_slicing(slice_across, slice_relative_position):
"""
Return standardized format for `slice_across` and `slice_relative_position`:
- either `slice_across` and `slice_relative_position` are both `None` (no slicing)
- or `slice_across` and `slice_relative_position` are both lists,
with the same number of elements
Parameters
----------
slice_relative_position : float, or list of float, or None
slice_across : str, or list of str, or None
Direction(s) across which the data should be sliced
"""
# Skip None and empty lists
if slice_across is None or slice_across == []:
return None, None
# Convert to lists
if not isinstance(slice_across, list):
slice_across = [slice_across]
if slice_relative_position is None:
slice_relative_position = [0]*len(slice_across)
if not isinstance(slice_relative_position, list):
slice_relative_position = [slice_relative_position]
# Check that the length are matching
if len(slice_across) != len(slice_relative_position):
raise ValueError(
'The argument `slice_relative_position` is erroneous: \nIt should have'
'the same number of elements as `slice_across`.')
# Return a copy. This is because the rest of the `openPMD-viewer` code
# sometimes modifies the objects returned by `sanitize_slicing`.
# Using a copy avoids directly modifying objects that the user may pass
# to this function (and live outside of openPMD-viewer, e.g. directly in
# a user's notebook)
return copy.copy(slice_across), copy.copy(slice_relative_position) | 1f7c3a0f70ecfc2bc66434d3acde684b499bb35c | 699,907 |
def get_traitset_map(pop):
"""
Utility method which returns a map of culture ID's (hashes) and the trait set
corresponding to a random individual of that culture (actually, the first one
we encounter).
"""
traitsets = {}
graph = pop.agentgraph
for nodename in graph.nodes():
        traits = graph.node[nodename]['traits']  # networkx < 2.4 API; use graph.nodes[nodename] on newer versions
culture = pop.get_traits_packed(traits)
if culture not in traitsets:
traitsets[culture] = traits
return traitsets | c80f1f05e0dd5268990e62e6b87726d5349b53f7 | 699,911 |
def prefix(txt, pref):
"""
Place a prefix in front of the text.
"""
return str(pref) + str(txt) | e9b4efd78f9132f7855cccba84c8a2d4b58ae8bb | 699,912 |
def isinstance_all(iterable, class_or_tuple):
"""
Check if all items of an iterable are instance of
a class ou tuple of classes
>>> isinstance_all(['Hello', 'World'], str)
True
>>> isinstance_all([1, 'Hello'], (str, int))
True
>>> isinstance_all([True, 'Hello', 5], int)
False
"""
return all(
isinstance(obj, class_or_tuple)
for obj in iterable
) | 1ea1bf7d66e5436ac429123fef4b33ba92195292 | 699,914 |
import tarfile
import pathlib
def _strip_paths(tarinfo: tarfile.TarInfo) -> tarfile.TarInfo:
"""Ensure source filesystem absolute paths are not reflected in tar file."""
original_path = pathlib.Path(tarinfo.name)
tarinfo.name = f"{original_path.name}"
return tarinfo | 06e262f93b3c5d0b8beab36b2ae7320a2464ad5b | 699,922 |
import re
def re_cap(*regexes):
"""
Capture first of the supplied regex
:param regexes: list or regex strings
:return: captured string | None
"""
def go(string):
for reg in regexes:
matches = re.search(reg, string)
if matches:
return matches.group(1)
return None
return go | 8029a2d475c43b0873e676ba4049e970a2077664 | 699,926 |
def set_crs(gdf, crs):
"""
Set CRS in GeoDataFrame when current projection is not defined.
    Parameters
    ----------
    gdf : geopandas.GeoDataFrame
        the geodataframe to set the projection
    crs : value accepted by geopandas.GeoDataFrame.set_crs
        the coordinate reference system to assign (e.g. an EPSG code)
    Returns
    -------
    gdf : geopandas.GeoDataFrame
        the geodataframe with the projection defined """
gdf = gdf.set_crs(crs)
return gdf | 77fc8f303882116fb450149c61332879fb28f6db | 699,931 |
def GetDistinguishableNames(keys, delimiter, prefixes_to_remove):
"""Reduce keys to a concise and distinguishable form.
Example:
GetDistinguishableNames(['Day.NewYork.BigApple', 'Night.NewYork.BigMelon'],
'.', ['Big'])
results in {'Day.NewYork.BigApple': 'Day.Apple',
'Night.NewYork.BigMelon': 'Night.Melon'}.
If a key has all parts commonly shared with others, then include
the last shared part in the names. E.g.,
    GetDistinguishableNames(['Day.NewYork', 'Day.NewYork.BigMelon'],
    '.', ['Big'])
    results in {'Day.NewYork': 'NewYork',
    'Day.NewYork.BigMelon': 'NewYork.Melon'}.
Args:
keys: The list of strings, each is delimited by parts.
delimiter: The delimiter to separate parts of each string.
prefixes_to_remove: The list of prefix strings to be removed from the parts.
Returns:
short_names: A dictionary of shortened keys.
"""
def RemovePrefix(part, prefixes_to_remove):
for prefix in prefixes_to_remove:
if part.startswith(prefix):
return part[len(prefix):]
return part
key_part_lists = [key.split(delimiter) for key in keys]
shortest_length = min(len(part_list) for part_list in key_part_lists)
# common_part[i] = True if all parts at position i are the same across keys.
common_part = [True] * shortest_length
for part_list in key_part_lists[1:]:
for i in range(shortest_length):
if part_list[i] != key_part_lists[0][i]:
common_part[i] = False
# The prefix list to add if one of the key happens to be the concatenation of
# all common parts.
prefix_list = ([key_part_lists[0][shortest_length - 1]]
if all(common_part) else [])
short_names = {}
for key, part_list in zip(keys, key_part_lists):
short_names[key] = delimiter.join(prefix_list + [
RemovePrefix(part, prefixes_to_remove)
for n, part in enumerate(part_list)
if n >= shortest_length or not common_part[n]])
return short_names | 13cc78b172d0ae074fa3bfa3d9ff93f5877c557d | 699,933 |
import binascii
import re
def humanhexlify(data, n=-1):
"""Hexlify given data with 1 space char btw hex values for easier reading for humans
:param data: binary data to hexlify
:param n: If n is a positive integer then shorten the output of this function to n hexlified bytes.
Input like
'ab\x04ce'
becomes
'61 62 04 63 65'
With n=3 input like
data='ab\x04ce', n=3
becomes
'61 62 04 ...'
"""
tail = b' ...' if 0 < n < len(data) else b''
if tail:
data = data[:n]
hx = binascii.hexlify(data)
return b' '.join(re.findall(b'..', hx)) + tail | 883323524ecc8b9f55138d290a38666e5c06bac3 | 699,934 |
def make_arc_consistent(Xj, Xk, csp):
"""Make arc between parent (Xj) and child (Xk) consistent under the csp's constraints,
by removing the possible values of Xj that cause inconsistencies."""
# csp.curr_domains[Xj] = []
for val1 in csp.domains[Xj]:
keep = False # Keep or remove val1
for val2 in csp.domains[Xk]:
if csp.constraints(Xj, val1, Xk, val2):
# Found a consistent assignment for val1, keep it
keep = True
break
if not keep:
# Remove val1
csp.prune(Xj, val1, None)
return csp.curr_domains[Xj] | 12f75686cf18fdb9b976f36c7e985593bc0aaf10 | 699,936 |
def get_reference_data(p):
"""Summarise the bibliographic data of an article from an ADS query
Returns dict of 'author' (list of strings), 'title' (string), and
'ref' (string giving journal, first page, and year).
"""
data = {}
try:
data['author'] = p.author
except:
data['author'] = 'Anon'
try:
data['title'] = p.title
except:
data['title'] = 'Untitled'
try:
refstring = p.pub
except:
refstring = 'Unknown'
try:
refstring += f' {p.volume}, {p.page[0]}'
except:
pass
try:
refstring += f' ({p.year})'
except:
pass
data['ref'] = refstring
return data | f8f792d15bade96881e87c3a7e1047dddc125d15 | 699,954 |
def _parse_config_args(args):
"""
Parse stub configuration arguments, which are strings of the form "KEY=VAL".
`args` is a list of arguments from the command line.
Any argument that does not match the "KEY=VAL" format will be logged and skipped.
Returns a dictionary with the configuration keys and values.
"""
    config_dict = dict()
    for config_str in args:
        components = config_str.split('=')
        if len(components) >= 2:
            # Re-join the remainder so values may themselves contain "=".
            config_dict[components[0]] = "=".join(components[1:])
        else:
            print(f"Warning: could not interpret config value '{config_str}'")
    return config_dict | 6295f95ae798445d94ab4575bbb88a57b03b5df4 | 699,957 |
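Usage; values containing "=" are kept intact, and malformed arguments are skipped with a warning:

config = _parse_config_args(["HOST=localhost", "TOKEN=a=b", "bogus"])
assert config == {"HOST": "localhost", "TOKEN": "a=b"}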
def add_pylivetrader_imports(code: str) -> str:
"""
add all imports from the pylivetrader api
"""
imports = """
from pylivetrader.api import *
\r\n
"""
return imports + code | 8077a28dff60e00c2f44fe2aaf5a226a5ae15c7c | 699,959 |
import re
def what_lang(filename):
"""
Judge what language the file is written by from filename extention.
"""
langs=[('c|cc|cpp|h', 'c/c++'),
('java', 'java'),
('sh', 'sh'),
('pl', 'perl'),
('rb', 'ruby'),
('py', 'python'),
('xml', 'xml'),
('txt|md', 'txt')]
for lang in langs:
reg = r'.+\.(' + lang[0] + r')$'
if re.match(reg, filename):
return lang[1]
return 'default' | 02316266746bfc59cbdc3ad4b84a8be667158109 | 699,963 |
def get_text(xml, tag):
"""Return the text from a given tag and XML element.
"""
elem = xml.find(tag)
    if elem is not None and elem.text is not None:
        return elem.text.strip() | ece7c28a98f8bf61a3d182a2109875b6a031dbaa | 699,964 |
from typing import Dict
from typing import Any
from typing import Iterable
from typing import Hashable
def get_in(
dictionary: Dict[Any, Any],
keys: Iterable[Hashable],
default: Any = None,
) -> Any:
"""Traverses a set of nested dictionaries using the keys in
kws, and returns the value assigned to the final keyword
in the innermost dictionary. Calling get_in(d, [X, Y])
is equivalent to calling d.get(X).get(Y), with the
difference that any missing keys causes the default value
to be returned.
    Behavior on non-dictionaries is undefined."""
keys = list(keys)
for key in keys[:-1]:
try:
dictionary = dictionary[key]
except KeyError:
return default
return dictionary.get(keys[-1], default) | af8d88f7f0a9f8af6f201e6d626392aec3f94864 | 699,965 |
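Usage; a missing key anywhere along the path falls back to the default:

d = {"a": {"b": {"c": 1}}}
assert get_in(d, ["a", "b", "c"]) == 1
assert get_in(d, ["a", "x", "c"], default=0) == 0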
def _unique_in_order(seq):
"""
    Utility to preserve order while making a set of unique elements.
Copied from Markus Jarderot's answer at
https://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-whilst-preserving-order
Args:
seq : sequence
Returns:
unique_list : list
List of unique elements in their original order
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))] | e02624dee4275d79dbce8dd67b07429ba3828e86 | 699,967 |
def extra_node_ids(a, b):
"""
Return list of nodes in a which aren't in b by id attribute.
:param etree.ElementTree a: ElementTree representing first SVG document.
    :param etree.ElementTree b: ElementTree representing second SVG document.
"""
ids_a = set([ el.attrib['id'] for el in a.iter() ])
ids_b = set([ el.attrib['id'] for el in b.iter() ])
return ids_a - ids_b | 94728ab3600df1ab31bd2c72e4712eb3540baa5a | 699,968 |
from typing import Iterable
def get_score(bits: Iterable[int]) -> int:
"""Get the score.
For each "1" add 1 for each "0" takeaway 1
"""
return sum(-1 if bit == 0 else 1 for bit in bits) | 440e831ca4d02be57a3e394d268b95f7af5fa22e | 699,969 |
def get_cli_fname(lon, lat, scenario=0):
"""Get the climate file name for the given lon, lat, and scenario"""
    # Round to two decimals so nearby inputs map to the same climate file name.
lon = round(lon, 2)
lat = round(lat, 2)
return "/i/%s/cli/%03ix%03i/%06.2fx%06.2f.cli" % (
scenario,
0 - lon,
lat,
0 - lon,
lat,
) | 2310bcfc10c2ae5b80d67bed147625e365388fff | 699,971 |
def conv_repoids_to_list(repo_ids):
"""
Convert repo ids seperated by "\n" to list.
"""
if not repo_ids:
return []
repoid_list = []
for repo_id in repo_ids.split("\n"):
if repo_id == '':
continue
repoid_list.append(repo_id)
return repoid_list | 6a76a8ae4f565ac27839478f068f9e9a13276263 | 699,977 |
def rename(term):
"""
Re-format feature terms after they've been formated by the vectorizer.
Parameters:
----------
term : str
Mutilated term in string format.
Returns
-------
str
The normalised term.
"""
term = term.upper()
if 'IPR' in term:
return term
elif 'PF' in term:
return term
elif 'GO' in term:
return term.replace("GO", 'GO:').replace('::', ':')
else:
return term | ec7f963ea37a0057f9a5b92ed3f4d9fc37167d17 | 699,981 |
def bpstr(ts, accum):
"""Make a string representation of this breakpoint and accumulation"""
return "%02i.%02i %6.2f" % (ts.hour, ts.minute / 60.0 * 100.0, accum) | bd2ae124b5ef094ea7927124b86f100749bf0405 | 699,984 |
def find_direction(start, end):
"""
Find direction from start to end
"""
if start[0] == end[0]:
if start[1] < end[1]:
return 5
else:
return 1
elif start[1] == end[1]:
if start[0] < end[0]:
return 3
else:
return 7
elif start[0] < end[0]:
if start[1] < end[1]:
return 4
else:
return 2
elif start[0] > end[0]:
if start[1] < end[1]:
return 6
else:
return 8 | ff282de669832159d236cd5fe805b1832b990bb6 | 699,999 |
def _pad_digits(text: str) -> str:
"""A str method with hacks to support better lexicographic ordering.
The output strings are not intended to be human readable.
The returned string will have digit-runs zero-padded up to at least 8
digits. That way, instead of 'a10' coming before 'a2', 'a000010' will come
after 'a000002'.
Also, the original length of each digit-run is appended after the
zero-padded run. This is so that 'a0' continues to come before 'a00'.
"""
was_on_digits = False
last_transition = 0
chunks = []
def handle_transition_at(k):
chunk = text[last_transition:k]
if was_on_digits:
chunk = chunk.rjust(8, '0') + ':' + str(len(chunk))
chunks.append(chunk)
for i in range(len(text)):
on_digits = text[i].isdigit()
if was_on_digits != on_digits:
handle_transition_at(i)
was_on_digits = on_digits
last_transition = i
handle_transition_at(len(text))
return ''.join(chunks) | 7e842669747919a3bbc9fd40e45a4bfc7641cc3a | 700,004 |
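Usage as a sort key, reproducing both properties promised in the docstring:

assert sorted(["a10", "a2"], key=_pad_digits) == ["a2", "a10"]
assert sorted(["a00", "a0"], key=_pad_digits) == ["a0", "a00"]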
from typing import List
def get_missing_settings(settings_class) -> List[str]:
"""Used to validate required settings.
Verifies that all attributes which don't start with ``_`` and aren't named
in ``_optional_settings`` are not set to None.
Args:
settings_class: The global settings class to validate settings on.
Returns: List of setting names that should not be ``None``. If the list is
empty then all required settings are defined.
"""
missing_settings = []
for attr in vars(settings_class):
if attr.startswith("_") or \
attr in getattr(settings_class, "_optional_settings", ()):
continue
if getattr(settings_class, attr) is None:
missing_settings.append(attr)
return missing_settings | efbb2dc3078fc5221e8ce327b078d42eff167d65 | 700,005 |
def _check_equal_list(iterator):
""" Check that all elements in list are equal """
return len(set(iterator)) <= 1 | 94bd94a203819965d95105e7f978ecb496ce97bc | 700,006 |
def find_layer(model, layer_class):
"""
Find all layers in model that are instances of layer_class
"""
layers = []
for layer in model.layers:
if isinstance(layer, layer_class):
layers.append(layer)
elif hasattr(layer, "layers"):
# search in nested layers
layers += find_layer(layer, layer_class)
return layers | d240d916f26e087edb7ccef8006b91b9c539bd76 | 700,009 |
def in_region(pos, regions):
"""Find whether a position is included in a region.
Parameters
----------
pos : int
DNA base position.
regions : list of tuples
List of (start, end) position integers.
Returns
-------
bool
True if the position is within an of the regions, False otherwise.
Examples
--------
# Empty list
>>> in_region(1, [])
False
# In list
>>> in_region(10, [(3, 5), (9, 12)])
True
# Not in list
>>> in_region(10, [(3, 5), (11, 12)])
False
"""
for region in regions:
if (pos >= region[0]) and (pos <= region[1]):
return True
return False | 07154584fe3fadf93f16bf858810e4484828eb31 | 700,014 |
import six
def zero_lpad(number, length):
"""
Fill 0 on the left of number
:param number: number to be padded
:param length: length of result string
:return:
"""
    if six.PY3:
        return str(number).rjust(length, '0')
return '{number:0>{length}}'.format(number=number, length=length) | d8682249a7094e2c0de9ff015df3c1c7525b7427 | 700,017 |
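Usage; with the left-padding fix above, both branches agree:

assert zero_lpad(42, 5) == "00042"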
def line0(x,a):
"""
Straight line through origin: a*x
Parameters
----------
x : float or array_like of floats
independent variable
a : float
first parameter
Returns
-------
float
function value(s)
"""
return a*x | 247a9ac56418ec34089bab0d9a914c69eb216f31 | 700,021 |
import json
def to_json(msg):
"""Pretty-print a dict as a JSON string
Use Unicode and 2-space indents.
"""
return json.dumps(msg, ensure_ascii=False, indent=2) | e10bf04ce54482f1892aa7a7452a7004024ea6d4 | 700,022 |
import inspect
def _filter_module_all(path, root, children):
"""Filters module children based on the "__all__" arrtibute.
Args:
path: API to this symbol
root: The object
children: A list of (name, object) pairs.
Returns:
`children` filtered to respect __all__
"""
del path
if not (inspect.ismodule(root) and hasattr(root, '__all__')):
return children
module_all = set(root.__all__)
children = [(name, value) for (name, value) in children if name in module_all]
return children | 9cbc86a6a0321722910fb2f48b8857d5d6488511 | 700,026 |
def contains_pattern(input: str, pattern: str) -> bool:
"""Check if the `input` contains all signals of the given `pattern`"""
assert len(input) >= len(pattern)
return all([p in input for p in pattern]) | 0fd2b5d35145fe21f855358c061995e04ad695a9 | 700,027 |
def nbest_oracle_eval(nbest, n=None):
"""Return the evaluation object of the best sentence in the nbest list."""
return nbest.oracle_hyp(n=n).eval_ | fe7641f6ccbaae7d85f620f4772e3a8b506880f5 | 700,034 |
def reaction_class_from_data(class_typ, class_spin,
class_radrad, class_isc):
""" Build a full-class description including the following useful
descriptors of the reaction class:
typ: type of reaction (e.g., abstraction, addition, migration)
spin: whether the reaction occurs on a low- or high-spin state
radrad: whether the reactants or products are two-or-more radicals
:param class_typ: reaction type designation
:type class_typ: str
:param class_spin: spin type designation
:type class_spin: str
:param class_radrad: radical-radical type designation
:type class_radrad: bool
:param class_isc: intersystem-crossing designation
:type class_isc: bool
:rtype: (str, str, bool, bool)
"""
return (class_typ, class_spin, class_radrad, class_isc) | 7ab3b7713c252e4dc3f2f9410d0021f24141a901 | 700,036 |
import pickle
def get_one_hot_encodings(filepath='../data/one-hot.pkl'):
"""
Gets the one_hot encodings of the verses of the Quran, along with mappings of characters to ints
:param filepath: the filepath to the one_hot encoding pickled file
:return:
"""
with open(filepath, 'rb') as one_hot_quran_pickle_file:
one_hot_obj = pickle.load(one_hot_quran_pickle_file)
return one_hot_obj | f255ba44018ae1d38694ba12ad9e733ac4fb433f | 700,037 |
def flatten(lst):
"""Flatten a nested list lst"""
return [i for subl in lst for i in subl] | 5835f05ca6b098c096fdb2bbface034a3c3bee26 | 700,041 |
import random
def extract_words(text, word_count):
"""
Extract a list of words from a text in sequential order.
:param text: source text, tokenized
:param word_count: number of words to return
:return: list list of words
"""
text_length = len(text)
if word_count > text_length:
raise RuntimeError('Cannot extract {} words from a text of {} words.'.format(word_count, text_length))
    # Determine a random start index; guard the edge case where the whole text
    # is requested (randrange(0) would raise ValueError).
    max_range = text_length - word_count
    start_range = random.randrange(max_range) if max_range > 0 else 0
return text[start_range:start_range + word_count] | f84f8b4148380d6c6e29dc0742e42481dda2d11a | 700,042 |
def rgb_to_hex(color):
"""Helper function for converting RGB color to hex code
Args:
color (list): List of R,G,B value
Returns:
str: Hex code for the RGB value
"""
r,g,b = color
#print('%02x%02x%02x' % (r,g,b))
return '#%02x%02x%02x' % (r,g,b) | 9308fa029cb2bfd75495c92a2e145f3439e3b60b | 700,045 |
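Usage:

assert rgb_to_hex([255, 0, 128]) == "#ff0080"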
def sort_queryset(queryset, request, allowed_sorts, default=None):
""" Sorts the queryset by one of allowed_sorts based on parameters
'sort' and 'dir' from request """
sort = request.GET.get('sort', None)
if sort in allowed_sorts:
direction = request.GET.get('dir', 'asc')
sort = ('-' if direction == 'desc' else '') + sort
queryset = queryset.order_by(sort)
elif default:
queryset = queryset.order_by(default)
return queryset | 7d4ef00e0d345d4636caaa9ca69ade0a09e33ea4 | 700,046 |
def extract_path_from_filepath(file_path):
"""
ex: 'folder/to/file.txt' returns 'folder/to/'
:param file_path:
:return:
"""
st_ind=file_path.rfind('/')
foldern = file_path[0:st_ind]+'/'
return foldern | 7014ac6d4fa47edff3315f7e23688cfe2e28a820 | 700,047 |
def fname_to_string(fname):
"""Return given file as sring
Parameters
----------
fname : str
absolute path to file.
"""
with open(fname) as fid:
string = fid.read()
return string | f9a3f94dc4a63c27cadb5c5f9a41eaa942332937 | 700,048 |
def global_color_table(color_depth, palette):
"""
Return a valid global color table.
The global color table of a GIF image is a 1-d bytearray of the form
[r1, g1, b1, r2, g2, b2, ...] with length equals to 2**n where n is
the color depth of the image.
----------
Parameters
color_depth: color depth of the GIF.
palette: a list of rgb colors of the format [r1, g1, b1, r2, g2, b2, ...].
The number of colors must be greater than or equal to 2**n where n is
the color depth. Redundant colors will be discarded.
"""
    try:
        palette = bytearray(palette)
    except (TypeError, ValueError):
        raise ValueError('Cannot convert palette to bytearray.')
valid_length = 3 * (1 << color_depth)
if len(palette) < valid_length:
raise ValueError('Invalid palette length.')
if len(palette) > valid_length:
palette = palette[:valid_length]
return palette | 4fc8b0cad668724b0a6d5735f70dcb16b6b9b140 | 700,050 |
import math
def arclen(angle, radius, rad=False):
"""Calculates the size of an arc of a circle"""
if rad:
angle = math.degrees(angle)
return (angle / 360) * (2 * math.pi * radius) | c94e3a0f838a4ee635da4997da3ac89867d03366 | 700,051 |
import hashlib
def hash_seqs(sequences):
"""
Generates hexdigest of Sha1 hash for each sequence in a list of sequences.
This function is useful for generating sequence specific identifiers that allow for easier comparison of features
from multiple sequencing runs or sequence processing runs.
"""
new_sequences = list()
for seq in sequences:
# get sequence string and encode using UTF-8 for consistency
seq = seq.encode('UTF-8')
# get sha1 hash of sequence
hash_ = hashlib.sha1()
hash_.update(seq)
hash_hex = hash_.hexdigest()
new_sequences.append(hash_hex)
return new_sequences | 35c3291a58ebc7e053250f7234faacd0356f7df5 | 700,055 |
def get_paths(link, nb):
"""
    Generate a list containing all URLs.
    Args:
        link (str): Base HTML link
        nb (int): Number of team ids appended to the link per season
    Returns:
        url (list): List containing all generated URLs
"""
url = []
for si in range(2000, 2020):
for ti in range(1, nb+1):
result = link + str(si) + "-" + str(si+1) + "&teamId=" + str(ti)
url.append(result)
return url | 8fd0a947eeb5435f0df48dc928feb3a10786c2cc | 700,056 |
def heaviside(x, bias=0):
"""
Heaviside function Theta(x - bias)
returns 1 if x >= bias else 0
    :param x: input value
    :param bias: threshold offset
    :return: the indicator value (0 or 1)
"""
indicator = 1 if x >= bias else 0
return indicator | b325a862cbc2cac97b8e4808c6d77b54a0f1d643 | 700,057 |
def biggest_differences_words(prunedTable):
""" Finds the words that are most different from their most frequent alternative across each semantic dimension
Parameters
----------
prunedTable : a data frame
The data frame representing arousal, valence, and dominance ratings for words and their most frequent
alternatives across time within a paragraph
Returns
-------
a dictionary mapping from a semantic dimension to row indexing information about the word with the greatest
difference for that dimension
"""
prunedTable = prunedTable.assign(absADiff = (prunedTable['WordA'] - prunedTable['AltA']).abs(),
absVDiff = (prunedTable['WordV'] - prunedTable['AltV']).abs(),
absDDiff = (prunedTable['WordD'] - prunedTable['AltD']).abs())
biggestDifferencesWords = {'Arousal': prunedTable.loc[prunedTable['absADiff'].idxmax()],
'Valence': prunedTable.loc[prunedTable['absVDiff'].idxmax()],
'Dominance': prunedTable.loc[prunedTable['absDDiff'].idxmax()]}
return biggestDifferencesWords | 2b39a717fbdf7d823a381ff3320e2ac487f65ec3 | 700,058 |
def build_help_text(command_class):
"""Generate help text from a command class."""
command = command_class()
parser = command.create_parser({})
return parser.format_help() | 3fc8491e37db2f0b96144ad0b34723ceb71a51ca | 700,060 |
import io
def read_names(f):
"""Read names, specified one per line, from a file."""
return (line.strip() for line in io.open(f, 'r', encoding='utf-8')) | 98724005ef5c647aa31205bc8afd07da50ece002 | 700,061 |
def format_cols_2digit(df, skip_last_col=True):
"""Formats a dataframes columns so that numbers are always two-digits (padded with 0)
Parameters
----------
df : pandas DataFrame
Input DataFrame.
skip_last_col : bool
A special case, where the very last column contains text, rather than a number, and should be excluded.
Returns
-------
reformatted_cols : list
The DataFrame columns, reformatted with padded 0 to make 2 digits.
"""
if skip_last_col:
# reformat the columns to be padded stringnumbers. (exclude the last "Contains_Data")
reformatted_cols = ["%02d" % col for col in df.columns[:-1]]
# add last column back to list
reformatted_cols.append(df.columns[-1])
else:
        # reformat all columns to be zero-padded string numbers.
reformatted_cols = ["%02d" % col for col in df.columns]
return reformatted_cols | 5c9bc98ed6298f8f3d181432320cc69dc4c30ea2 | 700,062 |
def reverse_complement_no_loops(seq):
"""Return WC complement of a base without loops"""
# Initialize rev_seq to a lowercase seq
rev_seq = seq.lower()
# Substitute bases
rev_seq = rev_seq.replace('t','A')
rev_seq = rev_seq.replace('a','T')
rev_seq = rev_seq.replace('g','C')
rev_seq = rev_seq.replace('c','G')
return rev_seq[::-1] | 2a94d38827cbda95272776b0bea652af9d30b64d | 700,064 |
from collections import OrderedDict
def update(data_df, cal_dict, param, bound, start, end):
"""Update calibration times for give parameter and boundary"""
if param not in cal_dict["parameters"]:
cal_dict["parameters"][param] = OrderedDict()
if bound not in cal_dict["parameters"][param]:
cal_dict["parameters"][param][bound] = OrderedDict()
cal_dict["parameters"][param][bound]["start"] = start
cal_dict["parameters"][param][bound]["end"] = end
return cal_dict | f8c63a528ad6533938f215179c17bd236eefeb67 | 700,065 |
def viaCombusta(obj):
""" Returns if an object is in the Via Combusta. """
return 195 < obj.lon < 225 | 531ab5b5725bf3ed6dc8c8ff99c3980b3533e558 | 700,066 |
def copresence(acc, w1, w2):
"""Results 1 if a pair of figures is on stage at the same time, and 0
otherwise."""
return int(acc + w1 + w2 > 0) | 3056b25df4a59bc421a3aec3d33e25db8ccb98bd | 700,068 |
def gen_bool_parse(val):
"""Convert a string, as a human might type it, to a boolean. Unrecognized
values raise an exception.
"""
val = val.strip()
if not val:
return False
    try:
        return bool(int(val))
    except ValueError:
        pass
ch = val[0]
if ch in {'t', 'T', 'y', 'Y'}:
return True
if ch in {'f', 'F', 'n', 'N'}:
return False
raise ValueError('"%s" does not look like a boolean' % (val,)) | 3c1944c7633f329848569c6cb29af11e145dfa5c | 700,070 |
import glob
def _expand_glob_path(file_roots):
"""
Applies shell globbing to a set of directories and returns
the expanded paths
"""
unglobbed_path = []
for path in file_roots:
try:
if glob.has_magic(path):
unglobbed_path.extend(glob.glob(path))
else:
unglobbed_path.append(path)
except Exception:
unglobbed_path.append(path)
return unglobbed_path | f765e3063f098d2bf185df619783b428b192b37a | 700,071 |
def nth_triangle_number(n):
"""
Compute the nth triangle number
"""
return n * (n + 1) // 2 | 76ebd412200a04ae8a1bf5c4d18122db01cee526 | 700,073 |
import re
def check_ignore(item, ignores=[]):
"""
take a string (item)
and see if any of the strings in ignores list are in the item
if so ignore it.
"""
ignore = False
for i in ignores:
if i and re.search(i, str(item)):
# print "ignoring item: %s for ignore: %s" % (item, i)
ignore = True
return ignore | 0d31b2ef2ddbe48a4de7f743c412b1a72a19b774 | 700,074 |
def all_segments(N):
"""
    Return (start, end) pairs of indexes that form segments of a tour of length N
"""
return [(start, start + length)
for length in range(N, 2-1, -1)
for start in range(N - length + 1)] | d1b70d4f52c930e97ff82920abbf49f5b6d0af56 | 700,075 |
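Usage; segments are ordered longest first, so the full tour precedes its sub-segments:

assert all_segments(3) == [(0, 3), (0, 2), (1, 3)]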
import torch
def epe(input_flow, target_flow):
"""
End-point-Error computation
Args:
        input_flow: estimated flow [Bx2xHxW] (the norm is taken over dim=1, the channel axis)
        target_flow: ground-truth flow [Bx2xHxW]
Output:
Averaged end-point-error (value)
"""
return torch.norm(target_flow - input_flow, p=2, dim=1).mean() | ff68a331c1f3323585c6a351b4a3da50209ab9b9 | 700,077 |