content | sha1 | id |
---|---|---|
def inside(resource1, resource2):
"""Is ``resource1`` 'inside' ``resource2``? Return ``True`` if so, else
``False``.
    ``resource1`` is 'inside' ``resource2`` if ``resource2`` is a
    :term:`lineage` ancestor of ``resource1``, i.e. if ``resource2``
    is ``resource1``'s parent, or one of its parent's parents, and
    so on.
"""
while resource1 is not None:
if resource1 is resource2:
return True
resource1 = resource1.__parent__
return False | 906a05912bba8b299e42fdb3a3b4547a1b160bb4 | 17,011 |
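A minimal usage sketch (the `Res` class and the parent chain below are hypothetical, made up just to illustrate the call):

class Res:
    """Toy resource exposing the __parent__ attribute the function expects."""
    def __init__(self, parent=None):
        self.__parent__ = parent

root = Res()
child = Res(parent=root)
assert inside(child, root) is True   # root is an ancestor of child
assert inside(root, child) is False  # but not the other way around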
def ask(question, no_input=False):
"""Display a Y/n question prompt, and return a boolean"""
    if no_input:
        return True
    while True:
        input_ = input('%s [Y/n] ' % question)
        input_ = input_.strip().lower()
        if input_ in ('y', 'yes', ''):
            return True
        if input_ in ('n', 'no'):
            return False
        print('Invalid selection') | b7eed52f3fa3eb65ed99d2076cce6520489269a1 | 17,013 |
def electrical_mobility_from_D(D, charge, T, constants=None, units=None):
"""
Calculates the electrical mobility through Einstein-Smoluchowski relation.
Parameters
----------
D: float with unit
Diffusion coefficient
charge: integer
charge of the species
T: float with unit
Absolute temperature
    constants: object (optional, default: None)
        if None:
            SI values are used for the Boltzmann constant and the
            elementary charge (T is then assumed to be in Kelvin)
        else:
            attributes accessed: Boltzmann_constant, elementary_charge.
            Tip: pass quantities.constants
units: object (optional, default: None)
attributes accessed: meter, Kelvin and mol
Returns
-------
Electrical mobility
"""
if constants is None:
kB = 1.38064852e-23
e = 1.60217662e-19
if units is not None:
kB *= units.joule / units.kelvin / units.mol
e *= units.coulomb
else:
kB = constants.Boltzmann_constant
e = constants.elementary_charge
return D*charge*e/(kB*T) | ec6d60ead515baf0a2faad59661f94067a7f3f7f | 17,015 |
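As a rough sanity check with plain floats (constants=None, units=None, SI values assumed): a typical small-ion diffusion coefficient of 1e-9 m^2/s with charge +1 at 298 K gives a mobility of about 3.9e-8 m^2/(V*s):

mu = electrical_mobility_from_D(1e-9, 1, 298)
print('%.3e' % mu)  # ~3.894e-08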
def get_index_by_node_id(data):
""" Indexes a Dynalist data object by node for easy navigation. """
index = {}
for node in data["nodes"]:
index[node["id"]] = node
return index | 2a630c7468258625c9a3193e6b9906ad23293375 | 17,016 |
import yaml
def dump(data):
"""
Dump a YAML file.
"""
return yaml.dump(data) | 37845ceb70fa0fddcbf6a2fbbec51212bc70c897 | 17,017 |
def subset_on_taxonomy(dataframe, taxa_level, name):
"""
    Return only rows of the dataframe where the value in column taxa_level
    matches the specified name.
    :param dataframe: Pandas DataFrame with columns like 'Kingdom',
     'Phylum', 'Class', ...
    :param taxa_level: a taxonomic rank label such as "Genus" or "Order"
:param name: taxa_level name to match
:return: subset of Pandas DataFrame matching the selection
"""
#print(dataframe.columns)
return dataframe[dataframe[taxa_level] == name] | a1d72a96b277791d677e2bf81073ed7f4daa423f | 17,021 |
from typing import Optional
from typing import Union
from datetime import datetime
def fix_time_retrieved(s: str) -> Optional[Union[str, datetime]]:
"""
Fixes timestamps of the format: 15-Jul-2021 (22:29:25.643316)
"""
if not s or s == "None":
return s
return datetime.strptime(s, "%d-%b-%Y (%H:%M:%S.%f)") | 6d6a163490bbfe312c4ca1a4e2508ba1f71f096d | 17,027 |
def effective_stiffness_from_base_shear(v_base, disp):
"""
Calculates the effective stiffness based on the base shear and displacement.
Typically used in displacement based assessment
    :return: effective stiffness, v_base / disp
"""
return v_base / disp | a6b48e4dc970c19d0cab3d3c798633512ca62d9a | 17,028 |
def expand_box(box, img_shape, scale=None, padding=None):
"""Expand roi box
Parameters
----------
box : list
[x, y, w, h] order.
img_shape : list
[width, height]
scale : float, optional
Expand roi by scale, by default None
padding : int, optional
Expand roi by padding, by default None
Returns
-------
expanded roi: list
[x, y, w, h] order.
"""
x, y, w, h = box
wmax, hmax = img_shape
if scale is not None:
xo = max([x - (scale - 1) * w / 2, 0])
yo = max([y - (scale - 1) * h / 2, 0])
wo = w * scale
ho = h * scale
elif padding is not None:
xo = max(x - padding, 0)
yo = max(y - padding, 0)
wo = w + padding * 2
ho = h + padding * 2
else:
xo, yo, wo, ho = x, y, w, h
if xo + wo >= wmax:
wo = wmax - xo - 1
if yo + ho >= hmax:
ho = hmax - yo - 1
return [int(xo), int(yo), int(wo), int(ho)] | 3fd9c97b8baa70a89b898d3e9d14e8c930d0045e | 17,029 |
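An illustrative call (image size and box values are arbitrary): scaling a 100x100 box by 2 doubles each side and shifts the origin so the box stays centred, while padding grows it by a fixed margin:

box = [50, 50, 100, 100]                         # x, y, w, h
print(expand_box(box, [640, 480], scale=2.0))    # [0, 0, 200, 200]
print(expand_box(box, [640, 480], padding=10))   # [40, 40, 120, 120]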
def parse_by_prefix(input_string, prefix, end_char=(" ",)):
    """Searches through input_string until it finds the prefix. Returns
    everything in the string between prefix and the next instance of
    end_char (or the end of the string). Returns None if the prefix is
    not found."""
    start = input_string.find(prefix)
    if start == -1:
        return None
    start += len(prefix)
    end = start
    while end < len(input_string) and input_string[end] not in end_char:
        end += 1
    return input_string[start:end] | 8cc80c9c359ae155ed4f8f197c1e9bd604cebf1d | 17,030 |
def FindPosition(point, points):
"""Determines the position of point in the vector points"""
if point < points[0]:
return -1
for i in range(len(points) - 1):
if point < points[i + 1]:
return i
return len(points) | 11ccabcade65053ccfa6751813d90a0eeaccc339 | 17,040 |
def check_digit10(firstninedigits):
"""Check sum ISBN-10."""
# minimum checks
if len(firstninedigits) != 9:
return None
try:
int(firstninedigits)
except Exception: # pragma: no cover
return None
# checksum
val = sum(
(i + 2) * int(x) for i, x in enumerate(reversed(firstninedigits)))
remainder = int(val % 11)
if remainder == 0:
tenthdigit = 0
else:
tenthdigit = 11 - remainder
if tenthdigit == 10:
tenthdigit = 'X'
return str(tenthdigit) | 33d8da015a471e5e9f29eb4c9b2b0173979d8130 | 17,044 |
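The classic valid ISBN-10 example 0-306-40615-2 makes a handy check: the first nine digits reproduce the published check digit:

assert check_digit10('030640615') == '2'  # ISBN 0-306-40615-2
assert check_digit10('12345') is None     # wrong length is rejected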
def _load_captions(captions_file):
"""Loads flickr8k captions.
Args:
captions_file: txt file containing caption annotations in
'<image file name>#<0-4> <caption>' format
Returns:
A dict of image filename to captions.
"""
f_captions = open(captions_file, 'rb')
captions = f_captions.read().decode().strip().split('\n')
data = {}
for row in captions:
row = row.split("\t")
row[0] = row[0][:len(row[0])-2]
        try:
            data[row[0]].append(row[1])
        except KeyError:
            data[row[0]] = [row[1]]
f_captions.close()
return data | 89a00a5befe1162eda3918b7b6d63046fccd4c70 | 17,046 |
from typing import List
def text_to_bits(
text: str,
encoding: str = "utf-8",
errors: str = "surrogatepass",
) -> List[int]:
"""
    Takes a string and returns its binary representation.
Parameters
----------
text: str
Any string.
Returns
-------
A list of 0s and 1s.
"""
bits = bin(int.from_bytes(text.encode(encoding, errors), "big"))[2:]
bits_list = []
for bit in bits.zfill(8 * ((len(bits) + 7) // 8)):
bits_list.append(int(bit))
return bits_list | cc9ab6497ab3b797625016176b74a6660ed59a80 | 17,047 |
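For a single ASCII character the result is its zero-padded 8-bit code; 'a' is 0x61 = 01100001:

assert text_to_bits('a') == [0, 1, 1, 0, 0, 0, 0, 1]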
def get_the_written_file_list(writefile_cursor):
"""Return the written files (W)."""
written_files_query = '''
SELECT process, name, mode
FROM opened_files
WHERE mode == 2
'''
writefile_cursor.execute(written_files_query)
return writefile_cursor.fetchall() | 8abe57fd88d569d5cf48b13dfbcfc142fa6c1504 | 17,049 |
def final_nonzero(L):
"""
Return the index of the last non-zero value in the list.
"""
for index, val in reversed(list(enumerate(L))):
if val:
return(index)
return(0) | 1064987732146a9f6c12a2cab1dc84d2657fa321 | 17,052 |
def pattern_count(text: str, pattern: str) -> int:
"""Count the number of occurences of a pattern within text
Arguments:
text {str} -- text to count pattern in
pattern {str} -- pattern to be counted within text
Returns:
int -- The number of occurences of pattern in the test
Example:
>>> pattern_count("ACAACTATGCATACTATCGGGAACTATCCT", "ACTAT")
3
"""
count = 0
pattern_size = len(pattern)
for i in range(len(text) - pattern_size + 1):
if text[i:i+pattern_size] == pattern:
count += 1
return count | e6fcd2f0645141a3ddf211facb49058deb6dc1fd | 17,055 |
def _Divide(x, y):
"""Divides with float division, or returns infinity if denominator is 0."""
if y == 0:
return float('inf')
return float(x) / y | dee5ef0c4160c45ee9c8ee6aee651d60c3e70252 | 17,067 |
def get_results(m):
"""
Extract model results as dict
Parameters
----------
m : Pyomo model instance
Model instance containing solution (post-solve)
Returns
-------
results : dict
Dictionary containing model results
"""
results = {
"x": m.x.value,
"y": m.y.value
}
return results | 1dcb35bac7fe2379b096bb2fd838ed53a7ebaca4 | 17,068 |
def get_number_coluna(janela, chuva_height):
"""Determina o numero de linhas com gotas que cabem na tela."""
availble_space_y = (janela[1] - (3 * chuva_height))
number_coluna = int(availble_space_y / (2 * chuva_height))
return number_coluna | a41c2ae23da33149c88a507cf900b9f8e2772622 | 17,074 |
def _readSatCatLine(line):
"""Returns the name, international designator (id), nad NORAD catalog number
(catNum) from a line in the satellite catalog.
"""
name = line[23:47].strip()
id = line[0:11].strip()
catNum = line[13:18].strip()
return name, id, catNum | 7d30ab9836f30cb7c10285ad86ca70cad7965b9c | 17,082 |
def _argsort(it, **kwargs):
"""
    Return a sorted version of the iterable `it`, together with the
    indices corresponding to the sort.
    Parameters:
    ------------
    - it : iterable
    - kwargs
        Keywords and values usable with the built-in function `sorted`.
    Returns:
    -----------
    - indexes : iterable of indices
        Indices of the elements of `it`, in the order in which those
        elements appear in `sortedit`.
    - sortedit : iterable
        Sorted version of `it`.
    Examples:
    ----------
>>> it = [2, 1, 3]
>>> indexes, sortedit = _argsort(it)
>>> indexes
(1, 0, 2)
>>> sortedit
(1, 2, 3)
>>> [it[x] for x in indexes]
[1, 2, 3]
>>> indexes, sortedit = _argsort(it, reverse=True)
>>> indexes
(2, 0, 1)
>>> sortedit
(3, 2, 1)
"""
indexes, sortedit = zip(*sorted(enumerate(it), key=lambda x: x[1],
**kwargs))
return indexes, sortedit | f36e0ac863c3861ba7f1e222ac3712c977364d98 | 17,090 |
def _create_postgres_url(db_user, db_password, db_name, db_host,
db_port=5432, db_ssl_mode=None,
db_root_cert=None):
"""Helper function to construct the URL connection string
Args:
db_user: (string): the username to connect to the Postgres
DB as
db_password: (string): the password associated with the
username being used to connect to the Postgres DB
db_name: (string): the name of the Postgres DB to connect
to
db_host: (string): the host where the Postgres DB is
running
        db_port: (number, optional): the port to connect to the
            Postgres DB at
db_ssl_mode: (string, optional): the SSL mode to use when
connecting to the Postgres DB
db_root_cert: (string, optional): the root cert to use when
connecting to the Postgres DB
Returns:
[string]: Postgres connection string
"""
ssl_mode = ''
if db_ssl_mode:
# see
# https://www.postgresql.org/docs/11/libpq-connect.html#
# LIBPQ-CONNECT-SSLMODE
ssl_mode = '?sslmode=%s' % (db_ssl_mode)
if db_root_cert:
ssl_mode += '&sslrootcert=%s' % (db_root_cert)
return ('postgresql://%(user)s:%(password)s@%(host)s:%(port)s/'
'%(db)s%(ssl)s' % {
'user': db_user,
'password': db_password,
'db': db_name,
'host': db_host,
'port': db_port,
'ssl': ssl_mode}) | f617f7f85545fcf2a1f60db8c9c43e0209c32c4f | 17,096 |
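A quick illustration with made-up credentials (not a real endpoint):

url = _create_postgres_url('alice', 's3cret', 'mydb', 'db.internal',
                           db_ssl_mode='require')
# 'postgresql://alice:s3cret@db.internal:5432/mydb?sslmode=require'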
def to_case_fold(word: str):
"""
    The casefold() method is an aggressive lower() method which
    converts strings to casefolded strings for caseless matching.
    The casefold() method removes all case distinctions
    present in a string. It is used for caseless matching
    (i.e. it ignores case when comparing).
    For example, the German lowercase letter ß is equivalent to ss.
    However, since ß is already lowercase, the lower() method
    does nothing to it. But casefold() converts it to ss.
:param word: the string to be casefolded
:return: case-folded string
"""
return word.casefold() | c917ab8661859ae29d8abecd9a7663b0b5112a63 | 17,099 |
import torch
def predict_raw(loader, model):
"""Compute the raw output of the neural network model for the given data.
Arguments
----------
loader : pyTorch DataLoader instance
An instance of DataLoader class that supplies the data.
model: subclass instance of pyTorch nn.Module
The class containing the network model to evaluate, as per convention.
Returns
----------
The network output tensor.
"""
model.eval()
out = []
for i, (input, target) in enumerate(loader):
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
with torch.no_grad():
input_var = torch.autograd.Variable(input)
# compute output
output = model(input_var)
out.append(output.data)
return out | cb812c0792629c46d5774d9f1f4090369e047b78 | 17,100 |
def funql_template_fn(target):
"""Simply returns target since entities are already anonymized in targets."""
return target | a5f95bd6b7feabb4826fff826e6638cd242e04d6 | 17,102 |
import torch
def to_tensor(im, dims=3):
""" Converts a given ndarray image to torch tensor image.
Args:
im: ndarray image (height x width x channel x [sample]).
dims: dimension number of the given image. If dims = 3, the image should
be in (height x width x channel) format; while if dims = 4, the image
should be in (height x width x channel x sample) format; default is 3.
Returns:
torch tensor in the format (channel x height x width) or (sample x
channel x height x width).
"""
assert (dims == 3 or dims == 4)
if dims == 3:
im = im.transpose((2, 0, 1))
elif dims == 4:
im = im.transpose((3, 2, 0, 1))
else:
raise NotImplementedError
return torch.from_numpy(im) | d19a0c0104f4dc9401f70235cadb7266ffd01332 | 17,103 |
def _get_size_verifier(min_x, min_y, mode):
"""
Depending on what the user wants, we need to filter image sizes differently.
This function generates the filter according to the user's wishes.
:param min_x: Minimal x-coordinate length of image.
:param min_y: Minimal y-coordinate length of image.
    :param mode: If equal to 'area': keep only images whose area is at least min_x*min_y.
                 If equal to 'individual': both sides of the image must be at least as large
                 as the given x and y sizes (long side is compared to long side, short to short).
                 Any other mode accepts every image.
    :returns: function that decides whether an image should be kept or discarded according to the size constraints.
"""
def by_area(width, height):
return width * height >= min_x * min_y
def by_both(width, height):
long_side = max(width, height)
short_side = min(width, height)
long_given = max(min_x, min_y)
short_given = min(min_x, min_y)
return long_given <= long_side and short_given <= short_side
def anything_goes(width, height):
return True
if mode == "area":
return by_area
elif mode == "individual":
return by_both
else:
return anything_goes | 86919399a94caa60ff780ccf5959fe2d43d6d2eb | 17,104 |
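Illustrative values: with minimum sides 640 and 720, 'individual' mode compares long side to long side and short to short, while 'area' mode only checks the product:

verify = _get_size_verifier(640, 720, 'individual')
print(verify(800, 600))  # False: short side 600 < 640
print(verify(800, 700))  # True: 800 >= 720 and 700 >= 640
print(_get_size_verifier(640, 720, 'area')(800, 600))  # True: 480000 >= 460800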
import random
def weighted(objs, key='weight', generator=random.randint):
"""Perform a weighted select given a list of objects.
:param objs: a list of objects containing at least the field `key`
:type objs: [dict]
:param key: the field in each obj that corresponds to weight
:type key: str
:param generator: a number generator taking two ints
:type generator: function(int, int) -> int
:return: an object
:rtype: dict
"""
acc = 0
lookup = []
# construct weighted spectrum
for o in objs:
# NOTE(cpp-cabrera): skip objs with 0 weight
if o[key] <= 0:
continue
acc += o[key]
lookup.append((o, acc))
# no objects were found
if not lookup:
return None
# NOTE(cpp-cabrera): select an object from the lookup table. If
# the selector lands in the interval [lower, upper), then choose
# it.
gen = generator
selector = gen(0, acc - 1)
lower = 0
for obj, upper in lookup:
if lower <= selector < upper:
return obj
lower = upper | ea8b0ada198ae26a7ac54092c10a11daba3d18e0 | 17,112 |
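A sketch of typical use (the weights are arbitrary): an entry with weight 3 is three times as likely to be drawn as one with weight 1, and zero-weight entries are never returned:

objs = [{'name': 'a', 'weight': 1},
        {'name': 'b', 'weight': 3},
        {'name': 'c', 'weight': 0}]
picked = weighted(objs)
assert picked['name'] in ('a', 'b')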
def get_parameter_list_from_parameter_dict(pd):
"""Takes a dictionary which contains key value pairs for model parameters and converts it into a list of
parameters that can be used as an input to an optimizer.
:param pd: parameter dictionary
:return: list of parameters
"""
pl = []
for key in pd:
pl.append(pd[key])
return pl | 38ab987fd2959c789f69a804f27e30bc86c7279b | 17,113 |
def all_accept_criteria(candidate_paraphrases, **kargs):
"""Always accept proposed words.
"""
return candidate_paraphrases, None | 745459e4fc432f666b2c763baafb69ea19a6181c | 17,114 |
import string
def camel_to_underscore(name):
""" convert a camel case string to snake case
"""
    for char in string.ascii_uppercase:
        name = name.replace(char, '_{0}'.format(char))
    # lstrip avoids a spurious leading underscore when the input starts upper-case
    return name.lower().lstrip('_') | db88bd3938073ec65e58344ba7228c75fef646a5 | 17,118 |
import inspect
def get_signature(obj):
"""
Get signature of module/class/routine
Returns:
A string signature
"""
    name = obj.__name__
    if inspect.isclass(obj):
        if hasattr(obj, "__init__"):
            signature = str(inspect.signature(obj.__init__))
            return "class %s%s" % (name, signature)
        return "class %s()" % name
    if inspect.ismodule(obj):
        return name
    signature = str(inspect.signature(obj))
    return name + signature | 9da9d7e431783b89a5e65b4940b118cd5538799c | 17,119 |
def abs(x):
"""
Computes the absolute value of a complex-valued input tensor (x).
"""
assert x.size(-1) == 2
return (x ** 2).sum(dim=-1).sqrt() | 3b3a23873923597767c35eb4b5f6da1bb054705b | 17,121 |
def gcd_looping_with_divrem(m, n):
"""
Computes the greatest common divisor of two numbers by getting remainder from division in a
loop.
:param int m: First number.
:param int n: Second number.
:returns: GCD as a number.
"""
while n != 0:
m, n = n, m % n
return m | 5b50692baa396d0e311b10f2858a1278a9366d09 | 17,122 |
def _round_bits(n: int, radix_bits: int) -> int:
"""Get the number of `radix_bits`-sized digits required to store a `n`-bit value."""
return (n + radix_bits - 1) // radix_bits | 3e03385ee69f28b11e63885a80af48faa337697a | 17,123 |
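For example, a 10-bit value needs four 3-bit (base-8) digits while a 9-bit value fits in three:

assert _round_bits(10, 3) == 4
assert _round_bits(9, 3) == 3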
import functools
def ignores(exc_type, returns, when=None):
"""Ignores exception thrown by decorated function.
When the specified exception is raised by the decorated function,
the value 'returns' is returned instead.
The exceptions to catch can further be limited by providing a predicate
which should return 'True' for exceptions that should be ignored.
Parameters
----------
exc_type : type
The exception type that should be ignored.
returns : T
The value that is returned when an exception is ignored.
when : callable, optional
A predicate that can be used to further refine
the exceptions to be ignored.
Examples
--------
Ignore all `ValueError`s:
>>> @ignores(ValueError, returns=1)
... def foo(e):
... raise e
>>> foo(ValueError)
1
>>> foo(TypeError)
Traceback (most recent call last):
...
TypeError
Ignore `ValueError`s with a specific message:
>>> @ignores(ValueError, returns=1, when=lambda e: str(e) == "Not so bad.")
... def bar(e):
... raise e
>>> bar(ValueError("Bad!"))
Traceback (most recent call last):
...
ValueError: Bad!
>>> bar(ValueError("Not so bad."))
1
"""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except exc_type as e:
if when is None or when(e):
pass
else:
raise e
return returns
return wrapper
return decorator | 0c839c73218124fb988cea95fb5ee73abe7d5833 | 17,127 |
def build_hsts_header(config):
"""Returns HSTS Header value."""
value = 'max-age={0}'.format(config.max_age)
if config.include_subdomains:
value += '; includeSubDomains'
if config.preload:
value += '; preload'
return value | 9f94d87b1949f5c9e2f898466a8f5191f2327357 | 17,128 |
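A sketch using a throwaway config object (any object exposing these three attributes works):

from types import SimpleNamespace
cfg = SimpleNamespace(max_age=31536000, include_subdomains=True, preload=False)
assert build_hsts_header(cfg) == 'max-age=31536000; includeSubDomains'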
def argv_to_module_arg_lists(args):
"""Converts module ldflags from argv format to per-module lists.
Flags are passed to us in the following format:
['global flag', '--module', 'flag1', 'flag2', '--module', 'flag 3']
    These should be returned as a list for the global flags and a list of
    per-module lists, i.e.:
    ['global flag'], [['flag1', 'flag2'], ['flag 3']]
"""
modules = [[]]
for arg in args:
if arg == '--module':
modules.append([])
else:
modules[-1].append(arg)
return modules[0], modules[1:] | 847597d09e56af4221792a9a176bddfea334e622 | 17,132 |
import time
def get_template_s3_resource_path(prefix, template_name, include_timestamp=True):
"""
Constructs s3 resource path for provided template name
    :param prefix: S3 base path (the part of the URL after hostname and port)
:param template_name: File name minus '.template' suffix and any timestamp portion
:param include_timestamp: Indicates whether to include the current time in the file name
:return string: Url of S3 file
"""
if include_timestamp:
key_serial = str(int(time.time()))
template_name += "." + key_serial
return "%s/%s.template" % (prefix, template_name) | 61f1ef829cbe83e1032dd5995cedf33a4809f787 | 17,140 |
import torch
def adj_to_seq(adj, device='cpu'):
"""
Convert a dense adjacency matrix into a sequence.
Parameters
----------
adj : torch.Tensor
The dense adjacency tensor.
device : str, optional
The device onto which to put the data. The default is 'cpu'.
Returns
-------
adj_seq : torch.Tensor
The sequence representing the input adjacency tensor.
"""
    B, N = adj.shape[0], adj.shape[1]
    adj_seq = torch.zeros(B, (N - 1) * N // 2).to(device)
    for b in range(B):
        for i in range(1, N):
            for j in range(i):
                # flatten the strictly lower triangle row by row
                adj_seq[b, i * (i - 1) // 2 + j] = adj[b, i, j]
    return adj_seq | 6b967962d5ba61a0ad45d5197ca23a7278fccca9 | 17,145 |
def makeUnique(seq):
    """ Removes duplicates from a list while preserving order. """
    u = []
    for item in seq:
        if item not in u:
            u.append(item)
    return u | 02834bf5633c82f5f7428c03519ca68bee8916d4 | 17,151 |
def sample_labels(model, wkrs, imgs):
"""
Generate a full labeling by workers given worker and image parameters.
Input:
- `model`: model instance to use for sampling parameters and labels.
- `wkrs`: list of worker parameters.
- `imgs`: list of image parameters.
Output:
1. list [img id, wkr id, label] as provided by `model.sample_label`.
"""
labels = [[ii, wi, model.sample_label(wkrs[wi], imgs[ii])] \
for ii in range(len(imgs)) for wi in range(len(wkrs))]
return labels | 1abd2d0d087f7ce452db7c899f753366b148e9e6 | 17,152 |
def default_state_progress_report(n_steps, found_states, all_states,
timestep=None):
"""
Default progress reporter for VisitAllStatesEnsemble.
Note that it is assumed that all states have been named.
Parameters
----------
n_steps : int
number of MD frames generated so far
found_states : iterable
the set of states that have been found
all_states : iterable
the set of all states of interest
timestep : float or quantity
the timestep (optional). If given, the amount of time simulated will
be reported along with the number of MD frames.
Returns
-------
str :
formatted string with information about progress so far
"""
report_str = "Ran {n_steps} frames"
if timestep is not None:
report_str += " [{}]".format(str(n_steps * timestep))
report_str += (". Found states [{found_states}]. "
"Looking for [{missing_states}].\n")
found_states_str = ",".join([s.name for s in found_states])
    # list comprehension instead of sets (to preserve order)
missing_states = [s for s in all_states if s not in found_states]
missing_states_str = ",".join([s.name for s in missing_states])
return report_str.format(n_steps=n_steps,
found_states=found_states_str,
missing_states=missing_states_str) | b0f740d18218dd9542704d03edcd4b6575a2c14e | 17,159 |
def childrenList(cursor,cd_tax):
"""
Retrieve all the children of a taxon in the database
Parameters:
----------
cursor: Psycopg2 cursor
cursor for the database connection
    cd_tax: Int
        identifier of the taxon whose children taxa we search for
Returns:
-------
all_children: List(Int)
list of identifiers of the children taxa
"""
foundMore = True
all_children=[cd_tax]
new_children = [cd_tax]
SQL = "SELECT cd_tax FROM taxon WHERE cd_sup IN (SELECT UNNEST( %s ))"
while foundMore:
cursor.execute(SQL,[new_children])
res=cursor.fetchall()
new_children = [r[0] for r in res]
all_children=all_children+new_children
if(len(new_children)==0):
foundMore=False
all_children.sort()
return all_children | ca50ac590674d19321144f77b54ec57d8dd49bb4 | 17,162 |
def path_sum(root, target_sum):
"""
Given a binary tree and a sum, determine if the tree has
a root-to-leaf path such that adding up all the values along
the path equals the given sum.
"""
def is_leaf(node):
return node.left is None and node.right is None
def leaf_nodes(node, parent_path_sum):
if node is None:
return
new_sum = parent_path_sum + node.val
if is_leaf(node):
yield (node, new_sum)
for n in leaf_nodes(node.left, new_sum):
yield n
for n in leaf_nodes(node.right, new_sum):
yield n
for node, path_sum in leaf_nodes(root, 0):
if path_sum == target_sum:
return True
return False | 0971227d42abb3a0cde1c9050dcca39731858679 | 17,164 |
def IsAioNode(tag):
"""Returns True iff tag represents an AIO node."""
return tag.startswith('aio_nodes.') | 6603f4bca75a463ca651b44615a11c3dd29ca487 | 17,168 |
def _parse_name(wot_identifier):
"""
Parse identifier of the forms: nick
nick@key
@key
:Return: nick, key. If a part is not given return an empty string for it.
>>> _parse_name("BabcomTest@123")
('BabcomTest', '123')
"""
split = wot_identifier.split('@', 1)
nickname_prefix = split[0]
key_prefix = (split[1] if split[1:] else '')
return nickname_prefix, key_prefix | 7a33f5247e345175bad92fc8bf040eddc8b65804 | 17,171 |
import re
def baselineNumber (Title):
"""Extract the processing baseline number from the given product title."""
return re.sub(r".+_N(\d{4})_.+", r"\1", Title) | c69d099c6173cb771d14d35f520f12f32079229f | 17,178 |
def _is_expecting_event(event_recv_list):
""" check for more event is expected in event list
Args:
event_recv_list: list of events
Returns:
result: True if more events are expected. False if not.
"""
for state in event_recv_list:
if state is False:
return True
return False | befbe6a614ede6a7e5b59d3bda9c148c04ddadde | 17,182 |
def get_orders_dict(orders):
"""Form a dictionary of current order buys and sells
"""
list_orders = list(orders)
orders_dict = {}
orders_dict["sells"] = []
orders_dict["buys"] = []
for order in list_orders:
if order["side"] == "sell":
temp_price = round(float(order["price"]), 2)
orders_dict["sells"].append(temp_price)
if order["side"] == "buy":
temp_price = round(float(order["price"]), 2)
orders_dict["buys"].append(temp_price)
return orders_dict | 9d126d759dd0b3da7c584f6d4163243d8b2cee43 | 17,183 |
import re
def get_offer_total_floors(html_parser, default_value=''):
"""
This method returns the maximal number of floors in the building.
:param html_parser: a BeautifulSoup object
:rtype: string
:return: The maximal floor number
"""
# searching dom for floor data
floor_raw_data = html_parser.find(class_="param_floor_no")
if hasattr(floor_raw_data, 'span'):
floor_data = floor_raw_data.span.text
else:
return default_value
# extracting information about floor
match = re.search(r"\w+\s(?P<total>\d+)", floor_data)
total_floors = default_value
if match:
total_floors = match.groupdict().get("total")
return total_floors | 170e75a04104f6fa1c544788c3d25324edd6b2e8 | 17,185 |
def rreplace(s, old, new, occurrence):
"""This function performs a search-and-replace on a string for a given
number of occurrences, but works from the back to the front.
:param s: string to manipulate
:param old: substring to search for
:param new: substring to replace with
:param occurrence: number of occurrences
:return: Modified string.
"""
li = s.rsplit(old, occurrence)
return new.join(li) | eac6d5ffb8adb7940e6d3374eec130cafcc311e7 | 17,188 |
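For example, replacing only the last separator versus the last two:

assert rreplace('a.b.c', '.', '-', 1) == 'a.b-c'
assert rreplace('a.b.c', '.', '-', 2) == 'a-b-c'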
import requests
def get_all_episodes(cfg, series_id):
"""
Request all episodes within a series.
:param series_id: Unique identifier for series
:param cfg: Opencast configuration
:return: List of pair of eventIds and titles for episodes
"""
url = cfg['uri'] + "/api/events"
params = {"filter": "is_part_of:" + series_id,
"sort": "start_date:ASC"}
result = []
r = requests.get(url=url, params=params, auth=(cfg['user'], cfg['password']))
json_data = r.json()
for elem in json_data:
result.append([elem['identifier'], elem['title']])
return result | 05ea8c7c36641ec5ed3dacebe9579a903ef01fe7 | 17,189 |
def default_sid_function(id, rsid):
"""
The default function for turning a Bgen (SNP) id and rsid into a
:attr:`pysnptools.distreader.DistReader.sid`.
If the Bgen rsid is '' or '0', the sid will be the (SNP) id.
Otherwise, the sid will be 'ID,RSID'
>>> default_sid_function('SNP1','rs102343')
'SNP1,rs102343'
>>> default_sid_function('SNP1','0')
'SNP1'
"""
if rsid == "0" or rsid == "":
return id
else:
return id + "," + rsid | f13b8adab14eb4476a151059938eddf9763d35ef | 17,190 |
def filter_BF_matches(matches: list, threshold=45) -> list:
"""
filter matches list and keep the best matches according to threshold
:param matches: a list of matches
:param threshold: threshold filtering
:return: matches_tmp: list of the best matches
"""
matches_tmp = []
sorted_matches = sorted(matches, key=lambda x: x.distance)
threshold_percent = int(len(sorted_matches) * threshold / 100)
for match_index in range(threshold_percent):
matches_tmp.append([sorted_matches[match_index].queryIdx, sorted_matches[match_index].trainIdx])
return matches_tmp | cce90d95f72b00148355552d0284b87cb16a40c1 | 17,192 |
def _convert_key_and_value(key, value):
"""Helper function to convert the provided key and value pair (from a dictionary) to a string.
Args:
key (str): The key in the dictionary.
value: The value for this key.
Returns:
str: The provided key value pair as a string.
"""
updated_key = f'"{key}"' if isinstance(key, str) else key
updated_value = f'"{value}"' if isinstance(value, str) else value
return f"{updated_key}: {updated_value}, " | 075c0a9a7fe54c35c19296e3da827484b579d4c8 | 17,194 |
def max_value_bits(b):
"""
Get maximum (unsigned) value of a given integer bit size variable.
Parameters
----------
b : int
Number of bits (binary values) that are used to describe a putative variable.
Returns
-------
max_value : int
Maximum value that putative variable can hold (integer unsigned).
"""
return (2 ** b) - 1 | 24041ed8833da09c1ecc8dea1c12f63ca7b29ed0 | 17,195 |
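For instance, one byte (8 bits) holds unsigned values up to 255:

assert max_value_bits(8) == 255
assert max_value_bits(1) == 1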
def aligned_output(cols, indent, tab_size=4):
"""
Pretty printing function to output tabular data containing multiple
columns of text, left-aligned.
The first column is aligned at an indentation of "indent". Each
successive column is aligned on a suitable multiple of the "tab_size"
with spaces for all indentation.
"cols" is assumed to be a list of columns, with each column holding
an equal length list of string values.
"""
# Check the input data
ncols = len(cols)
if ncols == 0: return ""
nrows = len(cols[0])
if nrows == 0: return ""
# Work out the indentations and widths of each column
indents = [ indent ]
widths = []
for col in range(1, ncols):
width = max(len(x) for x in cols[col-1])
        indents.append(((indents[col-1]+width+tab_size) // tab_size) * tab_size)
widths.append(indents[col] - indents[col-1])
# Now output the actual tabular values
result = ""
for row in range(0, nrows):
if row > 0:
result += ",\n" + (" " * indent)
if len(cols) > 1:
for col in range(0, ncols-1):
result += cols[col][row].ljust(widths[col])
result += cols[-1][row]
return result | 856391043607c4570f75b804917c10b4c4b42dc1 | 17,197 |
import re
def remove_blank(text):
"""
Args:
text (str): input text, contains blank between zh and en, zh and zh, en and en
Returns:
str: text without blank between zh and en, zh and zh, but keep en and en
Examples:
>>> text = "比如 Convolutional Neural Network,CNN 对应中 文是卷 积神 经网络。"
>>> remove_blank(text)
"比如Convolutional Neural Network,CNN对应中文是卷积神经网络。"
"""
# filter blank space between Chinese characters
text = re.sub(r'([^a-zA-Z])([\u0020]*)', r'\1', text)
# remove blank space between English characters and Chinese characters
text = re.sub(r'([\u0020]*)([^a-zA-Z])', r'\2', text)
return text | 8b2093254aeefc26e72c507f0ec5f9e7400a41ea | 17,199 |
def last_of_list(the_list):
"""Return the last item of the provided list."""
if len(the_list) == 0:
return None
return the_list[len(the_list) - 1] | b88bf4c2f55497093888cebe703a14c1eb45199d | 17,202 |
import torch
def get_mean_std(loader):
"""Calculate mean and standard deviation of the dataset
Args:
loader (instance): torch instance for data loader
Returns:
tensor: mean and std of data
"""
    channel_sum, channel_squared_sum, num_batches = 0,0,0
    # note: dim=[0,1,2] assumes images in (batch, height, width, channel)
    # layout; for (batch, channel, height, width) tensors use dim=[0,2,3]
    for img,_ in loader:
        channel_sum += torch.mean(img/255., dim=[0,1,2])
        channel_squared_sum += torch.mean((img/255.)**2, dim=[0,1,2])
num_batches += 1
mean = channel_sum / num_batches
std = (channel_squared_sum/num_batches - mean**2)**0.5
print("The mean of dataset : ", mean)
print("The std of dataset : ", std)
return mean,std | f5ee2a66edc5925aec3f78811c8ec6b8b943a1d3 | 17,203 |
from typing import Tuple
def get_sweep_time_ascii(
data: str, sweep_b: Tuple[int, int], time_b: Tuple[int, int]
) -> Tuple[int, int]:
"""Get sweep and time from a given ASCII string.
:param data: ASCII string
:param sweep_b: Boundaries of sweep
:param time_b: Boundaries of time
:return: sweep, time
"""
sweep_val = int(data[sweep_b[0] : sweep_b[1]], 2)
time_val = int(data[time_b[0] : time_b[1]], 2)
return sweep_val, time_val | f1848b70439314dff5d4a5e50ae0706f64315378 | 17,204 |
def bubbles_from_fixed_threshold(data, threshold=0, upper_lim=True):
"""
    Giri et al. (2018a)
It is a method to identify regions of interest in noisy images.
The method uses a fixed threshold.
Parameters
----------
data : ndarray
The brightness temperature or ionization fraction cube.
threshold : float
The fixed threshold value (Default: 0).
upper_lim : bool
This decides which mode in the PDF is to be identified.
'True' identifies ionized regions in brightness temperature, while
'False' identifies in the xfrac data (Default: True).
Returns
-------
Binary cube where pixels identified as region of interest are the True.
"""
if upper_lim: return (data<=threshold)
else: return (data>=threshold) | 63977ae51eaa80a99b8325124e2d78b98f61b549 | 17,206 |
import re
def lower_case_all_tags(s):
"""
Change all the tags to lower-case
"""
return re.sub(r'(<.*?>)', lambda pat: pat.group(1).lower(), s,
flags=re.IGNORECASE) | 54b8dfdeb81e7cc21c930fd97e1787616a2a8939 | 17,209 |
def check(file1, file2):
"""Compare file1 and file2.
Ignore leading and trailing whitespaces of the file"""
with open(file1) as f1:
test_output = f1.read()
with open(file2) as f2:
ref_output = f2.read()
    return test_output.strip() == ref_output.strip() | 3a10065ea681188fd4453bade2a04207aadf954a | 17,213 |
def trim_lcs(lcs : dict, cut_requirement : int = 0) -> dict :
"""
Remove epochs from a lightcurve that occur before object discovery, which
is defined as the first mjd with SNR >= 3.0.
Args:
lcs (dict): dictionary from a lightcurve file
cut_requirement (int, default=0): cut number to require
Returns:
copy of lcs with pre-discovery epochs removed from each lightcurve
"""
out_lcs = {}
for snid, info in lcs.items():
# Skip if the lightcurve will get cut during feature extraction
if not (info['cut'] == -1 or info['cut'] > cut_requirement):
continue
# Get discovery MJD, skip if light curve is never discovered
flux = info['lightcurve']['FLUXCAL'].values.astype(float)
fluxerr = info['lightcurve']['FLUXCALERR'].values.astype(float)
detection_mask = ((flux / fluxerr) >= 3.0)
if sum(detection_mask) == 0:
continue
mjds = info['lightcurve']['MJD'].values.astype(float)
mjd0 = mjds[detection_mask].min()
# Trim lightcurve
lc = info['lightcurve'][mjds >= mjd0].copy().reset_index(drop=True)
# Store results
out_lcs[snid] = {'lightcurve': lc, 'cut': info['cut']}
return out_lcs | 0071682cebb6cca56d93eb808d3d662d8e908787 | 17,214 |
def format_data_types(s):
"""
apply the correct data type to each value in the list created from a comma
separated sting "s"
x1: PDB ID (string)
x2: Macro molecule overlaps (int)
x3: Symmetry overlaps (int)
x4: All overlaps (int)
x5: Macro molecule overlaps per 1000 atoms (float)
x6: Symmetry overlaps per 1000 atoms (float)
x7: All overlaps per 1000 atoms (float)
x8: year model deposited in PDB (int)
x9: experiment type (string)
"""
d = [x.strip() for x in s.split(',')]
if len(d) == 9:
# integer values
for i in [1,2,3,7]:
d[i] = int(d[i])
# float values
for i in [4,5,6]:
d[i] = round(float(d[i]),1)
return d
else:
return None | b027e98e3fcba4439c9073936cc9bcfc6df93b9d | 17,215 |
import math
def choose(n,k):
"""Standard Choose function.
:param n: The total sample size.
:type n: int
:param k: The number of elements you're choosing.
:type k: int
:return: n choose k
:rtype: int
"""
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k)) | faf862e502971ec55a34eb8ee2909b9790252a32 | 17,217 |
import re
def sanitize_target(target: str = "") -> str:
"""
Format target, allow only numeric character e.g.
- "012345678901234" => "012345678901234"
- "080-123-4567" => "0801234567"
- "1-1111-11111-11-1" => "1111111111111"
- "+66-89-123-4567" => "66891234567"
:param target:
:return:
"""
result = re.sub(r"\D", "", target)
return result | ee490997a96a409967f7c8eebcb0e8daa28180b6 | 17,218 |
def create_label_dict(signal_labels, backgr_labels, discard_labels):
""" Create label dictionary, following the convetion:
* signal_labels are mapped to 1,2,3,...
* backgr_labels are mapped to 0
* discard_labels are mapped to -1
Args:
signal_labels: list, or list of lists
Labels of interest. Will be mapped to 1,2,3,...
Several labels can be mapped to the same integer by using nested lists. For example,
signal_labels=[A,[B,C]] would result in A being mapped to 1 and B and C both being mapped
to 2.
backgr_labels: list
Labels will be grouped into a common "background" class (0).
discard_labels: list
Labels will be grouped into a common "discard" class (-1).
Returns:
label_dict: dict
Dict that maps old labels to new labels.
"""
label_dict = dict()
for l in discard_labels: label_dict[l] = -1
for l in backgr_labels: label_dict[l] = 0
num = 1
for l in signal_labels:
if isinstance(l, list):
for ll in l:
label_dict[ll] = num
else:
label_dict[l] = num
num += 1
return label_dict | 75ef46153cb1cd1c5bc2b56ea540e89f8c5fa4b5 | 17,220 |
from typing import Optional
import torch
def check_nans_(x, warn: Optional[str] = None, value: float = 0):
"""Mask out all non-finite values + warn if `warn is not None`"""
msk = torch.isfinite(x)
if warn is not None:
if ~(msk.all()):
print(f'WARNING: NaNs in {warn}')
x.masked_fill_(msk.bitwise_not(), value)
return x | 5817f1488e7f187af6f68f97f3e943115a08e066 | 17,230 |
def get_element_text(element):
"""Builds the element text by iterating through child elements.
Parameters
----------
element: lxml.Element
The element for which to build text.
Returns
-------
text: str
The inner text of the element.
"""
text = ''.join(element.itertext())
return text | 41409e83a23927a5af0818c5f3faace0ca117751 | 17,234 |
def _sum_counters(*counters_list):
"""Combine many maps from group to counter to amount."""
result = {}
for counters in counters_list:
for group, counter_to_amount in counters.items():
for counter, amount in counter_to_amount.items():
result.setdefault(group, {})
result[group].setdefault(counter, 0)
result[group][counter] += amount
return result | 0806ec754397d00a72ed985ef44081c9deb025c7 | 17,235 |
def solution(A, target): # O(N)
"""
Similar to src.arrays.two_sum, find all the combinations that can be added
up to reach a given target. Given that all values are unique.
>>> solution([1, 2, 3, 4, 5], 5)
2
>>> solution([3, 4, 5, 6], 9)
2
"""
remaining = {} # O(1)
combinations = [] # O(1)
for count, value in enumerate(A): # O(N)
if value in remaining: # O(1)
combinations.append([remaining[value], count]) # O(1)
else: # O(1)
remaining[target - value] = value # O(1)
return len(combinations) # O(1) | adcdaa6825569a0d589b6abc01a514ce9d5f38f5 | 17,237 |
def exponential_ease_in_out(p):
"""Modeled after the piecewise exponential
y = (1/2)2^(10(2x - 1)) ; [0,0.5)
y = -(1/2)*2^(-10(2x - 1))) + 1 ; [0.5,1]
"""
if p == 0.0 or p == 1.0:
return p
if p < 0.5:
return 0.5 * pow(2, (20 * p) - 10)
else:
return -0.5 * pow(2, (-20 * p) + 10) + 1 | 8d528b7628b735e1dd2e3e8b89d0ef398c696ed6 | 17,239 |
def xor_bytes(a, b):
"""Returns a byte array with the values from XOR'ing each byte of the input arrays."""
if len(a) != len(b):
raise ValueError("Both byte arrays must be the same length.")
return bytes([a[i] ^ b[i] for i in range(len(a))]) | 7cf107be20d916eeef6414118b8da35926997814 | 17,248 |
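Because XOR is its own inverse, applying the same same-length key twice recovers the original bytes (the one-time-pad property):

key = bytes([0x0F, 0xF0])
msg = bytes([0xAB, 0xCD])
assert xor_bytes(xor_bytes(msg, key), key) == msg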
def _is_numeric(obj):
"""Return True if obj is a number, otherwise False.
>>> _is_numeric(2.5)
True
>>> _is_numeric('spam')
False
"""
try:
obj + 0
except TypeError:
return False
else:
return True | 8122eea635fd5ed9b2d0e42bda284631cc6cd07b | 17,249 |
def identity(x):
"""
恒等関数
Args:
x(np.array): 入力値
Returns: 入力値と同じ配列
"""
return x | 76a5d06675e9244b49acf74cf955a1dd9c6462c4 | 17,253 |
def GetGLGetTypeConversion(result_type, value_type, value):
"""Makes a gl compatible type conversion string for accessing state variables.
Useful when accessing state variables through glGetXXX calls.
    glGet documentation (for example, the manual pages):
[...] If glGetIntegerv is called, [...] most floating-point values are
rounded to the nearest integer value. [...]
Args:
result_type: the gl type to be obtained
value_type: the GL type of the state variable
value: the name of the state variable
Returns:
String that converts the state variable to desired GL type according to GL
rules.
"""
if result_type == 'GLint':
if value_type == 'GLfloat':
return 'static_cast<GLint>(round(%s))' % value
return 'static_cast<%s>(%s)' % (result_type, value) | aa2c283985fb824c603efe76b69479667c4fdd96 | 17,266 |
def list_to_csv(row):
"""
Takes a list and converts it to a comma separated string.
"""
format_string = ",".join(["{}"] * len(row))
return format_string.format(*row) | 104cc3e75d9c5d39fbdc7b7decd274a50b6e1b08 | 17,267 |
def add_link(s):
"""
if `s` is a url, then adds anchor tags for html representation in ipynb.
"""
    if s.startswith('http'):
        a = '<a href="{0}" target="_blank">'.format(s)
        a += s
        a += '</a>'
        return a
    return s | 5a85592b9c976e2f20874849287e9eded552c98c | 17,271 |
def GenerateCompareBuildsLink(build_ids, siblings):
"""Return the URL to compare siblings for this build.
Args:
build_ids: list of CIDB id for the builds.
siblings: boolean indicating whether sibling builds should be included.
Returns:
The fully formed URL.
"""
params = ['buildIds=%s' % ','.join([str(b) for b in build_ids])]
if siblings:
params.append('includeSiblings=true')
return 'http://go/buildCompare?%s' % '&'.join(params) | c309d71cff85becaa0d9cac26dd2a0481475a6ff | 17,272 |
def int_to_bool(value):
"""Turn integer into boolean."""
if value is None or value == 0:
return False
else:
return True | aa8f0f15be18f0c682ad1df4ed0f710880d5ecd5 | 17,278 |
def deep_merge(base, changes):
"""
Create a copy of ``base`` dict and recursively merges the ``changes`` dict.
Returns merged dict.
:type base: dict
:param base: The base dictionary for the merge
:type changes: dict
:param changes: The dictionary to merge into the base one
:return: The merged ``result`` dict
"""
def merge(result, changes):
for k, v in changes.items():
if not isinstance(v, dict):
result[k] = v
else:
if k not in result or not isinstance(result[k], dict):
result[k] = v.copy()
else:
result[k] = result[k].copy()
merge(result[k], changes[k])
result = base.copy()
merge(result, changes)
return result | b74ac0e4213e8bfb0792f9e84053a96af3bb29f0 | 17,280 |
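Illustration with made-up dictionaries: nested keys merge recursively, scalars in `changes` win, and the base dict is left untouched:

base = {'a': 1, 'b': {'x': 1, 'y': 2}}
changes = {'b': {'y': 3, 'z': 4}, 'c': 5}
assert deep_merge(base, changes) == {'a': 1, 'b': {'x': 1, 'y': 3, 'z': 4}, 'c': 5}
assert base == {'a': 1, 'b': {'x': 1, 'y': 2}}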
from typing import Optional
from django.urls import reverse
def admin_obj_url(obj: Optional[object], route: str = "", base_url: str = "") -> str:
"""
Returns admin URL to object. If object is standard model with default route name, the function
can deduct the route name as in "admin:<app>_<class-lowercase>_change".
:param obj: Object
:param route: Empty for default route
:param base_url: Base URL if you want absolute URLs, e.g. https://example.com
:return: URL to admin object change view
"""
if obj is None:
return ""
if not route:
route = "admin:{}_{}_change".format(obj._meta.app_label, obj._meta.model_name) # type: ignore
path = reverse(route, args=[obj.id]) # type: ignore
return base_url + path | 19601794a2455cf6f76231fd3a1c932fdbe09eae | 17,283 |
def contains_subsets(iter_of_sets):
"""
Checks whether a collection of sets contains any sets which are subsets of
another set in the collection
"""
for si in iter_of_sets:
for sj in iter_of_sets:
if si != sj and set(sj).issubset(si):
return True
return False | 2b5055f0a31f5f00d975b49b08a4976c3c251fc5 | 17,285 |
import random
import string
def random_numeric_token(length):
"""Generates a random string of a given length, suitable for typing on a numeric keypad.
"""
return ''.join(random.choice(string.digits) for i in range(length)) | b63ac76ff32b86d01fb3b74772340cf1ebfcc321 | 17,295 |
def tensor_to_string_list(tensor):
"""Convert a tensor to a list of strings representing its value"""
scalar_list = tensor.squeeze().numpy().tolist()
return ["%.5f" % scalar for scalar in scalar_list] | 4c8844c5401850e6fb3364b4efbe745d7e5f0dad | 17,296 |
def to_brackets(field_name, format_spec):
"""Return PEP 3101 format string with field name and format specification.
"""
if format_spec:
format_spec = ':' + format_spec
return '{' + field_name + format_spec + '}'
return '{' + field_name + '}' | b699b664d1d6bee8c5009bc04513e67c3c15755b | 17,299 |
import re
def _get_valid_filter_terms(filter_terms, colnames):
"""Removes any filter terms referencing non-existent columns
Parameters
----------
filter_terms
A list of terms formatted so as to be used in the `where` argument of
:func:`pd.read_hdf`.
colnames :
A list of column names present in the data that will be filtered.
Returns
-------
The list of valid filter terms (terms that do not reference any column
not existing in the data). Returns none if the list is empty because
the `where` argument doesn't like empty lists.
"""
if not filter_terms:
return None
valid_terms = filter_terms.copy()
for term in filter_terms:
# first strip out all the parentheses - the where in read_hdf
# requires all references to be valid
t = re.sub('[()]', '', term)
# then split each condition out
t = re.split('[&|]', t)
# get the unique columns referenced by this term
term_columns = set([re.split('[<=>\s]', i.strip())[0] for i in t])
if not term_columns.issubset(colnames):
valid_terms.remove(term)
return valid_terms if valid_terms else None | a47dff6d9c34e6fc75a77ecc2f9828bb1667f7bb | 17,313 |
def dekker(
t: float,
x1: float,
y1: float,
x2: float,
y2: float,
x3: float,
y3: float,
x4: float,
y4: float,
) -> float:
"""
Estimates the root using Dekker's method.
Uses a secant line from (x2, y2) to either (x1, y1) or (x3, y3), depending on
which point is closest.
Note
----
If x3 is closer to x2 but using it does not result in a value
between x1 and x2, then it is rejected and bisection is used.
Division by 0 is checked here, and the solver checks if 0 < t < 1
before defaulting to bisection.
"""
# If x2 is closer to x1, then use (x1, y1).
if abs(x2 - x1) <= abs(x2 - x3):
return y2 / (y2 - y1)
# If division by 0, then use bisection.
elif y2 == y3:
return 0.5
# If x2 is closer to x3 and using (x3, y3) does
# not result in division by 0, then use (x3, y3).
else:
return y2 * (x3 - x2) / ((y2 - y3) * (x1 - x3)) | a50d57f6961dc11293975f03eb047a659598abcb | 17,316 |
def paren(iterable):
"""Return generator that parenthesizes elements."""
return ('(' + x + ')' for x in iterable) | c841c9145c35a0f600a39845484176a01b6492be | 17,323 |
def all_suffixes(li):
"""
Returns all suffixes of a list.
Args:
li: list from which to compute all suffixes
Returns:
list of all suffixes
"""
return [tuple(li[len(li) - i - 1:]) for i in range(len(li))] | ff1a2cf4fa620d50ecb06e124a4c2ca192d5d926 | 17,324 |
from typing import List
from typing import Dict
from typing import Union
import warnings
def _get_group_id(
recorded_group_identifier: List[str],
column_links: Dict[str, int],
single_line: List[str],
) -> Union[str, None]:
"""Returns the group_name or group_id if it was recorded or "0" if not.
Favors the group_name over the group_id.
Parameters
----------
recorded_group_identifier: List[str]
List of all recorded group identifiers. Group identifiers are "group_id" or
"group_name".
column_links: Dict[str, int]
Dictionary with column index for relevant recorded columns.
'column_links[column] = index'
The following columns are currently considered relevant:
floodlight id: 'column name in Kinexon.csv-file
- time: 'ts in ms'
- sensor_id: 'sensor id'
- mapped_id: 'mapped id'
- name: 'full name'
- group_id: 'group id'
- x_coord: 'x in m'
- y_coord: 'y in m'
single_line: List[str]
Single line of a Kinexon.csv-file that has been split at the respective
delimiter, eg. ",".
Returns
-------
group_id: str
The respective group id in that line or "0" if there is no group id.
"""
# check for group identifier
has_groups = len(recorded_group_identifier) > 0
if has_groups:
# extract group identifier
if "group_name" in recorded_group_identifier:
group_identifier = "group_name"
elif "group_id" in recorded_group_identifier:
group_identifier = "group_id"
else:
warnings.warn("Data has groups but no group identifier!")
return None
group_id = single_line[column_links[group_identifier]]
# no groups
else:
group_id = "0"
return group_id | ea8e64d8377513d00205cb8259cb01f8711bf135 | 17,325 |
import warnings
def parse_sacct(sacct_str):
"""Convert output of ``sacct -p`` into a dictionary.
Parses the output of ``sacct -p`` and return a dictionary with the full (raw)
contents.
Args:
sacct_str (str): stdout of an invocation of ``sacct -p``
Returns:
dict: Keyed by Slurm Job ID and whose values are dicts containing
key-value pairs corresponding to the Slurm quantities returned
by ``sacct -p``.
"""
result = {}
cols = []
for lineno, line in enumerate(sacct_str.splitlines()):
fields = line.split('|')
if lineno == 0:
cols = [x.lower() for x in fields]
else:
record = {}
jobidraw = fields[0]
if jobidraw in result:
warnings.warn("Duplicate raw jobid '%s' found" % jobidraw)
for col, key in enumerate(cols):
if key:
record[key] = fields[col]
result[jobidraw] = record
return result | 2719e08dea13305c6e6fcef19bd4320072ad7647 | 17,326 |
import pytz
def format_date(dt):
"""
Format a datetime into Zulu time, with terminal "Z".
"""
return dt.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%S.%fZ') | 524ee5803e5a5e26e7b9f8a1b6b680d7d739b3b1 | 17,328 |
import random
import string
def generate_random_string(length: int):
    """ Creates a randomly generated string of uppercase letters
    Args:
        length (int): The desired length
    Returns:
        random_string (str)
    """
    return ''.join(random.choices(string.ascii_uppercase, k=length)) | 5414ecb7a6e212379000e43fef07e4642c0e63a0 | 17,329 |
def modular_exponentiation(b, e, m):
"""produced modular exponentiation.
https://en.wikipedia.org/wiki/Modular_exponentiation
:param b: a base number.
:param e: an exponent.
:param m: a modulo.
:return: a reminder of b modulo m.
"""
x = 1
y = b
    while e > 0:
        if e % 2 == 1:
            x = (x * y) % m
        y = (y * y) % m
        e //= 2
return x % m | 389cab70e83bb2c2972c39583edbb2bca8efeacb | 17,330 |
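A sanity check against Python's built-in three-argument pow:

assert modular_exponentiation(2, 10, 1000) == pow(2, 10, 1000) == 24
assert modular_exponentiation(7, 128, 13) == pow(7, 128, 13)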
def expected_bfw_size(n_size):
"""
Calculates the number of nodes generated by a single BFW for a single root node.
:param n_size: <list> The number of neighbours at each depth level
:return: The size of the list returned by a single BFW on a single root node
"""
total = []
for i, d in enumerate(n_size):
if i == 0:
total.append(d)
else:
total.append(total[-1] * d)
return sum(total) + 1 | f275af1c152c4d4704c0be5ee43f3d0f8802900b | 17,332 |