content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k)
---|---|---|
def _parse_aggregation_feat(aggregating_in, features):
"""Parse aggregation information and format in a correct standard way.
Parameters
----------
aggregating_in: tuple
the information for aggregating features
features: pst.BaseFeatures
the features we want to aggregate.
Returns
-------
agg_f_ret: function
the aggregation function for the retriever part.
desc_in: pst.BaseDescriptorModel
the descriptormodel to compute the aggregated features.
pars_feat_in: dict
the parameters of the featuresmanager to compute the aggregate
features.
pars_feats: dict
the parameters in order to instantiate the new aggregated features.
desc_out: pst.BaseDescriptorModel
the descriptormodel to use in the new aggregated features.
"""
assert(type(aggregating_in) == tuple)
if len(aggregating_in) == 5:
agg_f_ret, desc_in, pars_feat_in, pars_feats, desc_out = aggregating_in
elif len(aggregating_in) == 4:
agg_f_ret, desc_in, pars_feat_in, pars_feats = aggregating_in
desc_out = features.descriptormodel
elif len(aggregating_in) == 3 and type(aggregating_in[1]) == dict:
agg_f_ret, pars_feat_in, pars_feats = aggregating_in
desc_in = features.descriptormodel
desc_out = features.descriptormodel
elif len(aggregating_in) == 3 and type(aggregating_in[1]) != dict:
agg_f_ret, desc_in, desc_out = aggregating_in
pars_feat_in, pars_feats = {}, {}
else:
agg_f_ret = aggregating_in[0]
pars_feat_in, pars_feats = {}, {}
desc_in = features.descriptormodel
desc_out = features.descriptormodel
return agg_f_ret, desc_in, pars_feat_in, pars_feats, desc_out | e909cb032d97f5598ceb4321b44a8ecb4f0463fd | 14,517 |
def exc_isinstance(exc_info, expected_exception, raise_not_implemented=False):
"""
Simple helper function as an alternative to calling
`~.pytest.ExceptionInfo.errisinstance` which will take into account all
the "causing" exceptions in an exception chain.
Parameters
----------
exc_info : `pytest.ExceptionInfo` or `Exception`
The exception info as returned by `pytest.raises`.
expected_exception : `type`
The expected exception class
raise_not_implemented : bool, optional
Whether to re-raise a `NotImplementedError` – necessary for tests that
should be skipped with ``@skip_if_not_implemented``. Defaults to
``False``.
Returns
-------
correct_exception : bool
Whether the exception itself or one of the causing exceptions is of the
expected type.
"""
if exc_info is None:
return False
if hasattr(exc_info, 'value'):
exc_info = exc_info.value
if isinstance(exc_info, expected_exception):
return True
elif raise_not_implemented and isinstance(exc_info, NotImplementedError):
raise exc_info
return exc_isinstance(exc_info.__cause__, expected_exception,
raise_not_implemented=raise_not_implemented) | 7e53dd94b7326faea1fe5accdc60b1b3b003f0af | 14,524 |
def g2c(tensor):
"""
Reshape from groups, channels to groups*channels
"""
if len(tensor.shape) == 5:
b, c, g, h, w = tensor.shape
tensor = tensor.reshape(b, c*g, h, w)
return tensor | 18517b066dfad91c9e8708fb94058b2780ddab9c | 14,525 |
import torch
def collate_dataset(batch, test=False):
"""
Preprocessing for given batch.
    It will be used as the collate_fn for the Torch DataLoader (collate_fn=collate_dataset).
Args:
batch(Torch.Tensor)
return:
List(Torch.Tensor)
"""
drug_batch = []
protein_batch = []
y_batch = []
for data in batch:
drug_batch.append(data[0])
protein_batch.append(data[1])
if not test:
y_batch.append(data[2])
if not test:
return [torch.LongTensor(drug_batch), torch.LongTensor(protein_batch), torch.FloatTensor(y_batch)]
else:
return [torch.LongTensor(drug_batch), torch.LongTensor(protein_batch)] | 9eec71d5fe994bd6bb6742d2eb667ad0dda1bae1 | 14,528 |
def CalculatePlatt(mol):
"""
Calculation of Platt number in a molecule
Parameters:
mol: RDKit molecule object
Returns:
Platt: Platt number
"""
cc = [x.GetBeginAtom().GetDegree() + x.GetEndAtom().GetDegree() - 2 for x in mol.GetBonds()]
return sum(cc) | 4a960308487fea60857a3ed2268efcd5342099d2 | 14,529 |
def get_cli_kwargs(**kwargs):
"""
Transform Python keyword arguments to CLI keyword arguments
:param kwargs: Keyword arguments
:return: CLI keyword arguments
"""
return ' '.join(
[
f'--{k.replace("_", "-")} {str(v).replace(",", " ")}'
for k, v in kwargs.items()
if v
]
) | 7357516968952e13d8fe2edf07dc51c3a29e7673 | 14,532 |
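As a quick usage sketch (the option names below are made up, and the function above is assumed to be in scope), falsy values are dropped and commas become spaces:
print(get_cli_kwargs(batch_size=16, tags="a,b,c", dry_run=False))
# --batch-size 16 --tags a b c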
def calc_tf_idf(tf_matrix, idf_matrix):
"""
    creates a dict of doc dicts; each doc dict has key:val pairs of word : tf*idf value
:param tf_matrix: returned by calc_tf
:param idf_matrix: returned by calc_idf
:return: dict of dict with word and tf*idf score for each doc in corpus
"""
tf_idf_matrix = {}
for (sent1, f_table1), (sent2, f_table2) in zip(tf_matrix.items(), idf_matrix.items()):
tf_idf_table = {}
for (word1, value1), (word2, value2) in zip(f_table1.items(),
f_table2.items()):
tf_idf_table[word1] = float(value1 * value2)
tf_idf_matrix[sent1] = tf_idf_table
return tf_idf_matrix | 0e69d66499121f0aad8f2991cabaa6fe643e841a | 14,533 |
def find_active_sites(activeSites, targetName):
"""
Find an active site in a list of AS's given the name
Input: list of active sites, name of target
Output: list of indeces of matched active sites
"""
return [i for i, j in enumerate(activeSites) if j.name==targetName] | 544cc215f378eccc5aaa655482c0b3c03fed8974 | 14,534 |
from typing import Dict
def get_email_to_names_table(email_to_names_filename: str) -> Dict[str, str]:
"""Get a mapping of emails to names from the input file.
Args:
email_to_names_filename: Path to file with emails and corresponding
names, where each email-name pair is on a separate line and each line
looks like the following:
[email protected],Name
Returns:
A dict mapping emails to names.
"""
email_to_names_table = {}
with open(email_to_names_filename, "r") as email_to_names_file:
for line in email_to_names_file:
email, name = line.rstrip().split(sep=",")
assert email not in email_to_names_table
email_to_names_table[email] = name
return email_to_names_table | 856ce53a0abee1721b60603c468e5b66a484f7c8 | 14,539 |
def create_actions_file_msg(second_run: bool):
"""Actions File Message"""
article = "another" if second_run else "an"
return "Do you want to create %s action file?" % article | b3029285ac076049ea150155932044942ea3f5f7 | 14,542 |
def get_list_of(key, ch_groups):
"""
Return a list of values for key in ch_groups[groups]
key (str): a key of the ch_groups[groups] dict
ch_groups (dict): a group-name-indexed dict whose values are
dictionaries of group information (see expand_ch_groups)
Example: get_list_of('system', ch_groups)
If ch_groups is the one specified in the json in the expand_ch_groups
example, this will return the list [ 'geds', 'spms', 'auxs' ]
"""
values = []
for ch_info in ch_groups.values():
if key in ch_info and ch_info[key] not in values:
values.append(ch_info[key])
return values | f5bbc32c836d0de5e9aa2c055d97bacdfc794d0e | 14,546 |
def error(Y, X):
"""
Calculates mean squared error (MSE) of the reconstructed data (X) relative
to the original data (Y). In theory, this number should decrease as the
order of the markov process increases and/or the number of components
involved in the original projection (if used) increases.
Parameters
----------
Y : array, shape (N, M)
The original data.
X : array, shape (N, M)
The reconstructed data (output from "test").
Returns
-------
MSE : array, shape (N, M)
        Same dimensions as X and Y, where each element is the MSE as if the
        corresponding element in the reconstructed data X operated as the estimator
        for the corresponding element in the original data Y.
"""
return (Y - X) ** 2 | 16e392f3ee4bece24ff736e47dcb7cae242a1997 | 14,552 |
def normalize_keys(data):
"""Convert keys to lowercase"""
return {k.lower(): v for k, v in data.items()} | b23e2aff374d9413a5c9a63db1fdd955ae7f24a6 | 14,553 |
def accuracy(y_pred, y, tags):
"""
Returns the accuracy of a classifier
"""
o_id = tags.index("O") if "O" in tags else None
correct = ignore = 0
for i, tag_id in enumerate(y):
if y_pred[i] == y[i]:
if tag_id == o_id:
ignore += 1
else:
correct += 1
return float(correct)/(len(y) - ignore) | 950108d475220dc7e16e62b873c4a5d7dbff0e97 | 14,554 |
def frac_year(hour, leap_year=False):
"""
This function calculates the fraction of the year.
Parameters
----------
hour : integer, float
The hour of the day. Could be a decimal value.
leap_year : boolean
Indicates if the year is a leap year. Default
is False.
Returns
-------
B : float
The fraction of the year
"""
if leap_year:
n_days = 366
else:
n_days = 365
B = (hour - 1944) / 24 * 360 / n_days
return B | fe66678278a2257e5b8fc34af042b5a72b29596f | 14,555 |
def box_calc(size_xyz, pallet_xyz):
"""Calculates a list of points to store parts in a pallet"""
[size_x, size_y, size_z] = size_xyz
[pallet_x, pallet_y, pallet_z] = pallet_xyz
xyz_list = []
for h in range(int(pallet_z)):
for j in range(int(pallet_y)):
for i in range(int(pallet_x)):
xyz_list = xyz_list + [[(i+0.5)*size_x, (j+0.5)*size_y, (h+0.5)*size_z]]
return xyz_list | 0ed2b14e117b1f66be67579136f0f25432367284 | 14,558 |
def function_intercept(intercepted_func, intercepting_func):
"""
    Intercepts a method call and calls the supplied intercepting_func with the result of its call and its arguments
Example:
def get_event(result_of_real_event_get, *args, **kwargs):
# do work
return result_of_real_event_get
pygame.event.get = function_intercept(pygame.event.get, get_event)
:param intercepted_func: The function we are going to intercept
:param intercepting_func: The function that will get called after the intercepted func. It is supplied the return
    value of the intercepted_func as the first argument and its args and kwargs.
:return: a function that combines the intercepting and intercepted function, should normally be set to the
intercepted_functions location
"""
def wrap(*args, **kwargs):
real_results = intercepted_func(*args, **kwargs) # call the function we are intercepting and get it's result
intercepted_results = intercepting_func(real_results, *args, **kwargs) # call our own function a
return intercepted_results
return wrap | 28cfa1e873500cc9ca87a9c07275683cf41a33ae | 14,562 |
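A minimal, self-contained usage sketch (the add and log_result functions below are hypothetical; function_intercept above is assumed to be in scope):
def add(a, b):
    return a + b

def log_result(result, *args, **kwargs):
    # receives add's return value first, then add's original arguments
    print(f"add{args} -> {result}")
    return result

add = function_intercept(add, log_result)
print(add(2, 3))  # prints "add(2, 3) -> 5", then 5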
def is_ref(prop):
"""
Returns True if prop is a reference.
"""
return list(prop.keys()) == ['$ref'] | 3c52ba784d3d490cf44a60d5d35b2251b640eeff | 14,563 |
from typing import List
import collections
def molecular_formula_from_symbols(symbols: List[str], order: str = "alphabetical") -> str:
"""
Returns the molecular formula for a list of symbols.
Parameters
----------
symbols: List[str]
List of chemical symbols
order: str, optional
Sorting order of the formula. Valid choices are "alphabetical" and "hill".
Returns
-------
str
The molecular formula.
"""
supported_orders = ["alphabetical", "hill"]
order = order.lower()
if order not in supported_orders:
raise ValueError(f"Unsupported molecular formula order: {order}. Supported orders are f{supported_orders}.")
count = collections.Counter(x.title() for x in symbols)
element_order = sorted(count.keys())
if order == "hill" and "C" in element_order:
if "H" in element_order:
element_order.insert(0, element_order.pop(element_order.index("H")))
element_order.insert(0, element_order.pop(element_order.index("C")))
ret = []
for k in element_order:
c = count[k]
ret.append(k)
if c > 1:
ret.append(str(c))
return "".join(ret) | 82142bcae734f89c46e9fce854fdf6de080d8fd7 | 14,565 |
def is_anonymous_argument(c):
"""Test if one argument is anonymous (unnamed)
In the declaration `void f(int x, int);` the second argument is unnamed
"""
return c.spelling is None or c.spelling == '' | 1b262e4539b89c81dc21eebc585af8a4dfc9d342 | 14,569 |
def divide_round_up(a, b):
    """Calculates a / b rounded up to the nearest integer"""
    if a % b == 0:
        return a // b
    else:
        return (a // b) + 1 | 7ec28dfbe05c006e4e2cad340002a39c9b23f4b9 | 14,572 |
def get_cycle_stats(data_list):
"""
Calculates cycle statistics for test run.
Returns min, max, avg cycle count.
"""
cycles = [data[0] for data in data_list]
min_cycles = min(cycles)
max_cycles = max(cycles)
avg_cycles = sum(cycles) / len(cycles)
return min_cycles, max_cycles, avg_cycles | e8fc1a7b3619ed0f9b63995ded217a4037bdf618 | 14,577 |
def compute_mask(cpu):
"""
Given a CPU number, return a bitmask that can be used in /proc/irq to set
the processor affinity for an interrupt.
"""
return 1<<cpu | a393c663e5426ecde752aa107c97c7c429aa589a | 14,581 |
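For example, CPU 5 gives bit 5 set; /proc/irq expects the mask written as hex (the IRQ path below is hypothetical):
mask = compute_mask(5)
print(mask, format(mask, 'x'))  # 32 20
# echo 20 > /proc/irq/<irq>/smp_affinity   (hypothetical IRQ path)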
def decode_string(val: bytes) -> str:
"""Decodes a possibly null terminated byte sequence to a string using ASCII and strips whitespace."""
return val.partition(b'\x00')[0].decode('ascii').strip() | 1e963040a41ae16ba1bb7750ecf17b5e5cdb680f | 14,584 |
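A small illustration, assuming the function above is in scope: everything after the first NUL byte is dropped and surrounding whitespace is stripped:
print(decode_string(b'ABC1  \x00\x00junk'))  # 'ABC1'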
import re
def bqtk_default_context(request) -> str:
"""Generate unique id by using pytest nodeid.
Args:
request: pytest fixture which provide tests context info.
Returns:
str: unique id matching big query name constraints
"""
invalid_bq_chars = re.compile('([^a-zA-Z0-9_]+)')
nodeid = request.node.nodeid
return invalid_bq_chars.sub('_', nodeid) | 5f486f625ae0ff2670dd457a25043ad61193af90 | 14,586 |
def sort_list(this_list):
""" Lamda Function sorts pairs in list by 2nd-part (Shannon Entropy). """
# Sort the domain tuples (Entropy, FQDN) pairs high entropy items.
# Return SORTED LIST
return sorted(this_list, key = lambda pairs: pairs[0]) | 75f4def632b330c392f7a4f4db416262585c432b | 14,587 |
def ascii(str):
"""Return a string with all non-ascii characters hex-encoded"""
if type(str) != type(''):
return map(ascii, str)
rv = ''
for c in str:
if c in ('\t', '\n', '\r') or ' ' <= c < chr(0x7f):
rv = rv + c
else:
rv = rv + '\\' + 'x%02.2x' % ord(c)
return rv | 7224579507cc9e0efb0bb1700e2b8b0096422532 | 14,590 |
def right_shift(number, n):
"""
Right shift on 10 base number.
Parameters
----------
number : integer
the number to be shift
n : integer
the number of digit to shift
Returns
-------
shifted number : integer
the number right shifted by n digit
Examples
--------
>>> right_shift(123, 1)
3
>>> right_shift(0, 1)
0
>>> right_shift(1234, 2)
34
"""
return number % 10 ** n | e6d23b5bd630449aba54cb38c2c6d9be174386c0 | 14,591 |
def fast_multiply(matrix, value):
"""
Fast multiply list on value
:param matrix: List of values
:param value: Multiplier
:return: Multiplied list
"""
for ix in range(0, matrix.shape[0]):
matrix[ix] *= value
return matrix | 8a9eb29b1c6c0cc56b35f43f128e2595b45e1ff6 | 14,598 |
def freeze(x):
"""Freezes a dictionary, i.e. makes it immutable and thus hashable."""
frozen = {}
for k, v in x.items():
if isinstance(v, list):
frozen[k] = tuple(v)
else:
frozen[k] = v
return frozenset(frozen.items()) | fc3bf21419057563f2389ab7e26279bb1f37436b | 14,599 |
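An illustrative check (assuming freeze above is in scope): key order does not matter and the result is hashable, so it can be used in sets or as a dict key:
a = freeze({"name": "x", "tags": [1, 2]})
b = freeze({"tags": [1, 2], "name": "x"})
print(a == b)       # True
print(len({a, b}))  # 1 -- deduplicated because the frozensets hash equal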
def humanize(seconds):
"""
Convert a duration in seconds to an human-readable form
"""
assert type(seconds) is int
if seconds >= 86400:
res = seconds // 86400
return '%s day' % res if res == 1 else '%s days' % res
if seconds >= 3600:
res = seconds // 3600
return '%s hour' % res if res == 1 else '%s hours' % res
if seconds >= 60:
res = seconds // 60
return '%s minute' % res if res == 1 else '%s minutes' % res
return '%s second' % seconds if seconds == 1 else '%s seconds' % seconds | 2a32c7b54b1be58ce571910edbf8d5383db66aa2 | 14,600 |
def schema_input_type(schema):
"""Input type from schema
:param schema:
:return: simple/list
"""
if isinstance(schema, list):
return 'list'
return 'simple' | cdcc9b724005083995f26a767d9b2ab95645ad79 | 14,602 |
def _jwt_decode_handler_with_defaults(token): # pylint: disable=unused-argument
"""
Accepts anything as a token and returns a fake JWT payload with defaults.
"""
return {
'scopes': ['fake:scope'],
'is_restricted': True,
'filters': ['fake:filter'],
} | 9374f03065a8592448ae3984e56bb9cae962059f | 14,604 |
def seconds_to_time(seconds):
"""Return a nicely formatted time given the number of seconds."""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
return "%d days, %02d hours, %02d minutes, %02d seconds" % (d, h, m, s) | bf87da51527af08f60b3425169af1f696ecc9020 | 14,605 |
import random
def generate_exact_cover(num_of_subsets):
"""Generates a new exact cover problem with the given number of random filled subsets"""
subset = set()
while len(subset) < num_of_subsets:
        subset.add(frozenset(random.sample(range(1, num_of_subsets + 1), random.randint(0, num_of_subsets))))
return list(subset) | 2e63bdc7181caa312ed42d48578989c8ddc29050 | 14,612 |
def read_long_description(path: str) -> str:
"""Utility function to read the README file."""
with open(path) as file:
data: str = file.read()
return data | c206f2cec2613fde5217845c1e36cd507924716e | 14,613 |
def _case_convert_snake_to_camel(token: str) -> str:
"""Implements logic to convert a snake case token to a camel case one.
"""
while True:
try:
# find next underscore
underscore_loc = token.index('_')
except ValueError:
# converted all underscores
break
# is the underscore at the end of the string?
if underscore_loc == len(token) - 1:
break
orig = token
token = f'{orig[:underscore_loc]}{orig[underscore_loc+1].upper()}'
# is there more after the capital?
if len(orig) > underscore_loc+2:
token += f'{orig[underscore_loc+2:]}'
return token | 3796f627cc836f884aeab765769cf652f8fd1ff9 | 14,619 |
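A couple of illustrative inputs (assuming the helper above is in scope); a trailing underscore is left untouched by the early break:
print(_case_convert_snake_to_camel('my_var_name'))  # myVarName
print(_case_convert_snake_to_camel('trailing_'))    # trailing_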
def _truncate_float(matchobj, format_str='0.2g'):
"""Truncate long floats
Args:
matchobj (re.Match): contains original float
format_str (str): format specifier
Returns:
str: returns truncated float
"""
if matchobj.group(0):
return format(float(matchobj.group(0)), format_str)
return '' | 7fc4e90e496ea6f04838a1ccefb810c2467fafaf | 14,621 |
import base64
def to_base64(data):
"""
Utility function to base64 encode.
:param data: The data to encode.
:return: The base64 encoded data.
"""
return base64.b64encode(data) | 014ca1e1534e9350034e148abcceac525b0f0349 | 14,622 |
import zipfile
def get_pfile_contents(pfile):
"""
Get a list of files within the pfile Archive
"""
if zipfile.is_zipfile(pfile):
zip = zipfile.ZipFile(pfile)
return zip.namelist()
else:
return None | 8f67b87a0e35fe42d0c20cb9ed0c3cdc93a89163 | 14,624 |
import re
def twitter_preprocess(
text: str,
no_rt: bool = True,
no_mention: bool = False,
no_hashtag: bool = False
) -> str:
"""
Preprocessing function to remove retweets, mentions and/or hashtags from
raw tweets.
Examples
--------
>>> twitter_preprocess("RT @the_new_sky: Follow me !")
'Follow me !'
>>> twitter_preprocess("@the_new_sky please stop making ads for your #twitter account.", no_mention=True, no_hashtag=True)
' please stop making ads for your account.'
"""
if no_rt:
text = re.sub(r"^RT @(?:[^ ]*) ", '', text)
if no_mention:
text = re.sub(r"@\w+", '', text)
if no_hashtag:
text = re.sub(r"#\w+", '', text)
return text | 1644b7da6b43ebd77a33bcb186f2634b6d3bc8db | 14,628 |
def calculate_diff_metrics(util1, util2):
"""Calculate relative difference between two utilities"""
util_diff = {}
for metric_k1, metric_v1 in util1.items():
if metric_k1 not in util2:
continue
util_diff[metric_k1] = {}
for avg_k1, avg_v1 in metric_v1.items():
if avg_k1 not in util2[metric_k1]:
continue
diff = abs(avg_v1 - util2[metric_k1][avg_k1]) / max(1e-9, avg_v1)
util_diff[metric_k1][avg_k1] = diff
return util_diff | 45b8dc7a2441333870c8a0faa5b6e8da56df8a40 | 14,629 |
def compute_length(of_this):
"""
Of_this is a dict of listlikes. This function computes the length of that object, which is the length of all of the listlikes, which
are assumed to be equal. This also implicitly checks for the lengths to be equal, which is necessary for Message/TensorMessage.
"""
lengths = [len(value) for value in of_this.values()]
if lengths == []:
# An empty dict has 0 length.
return 0
if len(set(lengths)) != 1: # Lengths are not all the same
raise ValueError("Every element of dict must have the same length.")
return lengths[0] | 4f8e2b3b17ba6bed29e5636de504d05a028304d8 | 14,637 |
import socket
def client(
host: str,
port: str,
use_sctp: bool = False,
ipv6: bool = False,
buffer_size: int = 4096,
timeout: float = 5.0
) -> int:
"""Main program to run client for remote shell execution
:param host: Server name or ip address
:type host: str
:param port: Port on which server listens, can be also name of the service
:type port: str
:param use_sctp: Use SCTP transport protocol or not, defaults to False
:type use_sctp: bool, optional
:param ipv6: Use IPv6 protocol, defaults to False
:type ipv6: bool, optional
:param buffer_size: Buffer size for recv and send
:type buffer_size: int, optional
:param timeout: Timeout after which command is not more awaited, defaults to 5.0
:type timeout: float, optional
:return: Exit code
:rtype: int
"""
buffer_size = 4096
family = socket.AF_INET
transport_protocol = socket.IPPROTO_TCP
# Whether to use ipv6 or not
if ipv6:
family = socket.AF_INET6
# Whether to use SCTP or not
if use_sctp:
transport_protocol = socket.IPPROTO_SCTP
# Use DNS to get address and port of a server
try:
sockaddr = socket.getaddrinfo(
host=host,
port=port,
family=family,
proto=transport_protocol
)[0][-1]
except socket.gaierror as e:
print(f"getaddrinfo() error ({e.errno}): {e.strerror}")
return -1
# Define socket
try:
sock = socket.socket(family, socket.SOCK_STREAM, transport_protocol)
sock.setblocking(True)
sock.settimeout(timeout)
except OSError as e:
print(f"socket() error ({e.errno}): {e.strerror}")
return -2
# Connect to a server
try:
sock.connect(sockaddr)
print(f"Connected to {sockaddr[0]}:{sockaddr[1]}")
except OSError as e:
print(f"connect() error ({e.errno}): {e.strerror}")
return -3
print("Press: Ctrl+D to safely exit, Ctrl+C to interrupt")
# Remote shell endless loop
try:
while True:
cmd = input('> ')
try:
sock.sendall(bytes(cmd, 'utf-8'))
except OSError as e:
print(f"sendall() error ({e.errno}): {e.strerror}")
return -3
try:
data = sock.recv(buffer_size)
print(data.decode('utf-8').strip(' \n'))
except socket.timeout as e:
print("Request timeout..")
except OSError as e:
print(f"recv() error({e.errno}): {e.strerror}")
return -3
except KeyboardInterrupt:
print(" Keyboard Interrupt")
sock.close()
return 255
except EOFError:
print("Safely Closing")
sock.close()
return 0 | 196de595f6d6573c6bc1a2b2209688060b246cdc | 14,638 |
def gamma_approx(mean, variance):
"""
Returns alpha and beta of a gamma distribution for a given mean and variance
"""
return (mean ** 2) / variance, mean / variance | 8103a69d807c39c95c1604d48f16ec74761234c4 | 14,639 |
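A worked example (assuming the function above is in scope): with mean 4 and variance 2 this returns shape alpha=8 and rate beta=2, and indeed alpha/beta = 4 and alpha/beta**2 = 2 recover the inputs:
alpha, beta = gamma_approx(4.0, 2.0)
print(alpha, beta)                      # 8.0 2.0
print(alpha / beta, alpha / beta ** 2)  # 4.0 2.0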
def public_coverage_filter(data):
"""
Filters for the public health insurance prediction task; focus on low income Americans, and those not eligible for Medicare
"""
df = data
df = df[df['AGEP'] < 65]
df = df[df['PINCP'] <= 30000]
return df | e8a579437fe484a738deefe6b32defaf405b5a58 | 14,641 |
def get_snippet(soup):
"""obtain snippet from soup
:param soup: parsed html by BeautifulSoup
:return: snippet_list
"""
tags = soup.find_all("div", {"class": "gs_rs"})
snippet_list = [tags[i].text for i in range(len(tags))]
return snippet_list | 8955aa6eee837f6b1473b8de2d02083d35c3b46e | 14,642 |
import yaml
def read_config(file_name="config.yaml"):
"""Read configurations.
Args:
file_name (str): file name
Returns:
configs (dict): dictionary of configurations
"""
with open(file_name, 'r') as f:
config = yaml.safe_load(f)
return config | 51faf47b4c28d1cbf80631d743d9087430efb148 | 14,643 |
def filter_dual_selection_aromatic(sele1_atoms, sele2_atoms, aromatic1_index, aromatic2_index):
"""
Filter out aromatic interactions that are not between selection 1 and selection 2
Parameters
----------
sele1_atoms: list
List of atom label strings for all atoms in selection 1
sele2_atoms: list
List of atom label strings for all atoms in selection 2
aromatic1_index: int
Aromatic atom index 1
aromatic2_index: int
Aromatic atom index 2
Returns
-------
filter_bool: bool
True to filter out interaction
"""
if (aromatic1_index in sele1_atoms) and (aromatic2_index in sele2_atoms):
return False
if (aromatic1_index in sele2_atoms) and (aromatic2_index in sele1_atoms):
return False
return True | b1cb590510e45b0bfb5386e1514f24911156d01b | 14,645 |
def lung_capacity(mass):
"""Caclulate lung capacity
Args
----
mass: float
Mass of animal
Return
------
volume: float
Lung volume of animal
References
----------
Kooyman, G.L., Sinnett, E.E., 1979. Mechanical properties of the harbor
porpoise lung, Phocoena phocoena. Respir Physiol 36, 287–300.
"""
return 0.135 * (mass ** 0.92) | 75bddd298ad7df69cda6c78e2353ee4cef22865a | 14,651 |
def correlation_calculator(x_points: list[float], y_points: list[float]) -> float:
"""Return a correlation of the association between the x and y variables
We can use this to judge the association between our x and y variables to determine if
we can even use linear regression. Linear regression assumes that there is some sort of
association between the x and y variables.
    We will use the Pearson Correlation Coefficient Formula, the most commonly used correlation
formula. The formula is as follows:
- r = (n(Σxy) - (Σx)(Σy)) / ((nΣx^2 - (Σx)^2)(nΣy^2 - (Σy)^2)) ** 0.5
r = Pearson Coefficient
n= number of the pairs of the stock
∑xy = sum of products of the paired scores
∑x = sum of the x scores
∑y= sum of the y scores
∑x^2 = sum of the squared x scores
∑y^2 = sum of the squared y scores
Preconditions:
- len(x_points) == len(y_points)
>>> x_p = [6,8,10]
>>> y_p = [12,10,20]
>>> round(correlation_calculator(x_p, y_p), 4) == 0.7559
True
"""
# calculate n, the number of pairs
n = len(x_points)
# Calculate the sum for x points and x squared points
sum_x = 0
sum_x_squared = 0
for val in x_points:
sum_x += val
sum_x_squared += val ** 2
# Calculate the sum for y points and y squared poionts
sum_y = 0
sum_y_squared = 0
for val in y_points:
sum_y += val
sum_y_squared += val ** 2
# Calculate the sum for x points times y points
sum_xy = 0
for i in range(n):
sum_xy += x_points[i] * y_points[i]
# print(sum_x, sum_y, sum_x_squared, sum_y_squared, sum_xy)
numer = (n * sum_xy - sum_x * sum_y)
denom = ((n * sum_x_squared - sum_x ** 2) * (n * sum_y_squared - sum_y ** 2)) ** 0.5
r = numer / denom
return r | 3fa697a15ff86511cff2a1b37b532f3d14caf7d8 | 14,652 |
def generate_where_in_clause(field_name, feature_list):
"""
Args:
field_name (str): Name of column to query
feature_list (list): List of values to generate IN clause
Returns:
string e.g. `WHERE name IN ('a' ,'b', 'c')`
"""
# Build up 'IN' clause for searching
where_str = f"{field_name} in ("
for p in feature_list:
if not isinstance(p, str):
where_str += f"{str(p)},"
else:
where_str += f"'{str(p)}',"
where_str = f"{where_str[:-1]})"
return where_str | d00179cfa6a9841b00195b99863d7b37f60c45e4 | 14,655 |
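For example (assuming the function above is in scope), strings are quoted while other values are not:
print(generate_where_in_clause('name', ['a', 'b', 3]))
# name in ('a','b',3)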
def _zero_out_datetime(dt, unit):
"""To fix a super obnoxious issue where datetrunc (or SQLAlchemy) would
break up resulting values if provided a datetime with nonzero values more
granular than datetrunc expects. Ex. calling datetrunc('hour', ...) with
a datetime such as 2016-09-20 08:12:12.
Note that if any unit greater than an hour is provided, this method will
zero hours and below, nothing more.
:param dt: (datetime) to zero out
:param unit: (str) from what unit of granularity do we zero
:returns: (datetime) a well-behaved, non query-breaking datetime
"""
units = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
i = units.index(unit) + 1
for zeroing_unit in units[i:]:
try:
dt = dt.replace(**{zeroing_unit: 0})
except ValueError:
pass
return dt | adac3a5c84b0b7def93c044488afb57074cf2d09 | 14,660 |
def remove(S, rack):
"""removes only one instance of a letter in a rack"""
if rack == []:
return []
if S == rack[0]:
return rack[1:]
return [rack[0]] + remove(S, rack[1:]) | 28041197da6a7978dacbfe65d413d4e6fd9c3a03 | 14,668 |
from typing import Iterable
from typing import Any
from typing import Union
from typing import List
def remove_from_iterable(
the_iterable: Iterable[Any], items_to_remove: Iterable[Any]
) -> Union[Iterable[Any], None]:
"""
remove_from_iterable removes any items in items_to_remove from the_iterable.
:param the_iterable: Source iterable to remove items from.
:type the_iterable: Iterable[Any]
:param items_to_remove: Iterable containing items to be removed.
:type items_to_remove: Iterable[Any]
:return: Iterable containing all remaining items or None if no items left.
:rtype: Union[Iterable[Any], None]
"""
new_iterable: List[Any] = []
for item in the_iterable:
if item not in items_to_remove:
try:
new_iterable.append(item)
except NameError:
new_iterable = [item]
try:
return new_iterable
except NameError:
return None | 9ac800fb69f46c8b65ed2ac73158678b4c0ae7d6 | 14,669 |
def flatten_nested_list(l):
"""Flattens a list and provides a mapping from elements in the list back
into the nested list.
Args:
l: A list of lists.
Returns:
A tuple (flattened, index_to_position):
flattened: The flattened list.
index_to_position: A list of pairs (r, c) such that
index_to_position[i] == (r, c); flattened[i] == l[r][c]
"""
flattened = []
index_to_position = {}
r = 0
c = 0
for nested_list in l:
for element in nested_list:
index_to_position[len(flattened)] = (r, c)
flattened.append(element)
c += 1
r += 1
c = 0
return (flattened, index_to_position) | 14e20ef2a29dc338a4d706e0b2110a56447ae84e | 14,676 |
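A small illustration (assuming the function above is in scope) of how the returned mapping points back into the nested list:
flattened, index_to_position = flatten_nested_list([[10, 20], [30]])
print(flattened)          # [10, 20, 30]
print(index_to_position)  # {0: (0, 0), 1: (0, 1), 2: (1, 0)}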
def get_owner_email_from_domain(domain):
"""Look up the owner email address for a given domain.
Returning dummy addresses here - implement your own domain->email lookup here.
Returns:
str: email address
"""
email = {
'example-domain-a.example.com': '[email protected]',
'example-domain-b.example.com': '[email protected]',
}.get(domain, '[email protected]')
return email | 2d3851c87c5ba3c19af4c989887bf750b66ffbe1 | 14,677 |
def update_name(name, mapping):
""" Update the street name using the mapping dictionary. """
for target_name in mapping:
if name.find(target_name) != -1:
a = name[:name.find(target_name)]
b = mapping[target_name]
c = name[name.find(target_name)+len(target_name):]
name = a + b + c
return name | 8046fc9ef83f262251fac1a3e540a607d7ed39b9 | 14,684 |
import re
def annotation_version(repo, tag_avail):
"""Generates the string used in Git tag annotations."""
match = re.match("^(.*)-build([0-9]+)$", tag_avail[repo.git()]["build_tag"])
if match is None:
return "%s version %s." % (repo.git(), tag_avail[repo.git()]["build_tag"])
else:
return "%s version %s Build %s." % (repo.git(), match.group(1), match.group(2)) | 484eada27b698c2d7a9236d7b86ea6c53050e1e8 | 14,685 |
import math
def _scale_absolute_gap(gap, scale):
"""Convert an absolute gap to a relative gap with the
given scaling factor."""
assert scale > 0
if not math.isinf(gap):
return gap / scale
else:
return gap | 29c1b03e53c66bf7728066ff3503118a266acd3d | 14,686 |
def yolo_label_format(size, box):
"""
Rule to convert anchors to YOLO label format
Expects the box to have (xmin, ymin, xmax, ymax)
    :param size: Width and height of the image as a list
:param box: the four corners of the bounding box as a list
:return: YOLO style labels
"""
dw = 1. / size[0]
dh = 1. / size[1]
x = (box[0] + box[2]) / 2.0
y = (box[1] + box[3]) / 2.0
w = box[2] - box[0]
h = box[3] - box[1]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return x, y, w, h | c958f75ffe3de8462ed25c5eb27bcaa21d52c46c | 14,688 |
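A worked example (assuming the function above is in scope) with a 640x480 image, size given as (width, height) to match how the code indexes it:
print(yolo_label_format((640, 480), (0, 0, 320, 240)))
# (0.25, 0.25, 0.5, 0.5) -- normalized center x, center y, width, height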
def upload_dir(app, filename):
""" Returns the upload path for applications.
The end result will be something like:
"uploads/applications/Firefox/icons/firefox.png"
"""
path = ('applications/' + app.name + '/icons/' + filename)
# Note: if the file already exist in the same path, django will
# automatically append a random hash code to the filename!
return path | e188c6c11733e0b88313c65b6be67c8db2fd69a9 | 14,689 |
def is_instance_id(instance):
""" Return True if the user input is an instance ID instead of a name """
if instance[:2] == 'i-':
return True
return False | f4389c340b21c6b5e0f29babe49c556bd4b4c120 | 14,692 |
def readiness() -> tuple[str, int]:
"""A basic healthcheck. Returns 200 to indicate flask is running"""
return 'ready', 200 | de1072809a32583a62229e390ea11f6b21f33b1e | 14,694 |
def name_sep_val(mm, name, sep='=', dtype=float, ipos=1):
"""Read key-value pair such as "name = value"
Args:
mm (mmap): memory map
name (str): name of variable; used to find value line
sep (str, optional): separator, default '='
dtype (type, optional): variable data type, default float
ipos (int, optiona): position of value in line, default 1 (after sep)
Return:
dtype: value of requested variable
Examples:
>>> name_sep_val(mm, 'a') # 'a = 2.4'
>>> name_sep_val(mm, 'volume', ipos=-2) # 'volume = 100.0 bohr^3'
>>> name_sep_val(mm, 'key', sep=':') # 'key:val'
>>> name_sep_val(mm, 'new', sep=':') # 'new:name'
>>> name_sep_val(mm, 'natom', dtype=int) # 'new:name'
"""
idx = mm.find(name.encode())
if idx == -1:
raise RuntimeError('"%s" not found' % name)
mm.seek(idx)
line = mm.readline().decode()
tokens = line.split(sep)
val_text = tokens[ipos].split()[0]
val = dtype(val_text)
return val | 4eee14e88ca2a6f43cb1b046de239c6847d57931 | 14,695 |
def extract_class(query):
"""Extract original class object from a SQLAlchemy query.
Args:
query (query): SQLAlchemy query
Returns:
class: base class use when setting up the SQL query
"""
first_expression = query.column_descriptions[0]['expr']
try:
# query returns subset of columns as tuples
return first_expression.class_
except AttributeError:
# query returns a full class ORM object
return first_expression | 48bfec4301c752c68bda613334dcb7dfc17f3f15 | 14,699 |
def get_restore_user(domain, couch_user, as_user_obj):
"""
This will retrieve the restore_user from the couch_user or the as_user_obj
if specified
:param domain: Domain of restore
:param couch_user: The couch user attempting authentication
:param as_user_obj: The user that the couch_user is attempting to get
a restore user for. If None will get restore of the couch_user.
:returns: An instance of OTARestoreUser
"""
couch_restore_user = as_user_obj or couch_user
if couch_restore_user.is_commcare_user():
return couch_restore_user.to_ota_restore_user()
elif couch_restore_user.is_web_user():
return couch_restore_user.to_ota_restore_user(domain)
else:
return None | 09ad1783236860779064f2929bb91c05915cdd6e | 14,700 |
from tempfile import mkdtemp
def create_temporary_directory(prefix_dir=None):
"""Creates a temporary directory and returns its location"""
return mkdtemp(prefix='bloom_', dir=prefix_dir) | b2a1ddeb8bcaa84532475e3f365ab6ce649cd50c | 14,705 |
from typing import Sequence
def in_bbox(point: Sequence, bbox: Sequence):
"""
Checks if point is inside bbox
:param point: point coordinates [lng, lat]
:param bbox: bbox [west, south, east, north]
:return: True if point is inside, False otherwise
"""
return bbox[0] <= point[0] <= bbox[2] and bbox[1] <= point[1] <= bbox[3] | fb95e2506adee4ff425997a0211ee53095894f35 | 14,708 |
def _FinalElement(key):
"""Return final element of a key's path."""
return key.path().element_list()[-1] | 80ecac3fe3eb3d2587d64dd759af10dcddb399fe | 14,714 |
import torch
from typing import Optional
def clamp(data: torch.Tensor, min: float, max: float, out: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Clamp tensor to minimal and maximal value
Args:
data: tensor to clamp
min: lower limit
max: upper limit
out: output tensor
Returns:
Tensor: clamped tensor
"""
return torch.clamp(data, min=float(min), max=float(max), out=out) | 3e835f134fd9eedefb5ca7841d8c3a9960063389 | 14,716 |
def rtiInputFixture(rtiConnectorFixture):
"""
This `pytest fixture <https://pytest.org/latest/fixture.html>`_
creates a session-scoped :class:`rticonnextdds_connector.Input` object
which is returned everytime this fixture method is referred.
The initialized Input object is cleaned up at the end
of a testing session.
``MySubscriber::MySquareReader`` `datareader
<https://community.rti.com/static/documentation/connext-dds/5.2.3/doc/api/connext_dds/api_cpp2/classdds_1_1sub_1_1DataReader.html>`_ in
``test/xml/TestConnector.xml`` `application profile
<https://community.rti.com/rti-doc/510/ndds.5.1.0/doc/pdf/RTI_CoreLibrariesAndUtilities_XML_AppCreation_GettingStarted.pdf>`_
is used for initializing the Input object.
:param rtiConnectorFixture: :func:`rtiConnectorFixture`
:type rtiConnectorFixture: `pytest.fixture <https://pytest.org/latest/builtin.html#_pytest.python.fixture>`_
:returns: session-scoped Input object for testing
:rtype: :class:`rticonnextdds_connector.Input`
"""
return rtiConnectorFixture.get_input("MySubscriber::MySquareReader") | b9fbd841e1640fb77b2991583a2b167e714a0a7b | 14,721 |
import binascii
def base64(data):
"""
Get base64 string (1 line) from binary "data".
:param (bytes, bytearray) data: Data to format.
:raises TypeError: Raises an error if the data is not a bytearray of bytes
instance
:return str: Empty string if this failed, otherwise the base64 encoded
string.
"""
if isinstance(data, (bytearray, bytes)):
b64_data = binascii.b2a_base64(data).decode('ascii')
else:
raise TypeError('Data passed to base64 function is of the wrong type')
# Remove any new lines and carriage returns
b64_data.replace("\n", "").replace("\r", "")
return b64_data.strip("\n") | a51e138beddb788c248c5f2423b3fde2faacdb63 | 14,725 |
def get_exp_data(diff_fname):
"""
Parse genes with differential expression score.
The file expects genes with a score in a tab delimited format.
"""
gene_exp_rate=dict()
dfh=open(diff_fname, "rU")
for line in dfh:
parts=line.strip('\n\r').split('\t')
try:
float(parts[1])
except:continue
gene_exp_rate[parts[0]]=float(parts[1])
dfh.close()
return gene_exp_rate | b376f9e8f42ed2e1f3c0209696de7e85b8bed082 | 14,732 |
import time
def timestamp() -> float:
"""
Returns fractional seconds of a performance counter.
It does include time elapsed during sleep and is system-wide
Note: The reference point of the returned value is undefined,
so that only the difference between the results of two calls is valid.
Returns:
float: fractional seconds
"""
return time.perf_counter() | e2c4d8c06d8d07ca67ec4f857a464afde19b6c2a | 14,734 |
def verify_user_prediction(user_inputs_dic: dict, correct_definition_dic: dict):
"""
Verifies user prediction json against correct json definition
returns true if correct format, false if not
"""
if user_inputs_dic.keys() != correct_definition_dic.keys():
return False
for user_key, user_value in user_inputs_dic.items():
possible_values = correct_definition_dic[user_key].keys()
if user_value not in possible_values:
return False
return True | de1d2b579ab312929f64196467a3e08874a5be42 | 14,735 |
def _maybe_format_css_class(val: str, prefix: str = ""):
"""
Create a CSS class name for the given string if it is safe to do so.
Otherwise return nothing
"""
if val.replace("-", "_").isidentifier():
return f"{prefix}{val}"
return "" | c5c615b9e0894807a020186cdade9c031f904c06 | 14,736 |
def get_text(soup, file_path):
"""Read in a soup object and path to the soup source file,
and return an object with the record's text content"""
text = ""
text_soup = soup.find('div', {'id': 'Text2'})
for o in text_soup.findAll(text=True):
text += o.strip() + "\n"
return {
"text": text
} | 764d3751742584c657613f5c2d00ef544a060a1d | 14,740 |
def list_to_string_with_comma(thing):
"""Input a list, returns every item in the list as a string with commas in between"""
string = ""
for item in thing:
string += str(item) + ','
return string[:-1] | a7fff928137c3f7041b030c1fe4dd099f0ff8ca2 | 14,743 |
import json
def read_geo_info(file_path):
"""
Reads geographic point and region infomation from a json containing
individual locations and regions.
Parameters:
file_path (str) : path to the json file
Return:
dictionary with geographic info
"""
with open(file_path) as json_file:
geo_dict = json.load(json_file)
# pop out the "information" key \\ useless
geo_dict.pop('information')
return geo_dict | d4e8b34bee16183ac535c146e4f9b6026ed57b7d | 14,746 |
def adjust_index_pair(index_pair, increment):
"""Returns pair of indices incremented by given number."""
return [i + increment for i in index_pair] | 43968980998f6d12457e922c3c70d2ceba6d6b2e | 14,750 |
def paragraph_detokenizer(sentences, breaks):
"""Restore original paragraph format from indexes of sentences and newlines
Args:
sentences (list): List of sentences
breaks (list): List of indexes of sentences and newlines
Returns:
text (str): Text with original format
"""
output = []
for br in breaks:
if br == "\n":
output.append("\n")
else:
output.append(sentences[br] + " ")
text = "".join(output)
return text | fbba0099326156bff3ba5d76dfb3b9bc197c3269 | 14,751 |
def custom_lineplot(ax, x, y, error, xlims, ylims, color='red'):
"""Customized line plot with error bars."""
ax.errorbar(x, y, yerr=error, color=color, ls='--', marker='o', capsize=5, capthick=1, ecolor='black')
ax.set_xlim(xlims)
ax.set_ylim(ylims)
return ax | f80b76b8f9f60a44bf62bc64829c40793747ada2 | 14,753 |
def generate_role_with_colon_format(content, defined_role, generated_role):
"""Generate role data with input as Compute:ComputeA
In Compute:ComputeA, the defined role 'Compute' can be added to
roles_data.yaml by changing the name to 'ComputeA'. This allows duplicating
the defined roles so that hardware specific nodes can be targeted with
specific roles.
:param content defined role file's content
:param defined_role defined role's name
:param generated_role role's name to generate from defined role
:exception ValueError if generated role name is of invalid format
"""
# "Compute:Compute" is invalid format
if generated_role == defined_role:
msg = ("Generated role name cannot be same as existing role name ({}) "
"with colon format".format(defined_role))
raise ValueError(msg)
# "Compute:A" is invalid format
if not generated_role.startswith(defined_role):
msg = ("Generated role name ({}) name should start with existing role "
"name ({})".format(generated_role, defined_role))
raise ValueError(msg)
name_line = "name:%s" % defined_role
name_line_match = False
processed = []
for line in content.split('\n'):
stripped_line = line.replace(' ', '')
# Only 'name' need to be replaced in the existing role
if name_line in stripped_line:
line = line.replace(defined_role, generated_role)
name_line_match = True
processed.append(line)
if not name_line_match:
raise ValueError(" error")
return '\n'.join(processed) | a303bf17a8416b2b4df410095d76faaa7d2466ca | 14,755 |
import locale
def decode_byte_str(byte_str):
"""
Decodes byte string into unicode string.
Args:
byte_str(byte): Byte string.
Returns:
Unicode string.
"""
# first try to decode with utf-8 and if that fails try with system default
for encoding in ("utf-8", locale.getpreferredencoding()):
try:
res = byte_str.decode(encoding)
return res
except Exception:
continue
else:
return str(byte_str) | 1ae790ef9fee9ce5053f55e1d8757a4a385dc56d | 14,759 |
def make_options(obs_settings, obs_names):
"""Constructs a dict of configuration options for a set of named observables.
Args:
obs_settings: An `ObservationSettings` instance.
obs_names: An `ObservableNames` instance.
Returns:
A nested dict containing `{observable_name: {option_name: value}}`.
"""
observable_options = {}
for category, spec in obs_settings._asdict().items():
for observable_name in getattr(obs_names, category):
observable_options[observable_name] = spec._asdict()
return observable_options | a9c0d605835533b0e41cf8df4c934c42f5601a4a | 14,764 |
def decode_val(val_text, hexnum):
"""
Decode tile value text to its numerical equvalent.
@param val_text: Value text to decode.
@type val_text: C{str}
@param hexnum: Value is a hexadecimal number.
@type hexnum: C{bool}
@return: Its numeric value if it can be decoded, C{None} if it cannot be
decoded.
@rtype: C{int} or C{None}
"""
try:
if hexnum:
return int(val_text, 16)
else:
return int(val_text, 10)
except ValueError:
return None | d68ad3c3dcac9d023a430c4a17aa045742f3ae87 | 14,766 |
import random
def generate_bdays(num_bdays):
"""Generates n birthdays and returns them as a list"""
bdays = []
for bday in range(num_bdays):
bdays.append(random.randint(1, 365))
return bdays | ef2c5988db714bebea81301edc9f2aa3e979766d | 14,767 |
from typing import List
from typing import Tuple
def append_per_tuple(
dataset_2tuples: List[Tuple[str,str]],
new_val: int
) -> List[Tuple[str,str,int]]:
"""
Given a list of 2-tuple elements, append to every 2-tuple another fixed
item, such that a list of 3-tuples is returned.
"""
dataset_3tuples = []
for (val0, val1) in dataset_2tuples:
dataset_3tuples += [(val0,val1,new_val)]
return dataset_3tuples | f0518a29f9d4d4219a19d41d1dd2dee4d271ee30 | 14,778 |
def cidr_mask_to_ip_int(mask_num):
"""
    Convert a CIDR mask length to its integer netmask value.
    :param mask_num: number of mask bits, e.g. 16
    :return: an integer netmask value
"""
cidr_num = int(mask_num)
if 0 < cidr_num <= 32:
return ((1 << cidr_num) - 1) << (32 - cidr_num)
    raise ValueError('%s is not a valid cidr code.' % cidr_num) | d62cd2c52f3a4fdbbceccb0f3fc1b1988754d410 | 14,780 |
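For example (assuming the function above is in scope), a /24 prefix yields the familiar 255.255.255.0 netmask:
mask = cidr_mask_to_ip_int(24)
print(hex(mask))  # 0xffffff00
print(".".join(str((mask >> s) & 0xFF) for s in (24, 16, 8, 0)))  # 255.255.255.0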
from bs4 import BeautifulSoup
def get_img_url(offer_markup):
""" Searches for images in offer markup
:param offer_markup: Class "offerbody" from offer page markup
:type offer_markup: str
:return: Images of offer in list
:rtype: list
"""
html_parser = BeautifulSoup(offer_markup, "html.parser")
images = html_parser.find_all(class_="bigImage")
output = []
for img in images:
output.append(img.attrs["src"])
return output | b04a1e886fb520f33a325c425dc8afcb4ee58950 | 14,784 |
def sorted(iterable, key=None, reverse=False):
"""Return a new list containing all items from the iterable in ascending order.
A custom key function can be supplied to customize the sort order, and the
reverse flag can be set to request the result in descending order.
"""
result = list(iterable)
result.sort(key=key, reverse=reverse)
return result | da629fec69583e7d4e56c1c25d710304d2e2e800 | 14,785 |
def clean_file_record(raw_record):
# type: (str) -> str
"""
Removes NUL values from the raw_record
"""
return raw_record.replace('\x00', '') | 6eaad2c7e8e687ea038fe17c7f10618be6ebf4c1 | 14,788 |
import binascii
import hmac
import hashlib
def sign_message(message: str, key: str) -> str:
"""Signs a message with a key.
Args:
message:
String of the message we want to sign.
key:
String of the SHA256 hex encoded key we want to use to sign the message.
Returns:
A string containing the SHA256 hex encoded signature of the message.
"""
key_bytes = binascii.unhexlify(key)
message_bytes = message.encode()
return hmac.new(key_bytes, message_bytes, hashlib.sha256).hexdigest() | 20eb1a354c9ec452a1f705f6f808082d55b4e331 | 14,790 |
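A usage sketch (the key below is a made-up 32-byte value, hex encoded; the function above is assumed to be in scope); the result is always a 64-character hex digest:
key = "ab" * 32                      # hypothetical 256-bit key in hex
sig = sign_message("transfer:100", key)
print(len(sig))                      # 64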
def succ(B, T):
"""Return the successor of the tuple T over the base represented by B.
Returns None if T is the last tuple in the ordering.
See the library description for how base representations work."""
Tnew = T[:]
for i in range(len(B)):
# Increase the entry in position len(B)-i-1 if possible (i.e. we work
# right-to-left).
idx = len(B)-i-1
if type(B[idx]) == int:
Tnew[idx] = (Tnew[idx] + 1) % B[idx]
# If we are not 0, then the increment was successful and we
# can stop. Otherwise, we reached the end of the base set and
# had to loop back to the beginning, so we must continue.
if Tnew[idx] > 0:
return Tnew
else:
# Move to the next position in the list.
basis, lookup = B[idx]
Tnew[idx] = basis[(lookup[Tnew[idx]] + 1) % len(basis)]
            # If we are not the first element in the base set, the increment
            # was successful and we can stop. Otherwise, we reached the end
# of the base set and had to loop back to the beginning, so we
# must continue.
if Tnew[idx] != basis[0]:
return Tnew
# We could not increment anything, and thus were at the end of the list.
return None | 4cd072b762551887ee7f4ad64b0f7b574bd58eea | 14,797 |
def find_approximate_rt(peaklist, rt):
"""
Find a peak with the closest retention time if it's within a tolerance.
:param peaklist: list, of Peaks
:param rt: float, retention time to match to
:return: Peak, the peak matching the retention time within the tolerance, if any
"""
peaklist = [peak for peak in peaklist if peak.rt] # clean list for only those with RTs
return next((peak for peak in peaklist if rt - .011 < peak.rt < rt + .011), None) | 4cc7e933b02892fd41ff250764b67e7b2a75ff6e | 14,799 |
def find_image_center(xsize,ysize):
"""
Find the pixel coordinates of the image center
"""
if xsize % 2 == 0:
cen_x = (xsize+1)/2.0
else:
cen_x = xsize/2.0
if ysize % 2 == 0:
cen_y = (ysize+1)/2.0
else:
cen_y = ysize/2.0
return (cen_x, cen_y) | 95b70c27352a514433be28fdd2ee1a42334cd183 | 14,801 |
def int_to_letter(i: int) -> str:
"""Converts an integer in range [0, 25] into a latter from 'A' to 'Z'.
Behavior is unspecified for other inputs!
"""
return chr(i + ord("A")) | 593896ad2b9ca3cfa592b87d6a04f7ffaaf7dd21 | 14,808 |
def _explicit_module_name(tags):
"""Returns an explicit module name specified by a tag of the form `"swift_module=Foo"`.
Since tags are unprocessed strings, nothing prevents the `swift_module` tag from being listed
multiple times on the same target with different values. For this reason, the aspect uses the
_last_ occurrence that it finds in the list.
Args:
tags: The list of tags from the `cc_library` target to which the aspect is being applied.
Returns:
The desired module name if it was present in `tags`, or `None`.
"""
module_name = None
for tag in tags:
if tag.startswith("swift_module="):
_, _, module_name = tag.partition("=")
return module_name | 88cfba0731e3d1503d6a377150ed12534d7c8688 | 14,816 |
def value_index_map(array):
"""
Given input array, returns dict with key/values k,i,
where i is the 0-index where value k appeared in the input array
Assumes array elements are unique
Used to get a mapping from pk's of an query set axis to the 0-index
:param array:
:return: dict
"""
output_map = {v: i for i, v in enumerate(array)}
return output_map | aa623b0a6fed762caa005506f4729b079f97fbb2 | 14,817 |
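For example (assuming the function above is in scope):
print(value_index_map(['b', 'a', 'c']))  # {'b': 0, 'a': 1, 'c': 2}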