from typing import Dict
def merge_hooks(hooks1: Dict[str, list], hooks2: Dict[str, list]) -> Dict[str, list]:
"""
Overview:
        merge two hook dicts, which must have the same keys; each merged value is sorted by hook priority with a stable method
Arguments:
- hooks1 (:obj:`dict`): hooks1 to be merged
- hooks2 (:obj:`dict`): hooks2 to be merged
Returns:
- new_hooks (:obj:`dict`): merged new hooks
.. note::
        This merge function uses a stable sort, so hooks with the same priority keep their original relative order
"""
assert set(hooks1.keys()) == set(hooks2.keys())
new_hooks = {}
for k in hooks1.keys():
new_hooks[k] = sorted(hooks1[k] + hooks2[k], key=lambda x: x.priority)
return new_hooks | add5ae72917ca9aff109e8ac86a4d6902c14b298 | 5,614 |
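A quick usage sketch for merge_hooks; the Hook dataclass below is a hypothetical stand-in, assumed only to expose a priority attribute:
from dataclasses import dataclass
@dataclass
class Hook:  # hypothetical hook object carrying a sort priority
    name: str
    priority: int
hooks_a = {'train': [Hook('log', 10), Hook('ckpt', 20)]}
hooks_b = {'train': [Hook('eval', 10)]}
merged = merge_hooks(hooks_a, hooks_b)
# The stable sort keeps 'log' ahead of 'eval' at equal priority 10.
print([h.name for h in merged['train']])  # ['log', 'eval', 'ckpt']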
def get_max_assocs_in_sample_csr(assoc_mat):
"""
Returns the maximum number of co-associations a sample has and the index of
that sample.
"""
    indptr = assoc_mat.indptr
    # number of stored co-associations in each row of the CSR matrix
    assocs_per_row = indptr[1:] - indptr[:-1]
    max_row_size = assocs_per_row.max()
    max_row_idx = assocs_per_row.argmax()
return max_row_size, max_row_idx | a341153afa0398cb2a43b97614cd39129e6b2ac5 | 5,615 |
def _decicelsius_to_kelvins(temperatures_decicelsius):
"""Converts from temperatures from decidegrees Celsius to Kelvins.
:param temperatures_decicelsius: numpy array of temperatures in decidegrees
Celsius.
:return: temperatures_kelvins: numpy array of temperatures in Kelvins, with
same shape as input.
"""
return temperatures_decicelsius * 0.1 + 273.15 | 880d42637970c680cd241b5418890468443c6a5b | 5,616 |
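A quick sanity check of the conversion (assuming numpy array input, as the docstring states):
import numpy as np
print(_decicelsius_to_kelvins(np.array([0, 215, -50])))  # [273.15 294.65 268.15]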
def undupe_column_names(df, template="{} ({})"):
"""
rename df column names so there are no duplicates (in place)
e.g. if there are two columns named "dog", the second column will be reformatted to "dog (2)"
Parameters
----------
df : pandas.DataFrame
dataframe whose column names should be de-duplicated
template : template taking two arguments (old_name, int) to use to rename columns
Returns
-------
df : pandas.DataFrame
dataframe that was renamed in place, for convenience in chaining
"""
new_names = []
seen = set()
for name in df.columns:
n = 1
new_name = name
while new_name in seen:
n += 1
new_name = template.format(name, n)
new_names.append(new_name)
seen.add(new_name)
df.columns = new_names
return df | 51d13bad25571bc60edd78026bb145ff99281e2d | 5,624 |
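A usage sketch showing the renaming of duplicate columns:
import pandas as pd
df = pd.DataFrame([[1, 2, 3]], columns=['dog', 'dog', 'cat'])
undupe_column_names(df)
print(list(df.columns))  # ['dog', 'dog (2)', 'cat']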
def _has_endpoint_name_flag(flags):
"""
Detect if the given flags contain any that use ``{endpoint_name}``.
"""
return '{endpoint_name}' in ''.join(flags) | e8827da778c97d3be05ec82ef3367686616d3a88 | 5,626 |
def format_date(date: str):
"""
    This function formats dates that are in MM/DD/YYYY format,
    converting them to YYYY-MM-DD, which is required by SQLite.
:param date: The date to modify.
:return: The modified string.
"""
tmp = date.split("/")
return "{}-{}-{}".format(tmp[2], tmp[0], tmp[1]) | f1a0149bfd96db557c49becdedb84789daa1168c | 5,630 |
def xcrun_field_value_from_output(field: str, output: str) -> str:
"""
Get value of a given field from xcrun output.
    If the field is not found, an empty string is returned.
"""
field_prefix = field + ': '
for line in output.splitlines():
line = line.strip()
if line.startswith(field_prefix):
return line[len(field_prefix):]
return '' | a99efe76e21239f6ba15b8e7fb12d04d57bfb4de | 5,633 |
def trip(u, v):
"""
Returns the scalar triple product of vectors u and v and z axis.
The convention is z dot (u cross v). Dotting with the z axis simplifies
    it to the z component of u cross v.
The product is:
positive if v is to the left of u, that is,
the shortest right hand rotation from u to v is ccw
negative if v is to the right of u, that is,
the shortest right hand rotation from u to v is cw
    zero if v is collinear with u
    Essentially trip is the z component of the cross product u x v
"""
return (u[0] * v[1] - u[1] * v[0]) | 5f687ee4b16dc6c1b350ed574cb632a7c9ca996b | 5,636 |
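A quick sketch of the three sign cases:
print(trip((1, 0), (0, 1)))   # 1  -> v is to the left of u (ccw)
print(trip((1, 0), (0, -1)))  # -1 -> v is to the right of u (cw)
print(trip((1, 0), (2, 0)))   # 0  -> v is collinear with u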
def get_instance(module, name, config):
"""
Get module indicated in config[name]['type'];
If there are args to specify the module, specify in config[name]['args']
"""
func_args = config[name]['args'] if 'args' in config[name] else None
# if any argument specified in config[name]['args']
if func_args:
return getattr(module, config[name]['type'])(**func_args)
# if not then just return the module
return getattr(module, config[name]['type'])() | ea57e7097665343199956509bb302e3806fb383a | 5,639 |
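A usage sketch with the standard library's collections module standing in for a model module (the config layout mirrors what the function expects; Counter is just an illustrative class):
import collections
config = {'model': {'type': 'Counter', 'args': {'a': 2, 'b': 1}}}
obj = get_instance(collections, 'model', config)
print(obj)  # Counter({'a': 2, 'b': 1})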
def dp_palindrome_length(dp, S, i, j):
"""
Recursive function for finding the length
of the longest palindromic sequence
in a string
This is the algorithm covered in the lecture
It uses memoization to improve performance,
dp "dynamic programming" is a Python dict
containing previously computed values
"""
if i == j:
return 1
if (i, j) in dp:
return dp[(i, j)]
if S[i] == S[j]:
if i + 1 == j:
dp[(i, j)] = 2
else:
dp[(i, j)] = 2 + \
dp_palindrome_length(dp, S, i + 1, j - 1)
else:
dp[(i, j)] = \
max(
dp_palindrome_length(dp, S, i + 1, j),
dp_palindrome_length(dp, S, i, j - 1))
return dp[(i, j)] | 10a8ac671674ba1ef57cd473413211a339f94e62 | 5,641 |
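A quick example: the longest palindromic subsequence of "character" is "carac":
print(dp_palindrome_length({}, "character", 0, len("character") - 1))  # 5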
def create_own_child_column(X):
"""
Replaces the column 'relationship' with a binary one called own-child
"""
new_column = X['relationship'] == 'own-child'
X_transformed = X.assign(own_child=new_column)
X_transformed = X_transformed.drop('relationship', axis=1)
return X_transformed | 303ec8f073920f0bba6704740b200c7f3306b7bd | 5,642 |
def load_spans(file):
"""
Loads the predicted spans
"""
article_id, span_interval = ([], [])
with open(file, 'r', encoding='utf-8') as f:
for line in f.readlines():
art_id, span_begin, span_end = [int(x) for x in line.rstrip().split('\t')]
span_interval.append((span_begin, span_end))
article_id.append(art_id)
return article_id, span_interval | 8f8de31e1d1df7f0d2a44d8f8db7f846750bd89f | 5,643 |
def geometric_expval(p):
"""
Expected value of geometric distribution.
"""
return 1. / p | 3afb3adb7e9dafa03026f22074dfcc1f81c58ac8 | 5,647 |
def getKey(event):
"""Returns the Key Identifier of the given event.
Available Codes: https://www.w3.org/TR/2006/WD-DOM-Level-3-Events-20060413/keyset.html#KeySet-Set
"""
if hasattr(event, "key"):
return event.key
elif hasattr(event, "keyIdentifier"):
if event.keyIdentifier in ["Esc", "U+001B"]:
return "Escape"
else:
return event.keyIdentifier
return None | 0935ad4cb1ba7040565647b2e26f265df5674e1d | 5,657 |
def generate_discord_markdown_string(lines):
"""
Wraps a list of message into a discord markdown block
:param [str] lines:
:return: The wrapped string
:rtype: str
"""
output = ["```markdown"] + lines + ["```"]
return "\n".join(output) | 1c0db2f36f4d08e75e28a1c024e6d4c35638d8f5 | 5,665 |
def wizard_active(step, current):
"""
Return the proper classname for the step div in the badge wizard.
The current step needs a 'selected' class while the following step needs a
'next-selected' class to color the tip of the arrow properly.
"""
if current == step:
return 'selected'
elif (current + 1) == step:
return 'next-selected' | 2daad3f7651df7609f3473af698e116ce419c9df | 5,666 |
def _maybe_encode_unicode_string(record):
"""Encodes unicode strings if needed."""
if isinstance(record, str):
record = bytes(record, "utf-8").strip()
return record | 2621056ba77fd314b966e3e0db08887da53e3803 | 5,671 |
def merge_extras(extras1, extras2):
"""Merge two iterables of extra into a single sorted tuple. Case-sensitive"""
if not extras1:
return extras2
if not extras2:
return extras1
return tuple(sorted(set(extras1) | set(extras2))) | 0383e0e99c53844f952d919eaf3cb478b4dcd6d1 | 5,673 |
import torch
def _relu_3_ramp(x):
""" Relu(x) ** 3 ramp function
returns
f(x) = relu(x) ** 3
        df/dx(x) = 3 * relu(x) ** 2
"""
rx = torch.relu(x)
ramp = rx.pow(3)
grad = rx.pow(2) * 3.0
return ramp, grad | 56dfc37ef81209590e020f0c67f8204a6d8d338a | 5,674 |
def get_shared_prefix(w1, w2):
"""Get a string which w1 and w2 both have at the beginning."""
shared = ""
    for i in range(1, min(len(w1), len(w2)) + 1):
if w1[:i] != w2[:i]:
return shared
else:
shared = w1[:i]
return shared | d52850f038bc6bfe65878e3a58d7009e563af0a0 | 5,675 |
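With the inclusive bound above, the full prefix is returned even when one word is a prefix of the other:
print(get_shared_prefix("flowchart", "flows"))  # 'flow'
print(get_shared_prefix("ab", "abc"))           # 'ab'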
from typing import Dict
def update_args(args: Dict, inv_file: str, conf_file: str) -> Dict:
""" Add inventory file and config file in the correct spots inside the
arguments
Args:
args (Dict): controller args
inv_file (str): inventory file
conf_file (str): config file
Returns:
Dict: updated args
"""
args['inventory'] = inv_file
args['config'] = conf_file
return args | c1cd377785f0af26740d5cecd73186caaa6c79b6 | 5,688 |
def _add_vessel_class(df):
"""Creates 'Class' column based on vessel LOA ft."""
df.loc[:, "Class"] = "Panamax"
post_row = (df.loc[:, "LOA ft"] > 965)
post_loc = df.loc[post_row, :].index
post_pan = df.index.isin(post_loc)
df.loc[post_pan, "Class"] = "Post-Panamax"
return df | 5abec9f0bee8d7d6c734100c64a7624fdb5fb672 | 5,689 |
def mean(values):
    """Function that returns the mean of a list"""
    total = 0
    for num in values:
        total += num
    return total / len(values) | 972544f64f87860a078405a4938226f7fab307c2 | 5,695 |
def is_auto(item):
"""
Checks if a parameter should be automatically determined
"""
if isinstance(item, float):
if item == 9999.9:
return True
elif isinstance(item, str):
if 'auto' in item.lower():
return True
return False | fe6320adef43c51cdffd5b5d4a0bf34ac43d9c5a | 5,703 |
def read_accelerometer(serial, calibration):
"""
Reads the raw values from the Arduino, parses them into separate variables
and uses the calibration data to normalize the data
Args:
serial: a reference to the serial connection with the Arduino
calibration: a reference to the calibration object that holds the
values from the accelerometer calibration process
Returns:
(x_cal, y_cal, z_cal): a tuple of the normalized data
"""
components = serial.read_str()
# parses the string from the Arduino into three separate variables
x_raw, y_raw, z_raw = tuple(map(float, components.split(',')))
# normalizes the data using the calibration information
x_cal = (x_raw - calibration.offset[0]) / (calibration.gain[0])
y_cal = (y_raw - calibration.offset[1]) / (calibration.gain[1])
z_cal = (z_raw - calibration.offset[2]) / (calibration.gain[2])
return (x_cal, y_cal, z_cal) | 3c5537e2a017f57dca8dccd24c2ba083a9c47345 | 5,708 |
def get_access(name):
"""Get access based on name
    By this function's convention, __var__ denotes private access,
    _var denotes protected access,
    and a plain var denotes public access
"""
assert isinstance(name, str), "Expecting name to be a string"
if len(name) > 4 and "__" == name[:2] and "__" == name[-2:]:
return "PRIVATE"
elif len(name) > 1 and name[0] == "_":
return "PROTECTED"
else:
return "PUBLIC" | ffe072ed1820ce0536533a5882af1e1270780744 | 5,709 |
def run_query_series(queries, conn):
"""
Iterates through a list of queries and runs them through the connection
Args:
-----
queries: list of strings or tuples containing (query_string, kwargs)
conn: the triplestore connection to use
"""
results = []
for item in queries:
qry = item
kwargs = {}
if isinstance(item, tuple):
qry = item[0]
kwargs = item[1]
result = conn.update_query(qry, **kwargs)
# pdb.set_trace()
results.append(result)
return results | 7a3e920663222b57233e9a01d1b3cacb039a02eb | 5,710 |
def get_domain_id_field(domain_table):
"""
A helper function to create the id field
:param domain_table: the cdm domain table
:return: the id field
"""
return domain_table + '_id' | 5805da82b4e57d14d4105d92a62cf4b5cc4bc3f2 | 5,711 |
def insertion_sort(arr):
"""
Returns the list 'arr' sorted in nondecreasing order in O(n^2) time.
"""
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j = j - 1
        arr[j + 1] = key
return arr | cafd83cd31cbadcbc0a5c3aaff7d21f3ae907083 | 5,713 |
def vertical_move(t, v_speed=2/320):
"""Probe moves vertically at v_speed [cm/s]"""
return 0.*t, 0*t, v_speed*t | eb6a066bf6b6659728647c78dd7673a3d45b250d | 5,718 |
def _total_probe_count_without_interp(params, probe_counts):
"""Calculate a total probe count without interpolation.
This assumes that params are keys in the datasets of probe_counts.
The result of ic._make_total_probe_count_across_datasets_fn should give
the same count as this function (if params are keys in the datasets
of probe_counts). But this uses probe_counts directly and can be
used as a sanity check -- i.e., it does not do any interpolation.
Args:
params: parameter values to use when determining probe counts;
params[i] is the (i % N)'th parameter of the (i/N)'th dataset,
where N is the number of datasets
probe_counts: dict giving number of probes for each dataset and
choice of parameters
Returns:
total number of probes across all datasets, according to the
given values of params
"""
num_datasets = len(probe_counts)
# The total number of parameters must be a multiple of the number
# of datasets
assert len(params) % num_datasets == 0
num_params = int(len(params) / num_datasets)
s = 0
for i, dataset in enumerate(sorted(probe_counts.keys())):
p = tuple(params[num_params * i + j] for j in range(num_params))
s += probe_counts[dataset][p]
return s | 0973e667dbf1fc3bdf476791cbf709549230f94b | 5,729 |
from typing import Any
import math
def make_divisible(x: Any, divisor: int):
"""Returns x evenly divisible by divisor."""
return math.ceil(x / divisor) * divisor | bfbcfb334777a6c7214f16aa0fadd56906e2b7bc | 5,731 |
def select_data(all_tetrode_data, index):
"""
Select tetrode data by trial indices.
:param all_tetrode_data: (list of 4d numpy arrays) each of format [trial, 1, neuron + tetrode, time]
:param index: (1d numpy array) trial indices
:return: (list of 4d numpy arrays) selected subset of tetrode data
"""
current_data = []
for x in all_tetrode_data:
current_data.append(x[index, :, :, :])
return current_data | 5a883771ef499e0b82e0d3ac5b86550180760e13 | 5,733 |
from functools import reduce
def rec_hasattr(obj, attr):
"""
Recursive hasattr.
:param obj:
The top-level object to check for attributes on
:param attr:
Dot delimited attribute name
Example::
rec_hasattr(obj, 'a.b.c')
"""
try:
reduce(getattr, attr.split('.'), obj)
except AttributeError:
return False
else:
return True | b1a9b12f54abb93202a5b41c950f761986307170 | 5,735 |
from typing import Dict
from typing import Any
def __create_notification(title: str, content: str) -> Dict[str, Any]:
"""
Creates a notification "object" from the given title and content.
    :param title: The title of the notification.
    :param content: The content of the notification.
    :returns: A dictionary representing a notification "object".
"""
return {"title": title, "content": content} | 484abcc2afcb8f726811e36516572bc5c302a415 | 5,737 |
import torch
def reparametisation_trick(mu, log_var, device):
"""
:param mu: The mean of the latent variable to be formed (nbatch, n_z)
:param log_var: The log variance of the latent variable to be formed (nbatch, n_z)
:param device: CPU or GPU
:return: latent variable (nbatch, n_z)
"""
noise = torch.normal(mean=0, std=1.0, size=log_var.shape).to(torch.device(device))
z = mu + torch.mul(torch.exp(log_var / 2.0), noise)
return z | 9cb646132f49fa79b6a8690d10fd188968931978 | 5,741 |
def mro_hasattr(cls: type, attr: str) -> bool:
"""Check if an attribute exists in a type's class hierarchy
Args:
cls (type): The type
attr (str): The attribute
Returns:
bool: True if has the attribute.
Raises:
TypeError: Not called on a type
"""
if not isinstance(cls, type):
raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}")
for klass in cls.mro()[1:]:
if hasattr(klass, attr):
return True
return False | cfc41693e3d3321bcb63dae079abf2e768f97905 | 5,742 |
import inspect
def get_classes(mod):
"""Return a list of all classes in module 'mod'"""
return [
key
for key, _ in inspect.getmembers(mod, inspect.isclass)
if key[0].isupper()
] | be04546650a6243a3abfe4053a4dcaa9d71f85d7 | 5,746 |
def GetGerritFetchUrl(host):
"""Given a gerrit host name returns URL of a gerrit instance to fetch from."""
return 'https://%s/' % host | caf5c9015a4cd863e407fb889d473ddebd7bbabc | 5,751 |
def _is_greater(list1: list, list2: list):
"""
return True if `list1[i] > list2[i]` for each `i`
"""
return all([list1[i] > list2[i] for i in range(len(list1))]) | 925fb214f741d6503b41b49d57a268506f05a048 | 5,760 |
def _get_pair_nodes(root_node):
"""
Internal method to get "pair" nodes under root_node
"""
method_elem = root_node
in_configs_elem_list = method_elem.getElementsByTagName("inConfigs")
in_configs_elem = in_configs_elem_list[0]
pair_elems_list = in_configs_elem.getElementsByTagName("pair")
return pair_elems_list | c2b74f7a507394d2117cd6292116e62d34f3e556 | 5,761 |
def Shard(ilist, shard_index, num_shards):
"""Shard a given list and return the group at index |shard_index|.
Args:
ilist: input list
shard_index: 0-based sharding index
num_shards: shard count
"""
  chunk_size = len(ilist) // num_shards  # integer division keeps slice indices ints
chunk_start = shard_index * chunk_size
if shard_index == num_shards - 1: # Exhaust the remainder in the last shard.
chunk_end = len(ilist)
else:
chunk_end = chunk_start + chunk_size
return ilist[chunk_start:chunk_end] | 7f79ade521c1264d0ddc8c5a228679d7053d9651 | 5,762 |
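With the integer-division fix above, a quick sketch of how the remainder lands in the last shard:
print(Shard(list(range(10)), 0, 3))  # [0, 1, 2]
print(Shard(list(range(10)), 2, 3))  # [6, 7, 8, 9]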
def ppmv2pa(x, p):
"""Convert ppmv to Pa
Parameters
----------
    x : float
        Gas pressure [ppmv]
    p : float
        Total air pressure [Pa]
Returns
-------
pressure [Pa]
"""
return x * p / (1e6 + x) | 974d79d022a7fb655040c7c2900988cd4a10f064 | 5,767 |
def make_elastic_uri(schema: str, user: str, secret: str, hostname: str, port: int) -> str:
"""Make an Elasticsearch URI.
:param schema: the schema, e.g. http or https.
:param user: Elasticsearch username.
:param secret: Elasticsearch secret.
:param hostname: Elasticsearch hostname.
:param port: Elasticsearch port.
:return: the full Elasticsearch URI.
"""
return f"{schema}://{user}:{secret}@{hostname}:{port}" | be959e98330913e75485006d1f4380a57e990a05 | 5,768 |
def _truncate(s: str, max_length: int) -> str:
"""Returns the input string s truncated to be at most max_length characters
long.
"""
return s if len(s) <= max_length else s[0:max_length] | 52c49c027057024eaa27a705a0d2c013bff7a2ce | 5,769 |
import importlib
def load_model(opt, dataloader):
""" Load model based on the model name.
Arguments:
opt {[argparse.Namespace]} -- options
dataloader {[dict]} -- dataloader class
Returns:
[model] -- Returned model
"""
model_name = opt.model
model_path = f"lib.models.{model_name}"
    print('use model:', model_name)
model_lib = importlib.import_module(model_path)
model = getattr(model_lib, model_name.title())
return model(opt, dataloader) | 8ad05c4a0f51c40851a9daecf81ed8bf9862979c | 5,770 |
def get_maxlevel(divs, maxlevel):
"""
Returns the maximum div level.
"""
for info in divs:
if info['level'] > maxlevel:
maxlevel = info['level']
if info.get('subdivs', None):
maxlevel = get_maxlevel(info['subdivs'], maxlevel)
return maxlevel | b7153ef84cb260a4b48c58315aa63fc5179fc06c | 5,776 |
def get_molecules(topology):
"""Group atoms into molecules."""
if 'atoms' not in topology:
return None
molecules = {}
for atom in topology['atoms']:
idx, mol_id, atom_type, charge = atom[0], atom[1], atom[2], atom[3]
if mol_id not in molecules:
molecules[mol_id] = {'atoms': [], 'types': [], 'charge': []}
molecules[mol_id]['atoms'].append(idx)
molecules[mol_id]['types'].append(atom_type)
molecules[mol_id]['charge'].append(charge)
return molecules | 4bf63000c9d5b56bb9d35922ed521ce81cf3a6c1 | 5,777 |
def get_union(*args):
"""Return unioin of multiple input lists.
"""
return list(set().union(*args)) | 18025cfd37d64f15daf92aa2ae3e81176cae6e39 | 5,786 |
def is_owner(obj, user):
""" Check if user is owner of the slice """
return obj and user in obj.owners | f0c49ffe8a8879d1d052f6fc37df596efa021a84 | 5,788 |
def convert_data_to_ints(data, vocab2int, word_count, unk_count, eos=True):
"""
Converts the words in the data into their corresponding integer values.
Input:
data: a list of texts in the corpus
        vocab2int: dictionary mapping each word to its integer id
word_count: an integer to count the words in the dataset
unk_count: an integer to count the <UNK> tokens in the dataset
eos: boolean whether to append <EOS> token at the end or not (default true)
Returns:
converted_data: a list of corpus texts converted to integers
word_count: updated word count
unk_count: updated unk_count
"""
converted_data = []
for text in data:
converted_text = []
for token in text.split():
word_count += 1
if token in vocab2int:
# Convert each token in the paragraph to int and append it
converted_text.append(vocab2int[token])
else:
# If it's not in the dictionary, use the int for <UNK> token instead
converted_text.append(vocab2int['<UNK>'])
unk_count += 1
if eos:
# Append <EOS> token if specified
converted_text.append(vocab2int['<EOS>'])
converted_data.append(converted_text)
assert len(converted_data) == len(data)
return converted_data, word_count, unk_count | c415aea164f99bc2a44d5098b6dbcc3d723697a6 | 5,801 |
def _endian_char(big) -> str:
"""
Returns the character that represents either big endian or small endian in struct unpack.
Args:
big: True if big endian.
Returns:
Character representing either big or small endian.
"""
return '>' if big else '<' | 2e1a63ec593ca6359947385019bcef45cb3749c0 | 5,804 |
import random
def get_initators(filepath, n_lines):
"""
    Open text file with initiator words and sample a random initiator for each line in the poem.
"""
with open(filepath, "r", encoding = "utf-8") as file:
# save indices of all keywords
loaded_text = file.read() # load text file
        lines = loaded_text.splitlines() # separate initiator lines
        initiators_list = list(random.sample(lines, n_lines)) # sample random initiators
return initiators_list | 94792679a6ea4e0bb14afd5eb38b656a2cc8af67 | 5,806 |
import traceback
import time
def wrapLoop(loopfunc):
"""Wraps a thread in a wrapper function to restart it if it exits."""
def wrapped():
while True:
try:
loopfunc()
except BaseException:
print(f"Exception in thread {loopfunc},"
" restarting in 10s...")
traceback.print_exc()
else:
print(f"Thread {loopfunc} exited, restarting in 10s...")
time.sleep(10)
return wrapped | 86c48bc850bb1cf17121130ee9349dd529acf5e3 | 5,807 |
def protobuf_get_constant_type(proto_type) :
"""About protobuf write types see :
https://developers.google.com/protocol-buffers/docs/encoding#structure
+--------------------------------------+
+ Type + Meaning + Used For +
+--------------------------------------+
+ + + int32, int64, uint32+
+ 0 + Varint + uint64,sint32,sint64+
+ + + boolean, enum +
+--------------------------------------+
+ + + +
+ 1 + 64-bit + fixed64, sfixed64, +
+ + + double +
+--------------------------------------+
+ 2 + string + string +
+--------------------------------------+
+ 5 + 32-bit + float +
+--------------------------------------+
"""
if 'uInt32' == proto_type or \
'sInt32' == proto_type or \
'int32' == proto_type :
return 0
elif 'double' == proto_type :
return 1
elif 'string' == proto_type :
return 2
elif 'float' == proto_type :
return 5
return 2 | 46ce7e44f8499e6c2bdcf70a2bc5e84cb8786956 | 5,811 |
import pprint
def format_locals(sys_exc_info):
"""Format locals for the frame where exception was raised."""
current_tb = sys_exc_info[-1]
while current_tb:
next_tb = current_tb.tb_next
if not next_tb:
frame_locals = current_tb.tb_frame.f_locals
return pprint.pformat(frame_locals)
current_tb = next_tb | b5a21f42c8543d9de060ff7be2b3ad6b23065de9 | 5,812 |
def binarize_garcia(label: str) -> str:
"""
Streamline Garcia labels with the other datasets.
:returns (str): streamlined labels.
"""
if label == 'hate':
return 'abuse'
else:
return 'not-abuse' | 5cc26303e0c496d46b285e266604a38a0c88e8d7 | 5,813 |
import random
def spliter(data_dict, ratio=[6, 1, 1], shuffle=True):
"""split dict dataset into train, valid and tests set
Args:
data_dict (dict): dataset in dict
ratio (list): list of ratio for train, valid and tests split
shuffle (bool): shuffle or not
"""
if len(ratio) != 3:
raise ValueError(f'ratio must include three int numbers')
train = {'x': list(), 'y': list()}
valid = {'x': list(), 'y': list()}
tests = {'x': list(), 'y': list()}
for _, [samples, labels] in data_dict.items():
samples_lens = len(samples)
train_ratio = round(samples_lens * (ratio[0] / sum(ratio)))
tests_ratio = round(samples_lens * (ratio[2] / sum(ratio)))
valid_ratio = samples_lens - train_ratio - tests_ratio
data = list(zip(samples, labels))
if shuffle:
random.shuffle(data)
x, y = zip(*data)
train['x'].extend(x[:train_ratio])
train['y'].extend(y[:train_ratio])
valid['x'].extend(x[train_ratio:train_ratio + valid_ratio])
valid['y'].extend(y[train_ratio:train_ratio + valid_ratio])
        tests['x'].extend(x[train_ratio + valid_ratio:])
        tests['y'].extend(y[train_ratio + valid_ratio:])
return train, valid, tests | 793af274e3962d686f2ef56b34ae5bc0a53aac5b | 5,819 |
import torch
def freeze_layers(
model: torch.nn.Sequential,
n_layers_to_train: int
) -> torch.nn.Sequential:
"""
Function to freeze given number of layers for selected model
:param model: Instance of Pytorch model
:param n_layers_to_train: number of layers to train, counting from the last one.
The rest of the layers is going to be frozen.
:return: Model with frozen layers.
"""
n_layers = len(list(model.children()))
for idx, child in enumerate(model.children()):
if idx < (n_layers - n_layers_to_train):
for param in child.parameters():
param.requires_grad = False
return model | bfeca127c684de0815493ef621dce790b3a090f3 | 5,821 |
def get_headers(wsgi_env):
"""
Extracts HTTP_* variables from a WSGI environment as
title-cased header names.
"""
return {
key[5:].replace('_', '-').title(): value
        for key, value in wsgi_env.items() if key.startswith('HTTP_')} | 01e7140a670957c691fec01dd90d53bdc29425bd | 5,823 |
def permute_by_indices(list_of_things, *list_of_index_transpositions):
"""Given a list_of_things and a list of pairs of transpositions of indices
[(i, j), (k, m), ...], return the list_of_things with the i-th an j-th
values swapped, the k-th- and m-th values swapped, and so on.
Examples
--------
>>> permute_by_indices(['a', 'b', 'c'], [(0, 1)])
['b', 'a', 'c']
>>> permute_by_indices(['a', 'b', 'c'], [(0, 2), (1, 2)])
['c', 'a', 'b']
"""
result = list_of_things
for i, j in list_of_index_transpositions:
result[j], result[i] = result[i], result[j]
return result | 31d7f73028fcb4c3a43750d1ade0c27e1b563dbb | 5,830 |
import re
def _string_to_int(string: str) -> int:
"""
    a helper function to convert a string to an int, e.g. S1 -> 1
    Args:
        string (str): input string
    Returns:
        (int): the parsed value if the string contains an integer, otherwise 0
    """
    r = re.findall(r'\d+', string)
    if r:
        return int(r[0])
    else:
        return 0 | d4dbea658e6092edb27b85154b319e098c588a76 | 5,831 |
# Path here is :class:`~ospgrillage.load.Path`, defined in the same module (not pathlib.Path)
def create_moving_path(**kwargs):
"""
User interface function to create Path object for moving load.
:keyword:
* start_point (`Point`): Start point of path
* end_point (`Point`): End point of path
* increments (`int`): Increment of path steps. Default is 50
* mid_point (`Point`): Default = None
:returns: :class:`~ospgrillage.load.Path` object
"""
return Path(**kwargs) | 05de795c61e7b3fc4c3f4c2aa14505b4a6fcf986 | 5,832 |
def _no_negative_zero(val):
"""Make sure -0 is never output. Makes diff tests easier."""
if val == 0:
return 0
return val | 345802e297cc1e1c77a5b1db664715bfc42f3da6 | 5,833 |
import json
def load_json_from_string(string):
"""Load schema from JSON string"""
try:
json_data = json.loads(string)
except ValueError as e:
raise ValueError('Given string is not valid JSON: {}'.format(e))
else:
return json_data | 66f96373a8e02bf69289e5e4594ac319906475f5 | 5,839 |
def _parse_detector(detector):
"""
Check and fix detector name strings.
Parameters
----------
detector : `str`
The detector name to check.
"""
oklist = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9',
'n10', 'n11']
altlist = [str(i) for i in range(12)]
if detector in oklist:
return detector
elif detector in altlist:
return 'n' + detector
else:
raise ValueError('Detector string could not be interpreted') | f78d7eb5004b3cb6d3276b0c701263c71668e36e | 5,840 |
def rel_ordered(x1,x2,x3,x4):
"""
given 4 collinear points, return true if the direction
    from x1->x2 is the same as x3->x4
requires x1!=x2, and x3!=x4
"""
if x1[0]!=x2[0]:
i=0 # choose a coordinate which is varying
else:
i=1
assert x1[i]!=x2[i]
assert x3[i]!=x4[i]
return (x1[i]<x2[i]) == (x3[i]<x4[i]) | 2649250e2ea2619c7f6c21b8dd2cebaeec10647b | 5,841 |
def bounds(gdf):
"""Calculates the bounding coordinates (left, bottom, right, top) in the given GeoDataFrame.
Args:
gdf: A GeoDataFrame containing the input points.
Returns:
An array [minx, miny, maxx, maxy] denoting the spatial extent.
"""
bounds = gdf.total_bounds
return bounds | 48242e870edd1db9b1191518c4b9ba7433420610 | 5,845 |
def is_hr_between(time: int, time_range: tuple) -> bool:
"""
Calculate if hour is within a range of hours
Example: is_hr_between(4, (24, 5)) will match hours from 24:00:00 to 04:59:59
"""
if time_range[1] < time_range[0]:
return time >= time_range[0] or time <= time_range[1]
return time_range[0] <= time <= time_range[1] | 70d874f0a5dee344d7638559101fc6be2bcca875 | 5,848 |
def fib_lista(n):
"""
    Function that returns a list containing the Fibonacci sequence
    numbers up to n.
"""
lista = []
i, j = 0, 1
while i < n:
lista.append(i)
i, j = j, i + j
return lista | ec307ce80ae70e5fba81d2e26b140f1b86c95619 | 5,852 |
def calc_correlation(data, data2):
"""
Calculate the correlations between 2 DataFrames().
Parameters:
- data: The first dataframe.
- data2: The second dataframe.
Returns:
A Series() object.
"""
return (
data.corrwith(data2).
loc[lambda x: x.notnull()]
) | 7f47592a4525efa9db2fba317d095448d5288399 | 5,855 |
def commandLine(Argv):
"""
    Method converting a list of arguments/parameters into command-line format (e.g. to include in the invocation of a program).
list --> str
"""
    assert type(Argv) is list, "The argument of this method must be the list of arguments to convert to command-line format."
commandLine = ''
for i in Argv[1::]:
commandLine += i+" "
return(commandLine) | 4b27e73fd43ec914f75c22f2482271aafd0848ac | 5,856 |
def find_nth(s, x, n):
"""
    find the nth occurrence of a substring in a string
    takes the string to search, the substring, and the nth occurrence
    returns -1 if there are fewer than n occurrences
i = -1
for _ in range(n):
i = s.find(x, i + len(x))
if i == -1:
break
return i | b54998db817272ec534e022a9f04ec8d350b08fb | 5,859 |
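A quick sketch; dashes sit at indices 2, 5 and 8:
s = "ab-cd-ef-gh"
print(find_nth(s, "-", 2))  # 5
print(find_nth(s, "-", 9))  # -1 (fewer than nine occurrences)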
def get_maf(variant):
"""
Gets the MAF (minor allele frequency) tag from the info field for the
variant.
Args:
variant (cyvcf2.Variant)
Returns:
maf (float): Minor allele frequency
"""
return variant.INFO.get("MAF") | 1d25f577a3cec14b8d05095d320fad6584484718 | 5,863 |
import statistics
def coverageCalc(coverageList,minCov):
"""Function parsing coverageList for
:param coverageList: List of pacbam coverage information
:param minCov: Int of minimum passing coverage
:return:
covCount: Int of bases with coverage
minCovCount: Int of bases with minimum coverage
meanDepth: Int mean coverage stat
"""
covCount = 0
minCovCount = 0
meanDepth = statistics.mean(coverageList)
for i in coverageList:
if i != 0:
covCount +=1
if i >= minCov:
minCovCount +=1
return(covCount,minCovCount,round(meanDepth,2)) | e20dc1e1f0b6f7e328501afe9921455a705f196a | 5,864 |
def parse_rsync_url(location):
"""Parse a rsync-style URL."""
if ':' in location and '@' not in location:
# SSH with no user@, zero or one leading slash.
(host, path) = location.split(':', 1)
user = None
elif ':' in location:
# SSH with user@host:foo.
user_host, path = location.split(':', 1)
if '@' in user_host:
user, host = user_host.rsplit('@', 1)
else:
user = None
host = user_host
else:
raise ValueError('not a valid rsync-style URL')
return (user, host, path) | fc315c1a6b376cbb83b047246fee51ae936b68ef | 5,868 |
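A quick sketch of both accepted forms:
print(parse_rsync_url("git@example.com:repo.git"))  # ('git', 'example.com', 'repo.git')
print(parse_rsync_url("example.com:repo.git"))      # (None, 'example.com', 'repo.git')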
def setup_go_func(func, arg_types=None, res_type=None):
"""
Set up Go function, so it know what types it should take and return.
:param func: Specify Go function from library.
:param arg_types: List containing file types that function is taking. Default: None.
:param res_type: File type that function is returning. Default: None.
:return: Returns func arg back for cases when you want to setup function and assign it to variable in one line.
"""
if arg_types is not None:
func.argtypes = arg_types
if res_type is not None:
func.restype = res_type
return func | 05f48f4dfecdf0133613f76f235b1e82f14bc5a9 | 5,869 |
def jaccard_similarity(x, y):
""" Returns the Jaccard Similarity Coefficient (Jarccard Index) between two
lists.
From http://en.wikipedia.org/wiki/Jaccard_index: The Jaccard
coefficient measures similarity between finite sample sets, as is defined as
the size of the intersection divided by the size of the union of the sample
sets.
"""
intersection_cardinality = len(set.intersection(*[set(x), set(y)]))
union_cardinality = len(set.union(*[set(x), set(y)]))
return intersection_cardinality / float(union_cardinality) | 81cf0c882ff4b06e79b102abb2d8f13755b68873 | 5,873 |
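A quick worked example: the lists share {2, 3} out of a union of four elements:
print(jaccard_similarity([1, 2, 3], [2, 3, 4]))  # 0.5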
import shlex
def tokenizer_word(text_string, keep_phrases=False):
"""
Tokenizer that tokenizes a string of text on spaces and new lines (regardless of however many of each.)
:param text_string: Python string object to be tokenized.
    :param keep_phrases: Boolean; will not split "quoted" text when True
:return: Array of strings, each is a word
"""
text_string = str(text_string)
if keep_phrases:
tokens = shlex.split(text_string.replace('\n', ' ').replace('/', ' '))
else:
tokens = text_string.replace('\n', ' ').replace('/', ' ').split()
return tokens | 940f716072e9b2ce522c9854b2394327fbd1e934 | 5,875 |
def weighted_mean(values, weights):
"""Calculate the weighted mean.
:param values: Array of values
:type values: numpy.ndarray
:param weights: Array of weights
:type weights: numpy.ndarray
:rtype: float
"""
weighted_mean = (values * weights).sum() / weights.sum()
return weighted_mean | 886d7cff1555c40b448cda03e08620a0e2d69ede | 5,876 |
def strip_long_text(text, max_len, append=u'…'):
"""Returns text which len is less or equal max_len.
If text is stripped, then `append` is added,
but resulting text will have `max_len` length anyway.
"""
    if len(text) <= max_len:
return text
return text[:max_len - len(append)] + append | 02ce128f1de1dbeb2a2dcef5bc2b6eb8745322d3 | 5,886 |
def tuplify2d(x):
"""Convert ``x`` to a tuple of length two.
It performs the following conversion:
.. code-block:: python
x => x if isinstance(x, tuple) and len(x) == 2
x => (x, x) if not isinstance(x, tuple)
Args:
x (any): the object to be converted
Returns:
tuple:
"""
if isinstance(x, tuple):
assert len(x) == 2
return x
return (x, x) | 64170b14dbe7eb8885d21f45acff6b43979f1219 | 5,894 |
def get_request_fixture_names(request):
"""Get list of fixture names for the given FixtureRequest.
Get the internal and mutable list of fixture names in the enclosing scope of
the given request object.
Compatibility with pytest 3.0.
"""
return request._pyfuncitem._fixtureinfo.names_closure | 665fff4538f3817b6eb882f9a873683d69003bfd | 5,901 |
def check_version(stdout):
"""Check version of Ensembl-VEP.
Example of the first part of an output from the command `vep --help`:
#----------------------------------#
# ENSEMBL VARIANT EFFECT PREDICTOR #
#----------------------------------#
Versions:
ensembl : 104.1af1dce
ensembl-funcgen : 104.59ae779
ensembl-io : 104.1d3bb6e
ensembl-variation : 104.6154f8b
ensembl-vep : 104.3
Help: [email protected] , [email protected]
Twitter: @ensembl
"""
vep_version = int(
float(
next(
(line for line in stdout.split("\n") if "ensembl-vep" in line)
).split()[2]
)
)
return vep_version | 5c3b716db7016f1b612f764fb54e3b25d970b0f2 | 5,902 |
def _get_trip_from_id(trip_obj_list, trip_id):
""" Get a trip from a list, based on a trip id """
found_trip_obj = None
for trip_obj in trip_obj_list:
if trip_obj.id == trip_id:
found_trip_obj = trip_obj
break
return found_trip_obj | f2bbacfccda1e4ff778ba793ad238f744400f020 | 5,907 |
def to_camel_case(string: str) -> str:
"""
Converts a ``snake_case`` string to ``camelCase``.
:param string: A ``snake_case`` string.
:return: A ``camelCase`` version of the input.
"""
components = string.split("_")
return components[0] + "".join(x.capitalize() for x in components[1:]) | ae0d82efd9a5a65ef16cc401a0fe302b4f04d524 | 5,909 |
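A quick example:
print(to_camel_case("snake_case_string"))  # 'snakeCaseString'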
def parse_qsub_defaults(parsed):
"""Unpack QSUB_DEFAULTS."""
    d = parsed.split() if isinstance(parsed, str) else parsed
    options = {}
    for arg in d:
        if "=" in arg:
            k, v = arg.split("=")
            options[k.strip("-")] = v.strip()
else:
options[arg.strip("-")] = ""
return options | a5c50aef405d88bcb018af48904a384b090d22a2 | 5,910 |
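A quick sketch (the qsub flags here are only illustrative):
print(parse_qsub_defaults("-l h_vmem=4G -cwd"))  # {'l': '', 'h_vmem': '4G', 'cwd': ''}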
def rc_seq(seq=""):
"""Returns the reverse compliment sequence."""
    rc_dict = {
"a": "t",
"c": "g",
"t": "a",
"g": "c",
"n": "n",
"A": "T",
"C": "G",
"T": "A",
"G": "C",
"N": "N"
}
rc_nt_ls = [rc_dict[seq[i]] for i in range(len(seq)-1, -1, -1)]
rc_seq_ = "".join(rc_nt_ls)
return rc_seq_ | 827877a76d4ffbe61e40e4f00641afa4277f3ff5 | 5,919 |
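A quick example, including the 'n' (unknown base) passthrough:
print(rc_seq("ATGCn"))  # 'nGCAT'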
import six
def get_rotation(rotation):
"""
Return the text angle as float. The returned
angle is between 0 and 360 deg.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
try:
angle = float(rotation)
except (ValueError, TypeError):
isString = isinstance(rotation, six.string_types)
if ((isString and rotation == 'horizontal') or rotation is None):
angle = 0.
elif (isString and rotation == 'vertical'):
angle = 90.
else:
raise ValueError("rotation is {0} expected either 'horizontal'"
" 'vertical', numeric value or"
"None".format(rotation))
return angle % 360 | 7ed0fd31f9a90ddb5743faa8e45e46f0d5cc08bd | 5,920 |
def insert_with_key_enumeration(agent, agent_data: list, results: dict):
"""
Checks if agent with the same name has stored data already in the given dict and enumerates in that case
:param agent: agent that produced data
:param agent_data: simulated data
:param results: dict to store data into
:return: dict with inserted data/name pair
"""
# add to results dict and don't double agent names
if agent.get_name() not in results:
results[agent.get_name()] = agent_data
else:
# add index to agent name if another agent of same type was simulated before
new_name = agent.get_name() + "_" + str(
sum([agent.get_name() in s for s in list(results.keys())]))
results[new_name] = agent_data
return results | d2d653dcff20836c4eaf8cf55b31b1a1209a4ddd | 5,922 |
def do_simple_math(number1, number2, operator):
"""
Does simple math between two numbers and an operator
:param number1: The first number
:param number2: The second number
:param operator: The operator (string)
:return: Float
"""
ans = 0
if operator is "*":
ans = number1 * number2
elif operator is "/":
ans = number1 / number2
elif operator is "+":
ans = number1 + number2
elif operator is "-":
ans = number1 - number2
elif operator is "^":
ans = number1 ** number2
elif operator is "%":
ans = number1 % number2
return ans | eb745f9c3f3c1e18de30cbe6c564d68c29e39ff4 | 5,926 |
def is_intersection(g, n):
"""
Determine if a node is an intersection
graph: 1 -->-- 2 -->-- 3
>>> is_intersection(g, 2)
False
graph:
1 -- 2 -- 3
|
4
>>> is_intersection(g, 2)
True
Parameters
----------
g : networkx DiGraph
n : node id
Returns
-------
bool
"""
    # list() keeps this working whether predecessors/successors return lists or iterators
    return len(set(list(g.predecessors(n)) + list(g.successors(n)))) > 2 | 415e5154095cd78112ef029b6c4d62c36da0b3b8 | 5,932 |
def secs_to_str(secs):
"""Given number of seconds returns, e.g., `02h 29m 39s`"""
units = (('s', 60), ('m', 60), ('h', 24), ('d', 7))
out = []
rem = secs
for (unit, cycle) in units:
out.append((rem % cycle, unit))
rem = int(rem / cycle)
if not rem:
break
if rem: # leftover = weeks
out.append((rem, 'w'))
return ' '.join(["%02d%s" % tup for tup in out[::-1]]) | 0918fd72fbaaa0adf8fe75bcb1ef39b4e9aba75b | 5,937 |
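A quick check against the docstring's example:
print(secs_to_str(8979))  # '02h 29m 39s'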
def stairmaster_mets(setting):
"""
For use in submaximal tests on the StairMaster 4000 PT step ergometer.
Howley, Edward T., Dennis L. Colacino, and Thomas C. Swensen. "Factors Affecting the Oxygen Cost of Stepping on an Electronic Stepping Ergometer." Medicine & Science in Sports & Exercise 24.9 (1992): n. pag. NCBI. Web. 10 Nov. 2016.
args:
setting (int): the setting of the step ergometer
Returns:
        float: VO2max in kcal/kg*hour
"""
return 0.556 * 7.45 * setting | 1d6cc9fc846773cfe82dfacb8a34fb6f46d69903 | 5,940 |
def check_threshold(service, config_high_threshold, config_low_threshold, curr_util):
""" Checks whether Utilization crossed discrete threshold
Args:
service: Name of the micro/macroservice
config_high_threshold: Upper limit threshold to utilization set in config file
config_low_threshold: Lower limit threshold to utilization set in config file
curr_util: value of the current utilization
Returns:
String "High" if upper limit crossed
String "Low" if lower limit crossed
String "Normal" if none crossed
"""
if float(curr_util) > float(config_high_threshold):
return "High"
elif float(curr_util) < float(config_low_threshold):
return "Low"
else:
return "Normal" | 80bf8ab4f5b2bbac35df7c48764114e213fba580 | 5,947 |
def double_bin_pharmacophore_graph(distance, bins, delta):
""" Assign two bin values to the distance between pharmacophoric points.
Parameters
----------
distance : float
The distance that will be binned.
bins : np.ndarray
Array of bins. It has to be one dimensional and monotonic.
delta : float
The tolerance from which a distance value is considered to belong to
the lower and upper bin. It has to be a value between 0 and 0.5
Returns
-------
2-tuple of int
The two bins assigned to the distance.
"""
for ii in range(bins.shape[0] - 1):
if distance == bins[ii]:
return (bins[ii], bins[ii])
elif distance > bins[ii] and distance < bins[ii + 1]:
if distance - bins[ii] > delta:
return (bins[ii], bins[ii + 1])
else:
return (bins[ii], bins[ii]) | b7dedf4f31b5cd08c9875139df837a57a8117001 | 5,950 |
def is_absolute_url(parsed_url):
""" check if it is an absolute url """
return all([parsed_url.scheme, parsed_url.netloc]) | 578c1443ec18f9b741cd205763604cba2242ac48 | 5,952 |
def get_levelized_cost(solution, cost_class='monetary', carrier='power',
group=None, locations=None,
unit_multiplier=1.0):
"""
Get the levelized cost per unit of energy produced for the given
``cost_class`` and ``carrier``, optionally for a subset of technologies
given by ``group`` and a subset of ``locations``.
Parameters
----------
solution : solution container
cost_class : str, default 'monetary'
carrier : str, default 'power'
group : str, default None
Limit the computation to members of the given group (see the
groups table in the solution for valid groups).
locations : str or iterable, default None
Limit the computation to the given location or locations.
unit_multiplier : float or int, default 1.0
Adjust unit of the returned cost value. For example, if model units
are kW and kWh, ``unit_multiplier=1.0`` will return cost per kWh, and
``unit_multiplier=0.001`` will return cost per MWh.
"""
if group is None:
group = 'supply'
members = solution.groups.to_pandas().at[group, 'members'].split('|')
if locations is None:
locations_slice = slice(None)
elif isinstance(locations, (str, float, int)):
# Make sure that locations is a list if it's a single value
locations_slice = [locations]
else:
locations_slice = locations
cost = solution['costs'].loc[dict(k=cost_class, x=locations_slice, y=members)]
ec_prod = solution['ec_prod'].loc[dict(c=carrier, x=locations_slice, y=members)]
if locations is None:
cost = cost.sum(dim='x').to_pandas()
ec_prod = ec_prod.sum(dim='x').to_pandas()
else:
cost = cost.to_pandas()
ec_prod = ec_prod.to_pandas()
return (cost / ec_prod) * unit_multiplier | 96b8f9a9fceaa932bcee72033e73ad8b9551759d | 5,954 |
def get_parameter(model, name):
"""
Finds the named parameter within the given model.
"""
for n, p in model.named_parameters():
if n == name:
return p
raise LookupError(name) | ba35b743d9189c94da0dcce27630bba311ea8a46 | 5,964 |
def valid_tetrodes(tetrode_ids, tetrode_units):
"""
Only keep valid tetrodes with neuron units so that there is corresponding spike train data.
:param tetrode_ids: (list) of tetrode ids in the order of LFP data
:param tetrode_units: (dict) number of neuron units on each tetrode
:return: (list) of tetrode ids with neuron units
"""
return [x for x in tetrode_ids if tetrode_units[x] > 0] | c887f5e5c29d841da63fe0cd56c41eda5ddde891 | 5,967 |