content | sha1 | id |
---|---|---|
def sample_from_simplex(rng, dim):
"""Uniformly samples a probability vector from a simplex of dimension dim."""
alpha = [1] * dim
return rng.dirichlet(alpha) | d034c4502634678874f89e17f2fde28eb8c28e0b | 13,206 |
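# Usage sketch for sample_from_simplex above; a minimal check, assuming
# NumPy's Generator API (numpy.random.default_rng, numpy >= 1.17).
import numpy as np
rng = np.random.default_rng(seed=0)
p = sample_from_simplex(rng, dim=4)
assert abs(p.sum() - 1.0) < 1e-9 and (p >= 0).all()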
import math
def getDistance(p1, p2):
"""Return the distance between p1 and p2."""
return math.sqrt(sum((a - b) ** 2 for a, b in zip(p1, p2))) | 28d8156ad1eb5557a3fb3fa8bc7a94a66d06db3e | 13,208 |
def file_is_pdf(filename: str) -> bool:
"""
Check if `filename` has the .pdf extension.
Args:
filename (str): Any filename, including its path.
Returns:
True if `filename` ends with .pdf, false otherwise.
"""
return filename.endswith(".pdf") | ba9a540a1f0edd33e48010a001de4493bdff80d9 | 13,217 |
import torch
def torch_hilbert(x_real, n_fft=None):
"""
Obtain imaginary counterpart to a real signal such that there are no negative frequency
components when represented as a complex signal. This is done by using the Hilbert transform.
We end up with an analytic signal and return only the imaginary part. Most importantly,
this procedure is fully differentiable. Adapted from the SciPy signal.hilbert function.
Parameters
----------
x_real : Tensor (F x T)
Real counterpart of an analytic signal,
F - number of independent signals
T - number of time steps (samples)
n_fft : int
Number of Fourier components
Returns
----------
x_imag : Tensor (F x T)
Imaginary counterpart of an analytic signal,
F - number of independent signals
T - number of time steps (samples)
"""
# Default to the length of the input signal
if n_fft is None:
n_fft = x_real.size(-1)
# Create the transfer function for an analytic signal
h = torch.zeros(n_fft).to(x_real.device)
if n_fft % 2 == 0:
h[0] = h[n_fft // 2] = 1
h[1 : n_fft // 2] = 2
else:
h[0] = 1
h[1 : (n_fft + 1) // 2] = 2
# Take the Fourier transform of the real part
Xf = torch.fft.fft(x_real, n=n_fft, dim=-1)
# Apply the transfer function to the Fourier transform
Xfh = Xf * h.unsqueeze(-2)
# Take the inverse Fourier Transform to obtain the analytic signal
x_alyt = torch.fft.ifft(Xfh, dim=-1)
# Take the imaginary part of the analytic signal to obtain the Hilbert transform
x_imag = x_alyt.imag
return x_imag | 33bb230c78beb84ca5569c5fca8ebb61a36fd7c5 | 13,221 |
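# Sanity-check sketch for torch_hilbert above, assuming SciPy is available:
# its output should match the imaginary part of scipy.signal.hilbert.
import numpy as np
import scipy.signal
import torch
t = np.linspace(0, 1, 256, endpoint=False)
x = np.cos(2 * np.pi * 8 * t)
ref = scipy.signal.hilbert(x).imag
out = torch_hilbert(torch.from_numpy(x).unsqueeze(0))[0].numpy()
print(np.allclose(out, ref, atol=1e-8))  # expected: True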
import math
def calcCirclePos(robotIdx, numRobots, radius=3, center=(0,0)):
"""
Helper function to distribute robot positions on a circle.
"""
gamma = 2*math.pi / numRobots
x = radius * math.cos(gamma * robotIdx) + center[0]
y = radius * math.sin(gamma * robotIdx) + center[1]
phi = gamma * robotIdx - math.pi
return (x, y, phi) | 393c044dcb4016c34b8efe2a7acd185b45c52467 | 13,224 |
import re
def is_valid_vlive_url(url: str) -> bool:
"""Uses a regex to check if the given url is a valid 'vlive.tv./video/' address."""
# VLIVE videos are only identified by numbers in the url (unlike Youtube IDs,
# for example)
vlive_url_regex = r"(vlive\.tv\/video\/[0-9]*)"
if not re.search(vlive_url_regex, url):
return False
return True | 35c741ea512cad5edd791ebbb8acef99b19d84d3 | 13,227 |
import torch
def embedding_similarity(
batch: torch.Tensor,
similarity: str = 'cosine',
reduction: str = 'none',
zero_diagonal: bool = True
) -> torch.Tensor:
"""
Computes representation similarity
Example:
>>> from pytorch_lightning.metrics.functional import embedding_similarity
>>> embeddings = torch.tensor([[1., 2., 3., 4.], [1., 2., 3., 4.], [4., 5., 6., 7.]])
>>> embedding_similarity(embeddings)
tensor([[0.0000, 1.0000, 0.9759],
[1.0000, 0.0000, 0.9759],
[0.9759, 0.9759, 0.0000]])
Args:
batch: (batch, dim)
similarity: 'dot' or 'cosine'
reduction: 'none', 'sum', 'mean' (all along dim -1)
zero_diagonal: if True, the diagonals are set to zero
Return:
A square matrix (batch, batch) with the similarity scores between all elements
If sum or mean are used, then returns (b, 1) with the reduced value for each row
"""
if similarity == 'cosine':
norm = torch.norm(batch, p=2, dim=1)
batch = batch / norm.unsqueeze(1)
sqr_mtx = batch.mm(batch.transpose(1, 0))
if zero_diagonal:
sqr_mtx = sqr_mtx.fill_diagonal_(0)
if reduction == 'mean':
sqr_mtx = sqr_mtx.mean(dim=-1)
if reduction == 'sum':
sqr_mtx = sqr_mtx.sum(dim=-1)
return sqr_mtx | dda82b8fa4dc4f3760a5c7ec329c77b980f3860c | 13,234 |
def _compute_input_padding(size, bcount, bsize, boffset, bstride):
"""Computes the padding for the operation.
:param size: `[SZH, SZW]` list-like of ints, size of image
:param bcount: `[BCH, BCW]` list of ints
:param bsize: `[BSH, BSW]` list-like of ints, block size
:param boffset: `[BOH, BOW]` list-like of ints, block offset
:param bstride: `[BSTH, BSTW]` list-like of ints, block stride
:returns: `pad_h, pad_w` for _pad_inputs function, possibly negative.
:rtype: tuple
"""
pad_h = [boffset[0],
boffset[0] + bstride[0] * bcount[0] + bsize[0] - size[0]]
pad_w = [boffset[1],
boffset[1] + bstride[1] * bcount[1] + bsize[1] - size[1]]
return pad_h, pad_w | bce6ed06406f0d259d029c745e3c9f5ff015959e | 13,242 |
from typing import Union
import string
def _strip_unwanted_chars(price: Union[int, str]) -> str:
"""Returns price text with all unnecessary chars stripped (nonnumeric etc).
Examples:
"100" should return "100"
"100 yen" should return "100"
"10,000" should return "10000"
Args:
price: The raw value of the price data.
Returns:
String that represents the price with currency and other unnecessary
punctuation removed.
"""
return ''.join(char for char in str(price) if char in string.digits) | 18679bbd7e53fcea7cadd3343fc82a3dd875f8af | 13,243 |
import warnings
def update(data):
"""Update the data in place to remove deprecated properties.
Args:
data (dict): dictionary to be updated
Returns:
True if data was changed, False otherwise
"""
updated = False
if 'include' in data:
msg = ("included configuration files should be updated manually"
" [files={0}]")
warnings.warn(msg.format(', '.join(data['include'])))
# Spack 0.19 drops support for `spack:concretization` in favor of
# `spack:concretizer:unify`. Here we provide an upgrade path that changes the former
# into the latter, or warns when there's an ambiguity. Note that Spack 0.17 is not
# forward compatible with `spack:concretizer:unify`.
if 'concretization' in data:
has_unify = 'unify' in data.get('concretizer', {})
to_unify = {'together': True, 'separately': False}
unify = to_unify[data['concretization']]
if has_unify and data['concretizer']['unify'] != unify:
warnings.warn(
'The following configuration conflicts: '
'`spack:concretization:{}` and `spack:concretizer:unify:{}`'
'. Please update manually.'.format(
data['concretization'], data['concretizer']['unify']))
else:
data.update({'concretizer': {'unify': unify}})
data.pop('concretization')
updated = True
return updated | 2e604cde4455bb1ab784651798fb3be0cd3733db | 13,247 |
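# Behavior sketch with made-up data: the legacy `concretization` key is
# rewritten in place to the newer `concretizer:unify` form.
cfg = {'concretization': 'together'}
changed = update(cfg)
print(changed, cfg)  # True {'concretizer': {'unify': True}}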
def _default_filter(line):
"""Default bad header filter.
Filters out lines that start with ">From" or "From " (no colon).
"""
line = line.strip()
return not (line.startswith('>From') or line.startswith('From ')) | 4867ba376914d7d688038c8471f09405b0a17c24 | 13,253 |
def parse_starlog(path_to_log):
"""Parse star logfile into a dict.
Args:
path_to_log: filepath containing the star run log
Returns:
dict that contains metrics from star log
"""
qc_dict = {}
with open(path_to_log) as f:
for line in f:
if "|" in line:
tokens = line.split("|")
qc_dict[tokens[0].strip()] = tokens[1].strip()
return qc_dict | ced4f91846a4c658c1801eeeb75430cdecafb32d | 13,258 |
def get_universe_id_from_script(universe_script):
""" Get the id of a universe given the universe script """
return int(universe_script.split('.')[0].split('_')[1]) | a714d1de25f4972c1cccd6c70453d01e2938af2b | 13,260 |
def _cytomine_parameter_name_synonyms(name, prefix="--"):
"""For a given parameter name, returns all the possible usual synonym (and the parameter itself). Optionally, the
function can prepend a string to the found names.
If a parameters has no known synonyms, the function returns only the prefixed $name.
Parameters
----------
name: str
Parameter based on which synonyms must searched for
prefix: str
The prefix
Returns
-------
names: str
List of prefixed parameter names containing at least $name (preprended with $prefix).
"""
synonyms = [
["host", "cytomine_host"],
["public_key", "publicKey", "cytomine_public_key"],
["private_key", "privateKey", "cytomine_private_key"],
["base_path", "basePath", "cytomine_base_path"],
["id_software", "cytomine_software_id", "cytomine_id_software", "idSoftware", "software_id"],
["id_project", "cytomine_project_id", "cytomine_id_project", "idProject", "project_id"]
]
synonyms_dict = {params[i]: params[:i] + params[(i + 1):] for params in synonyms for i in range(len(params))}
if name not in synonyms_dict:
return [prefix + name]
return [prefix + n for n in ([name] + synonyms_dict[name])] | 4d8d58571b03182cf31546c370fda81e2caf5800 | 13,262 |
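# Usage sketch: known parameter names expand to all synonyms (the name
# itself first), while unknown names are simply prefixed.
print(_cytomine_parameter_name_synonyms("public_key"))
# ['--public_key', '--publicKey', '--cytomine_public_key']
print(_cytomine_parameter_name_synonyms("verbose"))  # ['--verbose']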
def read_prot(filepath):
"""
This function accepts the filepath of a protein to align, skips header lines
(those starting with the '>' char), strips newlines, and returns the protein as a single string.
"""
seq = ""
with open(filepath) as f:
for line in f:
if not line.startswith(">"):
seq += (line.rstrip())
return seq | e09fab92fca28601d5c92d927ceddccaaf1f5569 | 13,264 |
def _create_table_query(table_name: str) -> str:
"""Build SQL query to create metrics table."""
return (
f"CREATE TABLE IF NOT EXISTS {table_name}("
f" id SERIAL PRIMARY KEY,"
f" page_url TEXT,"
f" http_code SMALLINT,"
f" response_time INT,"
f" timestamp TIMESTAMPTZ"
f")"
) | 427bf61a475c0f012b0242880400d1573ae8bbd1 | 13,268 |
def to_binary(df, variable_names):
"""
Recode specified variables of dataframe to binary; any positive value is
set to 1 and all other values are set to 0. This replaces the existing
column(s).
Parameters
----------
df : pandas DataFrame
dataframe containing variables to be recoded
variable_names : list
list of variable names to recode to binary
Returns
-------
recoded_df : pandas DataFrame
dataframe with the specified variables recoded to binary
"""
recoded_df = df.copy()
recoded_df[variable_names] = (
recoded_df[variable_names]
.astype(bool)
.astype("int64")
)
return recoded_df | 604b5a84a7ade73b9e569fa4d238f32e9039acee | 13,271 |
def get_http_body(http_request):
"""Given a HTTP request, return the body."""
return http_request.split("\r\n\r\n")[1] | 4a11c97f0eeddac933e8b311d9c1cfda6e61674c | 13,273 |
def parse_gn_flags(gn_lines):
"""
Parse lines of GN flags into dictionary
"""
gn_args = {}
for line in gn_lines:
name, var = line.strip().partition("=")[::2]
gn_args[name.strip()] = var.strip()
return gn_args | 98d8f91e9defea1484a569deb70fb7cc62b2c743 | 13,275 |
import math
def floor(x, unit=1):
""" Returns the greatest multiple of 'unit' at or below 'x' """
return unit * math.floor(x / unit) | b289bf714eb3ecf23072533a51badeac43743686 | 13,279 |
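# Quick checks for floor above, including the negative case where plain
# int() truncation toward zero would give the wrong answer.
print(floor(7.9))      # 7
print(floor(123, 10))  # 120
print(floor(-1.5))     # -2 (int(-1.5) would have given -1)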
import hashlib
import io
def calculate_file_checksum(path):
"""
Calculate the MD5 sum for a file:
Read chunks of a file and update the hasher.
Returns the hex digest of the md5 hash.
"""
hasher = hashlib.md5()
with io.FileIO(path, 'r') as fp:
while True:
buf = fp.read(65536)
if not buf:
break
hasher.update(buf)
return hasher.hexdigest() | 6bddfd83a8bc326fcfea8fb69affde45f3cb1dd8 | 13,282 |
import torch
def get_position(lens, maxlen, left_pad=False):
""" transform sequence length to a series of positions. e.g., 3 -> 1,2,3"""
batch_size = lens.size(0)
pos = torch.zeros((batch_size, maxlen), dtype=torch.long, device=lens.device)
for i, input_len in enumerate(lens):
if not left_pad:
pos[i,:input_len] = torch.arange(1, input_len+1, dtype=torch.long, device=lens.device)
else:
pos[i,maxlen-input_len:] = torch.arange(1, input_len+1, dtype=torch.long, device=lens.device)
return pos | 48c2ac5c0bde1f554b3b44c82177945ab2c667ab | 13,283 |
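# Usage sketch: two sequences of lengths 2 and 4, right-padded to maxlen=4.
import torch
print(get_position(torch.tensor([2, 4]), maxlen=4))
# tensor([[1, 2, 0, 0],
#         [1, 2, 3, 4]])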
def convert_bxd_name(name: str) -> str:
"""
the names of BXDs are not consistent across all metadata
and VCF files, so we sometimes have to convert their names
>>> convert_bxd_name("4512-JFI-0469_BXD100_RwwJ_phased_possorted_bam")
"BXD100_RwwJ_0469"
>>> convert_bxd_name("4512-JFI-0410_BXD013_TyJ_phased_possorted_bam")
"BXD013_TyJ_0410"
"""
bxd_line_name = '_'.join(name.split('_phased')[0].split('_')[1:])
bxd_line_num = name.split('_phased')[0].split('_')[0].split('-')[-1]
bxd_line_new = bxd_line_name + '_' + bxd_line_num
return bxd_line_new | 577f936d81fdd7a87bd02fec069ff5f09e3b1412 | 13,284 |
import torch
def makeInp(inputs):
"""Move tensors onto GPU if available.
Args:
inputs: A dict with a batch of word-indexed data from DataLoader. Contains
['brk_sentence', 'bs_inp_lengths', 'style', 'sentence', 'st_inp_lengths', 'marker', 'mk_inp_lengths']
Returns:
inputs: The dict with same structure but stored on GPU.
"""
if torch.cuda.is_available():
for key in inputs:
inputs[key] = inputs[key].cuda()
return inputs | af00b78f33627f45e505a6b8c75fb07ef29b94e6 | 13,285 |
import warnings
def rename_internal_nodes(tree, names=None, inplace=False):
""" Names the internal according to level ordering.
The tree will be traversed in level order (i.e. top-down, left to right).
If `names` is not specified, the node with the smallest label (y0)
will be located at the root of the tree, and the node with the largest
label will be located at bottom right corner of the tree.
Parameters
----------
tree : skbio.TreeNode
Tree object where the leafs correspond to the features.
names : list, optional
List of labels to rename the tip names. It is assumed that the
names are listed in level ordering, and the length of the list
is at least as long as the number of internal nodes.
inplace : bool, optional
Specifies if the operation should be done on the original tree or not.
Returns
-------
skbio.TreeNode
Tree with renamed internal nodes.
Raises
------
ValueError:
Raised if `tree` and `name` have incompatible sizes.
"""
if inplace:
_tree = tree
else:
_tree = tree.copy()
non_tips = [n for n in _tree.levelorder() if not n.is_tip()]
if names is not None and len(non_tips) != len(names):
raise ValueError("`_tree` and `names` have incompatible sizes, "
"`_tree` has %d tips, `names` has %d elements." %
(len(non_tips), len(names)))
i = 0
for n in _tree.levelorder():
if not n.is_tip():
if names is None:
label = 'y%i' % i
else:
label = names[i]
if n.name is not None and label != n.name:
warnings.warn("Warning. Internal node (%s) has been replaced "
"with (%s)" % (n.name, label), UserWarning)
n.name = label
i += 1
return _tree | d5df42023afe184af41d7553b2e1491b09d5edc1 | 13,286 |
def headers(mime, length):
"""Returns a list of HTTP headers given the MIME type and the length of the
content, in bytes (in integer or sting format)."""
return [('Content-Type', mime),
('Content-Length', str(length))] | da5f73591bf9d4bbc8b1d01f4d6babf0de54ce00 | 13,287 |
import statistics
def run_stats(this_set):
""" Find standard deviation and mean (average) for the data set. """
# Simply calculate Mean & StdDev for this set.
# Return TUPLE of FLOATS
return statistics.mean(this_set), statistics.stdev(this_set) | 35ce499654bf7e8fe43d862b5456b0578a093fc7 | 13,289 |
def generate_csv_url(sheet_url):
"""
Utility function for generating csv URL from a google sheets link
This function generates a link to a csv file from a link used to edit a google sheets file.
The gid must be present in the URL.
Parameters
----------
sheet_url : str
The URL for the google sheet file
Returns
-------
str
URL for the csv file
"""
if isinstance(sheet_url, str):
if(sheet_url.find("edit#gid") > -1):
return sheet_url.replace("edit#gid", "export?format=csv&gid")
else:
raise ValueError("sheet_url must contain 'edit#gid' phrase")
else:
raise TypeError("sheet_url must be a string") | d941ef98f3400175b9db4f7ef5da858fc6426caf | 13,291 |
def byte_array_declaration(data: bytes, name: str) -> str:
"""Generates a byte C array declaration for a byte array"""
type_name = '[[maybe_unused]] const std::byte'
byte_str = ''.join([f'std::byte{{0x{b:02x}}},' for b in data])
array_body = f'{{{byte_str}}}'
return f'{type_name} {name}[] = {array_body};' | 015f0cb1a4dfd94b6f488b4eae0b58860da4d1dc | 13,295 |
def shorten(s: str, max_len: int = 60) -> str:
"""Truncate a long string, appending '...' if a change is made."""
if len(s) < max_len:
return s
return s[:max_len-3] + '...' | 8bafe69253e12a67fdb4c476a2d4b55f6ad4d2af | 13,302 |
import json
def elasticsearch_bulk_decorate(bulk_index, bulk_type, msg):
""" Decorates the msg with elasticsearch bulk format and adds index and message type"""
command = json.dumps({'index': {'_index': bulk_index, '_type': bulk_type}})
return '{0}\n{1}\n'.format(command, msg) | e215ab15b347046b22dcc46317632c68fa9d36c0 | 13,303 |
import itertools
def get_hp_fold_iterator(hp_grid, num_folds):
""" Create an iterator over all combinations of hyperparameters and folds
"""
hp_grid = list(hp_grid)
folds = list(range(num_folds))
hp_fold_it = itertools.product(hp_grid, folds)
hp_fold_it = list(hp_fold_it)
return hp_fold_it | ea16c3b3867dc87a18f2dc1dc2f59e5a48f7c04d | 13,304 |
def sub_dir_algo(d):
""" build out the algorithm portion of the directory structure.
:param dict d: A dictionary holding BIDS terms for path-building
"""
return "_".join([
'-'.join(['tgt', d['tgt'], ]),
'-'.join(['algo', d['algo'], ]),
'-'.join(['shuf', d['shuf'], ]),
]) | 73d91f4f5517bbb6966109bd04f111df5f6302e4 | 13,307 |
def _get_words(data_dir):
"""Get list of words from history file.
Parameters
----------
data_dir : pathlib.Path
Directory where data is saved.
Returns
-------
word_list : list of str
List of words.
"""
words_file = data_dir.joinpath("words.txt")
word_list = []
if not words_file.is_file():
return word_list
with open(words_file, mode="r") as f:
for l in f:
line = l.rstrip()
word_list.append(line)
return word_list | 40728b61ed4af05d26a8ece678661cddf838602a | 13,308 |
def containsAll(target, keys):
"""Returns true iff target contains all keys.
"""
result = ((i in target) for i in keys)
return all(result) | 75fda44a515e2249bd4c6ee1146c36eb91657b56 | 13,312 |
import re
def parse_log_entry(msg):
"""
Parse a log entry from the format
$ip_addr - [$time_local] "$request" $status $bytes_sent "$http_user_agent"
to a dictionary
"""
data = {}
# Regular expression that parses a log entry
search_term = r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+\-\s+\[(.*)]\s+'\
r'"(\/[/.a-zA-Z0-9-]+)"\s+(\d{3})\s+(\d+)\s+"(.*)"'
values = re.findall(search_term, msg)
if values:
val = values[0]
data['ip'] = val[0]
data['date'] = val[1]
data['path'] = val[2]
data['status'] = val[3]
data['bytes_sent'] = val[4]
data['agent'] = val[5]
return data | f237696e8215793b4cf17e7b20543ac01494b686 | 13,325 |
import tempfile
def get_logprop_file(logger, template, pattern, project):
"""
Return the filename of a file with logging properties specific to the
given project.
"""
with open(template, 'r') as f:
data = f.read()
data = data.replace(pattern, project)
with tempfile.NamedTemporaryFile(delete=False) as tmpf:
tmpf.write(data.encode())
return tmpf.name | 061254a5e65a08c7e5ee8367c94abaee04238e61 | 13,335 |
def Iyy_beam(b, h):
"""gets the Iyy for a solid beam"""
return 1 / 12. * b * h ** 3 | 8912b769b1bf8f91d2bb7eefad261c0e4f9f4026 | 13,336 |
def read_user_header(head):
"""
The 'User-Id' header contains the current online user's id, which is an integer
-1 if no user online
:param head: the request's header
:return: online user's id, or None if the header is missing or not an integer.
"""
try:
return int(head['User-Id'])
except (KeyError, TypeError, ValueError):
print('User-Id header missing or of wrong type') | 95586f32b698283f33c9271b5ffdf61a934dbb4a | 13,341 |
def y_intercept_line(slope, point):
"""
Calculate a y-intercept of a line for given values of slope and point.
Parameters
----------
slope : float
A value of slope line.
point : tuple
A tuple with xy coordinates.
Returns
-------
y-intercept : float
A value of the y-intercept for the line.
"""
return point[1] - slope*point[0] | 839cdaa4b4646ebbdc5a2b941e5e14b45587cd24 | 13,345 |
import itertools
def _groupby_leading_idxs(shape):
"""Group the indices for `shape` by the leading indices of `shape`.
All dimensions except for the rightmost dimension are used to create
groups.
A 3d shape will be grouped by the indices for the two leading
dimensions.
>>> for key, idxs in _groupby_leading_idxs((3, 2, 2)):
... print('key: {}'.format(key))
... print(list(idxs))
key: (0, 0)
[(0, 0, 0), (0, 0, 1)]
key: (0, 1)
[(0, 1, 0), (0, 1, 1)]
key: (1, 0)
[(1, 0, 0), (1, 0, 1)]
key: (1, 1)
[(1, 1, 0), (1, 1, 1)]
key: (2, 0)
[(2, 0, 0), (2, 0, 1)]
key: (2, 1)
[(2, 1, 0), (2, 1, 1)]
A 1d shape will only have one group.
>>> for key, idxs in _groupby_leading_idxs((2,)):
... print('key: {}'.format(key))
... print(list(idxs))
key: ()
[(0,), (1,)]
"""
idxs = itertools.product(*[range(s) for s in shape])
return itertools.groupby(idxs, lambda x: x[:-1]) | cc71304b9da4df2c9a32e009e3159bc9777d3e70 | 13,353 |
def _recvall(sock, size):
"""
Read exactly the specified amount of data from a socket. If a
``recv()`` call returns less than the requested amount, the
``recv()`` is retried until it either returns EOF or all the
requested data has been read.
:param sock: A socket from which bytes may be read.
:param int size: The amount of data to read from the socket.
:returns: The requested data.
"""
data = b''
while len(data) < size:
buf = sock.recv(size - len(data))
# Break out if we got an EOF
if not buf:
# Return None for an EOF
return data or None
data += buf
return data | 6a456ee4925b7faa25646e9bc6e702848d047783 | 13,354 |
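# Usage sketch with an in-process socket pair: _recvall keeps reading until
# exactly 5 bytes arrive, even though they were sent in two chunks.
import socket
a, b = socket.socketpair()
a.sendall(b'he')
a.sendall(b'llo')
print(_recvall(b, 5))  # b'hello'
a.close()
b.close()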
def get_expected_name(logfile):
"""get the name of the file with the expected json"""
return logfile.with_name(logfile.stem + "-expected.json") | 7bc886fdbc23a53610817391db3fccbe53fbab81 | 13,359 |
def get_free_symbols(s, symbols, free_symbols=None):
"""
Returns free_symbols present in `s`.
"""
free_symbols = free_symbols or []
if not isinstance(s, list):
if s in symbols:
free_symbols.append(s)
return free_symbols
for i in s:
free_symbols = get_free_symbols(i, symbols, free_symbols)
return free_symbols | 873d0bac2fa24db3b4ded6d9d867ca703d0a2314 | 13,362 |
def zeropad_1501(name):
""" Arxiv IDs after yymm=1501 are padded to 5 zeros """
if not '/' in name: # new ID
yymm, num = name.split('.')
if int(yymm) > 1500 and len(num) < 5:
return yymm + ".0" + num
return name | d2a46fccf649a59194b55ff0542bd6bfbe0d8629 | 13,364 |
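# Quick checks: a 4-digit new-style number is padded, while already-padded
# and old-style (slash-containing) IDs pass through unchanged.
print(zeropad_1501('1501.7526'))       # '1501.07526'
print(zeropad_1501('1501.07526'))      # '1501.07526'
print(zeropad_1501('hep-th/9901001'))  # 'hep-th/9901001'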
def __labels(known_labels, trans, labels):
"""Convert list of label names to label dictionaries with all info.
"""
return {
lbl: {
"title": trans.gettext(known_labels[lbl]['title']),
"description": trans.gettext(known_labels[lbl]['description']),
"severity": known_labels[lbl].get('severity', "primary"),
} for lbl in labels if lbl in known_labels
} | 55652386496f1312ae66f64ae7673ad43d1e1879 | 13,365 |
import binascii
def hexlify(data):
"""The binascii.hexlify function returns a bytestring in
Python3, but sometimes we want to use its output to
show a log message. This function returns an unicode
str, which is suitable for it."""
return binascii.hexlify(data).decode('ascii') | a24bf82a71a15dab12b2f683833c8a250a63097e | 13,366 |
def command_eval(expr: str):
"""Evaluates a given expression."""
# Command callbacks can return values which will be passed to the caller of `dispatch()`
return eval(expr) | 0664e4c3f6f0aad320642b000e4bbf242a5b219a | 13,381 |
import requests
def get_episode_data(cfg, episode_id):
"""
Requests episode information from Opencast.
:param episode_id: Unique identifier for episode
:param cfg: Opencast configuration
:return: Tuple of episode title, parent seriesId and parent series title
"""
url = cfg['uri'] + "/api/events/" + episode_id
r = requests.get(url=url, auth=(cfg['user'], cfg['password']))
x = r.json()
return x['title'], x['is_part_of'], x['series'] | 41aa3053312827d7081776fcb3c414e358f0cc61 | 13,385 |
def isempty(prt_r):
"""Test whether the parent of a missingness indicator is empty"""
return len(prt_r) == 0 | 167e6cb0a63116c6157291a6ba225b14fbfebe11 | 13,386 |
import math
def vortex_feldtkeller(beta, center, right_handed=True, polarity=+1):
"""
Returns a function f: (x,y,z) -> m representing a vortex
magnetisation pattern where the vortex lies in the x/y-plane (i.e.
the magnetisation is constant along the z-direction), the vortex
core is centered around the point `center` and the m_z component
falls off exponentially as given by the following formula, which
is a result by Feldtkeller and Thomas [1].:
m_z = exp(-2*r^2/beta^2).
Here `r` is the distance from the vortex core centre and `beta` is
a parameter, whose value is taken from the first argument to this
function.
[1] E. Feldtkeller and H. Thomas, "Struktur und Energie von
Blochlinien in Duennen Ferromagnetischen Schichten", Phys.
Kondens. Materie 8, 8 (1965).
"""
beta_sq = beta ** 2
def f(pt):
x = pt[0]
y = pt[1]
# To start with, create a right-handed vortex with polarity 1.
xc = x - center[0]
yc = y - center[1]
phi = math.atan2(yc, xc)
r_sq = xc ** 2 + yc ** 2
mz = math.exp(-2.0 * r_sq / beta_sq)
mx = -math.sqrt(1 - mz * mz) * math.sin(phi)
my = math.sqrt(1 - mz * mz) * math.cos(phi)
# If we actually want a different polarity, flip the z-coordinates
if polarity < 0:
mz = -mz
# Adapt the chirality accordingly
if ((polarity > 0) and (not right_handed)) or\
((polarity < 0) and right_handed):
mx = -mx
my = -my
return (mx, my, mz)
return f | 4714dcdfb2b96413562a6f531be944963a3a3b2f | 13,389 |
from typing import Union
from typing import Optional
import re
def convert_to_int(value: Union[str, float]) -> Optional[int]:
"""
Converts string and float input to int. Strips all non-numeric
characters from strings.
Parameters:
value: (string/float) Input value
Returns:
integer/None Integer if successful conversion, otherwise None
"""
if isinstance(value, str):
str_val = re.sub(r'[^0-9\-\.]', '', value)
try:
return int(str_val)
except (ValueError, TypeError):
try:
return int(float(str_val))
except (ValueError, TypeError):
return None
elif isinstance(value, (int, float)):
return int(value)
else:
return None | 15036374ca499edfa58e2865d4f5fb70c3aecb40 | 13,393 |
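# Quick checks covering the string-cleaning, float-fallback, and failure paths.
print(convert_to_int("10,000 yen"))  # 10000
print(convert_to_int("12.7"))        # 12 (falls back to int(float(...)))
print(convert_to_int(3.9))           # 3
print(convert_to_int("n/a"))         # None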
def format_describe_str(desc, max_len=20):
"""Returns a formated list for the matplotlib table
cellText argument.
Each element of the list is like this : ['key ','value ']
Number of space at the end of the value depends on
len_max argument.
Parameters
----------
desc: dict
Dictionnary returned by the variable.describe
function
len_max: int (default 20)
Maximum length for the values
Returns
-------
list(list):
Formated list for the matplotlib table
cellText argument
"""
res = {}
_max = max([len(str(e)) for k, e in desc.items()])
max_len = _max if _max < max_len else max_len
n_valid = desc['valid values']
n_missing = desc['missing values']
n = n_valid + n_missing
for k, e in desc.items():
if k == 'valid values':
e = str(e) + ' (' + str(int(n_valid*100/n)) + '%)'
elif k == 'missing values':
e = str(e) + ' (' + str(int(n_missing*100/n)) + '%)'
else:
e = str(e)
e = e.ljust(max_len) if len(e) <= 15 else e[:max_len]
res[k.ljust(15).title()] = e
return [[k, e] for k, e in res.items()] | 9d172ea7afc7fbcb10b4fdc3e120cd955b9eb196 | 13,395 |
def minutes_to_seconds(minutes):
"""A function that converts minutes to seconds
:param minutes: The number of minutes to be converted
:return: The number of seconds in a given number of minutes
"""
return 60 * minutes | b9dee8e669f7659fe669e5b97ef79e8e035b63bf | 13,399 |
def escapes_sub(match):
"""Substitutes escaped characters."""
return match.group(1) | f9d535bfdeea2cf4791301c348d513b606ed258d | 13,403 |
def read_data_line(line):
"""Process a line from data file.
Assumes the following format:
WORD W ER D
i.e. word tab separated with pronunciation where pronunciation has space
separated phonemes
Args:
line: string representing the one line of the data file
Returns:
chars: list of characters
phones: list of phones
"""
line = line.strip()
word, pronunciation = line.split(" ", 1)  # use line.split("\t", 1) for tab-separated data
chars = list(word.strip())
phones = pronunciation.strip().split(" ")
return chars, phones | 66a840e2f2dfbc5e6c9d4a62c938b1bc60454a1f | 13,406 |
def formatted_constituent_array(constituent_array):
"""
Given a constituent array of Species, return the classic CALPHAD-style interaction.
Parameters
----------
constituent_array : list
List of sublattices, which are lists of Species in that sublattice
Returns
-------
str
String of the constituent array formatted in the classic CALPHAD style
Examples
--------
>>> from pycalphad import variables as v
>>> const_array = [[v.Species('CU'), v.Species('MG')], [v.Species('MG')]]
>>> formatted_constituent_array(const_array)
'CU,MG:MG'
"""
return ':'.join([','.join([sp.name for sp in subl]) for subl in constituent_array]) | b1e60d21c28b66620eaffed1cda20abcb394a833 | 13,411 |
def calc_automated_readability_index(
n_letters: int,
n_words: int,
n_sents: int,
a: float = 6.26,
b: float = 0.2805,
c: float = 31.04,
) -> float:
"""
Вычисление автоматического индекса удобочитаемости
Описание:
Чем выше показатель, тем сложнее текст для чтения
Результатом является число лет обучения в американской системе образования, необходимых для понимания текста
Значения индекса могут интерпретироваться следующим образом:
1 - 6-7 лет
2 - 7-8 лет
3 - 8-9 лет
4 - 9-10 лет
5 - 10-11 лет
6 - 11-12 лет
7 - 12-13 лет
8 - 13-14 лет
9 - 14-15 лет
10 - 15-16 лет
11 - 16-17 лет
12 - 17-18 лет
Ссылки:
https://en.wikipedia.org/wiki/Automated_readability_index
https://ru.wikipedia.org/wiki/Автоматический_индекс_удобочитаемости
Аргументы:
n_letters (int): Количество букв
n_words (int): Количество слов
n_sents (int): Количество предложений
a (float): Коэффициент a
b (float): Коэффициент b
c (float): Коэффициент c
Вывод:
float: Значение индекса
"""
return (a * n_letters / n_words) + (b * n_words / n_sents) - c | d2a49044749dc93bb8684b27a81f47ff1498cc46 | 13,412 |
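# Worked example with made-up counts: a 250-letter, 57-word, 4-sentence text.
# ARI = 6.26 * 250/57 + 0.2805 * 57/4 - 31.04 ≈ 0.41
print(round(calc_automated_readability_index(250, 57, 4), 2))  # 0.41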
def FirstFree(seq, base=0):
"""Returns the first non-existing integer from seq.
The seq argument should be a sorted list of positive integers. The
first time the index of an element is smaller than the element
value, the index will be returned.
The base argument is used to start at a different offset,
i.e. C{[3, 4, 6]} with I{offset=3} will return 5.
Example: C{[0, 1, 3]} will return I{2}.
@type seq: sequence
@param seq: the sequence to be analyzed.
@type base: int
@param base: use this value as the base index of the sequence
@rtype: int
@return: the first non-used index in the sequence
"""
for idx, elem in enumerate(seq):
assert elem >= base, "Passed element is higher than base offset"
if elem > idx + base:
# idx is not used
return idx + base
return None | 42323664c7bb2c59506ed3b24115a38bc0fcf63d | 13,413 |
from typing import Iterable
def flatten(nested):
"""Flatten a nested sequence where the sub-items can be sequences or
primitives. This differs slightly from itertools chain methods because
those require all sub-items to be sequences. Here, items can be primitives,
sequences, nested sequences, or any combination of these. Any iterable
items aside from strings will be completely un-nested, so use with caution
(e.g. a torch Dataset would be unpacked into separate items for each
index). This also returns a list rather than a generator.
Parameters
----------
nested: sequence (list, tuple, set)
Sequence where some or all of the items are also sequences.
Returns
-------
list: Flattened version of `nested`.
"""
def _walk(nested):
for group in nested:
if isinstance(group, Iterable) and not isinstance(group, str):
yield from _walk(group)
else:
yield group
return list(_walk(nested)) | 464cd221dfaf6f842bf6da0d96ad8322d1c84e71 | 13,415 |
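# Quick check: nested lists and tuples are fully un-nested, while strings
# are kept whole rather than split into characters.
print(flatten([1, [2, (3, [4])], 'five']))  # [1, 2, 3, 4, 'five']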
def calculadora(x = 1, y = 1):
# Docstring
"""
Calculator
----------
Creates a dictionary with the main mathematical operations, given two numbers.
args
----
x : int or float
First input number
y : int or float
Second input number
return
------
dict
{'operation' : value}
"""
# Return a dictionary with the basic operations
return {
'soma' : x + y,
'subtração' : x - y,
'divisão' : x / y,
'multiplicação' : x * y,
'potência' : x ** y
} | 6fad4c8c1d388cb5b77c52d68f63a37070379657 | 13,418 |
def move_by_month(month, offset):
"""Get the month with given offset raletive to current month."""
return (((month - 1) + offset) % 12) + 1 | 38229d5a45b4643dfeb64e0e311949b9e26625ef | 13,421 |
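# Quick checks: offsets wrap around the 1..12 month range in both directions.
print(move_by_month(11, 3))  # 2
print(move_by_month(1, -1))  # 12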
def is_image_file(filename):
"""Checks if a file is an image.
Arguments:
filename {str} -- File path.
Returns:
bool -- True if the path is PNG or JPG image.
"""
return any(filename.endswith(extension) for extension in [".png", ".jpg", ".jpeg"]) | d2971a57cd4fda456384f1e2f839dc32b3c897a6 | 13,423 |
def break_up_sink_info(sink_info:str) -> dict:
"""Break up the info into a dictionary.
Parameters
----------
sink_info: str
The sink info from pulsemixer.
Returns
-------
dict
A dictionary of sink information.
"""
pieces = sink_info.split(",")
if "\t" in pieces[0]:
# Split up the first item
pieces[0] = pieces[0].split("\t")[1]
pieces_dict = {}
for p in pieces:
p_pieces = p.split(":")
if len(p_pieces) == 2:
pieces_dict[p_pieces[0].replace(" ", "")] =\
p_pieces[1].replace(" ", "")
return pieces_dict | 790c757500528af28a6a01bc0cea2b7772d2ea55 | 13,434 |
def project_content_url(project, **kwargs):
"""
Get the URL for a file path within a project.
This can significantly reduce the number of queries instead
of fetching the project and account for every file in a list
which happens when using `{{ file.download_url }}`.
"""
return project.content_url(**kwargs) | 91a6a927b3fa88f4b9e976093965a1176657532e | 13,435 |
def mavlink_latlon(degrees):
"""Converts a MAVLink packet lat/lon degree format to decimal degrees."""
return float(degrees) / 1e7 | 20d883e45f99cca3c99eeb9d7c5fae96db03fd5a | 13,436 |
import itertools
def _merge_dicts(first, second):
"""Merge two dicts into a new one
Args:
first (dict): Primary dict; if a key is present in both, the value from this
dict is used.
second (dict): Other dict to merge.
Returns:
dict: Union of provided dicts, with value from first used in the case of overlap.
"""
return {k: v for k, v in itertools.chain(second.items(), first.items())} | 85b3377d25d730b32c9bf925bfde375db5c49450 | 13,439 |
def accuracy(actual_y: list, predicted_y: list) -> float:
"""
Calculate the value of accuracy based on predictions
:param actual_y:a list containing initial Y values generated by 'y_generator'
function
:param predicted_y: a list containing predicted Y values generated by
'predict_y_values' function
:return: percentage of accuracy
"""
# iterate over one element of each list at a time (zip mode)
# prediction is correct if actual Y value equals to predicted Y value
correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
# percentage of accuracy equals to number of correct predictions divided by number
# of all data and multiplied by 100
return (correct / len(actual_y)) * 100 | 7082d0e5505fc58e9ba80883b63bec98f4632676 | 13,443 |
def dict2columns(data, id_col=None):
"""Convert a dict-based object to two-column output.
Also make sure the id field is at top.
"""
if not data:
return ({}, {})
else:
if id_col is not None and id_col in data:
keys = [id_col]
keys.extend(key for key in sorted(data) if key != id_col)
items = [(key, data[key]) for key in keys]
else:
items = sorted(data.items())
return list(zip(*items)) | 24917339d53febc4239e21ebb8b39e487f45a370 | 13,444 |
def resolve_int_list_arg(arg):
"""Resolve a CLI argument as a list of integers"""
if arg is None:
return None
if isinstance(arg, int):
return [arg]
if isinstance(arg, str):
return [int(arg)]
if isinstance(arg, tuple):
# Interpret as range (ignore any other items in tuple beyond second)
return list(range(arg[0], arg[1]))
return arg | 5201d191946c69e10253e476341ab70c76b3a67d | 13,445 |
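# Quick checks, one per accepted input shape.
print(resolve_int_list_arg(5))       # [5]
print(resolve_int_list_arg("7"))     # [7]
print(resolve_int_list_arg((2, 5)))  # [2, 3, 4]
print(resolve_int_list_arg([1, 2]))  # [1, 2] (returned as-is)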
def groupwise_normalise(gs):
"""
Normalises each group of GeoSeries
:param gs: GeoSeries
:return: normalised GeoSeries
"""
return gs.groupby(level=0).apply(lambda x: x / x.sum()) | e0c87701658481ccea01828c75244b7a1043ee29 | 13,447 |
from typing import OrderedDict
def read_populations(flname):
"""
Read populations from a file.
We assume that the file has one population per line. The population name
comes first and is followed by a comma. The name of each sample follows,
also separated by comma.
For example:
pop1,sample_1,sample_2
pop2,sample_3,sample_4
"""
with open(flname) as fl:
groups = OrderedDict()
group_names = OrderedDict()
for group_id, ln in enumerate(fl):
cols = ln.strip().split(",")
for ident in cols[1:]:
groups[ident] = group_id
group_names[group_id] = cols[0]
return groups, group_names | 9467bf2e882a2de0ebfb1ecfb566baeab16a1500 | 13,455 |
from datetime import datetime
def get_unix(str_ts, date_format):
"""Get unix timestamp from a string timestamp in date_format.
Args:
str_ts: string timestamp in `date_format`
date_format: datetime time stamp format
Returns:
float: unix timestamp
"""
return datetime.strptime(str_ts, date_format).timestamp() | 6d0c591734fa78defed11cd5ed8c66da63ad3b5b | 13,457 |
def replace_underscore_with_space(original_string):
"""
Another really simple method to remove underscores and replace with spaces for titles of plots.
Args:
original_string: String with underscores
Returns:
replaced_string: String with underscores replaced
"""
return original_string.replace('_', ' ') | 27bea63c33c2ffe44b1ddc591a7813d8830e9f76 | 13,462 |
def is_submod(mod_obj):
"""
Find out whether module has a parent or not.
:param mod_obj: module object
:return: module status
"""
try:
return bool(mod_obj.get('belongs-to'))
except Exception:
return False | 274d985b3b7b07f02e919af20ed7fd4531136ccc | 13,464 |
def CCW_ps(Pxx, Pyy, Pxy):
"""Counter clockwise power spectrum."""
QS = Pxy.imag
return (Pxx + Pyy + 2*QS)/2. | 76710ff31e4fead278afc9eb37691f2178f45def | 13,472 |
def check_interval(child_span, parent_span):
"""
Given a child span and a parent span, check if the child is inside the parent
"""
child_start, child_end = child_span
parent_start, parent_end = parent_span
return (child_start >= parent_start) and (child_end <= parent_end) | 6c6a8e636ad181af821d185ba35f18db41d0ce77 | 13,473 |
def calc_bpe_bulk_electrolyte_resistance(characteristic_length, sigma):
"""
The area specific charge transfer resistance through the bulk electrolyte
units: Ohm*m2
Notes:
Adjari, 2006 - "(area specific) bulk electrolyte resistance"
Squires, 2010 - does not explicitly define this but uses the same equation
Inputs:
char_length: (m) length of BPE
sigma (S/m) conductivity of electrolyte/buffer
Output:
Resistance: Ohm*m^2
"""
R_0 = characteristic_length / sigma
return R_0 | f6a08cd5997de8954faa41d69e7abca1541e3fa0 | 13,476 |
def count_correl_above(correl_mat,limit):
"""
Count the number of correlation matrix elements above a certain threshold
Args:
correl_mat (array): Matrix correlation values
limit: Threshold for counting
Returns:
(float): Percentage of correlations above the limit
"""
correl_list = correl_mat.flatten()
full_len = len(correl_list)
above_len = len([p for p in correl_list if p>limit])
return float(above_len)/float(full_len)*100 | 00fb1f0aea268a9333c3741255a2fd7cafcb43ee | 13,480 |
def testdat(testdir):
"""Path to the testdat directory"""
return testdir / "testdat" | d7c278fba718164d50863e3fb353155a1ff00eee | 13,483 |
def parity(self, allow_rescaling_flag=True):
"""
Returns the parity ("even" or "odd") of an integer-valued quadratic
form over `ZZ`, defined up to similitude/rescaling of the form so that
its Jordan component of smallest scale is unimodular. After this
rescaling, we say a form is even if it only represents even numbers,
and odd if it represents some odd number.
If the 'allow_rescaling_flag' is set to False, then we require that
the quadratic form have a Gram matrix with coefficients in `ZZ`, and
look at the unimodular Jordan block to determine its parity. This
returns an error if the form is not integer-matrix, meaning that it
has Jordan components at `p=2` which do not have an integer scale.
We determine the parity by looking for a 1x1 block in the 0-th
Jordan component, after a possible rescaling.
INPUT:
self -- a quadratic form with base_ring `ZZ`, which we may
require to have integer Gram matrix.
OUTPUT:
One of the strings: "even" or "odd"
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 3, [4, -2, 0, 2, 3, 2]); Q
Quadratic form in 3 variables over Integer Ring with coefficients:
[ 4 -2 0 ]
[ * 2 3 ]
[ * * 2 ]
sage: Q.parity()
'even'
::
sage: Q = QuadraticForm(ZZ, 3, [4, -2, 0, 2, 3, 1]); Q
Quadratic form in 3 variables over Integer Ring with coefficients:
[ 4 -2 0 ]
[ * 2 3 ]
[ * * 1 ]
sage: Q.parity()
'even'
::
sage: Q = QuadraticForm(ZZ, 3, [4, -2, 0, 2, 2, 2]); Q
Quadratic form in 3 variables over Integer Ring with coefficients:
[ 4 -2 0 ]
[ * 2 2 ]
[ * * 2 ]
sage: Q.parity()
'even'
::
sage: Q = QuadraticForm(ZZ, 3, [4, -2, 0, 2, 2, 1]); Q
Quadratic form in 3 variables over Integer Ring with coefficients:
[ 4 -2 0 ]
[ * 2 2 ]
[ * * 1 ]
sage: Q.parity()
'odd'
"""
## Deal with 0-dim'l forms
if self.dim() == 0:
return "even"
## Identify the correct Jordan component to use.
Jordan_list = self.jordan_blocks_by_scale_and_unimodular(2)
scale_pow_list = [J[0] for J in Jordan_list]
min_scale_pow = min(scale_pow_list)
if allow_rescaling_flag:
ind = scale_pow_list.index(min_scale_pow)
else:
if min_scale_pow < 0:
raise TypeError("Oops! If rescaling is not allowed, then we require our form to have an integral Gram matrix.")
ind = scale_pow_list.index(0)
## Find the component of scale (power) zero, and then look for an odd dim'l component.
J0 = Jordan_list[ind]
Q0 = J0[1]
## The lattice is even if there is no component of scale (power) 0
if J0 is None:
return "even"
## Look for a 1x1 block in the 0-th Jordan component (which by
## convention of the local_normal_form routine will appear first).
if Q0.dim() == 1:
return "odd"
elif Q0[0,1] == 0:
return "odd"
else:
return "even" | 12c68f6dfb447cd88d59367ad1fed9c2ba9aedde | 13,487 |
from pathlib import Path
def strings_to_paths(strings: list[str]) -> list[Path]:
"""Converts a list of filenames in string form into a list of paths"""
return [Path(string) for string in strings] | 660e5f0fe72f32a4c4f2b218386e4c640c038846 | 13,489 |
def meta(str1, str2, ratios, weights):
"""A meta ratio function. Returns a weighted meta ratio.
The Wiesendahl ratio is a meta ratio which combines a weighted
ratio of given ratio functions.
Args:
str1 (str): first string
str2 (str): second string
ratios (list(function(str, str) -> float)): ratio functions
This parameter is a list of ratio functions.
weights (list(float)): list of weights
Each weight gets applied to its corresponding function.
Returns:
float: the combined and weighted meta ratio
"""
c = 0
r = 0.0
for i, fn in enumerate(ratios):
r += fn(str1, str2) * weights[i]
c += weights[i]
return r / float(c) | b694216cce7b78e15065788497985416537ea95c | 13,495 |
import warnings
def get_unique_schema_name(components, name, counter=0):
"""Function to generate a unique name based on the provided name and names
already in the spec. Will append a number to the name to make it unique if
the name is already in the spec.
:param Components components: instance of the components of the spec
:param string name: the name to use as a basis for the unique name
:param int counter: the counter of the number of recursions
:return: the unique name
"""
if name not in components._schemas:
return name
if not counter: # first time through recursion
warnings.warn(
"Multiple schemas resolved to the name {}. The name has been modified. "
"Either manually add each of the schemas with a different name or "
"provide a custom schema_name_resolver.".format(name),
UserWarning,
)
else: # subsequent recursions
name = name[: -len(str(counter))]
counter += 1
return get_unique_schema_name(components, name + str(counter), counter) | 68086fc7a8e523322f5a1745996b6dc8056833a1 | 13,496 |
def _nobarrier(ts_dct):
""" Determine if reaction is barrierless
"""
print('cla', ts_dct['class'])
rad_rad = bool('radical radical' in ts_dct['class'])
low_spin = bool('low' in ts_dct['class'])
return rad_rad and low_spin | 393831d8b0af5085f8ff0f42d54bf8d14faca680 | 13,503 |
import torch
def pad_collate_func(batch):
"""
This should be used as the collate_fn=pad_collate_func for a pytorch DataLoader object in order to pad out files in a batch to the length of the longest item in the batch.
"""
vecs = [x[0] for x in batch]
labels = [x[1] for x in batch]
x = torch.nn.utils.rnn.pad_sequence(vecs, batch_first=True)
# stack will give us (B, 1), so index [:,0] to get to just (B)
y = torch.stack(labels)[:, 0]
return x, y | 89d4f2c2adb7295457afe7f6a3235bbc4d9c8155 | 13,506 |
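# Usage sketch with toy tensors standing in for dataset items: two sequences
# of lengths 3 and 5 are collated into one right-padded batch.
import torch
batch = [(torch.ones(3, dtype=torch.long), torch.tensor([0])),
         (torch.ones(5, dtype=torch.long), torch.tensor([1]))]
x, y = pad_collate_func(batch)
print(x.shape, y)  # torch.Size([2, 5]) tensor([0, 1])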
from typing import Any
import random
def getKeyByWeights(array: dict, item: int = 0) -> Any:
"""
Returns a weighted random key from array
:param array: dict[Any: int | float]
:param item: int
:return:
- key - Any
"""
return random.choices(list(array.keys()), weights=list(array.values()))[item] | 8f1d474c97466407ff643abce1ea0b12b3ebd951 | 13,512 |
def solucion_c(imc: float) -> str:
"""Devuelve una categoría de acuerdo al imc
:param imc: Índice de masa corporal
:imc type: float
:return: Categoría
:rtype: str
"""
categorias = {
imc < 16 : "criterio de ingreso hospitalario",
16 <= imc < 17 : "infrapeso",
17 <= imc < 18 : "bajo peso",
18 <= imc < 25 : "saludable",
25 <= imc < 30 : "sobrepeso",
30 <= imc < 35 : "sobrepeso crónico",
35 <= imc < 40 : "sobrepeso premórbida",
40 <= imc : "obesidad mórbida",
}
return categorias[True] | ca05b9b7af9df3237e8aa1170638e7e695e6bcad | 13,515 |
def splitter(model):
"""
Splits model parameters into multiple groups to allow fine-tuning
"""
params = list(model.parameters())
return [
# weights and biases of the first linear layer
params[:2],
# weights and biases of the second linear layer
params[2:],
] | 2ac16b536bd50884c50ade3fb67f6ca43a16799d | 13,517 |
def parse_id(text):
""" Parse and return the <job>:<id> string.
Args:
text String to parse
Returns:
This node's job name, this node's id
"""
sep = text.rfind(":")
if sep < -1:
raise ValueError("Invalid ID format")
nid = int(text[sep + 1:])
if nid < 0:
raise ValueError("Expected non-negative node ID")
return text[:sep], nid | 23dbc417abf7fb799da78e63acf18b2d0ffc450f | 13,526 |
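# Quick checks: only the last colon separates the ID, so job names may
# themselves contain colons.
print(parse_id("train:job:42"))  # ('train:job', 42)
print(parse_id("job:7"))         # ('job', 7)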
def substr_match(a, b):
"""
Verify substring matches of two strings.
"""
if (a is None) or (b is None):
return False
return a in b | 9b98d14b6ec5f2ab433eea92d377b5e1477fef64 | 13,535 |
def replace_negative(l, default_value=0):
"""
Replaces all negative values with default_value
:param l: Original list
:param default_value: The value to replace negatives values with. Default is 0.
:return: Number of values replaced
"""
n_replaced = 0
for i in range(len(l)):
if l[i] < 0:
l[i] = default_value
n_replaced += 1
return n_replaced | 431781a48a36a00329537b92b589cf223b945ca4 | 13,536 |
def getClassAttendance(moduleCode):
"""
Will take in the module code and use it to open the specified module's file and read it to create
the lists of names, presents, absents and excuses, to be returned for future use.
:param moduleCode:
:return: (tuple of lists)
"""
classData = open(f"{moduleCode}.txt", 'r')
studentNameList = []
presentList = []
absentList = []
excuseList = []
while True:
line = classData.readline().strip()
if line == "":
break
lineData = line.split(',')
studentNameList.append(lineData[0])
presentList.append(int(lineData[1]))
absentList.append(int(lineData[2]))
excuseList.append(int(lineData[3]))
classData.close()
# print(presentList)
# print(absentList)
# print(excuseList)
return studentNameList, presentList, absentList, excuseList | a618b90bd6be84fbe23c28ab8d71a57c108b76dc | 13,540 |
import yaml
def get_config(config):
"""Loads a yaml configuration file."""
with open(config, 'r') as stream:
return yaml.load(stream, Loader=yaml.FullLoader) | 58937acf5984b08193c78877fbb94c07c6d779df | 13,542 |
def is_rgb(image):
"""
Return True if image is RGB (ie 3 channels) for pixels in WxH grid
"""
return len(image.shape) == 3 and image.shape[-1] == 3 | f0452fc5f9b6eb69917f8b5de76329eb2e4f03b2 | 13,543 |
def _attr_list_to_dict(attr_list):
"""
_attr_list_to_dict -- parse a string like: host:ami, ..., host:ami into a
dictionary of the form:
{
host: ami
host: ami
}
if the string is in the form "ami" then parse to format
{
default: ami
}
raises ValueError if list can't be parsed
"""
attr_dict = {}
for host_attr in attr_list.split(","):
host_attr = host_attr.split(":")
if len(host_attr) == 1:
attr_dict["default"] = host_attr[0].strip()
elif len(host_attr) == 2:
attr_dict[host_attr[0].strip()] = host_attr[1].strip()
else:
raise ValueError("Can't split '%s' into suitable host"
" attribute pair" % host_attr)
return attr_dict | 378b1a55d908750eea2b457e35abce0a17364a41 | 13,547 |
def humansize(nbytes):
"""
Translates a size in bytes to a human readable size format
:param int nbytes: integer of size of torrent file
:return: size in bytes in a human readable format
:rtype: str
"""
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '{} {}'.format(f, suffixes[i]) | 98390c0f5c471b501caf2fa2bba1e593c8a17eb6 | 13,549 |
def pad_sents(sents, padding_token_index):
"""
Pad the sents (in word index form) to the same length so they can form a matrix
>>> pad_sents(sents, padding_token_index = -1)
[[1, 2, 3, -1, -1], [1, 2, -1, -1, -1], [1, 2, 3, 4, 5]]
"""
max_len_sent = max(sents,
key = lambda sent: len(sent))
max_len = len(max_len_sent)
get_padding = lambda sent: [padding_token_index] * (max_len - len(sent))
padded_sents = [(sent + get_padding(sent))
for sent in sents]
return padded_sents | 0063d8716f7081644e4353de662d58f0dc04e8fe | 13,550 |
def clamp(minimum, n, maximum):
"""Return the nearest value to n, that's within minimum to maximum (incl)
"""
return max(minimum, min(n, maximum)) | a3db191a733041196b8a3e0cfc83731e839a14aa | 13,552 |
def get_vehicle_mass(vehicle_info):
"""
Get the mass of a carla vehicle (defaults to 1500kg)
:param vehicle_info: the vehicle info
:type vehicle_info: carla_ros_bridge.CarlaEgoVehicleInfo
:return: mass of a carla vehicle [kg]
:rtype: float64
"""
mass = 1500.0
if vehicle_info.mass:
mass = vehicle_info.mass
return mass | 216f109e9f963ba6de92cf168055b1a7e516d777 | 13,555 |