content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
def sumevenfib(N):
    """
    N is a positive number.
    Sum all even Fibonacci numbers that do not exceed N.

    Uses the convention F_0 = 0, F_1 = 1; F_{3k} enumerates exactly the
    even Fibonacci numbers, so the loop advances three Fibonacci indices
    per iteration.
    """
    even_fib, next_fib = 0, 1  # F_{3k} and F_{3k+1}
    total = 0
    while even_fib <= N:
        total += even_fib
        # F_{3(k+1)} = 2*F_{3k+1} + F_{3k};  F_{3(k+1)+1} = 3*F_{3k+1} + 2*F_{3k}
        even_fib, next_fib = 2 * next_fib + even_fib, 3 * next_fib + 2 * even_fib
    return total
import collections
def _append_cell_contents(notebook):
    """Appends prior cell contents to a later cell dependent on labels.

    Iterates through a notebook, collects every cell that carries a
    ``label`` in its metadata, and rewrites the source of any cell that
    references one of those labels (via its ``ref_labels`` metadata list).
    Referenced contents are joined in order of appearance in the notebook.
    """
    Cell = collections.namedtuple('Cell', ['label', 'contents'])
    labelled_cells = []
    for current in notebook['cells']:
        metadata = current.get('metadata', {})
        label = metadata.get('label', None)
        ref_labels = metadata.get('ref_labels', [])
        if label is not None:
            labelled_cells.append(Cell(label, current['source']))
        elif ref_labels:
            current['source'] = '\n\n'.join(
                c.contents for c in labelled_cells if c.label in ref_labels
            ).strip()
    return notebook
def change_op(instructions, i, new_op):
    """
    Return a copy of `instructions` where the operation at index `i` is
    replaced by `new_op`, keeping that instruction's argument unchanged.
    """
    modified = list(instructions)
    _, arg = modified[i]
    modified[i] = (new_op, arg)
    return modified
def _intersection(A, B):
    """
    A simple function to find an intersection between two arrays.

    Keeps the order (and duplicates) of items in ``A``, like the original
    list-scan did, but uses a set for membership so the cost is
    O(len(A) + len(B)) instead of O(len(A) * len(B)).

    @type A: List
    @param A: First List
    @type B: List
    @param B: Second List
    @rtype: List
    @return: List of Intersections
    """
    b_members = set(B)  # O(1) membership tests (items must be hashable)
    return [item for item in A if item in b_members]
def compound_score(text, sia):
    """
    Compute VADER's compound polarity score for some text.

    Arguments:
        text : (string) the text to score
        sia: nltk.SentimentIntensityAnalyzer() object
    Returns:
        float between -1 and 1
    """
    scores = sia.polarity_scores(text)
    return scores['compound']
def recursively_split_version_string(input_version: str, output_version=None):
    """
    Splits a version/tag string into a list with integers and strings
    i.e. "8.0.0.RC10" --> [8, '.', 0, '.', 0, '.RC', 10]

    Input:
        input_version (str): a version or tag i.e. "8.0.0.RC10"
        output_version (list): accumulator used by the recursion; callers
            should normally omit it.  (The original used a mutable default
            ``[]``, which is a classic Python pitfall; ``None`` is safe.)
    Returns:
        list: the version/tag string in a list with integers and strings
        i.e. [8, '.', 0, '.', 0, '.RC', 10]
    Raises:
        TypeError: if input_version is not a str.
    """
    if not isinstance(input_version, str):
        raise TypeError(
            "The provided version should be a str data type but is of type {}.".format(
                type(input_version)
            )
        )
    if output_version is None:
        output_version = []
    # When the remaining part is all digits or contains no digits at all,
    # the split is finished.
    if input_version.isdigit() or not any(char.isdigit() for char in input_version):
        version = output_version + [input_version]
        return [int(segment) if segment.isdigit() else segment for segment in version]
    # Otherwise find the position where digit/non-digit flips, so that a
    # multi-digit number stays one integer.
    pos = 0
    while (
        input_version[pos].isdigit() == input_version[pos + 1].isdigit()
        and pos != len(input_version) - 2
    ):
        pos += 1
    return recursively_split_version_string(
        input_version[pos + 1:], output_version + [input_version[:pos + 1]]
    )
import types
import importlib
def import_pickle() -> types.ModuleType:
    """Return the cPickle module if available, the stdlib pickle module otherwise."""
    for module_name in ('cPickle', 'pickle'):
        try:
            return importlib.import_module(module_name)
        except ImportError:
            continue
def clean_euler_path(eulerian_path: list) -> list:
    """Deduplicate a Eulerian path so each undirected edge appears once.

    An edge and its reverse count as the same edge; only the first
    occurrence is kept.

    Arguments:
        eulerian_path {list} -- Eulerian path
    Returns:
        list -- cleaned Eulerian path
    """  # noqa
    cleaned = []
    for candidate in eulerian_path:
        already_seen = candidate in cleaned or candidate[::-1] in cleaned
        if not already_seen:
            cleaned.append(candidate)
    return cleaned
def frame_idx(fname):
    """Get frame index from filename: `name0001.asc` returns 1"""
    stem = fname[:-4]       # drop the 4-character extension (".asc")
    return int(stem[-4:])   # the last four characters before it are the index
import pickle
def load_calib(filename):
    """Loads calibration parameters from a '.pkl' file.

    Parameters
    ----------
    filename : str
        Path to load file, must be '.pkl' extension

    Returns
    -------
    calib_params : dict
        Parameters for undistorting images.

    Raises
    ------
    IOError
        If the file cannot be unpickled (e.g. it is not a pickle file).
    """
    # `with` guarantees the file handle is closed even when unpickling
    # fails (the original leaked it on error and used a bare `except`
    # that swallowed every exception type, including KeyboardInterrupt).
    with open(filename, 'rb') as pkl_file:
        try:
            calib_params = pickle.load(pkl_file)
        except (pickle.UnpicklingError, EOFError) as err:
            raise IOError("File must be '.pkl' extension") from err
    return calib_params
import re
def get_molecular_barcode(record,
                          molecular_barcode_pattern):
    """Return the molecular barcode in the record name.

    Parameters
    ----------
    record : screed record
        screed record containing the molecular barcode
    molecular_barcode_pattern: regex pattern
        molecular barcode pattern to detect in the record name

    Returns
    -------
    barcode : str
        Molecular barcode from the name, or None when it is absent.
    """
    matches = re.findall(molecular_barcode_pattern, record['name'])
    if not matches:
        return None
    # findall yields tuples when the pattern has groups; the barcode is
    # the second capture group of the first match.
    return matches[0][1]
def int_to_bytes(x):
    """Changes an unsigned integer into bytes."""
    # bit_length() of 0 is 0, so 0 encodes as b'' (matching the original).
    n_bytes = (x.bit_length() + 7) // 8
    return x.to_bytes(n_bytes, byteorder='big')
import uuid
def get_uuid(data):
    """Compute a deterministic (MD5-based, version 3) UUID for data.

    :param data: name to hash, as accepted by uuid.uuid3
    """
    generated = uuid.uuid3(uuid.NAMESPACE_URL, data)
    return str(generated)
def ssqrange(charge, sz, nsingle):
    """
    Make a list giving all possible :math:`S^{2}` values for given charge
    and :math:`S_{z}`.

    Parameters
    ----------
    charge : int
        Value of the charge.
    sz : int
        Value of sz.
    nsingle : int
        Number of single particle states.

    Returns
    -------
    list
        All possible :math:`S^{2}` values for the given charge and sz.
    """
    szmax = min(charge, nsingle - charge)
    values = []
    s = abs(sz)
    while s <= szmax:   # step by 2, same as range(abs(sz), szmax + 1, 2)
        values.append(s)
        s += 2
    return values
def split(iterable, **split_options):
    """Perform a split on iterable.

    This method is highly inspired in the `iter` global method (in conjunction
    with its __iter__ counterpart method) for iterable classes.

    :param iterable: An iterable, which will typically be a Storage<Collection>
    :param split_options: The optional additional arguments to the split method.
        May be ignored.
    :return: A collection of Split, or something similar. If iterable is not a
        Storage<Collection>, returns a tuple with a single element, the iterable
        argument itself
    """
    # Look the method up explicitly instead of `try: iterable.split(...)
    # except AttributeError`, so that an AttributeError raised *inside*
    # the split() implementation propagates instead of being silently
    # turned into the single-item fallback.
    split_method = getattr(iterable, 'split', None)
    if split_method is None:
        # Not a Storage<Collection>: yield a single-item tuple.
        return iterable,
    return split_method(**split_options)
def scale_vector(vector, scale):
    """Scale a 3D vector's components by scale."""
    x, y, z = vector[0], vector[1], vector[2]
    return x * scale, y * scale, z * scale
def lr_lambda(epoch, base=0.99, exponent=0.05):
    """Multiplier used for learning rate scheduling.

    Parameters
    ----------
    epoch: int
    base: float
    exponent: float

    Returns
    -------
    multiplier: float
        base raised to (exponent * epoch); decays smoothly with epoch.
    """
    decay_power = exponent * epoch
    return base ** decay_power
import string
def isValidMapKey(key):
    """Returns ``True`` if the given string is a valid key for use as a colour
    map or lookup table identifier, ``False`` otherwise. A valid key comprises
    lower case letters, numbers, underscores and hyphens.
    """
    allowed = set(string.ascii_lowercase) | set(string.digits) | {'_', '-'}
    return all(ch in allowed for ch in key)
def factory_class_name(model_class_name):
    """Return factory class name from model class"""
    return ''.join([model_class_name, 'Factory'])
import pathlib
import json
import re
def check_markers(test_mode=False):
    """Validate MBED TARGET LIST markers in PinNames.h files.

    Scans every ``PinNames.h`` under the targets tree (or under the local
    ``test_files`` directory when *test_mode* is True), extracts the target
    names from the "MBED TARGET LIST" comment block, and checks each name
    against ``targets/targets.json``.

    Args:
        test_mode: When True, scan bundled test fixtures instead of the
            real targets directory.

    Returns:
        list[dict]: one ``{"file": ..., "error": ...}`` entry per problem;
        empty when every marker is valid.
    """
    # NOTE(review): assumes this script lives three directory levels below
    # the mbed-os root -- confirm if the file is ever relocated.
    mbed_os_root = pathlib.Path(__file__).absolute().parents[3]
    errors = []
    with (
        mbed_os_root.joinpath("targets", "targets.json")
    ).open() as targets_json_file:
        targets_json = json.load(targets_json_file)
    if test_mode:
        search_dir = pathlib.Path(__file__).parent.joinpath('test_files').absolute()
    else:
        search_dir = mbed_os_root.joinpath('targets')
    for f in search_dir.rglob("PinNames.h"):
        with open(f) as pin_names_file:
            pin_names_file_content = pin_names_file.read()
        # Locate the "/* MBED TARGET LIST: ... */" comment block.  (Non-raw
        # string: "\/" is simply "/" -- the pattern matches "/", then "*"
        # repetitions, the marker text, and the closing "/".)
        target_list_match = re.search(
            "\/* MBED TARGET LIST: ([0-9A-Z_,* \n]+)*\/",
            pin_names_file_content
        )
        marker_target_list = []
        if target_list_match:
            # Extract individual target names: 3+ chars of A-Z, 0-9, "_".
            marker_target_list = list(
                re.findall(
                    r"([0-9A-Z_]{3,})",
                    target_list_match.group(1),
                    re.MULTILINE,
                )
            )
        if not marker_target_list:
            print("WARNING: MBED TARGET LIST marker invalid or not found in file " + str(f))
            errors.append({ "file": str(f), "error": "marker invalid or not found"})
            continue
        for target in marker_target_list:
            # A target is valid when it exists in targets.json and is not
            # explicitly marked "public": false.
            target_is_valid = False
            if target in targets_json:
                target_is_valid = True
                if "public" in targets_json[target]:
                    if targets_json[target]["public"] == False:
                        target_is_valid = False
            if not target_is_valid:
                print("WARNING: MBED TARGET LIST in file " + str(f) + " includes target '" + target + "' which doesn't exist in targets.json or is not public")
                errors.append({ "file": str(f), "error": "target not found"})
    return errors
def _group_counts_to_group_sizes(params, group_counts):
    """Convert numbers of groups to sizes of groups."""
    # Only the middle element of the 3-tuple (out_channels) is used.
    _, out_channels, _ = params
    group_sizes = []
    for count in group_counts:
        group_sizes.append(out_channels // count)
    return group_sizes
def __read_sequence_ids(data):
    """
    Reads SequenceIDs.txt (file included in OrthoFinder output) and parses
    it to a dict.

    :param data: list of lines in SequenceIDs.txt
    :return: dict with key: OrthoFinder ID and value: the proper name
    """
    output = {}
    for line in data:
        if line.strip() != '':
            # Split only on the FIRST ': ' so sequence names that
            # themselves contain ': ' are preserved intact (the original
            # unbounded split raised ValueError on such lines).
            key, name = line.split(': ', 1)
            output[key] = name
    return output
def snap_value(input, snap_value):
    """
    Returns snap value given an input and a base snap value

    :param input: float
    :param snap_value: float
    :return: float
    """
    # Nearest multiple of snap_value (round uses banker's rounding at .5).
    multiple = round(float(input) / snap_value)
    return multiple * snap_value
from pathlib import Path
def fixture(name: str):
    """
    Construct an absolute path to the named file in the fixture directory.
    """
    base = Path(__file__).parent.absolute()
    return str(base / 'fixtures' / name)
from datetime import datetime
def parseDate(timestamp):
    """Parses an EXIF date."""
    exif_format = '%Y:%m:%d %H:%M:%S'
    return datetime.strptime(timestamp, exif_format)
import torch
def one_hot_vector(length, index, device=torch.device('cpu')):
    """
    Create a one-hot vector of a specific length with a 1 in the given index.

    Args:
        length: Total length of the vector.
        index: Index of the 1 in the vector.
        device: Torch device (GPU or CPU) to load the vector to.

    Returns: Torch tensor of size [1 x length], all zeros except a single
    1.0 at `index`, allocated on `device`.
    """
    one_hot = torch.zeros(1, length, device=device)
    one_hot[0, index] = 1.0
    return one_hot
def get_minutes(seconds):
    """Convert seconds to whole minutes (floor division)."""
    minutes, _ = divmod(seconds, 60)
    return minutes
import math
def sumlog(v1, v2):
    """Returns the sum of two logspaced values in logspace.

    Computes log(exp(v1) + exp(v2)) stably: the larger value is factored
    out so the exponential never overflows.
    """
    if v1 < v2:
        v1, v2 = v2, v1
    # log1p(x) is more accurate than log(1 + x) when exp(v2 - v1) is tiny.
    return math.log1p(math.exp(v2 - v1)) + v1
import requests
def _get_bin_count(fpath, delimiter=',', encoding='ISO-8859-1'):
    """
    Gets the number of bins in the file.

    A line counts as a bin when its first delimited field parses as a
    NON-ZERO float; header lines, blank lines and zero-valued first
    fields are skipped.

    :param fpath: A path or url for the file (if a url it must
        include `http`, and if a file path it must not contain
        `http`).
    :type fpath: string
    :param delimiter: The delimiter between items in the file,
        defaults to ','.
    :type delimiter: string
    :param encoding: The encoding for the file, defaults to
        'ISO-8859-1'.
    :type encoding: string
    :return: The number of bins in the file.
    :rtype: int
    """
    bins = 0
    # NOTE(review): substring test is a crude URL check -- a local file
    # named e.g. "http_data.csv" would wrongly be fetched as a URL.
    if 'http' in fpath:
        req = requests.get(fpath)
        for line in req.iter_lines():
            try:
                # Count the line when the first field is a non-zero float.
                if float(line.decode(encoding).split(delimiter)[0]):
                    bins += 1
            except: pass  # NOTE(review): bare except silently skips unparsable lines
    else:
        with open(fpath, 'r', encoding=encoding) as f:
            for line in f:
                try:
                    if float(line.split(delimiter)[0]):
                        bins += 1
                except: pass  # NOTE(review): same silent skip as above
    return bins
def is_auxiliary_relation(token1, token2):
    """Return True if `token1` is an auxiliary dependent of `token2`."""
    token1_is_aux = token1.upos == "AUX" and token1.deprel in ("aux", "aux:pass")
    attaches_to_verb = token2.upos == "VERB" and token1.head == token2.id
    return token1_is_aux and attaches_to_verb
from datetime import datetime
def convert_string_date_to_datetime(now_str: str) -> datetime:
    """
    Converts a string of the format yyyy-mm-dd-hh-mm-ss to datetime format.

    :param now_str: str. String of the format yyyy-dd-yy-hh-mm-ss.
    """
    parts = [int(piece) for piece in now_str.split("-")]
    # Only the first six fields are used: year, month, day, hour, min, sec.
    return datetime(*parts[:6])
def toCategorical(df):
    """
    Change object-dtype columns in a pandas.DataFrame to category dtype.

    Parameters
    ----------
    df : pandas.DataFrame with train or test DataFrame.

    Returns
    -------
    df : pandas.DataFrame with new datatypes.
    """
    candidate_columns = ('availability', 'group', 'content', 'unit', 'pharmForm',
                         'campaignIndex', 'salesIndex', 'category', 'manufacturer')
    for name in candidate_columns:
        if name in df.columns:
            df[name] = df[name].astype('category')
    return df
def seperate_list(all_list, pred):
    """Given a predicate, seperate a list into a true and false list.

    Arguments:
        all_list {list} -- all items
        pred {function} -- predicate function

    Returns:
        tuple -- (items where pred is truthy, items where it is falsy)
    """
    matched, unmatched = [], []
    for element in all_list:
        bucket = matched if pred(element) else unmatched
        bucket.append(element)
    return matched, unmatched
def count_parameters(model):
    """
    Compute the number of trainable parameters of the model.

    :param model: type nn.Module
    :return: number of parameters, type int
    """
    total = 0
    for parameter in model.parameters():
        if parameter.requires_grad:
            total += parameter.numel()
    return total
from typing import Any
from typing import Dict
from typing import OrderedDict
def convert_dict_to_ordered_dict(qconfig_dict: Any) -> Dict[str, Dict[Any, Any]]:
    """Convert the qconfig lists in qconfig_dict to OrderedDicts.

    Each of the three known sections is replaced in place by an
    OrderedDict built from its (key, qconfig) pairs; a missing section
    becomes an empty OrderedDict.
    """
    for section in ('object_type', 'module_name_regex', 'module_name'):
        qconfig_dict[section] = OrderedDict(qconfig_dict.get(section, []))
    return qconfig_dict
def checkEmblFile(filin):
    """Check EMBL annotation file given by user.

    Reads only the first line; returns 0 when it starts with the 'ID'
    tag (valid EMBL header), 1 otherwise.
    """
    first_line = filin.readline()
    return 0 if first_line.startswith('ID') else 1
import math
def calc_distance(asteroid1, asteroid2):
    """Calculates the Euclidean distance between two asteroids.

    Each asteroid is an (x, y) pair.
    """
    x1, y1 = asteroid1
    x2, y2 = asteroid2
    # hypot avoids overflow/underflow in the intermediate squares and is
    # the idiomatic 2-D distance primitive.
    return math.hypot(x2 - x1, y2 - y1)
def gzipOA(directory=None, cmdverbose : int =0, add_check : bool = True, unzip_text : bool = False):
    """Create a shell script that compresses all .oa files in a directory with gzip.

    :param directory: directory the script should cd into first (None: stay put)
    :param cmdverbose: verbosity of the generated script (0 silent; >=2 and >=3
        add progress echo lines)
    :param add_check: when True, check each file's format with ``oainfo`` and
        only gzip files whose format code is not 0 (bash-specific loop)
    :param unzip_text: when True, first decompress any .oa.gz file that turns
        out to be plain text (oainfo format code 5)
    :return: the generated shell script as a single string
    """
    if directory is not None:
        cmd = 'cd %s\n' % directory
    else:
        cmd = ''
    if cmdverbose>=2:
        cmd += 'echo "Running script generated by gzipOA"\n'
    if unzip_text:
        # Undo accidental gzip of text-format files (oainfo format code 5).
        cmd += '# check for accidentially zipped text files\n'
        cmd += 'gzoafiles=$(ls *.oa.gz 2> /dev/null)\n'
        cmd += 'for f in $gzoafiles\ndo\n'
        cmd += 'format=$(oainfo -v 0 -f $f)\n'
        if cmdverbose>=3:
            cmd += 'echo "unzip? $f, format is $format"\n'
        cmd += 'if [ "$format" == 5 ]; then\n'
        if cmdverbose:
            cmd += 'echo "unzip $f, format is $format"\n'
        cmd += ' gzip -d -q -f $f; \n'
        cmd += 'fi\n'
        cmd += 'done\n'
        cmd += '\n'
    cmd += 'oafiles=$(ls *.oa 2> /dev/null)\n'
    cmd += 'noafiles=$(ls *.oa 2> /dev/null | wc -l)\n'
    cmd += '# echo "oafiles $oafiles\n'
    cmd += 'if [ "$noafiles" != "0" ]; then\n'
    if cmdverbose:
        cmd += 'echo "doing zip of $noafiles file(s)..."\n'
    if add_check:
        # bash only...
        cmd += 'for f in $oafiles\ndo\n'
        cmd += 'format=$(oainfo -v 0 -f $f)\n'
        cmd += 'if [ "$format" != 0 ]; then\n'
        if cmdverbose:
            cmd += 'echo "zip $f, format is $format"\n'
        cmd += ' gzip -q -f $f; \n'
        cmd += 'fi\n'
        cmd += 'done\n'
    else:
        cmd += ' gzip -q -f *.oa; \n'
    cmd += 'fi\n'
    return cmd
def get_record_count(hook, database, table):
    """Get the row count for a specific table.

    :param hook: database hook providing ``get_records`` (e.g. an Airflow DB hook)
    :param database: database whose information_schema is queried
    :param table: table name to look up
    :return: the ``row_count`` value for the table

    .. warning:: *database* and *table* are interpolated directly into the
       SQL string -- do not pass untrusted input (SQL injection risk);
       a parameterized query would be safer.
    """
    query = '''SELECT t.row_count as record_count
FROM "{}".information_schema.tables t
WHERE t.table_name = '{}';'''.format(database, table)
    # get_records returns a list of rows; take the single row's first column.
    return hook.get_records(query).pop()[0]
import fnmatch
def fpath_has_ext(fname, exts, case_sensitive=False):
    """Return True if the filename has any of the given extensions."""
    if case_sensitive:
        target = fname
        patterns = ['*' + ext for ext in exts]
    else:
        # Normalize both sides to lower case for case-insensitive matching.
        target = fname.lower()
        patterns = ['*' + ext.lower() for ext in exts]
    return any(fnmatch.fnmatch(target, pattern) for pattern in patterns)
def area_triangle(base: float, height: float) -> float:
    """
    Calculate the area of a triangle given the base and height.

    >>> area_triangle(10, 10)
    50.0
    >>> area_triangle(-1, -2)
    Traceback (most recent call last):
        ...
    ValueError: area_triangle() only accepts non-negative values
    >>> area_triangle(1, -2)
    Traceback (most recent call last):
        ...
    ValueError: area_triangle() only accepts non-negative values
    >>> area_triangle(-1, 2)
    Traceback (most recent call last):
        ...
    ValueError: area_triangle() only accepts non-negative values
    """
    if min(base, height) < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return base * height / 2
import math
def get_3d_pos_from_x_orientation(x_orientation, norm=1):
    """
    Get a 3d position x, y, z for a specific x orientation in degrees

    Args:
        - (float) orientation around x axis
        - (float) norm to rescale output vector
    Return:
        - (float) x position [0; 1] * norm
        - (float) y position [0; 1] * norm
        - (float) z position [0; 1] * norm
    """
    angle = math.radians(x_orientation)
    # Rotation about the x axis keeps the x component at zero.
    components = (0, -math.sin(angle), math.cos(angle))
    return tuple(c * norm for c in components)
import math
def invertedCantorParingFunction(z):
    """
    Invert the Cantor pairing function: map z back to its (x, y) pair.

    @see http://en.wikipedia.org/wiki/Pairing_function

    Uses integer-only arithmetic: ``math.isqrt`` is exact for arbitrarily
    large z, and ``//`` keeps the results ints (the original used ``/``,
    which produced floats on Python 3, contradicting the doctests below).

    >>> invertedCantorParingFunction(0)
    (0, 0)
    >>> invertedCantorParingFunction(1)
    (1, 0)
    >>> invertedCantorParingFunction(2)
    (0, 1)
    >>> invertedCantorParingFunction(3)
    (2, 0)
    >>> invertedCantorParingFunction(4)
    (1, 1)
    """
    w = (math.isqrt(8 * z + 1) - 1) // 2
    t = (w * w + w) // 2          # t is exact: w^2 + w is always even
    y = z - t
    x = w - y
    return (x, y)
def extract_data(source: str):
    """Read a list of newline-delineated ints from a text file.

    :param source: path of the file to read
    :return: list of ints, one per non-blank line
    """
    with open(source, "r") as f:
        lines = f.readlines()
    # Skip blank lines.  The original compared against the typo "/n"
    # (never matches), so int() crashed on any empty line.
    return [int(line) for line in lines if line.strip()]
def yes_no_dialog(prompt: str) -> bool:
    """Return true/false based on prompt string and Y/y or N/n response.

    Re-prompts indefinitely until the user enters exactly "Y"/"y"
    (returns True) or "N"/"n" (returns False); any other input repeats
    the question.
    """
    while True:
        print()
        print(prompt)
        print()
        s = input("Y/n to confirm, N/n to reject >")
        if s in ("Y", "y"):
            return True
        if s in ("N", "n"):
            return False
def getHuffmanCoding(tree, prefix="", code=None):
    """
    Recursively walk the tree and assign each symbol its Huffman code.

    An edge to a node's left child contributes "0" and an edge to its
    right child contributes "1"; a leaf's code is the concatenation of
    the edge labels on the path from the root.

    An internal node is recognized by an empty ``name``; leaves carry the
    symbol in ``name``.

    Parameters:
    ----------
    tree: dictionary
        Dictionary representing the (sub)tree to traverse.
    prefix: string
        Code of the path leading to this node.
    code: dictionary
        Accumulator mapping symbol -> code string.  Defaults to a fresh
        dict per top-level call (the original used a mutable default
        ``{}``, which leaked codes between independent calls).

    Returns:
    ------
    dictionary
        Mapping of each symbol to its Huffman code.
    """
    if code is None:
        code = {}
    if tree["left"]["name"] == "":
        # Internal node on the left: recurse with "0" appended.
        getHuffmanCoding(tree["left"], prefix + "0", code)
    else:
        # Leaf: record its code directly.
        code[tree["left"]["name"]] = prefix + "0"
    if tree["right"]["name"] == "":
        # Internal node on the right: recurse with "1" appended.
        getHuffmanCoding(tree["right"], prefix + "1", code)
    else:
        code[tree["right"]["name"]] = prefix + "1"
    return code
from typing import Dict
from typing import OrderedDict
def _scores_to_ranks(
    *, scores: Dict, reverse: bool = False
) -> Dict[str, float]:
    """
    Go from a score (a scalar) to a rank (integer). If two scalars are
    the same then they share the same rank.

    Takes a dictionary mapping result pk -> score and returns a
    dictionary mapping result pk -> rank.
    """
    ordered = OrderedDict(
        sorted(scores.items(), key=lambda item: item[1], reverse=reverse)
    )
    ranks = {}
    previous_score = None
    rank = None
    for position, (pk, score) in enumerate(ordered.items(), start=1):
        if score != previous_score:
            previous_score = score
            rank = position
        ranks[pk] = rank
    return ranks
def get_resource(requests, reference):
    """
    Fetch a resource and return its decoded JSON body.

    ``reference`` must be a relative reference, e.g. "Patient/12345".
    """
    response = requests.get(endpoint=reference, raise_for_status=True)
    body = response.json()
    return body
def printTime(t):
    """
    Takes time in seconds, and print in the output in human friendly
    format (DD:hh:mm:ss).

    :param t: duration in seconds (rounded to the nearest integer)
    :return: tuple of (formatted string, (days, hours, minutes, seconds))
    """
    t = round(t)
    if t < 60:
        return ("%d seconds" %t), (0, 0, 0, t)
    else:
        m = int(t/60)
        s = t%60
        # NOTE: `&` is bitwise AND; it works here because both operands
        # are bools, but unlike `and` it does not short-circuit.
        if (m < 60) & (m > 1):
            return ("%d minutes, %d seconds" %(m, s)), (0, 0, m ,s)
        elif m == 1:
            return ("%d minute, %d seconds" %(m, s)), (0, 0, m ,s)
        else:
            h = int(m/60)
            m = m%60
            if (h < 24) & (h > 1):
                return ("%d hours, %d minutes, %d seconds" %(h, m, s)), (0, h, m ,s)
            elif h == 1:
                return ("%d hour, %d minutes, %d seconds" %(h, m, s)), (0, h, m ,s)
            else:
                d = int(h/24)
                h = h%24
                if d > 1:
                    return ("%d days, %d hours, %d minutes, %d seconds" %(d, h, m, s)), (d, h, m ,s)
                else:
                    return ("%d day, %d hour, %d minutes, %d seconds" %(d, h, m, s)), (d, h, m ,s)
import re
def sanitizeTitle(title):
    """Sanitize the passed title string so the file can be saved on Windows
    without introducing illegal characters.

    Removes every occurrence of / \\ : * ? " < > | from the string; a
    single re.sub replaces the original findall-and-join round trip.
    """
    return re.sub(r'[/\\:*?"<>|]', "", title)
def get_alias(dataset):
    """Get alias for dataset.

    Parameters
    ----------
    dataset : dict
        Dataset metadata.

    Returns
    -------
    str
        Alias.
    """
    alias = f"{dataset['project']} dataset {dataset['dataset']}"
    extras = [dataset[key] for key in ('mip', 'exp', 'ensemble') if key in dataset]
    if extras:
        alias += f" ({', '.join(extras)})"
    if 'start_year' in dataset and 'end_year' in dataset:
        alias += f" from {dataset['start_year']:d} to {dataset['end_year']:d}"
    return alias
import bz2
def compress(data):
    """
    Helper function to compress data (using bz2).

    Uses the one-shot ``bz2.compress`` API, which is equivalent to
    feeding a BZ2Compressor and flushing it (same default compresslevel).
    """
    return bz2.compress(data)
import six
def lowercase_value(value):
    """
    Lowercase the provided value.

    In case of a list, all the string item values are lowercased and in case of
    a dictionary, all of the string keys and values are lowercased.

    Anything else is returned unchanged.
    """
    # `six.string_types` is just `(str,)` on Python 3, so the six shim is
    # dropped; this also removes the loop variable that shadowed `value`.
    if isinstance(value, str):
        return value.lower()
    if isinstance(value, (list, tuple)):
        # Non-string items are coerced to str, matching the original.
        return [str(item).lower() for item in value]
    if isinstance(value, dict):
        return {key.lower(): str(item).lower() for key, item in value.items()}
    return value
def are_checksums_equal(checksum_a_pyxb, checksum_b_pyxb):
    """Determine if checksums are equal.

    Args:
        checksum_a_pyxb, checksum_b_pyxb: PyXB Checksum objects to compare.

    Returns: bool
        - ``True``: Same hexadecimal value calculated with the same algorithm
          (which for all practical purposes means the same byte sequence).
        - ``False``: Same algorithm but different hexadecimal values.

    Raises:
        ValueError
            The checksums were calculated with different algorithms, hence
            cannot be compared.
    """
    algorithm_a = checksum_a_pyxb.algorithm
    algorithm_b = checksum_b_pyxb.algorithm
    if algorithm_a != algorithm_b:
        raise ValueError(
            "Cannot compare checksums calculated with different algorithms. "
            'a="{}" b="{}"'.format(algorithm_a, algorithm_b)
        )
    # Hex digests are compared case-insensitively.
    return checksum_a_pyxb.value().lower() == checksum_b_pyxb.value().lower()
def unify_str_list(strlist):
    """
    Remove duplicates and sort list of strings
    :param strlist: list of strings
    :return: sorted unique list
    """
    unique_values = set(strlist)
    return sorted(unique_values)
def get_header(cursor):
    """
    Given a cursor object, returns the appropriate header (column names)
    """
    names = []
    for column in cursor.description:
        name = column[0]
        # Strip a leading "table." qualifier when present.
        _, dot, suffix = name.partition(".")
        names.append(suffix if dot else name)
    return names
def get_other_player(player):
    """
    Get the other player.
    """
    if player == "O":
        return "X"
    return "O"
def sep_nondummies(data):
    """
    Partition feature columns by their number of distinct values.

    Columns with more than 10 distinct non-null values are treated as
    continuous; columns with 3-10 distinct values are nominal (they need
    to be dummified); binary columns are ignored.

    Args:
        data: DataFrame containing the dataset
    Returns:
        [nominal, continuous]: two lists of column names
    """
    nominal, continuous = [], []
    for name in data.columns:
        n_unique = data[name].dropna().nunique()
        if n_unique > 10:
            continuous.append(name)
        elif n_unique > 2:
            nominal.append(name)
    return [nominal, continuous]
def PortToTag(switch, port):
    """Returns the tag for a port."""
    return 'switches.{}.{:d}'.format(switch, port)
def _compose_attribute_file(attributes):
    """Make the contents of an osg attributes file.

    :param attributes: mapping of variable name -> value.  A value of None
        produces a commented-out placeholder; a list value produces one
        assignment line per item (the last one wins when the file is
        sourced).  Keys of the form ``NAME[idx]`` are treated as shell
        arrays and only the bare ``NAME`` is exported, once.
    :return: the complete attribute shell-script text as a string
    """
    def islist(var):
        # Helper: list values are expanded to one assignment per item.
        return isinstance(var, list)
    variable_string = ""
    export_string = ""
    # keep a list of array variables
    array_vars = {}
    keys = sorted(attributes.keys())
    for key in keys:
        value = attributes[key]
        if value is None:
            variable_string += "# " + key + " is undefined\n"
            continue
        # Special case for SOFTWARE-1567 (let user explicitly unset OSG_APP)
        if key == 'OSG_APP' and (value == 'UNSET' or (islist(value) and 'UNSET' in value)):
            variable_string += 'unset OSG_APP\n'
        elif islist(value):
            for item in value:
                variable_string += '%s="%s"\n' % (key, item)
        else:
            variable_string += '%s="%s"\n' % (key, value)
        if len(key.split('[')) > 1:
            # Array-style key (NAME[idx]): export the bare NAME only once.
            real_key = key.split('[')[0]
            if real_key not in array_vars:
                export_string += "export %s\n" % key.split('[')[0]
                array_vars[real_key] = ""
        else:
            # 'OSG_APP' is a special case for SOFTWARE-1567
            if value is not None and not (key == 'OSG_APP' and value == 'UNSET'):
                export_string += "export %s\n" % key
    file_contents = """\
#!/bin/sh
#---------- This file automatically generated by osg-configure
#---------- This is periodically overwritten. DO NOT HAND EDIT
#---------- Instead, write any environment variable customizations into
#---------- the config.ini [Local Settings] section, as documented here:
#---------- https://opensciencegrid.github.io/docs/other/configuration-with-osg-configure/#local-settings
#--- variables -----
%s
#--- export variables -----
%s
""" % (variable_string, export_string)
    return file_contents
from typing import Optional
from typing import Dict
from typing import Any
import json
def input_dictionary_to_parameter(input_dict: Optional[Dict[str, Any]]) -> str:
    """Convert json input dict to encoded parameter string.

    This function is required due to the limitation on YAML component
    definition that YAML definition does not have a keyword for apply quote
    escape, so the JSON argument's quote must be manually escaped using
    this function.

    Args:
        input_dict: The input json dictionary.
    Returns:
        The encoded string used for parameter.
    """
    if not input_dict:
        return ''
    inner_json = json.dumps(input_dict)
    quoted = json.dumps(inner_json)
    # Strip the outer quotes, e.g. "foo" -> foo
    return quoted[1:-1]
def valid_int(param, allow_zero=False, allow_negative=False):
    """Validate that param is an integer, raise an exception if not"""
    try:
        param + 1  # very permissive integer typecheck (floats pass too)
    except TypeError:
        raise TypeError(
            "Expected integer but found argument of type '{}'".format(type(param)))
    if param < 0 and not allow_negative:
        raise ValueError("Expected nonnegative number but got '{}'".format(param))
    if param == 0 and not allow_zero:
        raise ValueError("Expected nonzero number but got '{}'".format(param))
    return param
def version_to_string(version, parts=3):
    """ Convert an n-part version number encoded as a hexadecimal value to a
    string. version is the version number. Returns the string.
    """
    # One byte per part, most significant first; at most 3 parts, at least 1.
    shifts = [16, 8, 0][:max(parts, 1)]
    return '.'.join(str((version >> shift) & 0xff) for shift in shifts)
from pathlib import Path
from typing import Tuple
def create_alignment_job_directory_structure(output_directory: Path) -> Tuple[Path, Path, Path]:
    """Create directory structure for a tilt-series alignment job.

    Creates (if necessary) and returns the 'stacks', 'external' and
    'tilt_series' subdirectories of *output_directory*, in that order.
    """
    created = []
    for subdirectory_name in ('stacks', 'external', 'tilt_series'):
        subdirectory = output_directory / subdirectory_name
        subdirectory.mkdir(parents=True, exist_ok=True)
        created.append(subdirectory)
    return tuple(created)
import struct
import pickle
def indexed_pickler_load_keys(path):
    """
    Return the list of keys in the indexed pickle.

    The file layout is: an 8-byte unsigned little/native ("Q") length
    header, followed by a pickled index dict of exactly that many bytes.
    """
    with open(path, "rb") as handle:
        (index_len,) = struct.unpack("Q", handle.read(8))
        index = pickle.loads(handle.read(index_len))
    return index.keys()
import re
def convert_to_filename(sample_name):
    """Sanitize ``sample_name`` into a safe filename.

    Leading/trailing whitespace is stripped, internal spaces become
    underscores, and any character other than unicode alphanumerics,
    dashes and underscores is removed.
    """
    candidate = sample_name.strip().replace(" ", "_")
    return re.sub(r"(?u)[^-\w]", "", candidate)
import math
def get_rgba_from_triplet(incolour: list, alpha=1, as_string=False):
    """Convert an RGB(A) triplet of 0-1 floats to Plotly form.

    When ``as_string`` is True, return an ``'rgba(r,g,b,a)'`` string using
    ``alpha`` (rendered as a float); otherwise return the list of three
    integer RGB values.  A 4th entry in ``incolour`` is ignored.

    E.g. [0.9677975592919913, 0.44127456009157356, 0.5358103155058701]
    -> 'rgba(246,112,136,1.0)'
    """
    assert (
        3 <= len(incolour) <= 4
    ), "`incolour` must be a list of 3 or 4 values; ignores 4th entry"
    r, g, b = (max(0, int(math.floor(channel * 255))) for channel in list(incolour)[0:3])
    if as_string:
        return f"rgba({r},{g},{b},{float(alpha)})"
    return [r, g, b]
def build_factorized(factorized: dict) -> int:
    """Rebuild an integer from its factorized form (inverse of factorization).

    :param factorized: mapping of factor -> exponent
    :return: product of factor**exponent over all entries (1 for an empty dict)
    """
    product = 1
    for base, exponent in factorized.items():
        product *= base ** exponent
    return int(product)
import re
def check_hgt(value):
    """Validate a height string: a number followed by 'cm' or 'in'.

    - cm: the number must be in [150, 193]
    - in: the number must be in [59, 76]
    """
    match = re.match(r"^([0-9]{2,3})(cm|in)$", value)
    if not match:
        return False
    magnitude = int(match.group(1))
    if match.group(2) == "cm":
        return 150 <= magnitude <= 193
    return 59 <= magnitude <= 76
from typing import List
def get_available_executors() -> List[str]:
    """List the executor names accepted by `run_notebook` and `run_notebooks`."""
    executors = (
        'simple_executor',
        'pandas_miner',
        'plotly_miner',
        'mpl_seaborn_viz_miner',
    )
    return list(executors)
import re
def convert_numerical(s):
    """Convert a string to int or float when possible; otherwise return it unchanged.

    :param str s: input string
    :return: int, float, or the original value

    >>> convert_numerical('-1')
    -1
    >>> convert_numerical('-2.0')
    -2.0
    >>> convert_numerical('.1')
    0.1
    >>> convert_numerical('-0.')
    -0.0
    >>> convert_numerical('abc58')
    'abc58'
    """
    text = str(s)
    # NOTE: the decimal point must be escaped.  The previous patterns used a
    # bare '.', which matches any character, so inputs like '1a' matched the
    # float pattern and float('1a') raised ValueError.
    if re.match(r"^[-]?\d+$", text):
        return int(s)
    if re.match(r"^[-]?\d+\.\d*$", text) or re.match(r"^[-]?\d*\.\d+$", text):
        return float(s)
    return s
def truncate_string(string, truncation, message=''):
    """Limit ``string`` to at most ``truncation`` characters.

    When truncating, ``message`` (e.g. '...<truncated>') is appended so the
    result is still no longer than ``truncation``.  Passing
    ``truncation=None`` disables truncation entirely.

    :param string: A string
    :param truncation: Maximum length (int) or None
    :param message: Optional suffix explaining the truncation
    :return: A string no longer than ``truncation``
    """
    if truncation is None:
        return string
    assert isinstance(truncation, int)
    if len(string) <= truncation:
        return string
    keep = truncation - len(message)
    return string[:keep] + message
def _(x):
"""Identity function for string extraction."""
return x | ba44c55c38d0957374f759f84d17344fd7b53c7b | 22,809 |
def take(l, n):
    """Return the first ``n`` elements from iterable ``l``.

    Raises StopIteration if ``l`` yields fewer than ``n`` elements.
    """
    iterator = iter(l)
    collected = []
    while len(collected) < n:
        collected.append(next(iterator))
    return collected
from typing import Union
def cpf_format(cpf: Union[int, str, float]) -> str:
    """Format a Brazilian CPF as ``'XXX.XXX.XXX-XX'``.

    Non-digit characters are stripped and the number is left-padded with
    zeros to 11 digits.

    Args:
        cpf (Union[int, str, float]): CPF
    Returns:
        str: formatted CPF, or '' when no digits are present
    """
    try:
        if type(cpf) is float:
            cpf = int(cpf)
        digits = ''.join(ch for ch in str(cpf) if ch.isdigit())
        digits = str(int(digits)).zfill(11)
        return f'{digits[:3]}.{digits[3:6]}.{digits[6:9]}-{digits[9:]}'
    except ValueError:
        return ''
def contains(value, arg):
    """Case-insensitively check whether ``arg`` occurs inside ``value``.

    Args:
        value (str): The string
        arg (str): The substring
    Returns:
        bool: True if ``value`` contains ``arg`` ignoring case, else False
    """
    return arg.upper() in value.upper()
import click
def common_cv_options(f):
    """Attach the shared cross-validation CLI options to command ``f``."""
    # Applied in the same order as before: folds, replicates, threads, phenotype.
    decorators = (
        click.option('--folds', type=int, default=5, help='Number of folds in CV.'),
        click.option('--replicates', type=int, default=10, help='Number of replicates for CV.'),
        click.option('--threads', type=int, default=1, help='Number of threads to use.'),
        click.option('--phenotype', type=click.Path(exists=True),
                     required=True, help='Phenotype file path.'),
    )
    for decorator in decorators:
        f = decorator(f)
    return f
from pathlib import Path
from typing import Dict
from typing import Any
import json
def load_json(path: Path) -> Dict[str, Any]:
    """Parse the JSON file at ``path`` and return the resulting dictionary."""
    with open(path) as handle:
        return json.load(handle)
def _show_consistency(field, message, is_consistent):
"""
If `is_consistent` is false,
make `field` RED with tooltip caption `message` & return 1.
Otherwise reset colour and tooltip caption of `field` to
default values, and return 0.
"""
if is_consistent is True:
field.setStyleSheet("")
field.setToolTip("")
return 0
else:
field.setStyleSheet("QLineEdit { background-color : red;}")
field.setToolTip(message)
return 1 | 9276761a629d85005bc499071b8a6ae00bdf23dc | 22,824 |
import math
def ceil_log2(num):
    """Return ceil(log2(num)) as an exact integer.

    ``num`` is truncated to an int first (matching the previous behaviour).

    Raises:
        ValueError: if ``num`` truncates to zero or a negative value.
    """
    n = int(num)
    if n <= 0:
        # math.log raised ValueError for these inputs before; keep the contract.
        raise ValueError("math domain error")
    # Integer bit twiddling avoids the float rounding of math.log(n, 2),
    # which can overshoot on exact powers of two (e.g. 2**29).
    return (n - 1).bit_length()
def prior_transform(self, unit_coords, priors, prior_args=[]):
    """Map unit-cube coordinates to parameter space for nested sampling.

    Each coordinate in ``unit_coords`` is passed through the matching
    prior's ``unit_transform``; keyword arguments for prior ``i`` may be
    supplied via ``prior_args[i]``.  A list is returned (accommodating
    vector parameters), so something like ``theta_array=np.concatenate(*theta)``
    may be needed downstream.

    :param unit_coords:
        Coordinates on the unit prior hyper-cube. Iterable.
    :param priors:
        A list of `Prior` objects, iterable of same length as `unit_coords`.
    :param prior_args: (optional)
        A list of dictionaries of prior function keyword arguments.
    :returns theta:
        A list of parameter values corresponding to the given coordinates on
        the prior unit hypercube.
    """
    theta = []
    for index, (coord, prior) in enumerate(zip(unit_coords, priors)):
        kwargs = prior_args[index] if index < len(prior_args) else {}
        theta.append(prior.unit_transform(coord, **kwargs))
    return theta
def is_control_gate(op_type: str) -> bool:
    """Return True if ``op_type`` names a gate that includes a control (q)bit."""
    controlled_ops = (
        "CX", "CY", "CZ", "CH",
        "CRx", "CRy", "CRz",
        "CU1", "CU3",
        "CV", "CVdg", "CSx", "CSXdg",
        "CSWAP", "CnRy", "CnX", "CCX",
        "Control", "QControlBox", "Conditional",
    )
    return op_type in controlled_ops
import requests
def get_value(sensor_id):
    """Fetch the latest reading for ``sensor_id`` from the Luftdaten API.

    Returns the newest sensor value as a float, or None when the API
    returns no data.
    """
    response = requests.get(
        f"https://api.luftdaten.info/v1/sensor/{sensor_id}/",
        headers={"Host": "api.luftdaten.info"},
    )
    payload = response.json()
    if not payload:
        return None
    return float(payload[-1]["sensordatavalues"][0]["value"])
import math
def EvalExponentialPdf(x, lam):
    """Evaluate the exponential distribution's PDF at ``x``.

    x: value
    lam: rate parameter lambda, in events per unit time
    returns: float probability density
    """
    density = lam * math.exp(-lam * x)
    return density
def service_model(service):
    """Build the serializable model dict describing a service.

    ``prefix`` falls back to '' when the service has no server, and
    ``pid`` to 0 when no process is running.
    """
    model = {}
    model['name'] = service.name
    model['admin'] = service.admin
    model['url'] = service.url
    model['prefix'] = service.server.base_url if service.server else ''
    model['command'] = service.command
    model['pid'] = service.proc.pid if service.proc else 0
    model['info'] = service.info
    return model
from pathlib import Path
def scanfolder_glob(folder):
    """Recursively scan ``folder`` for mp3 files.

    :param folder: root folder of mp3 files
    :return: generator of pathlib.Path objects for the mp3 files found
    """
    # Path.rglob('*.mp3') is equivalent to glob('**/*.mp3').
    return Path(folder).rglob('*.mp3')
import re
def _get_base_key(key):
"""Extracts the base key from the provided key.
Earth Engine exports `TFRecords` containing each data variable with its
corresponding variable name. In the case of time sequences, the name of the
data variable is of the form `variable_1, variable_2, ..., variable_n`,
where `variable` is the name of the variable, and n the number of elements
in the time sequence. Extracting the base key ensures that each step of the
time sequence goes through the same normalization steps.
The base key obeys the following naming pattern: `([a-zA-Z]+)`
For instance, for an input key `variable_1`, this function returns `variable`.
For an input key `variable`, this function simply returns `variable`.
Args:
key: Input key.
Returns:
The corresponding base key.
Raises:
ValueError when `key` does not match the expected pattern.
"""
match = re.fullmatch(r'([a-zA-Z]+)', key)
if match:
return match.group(1)
raise ValueError(
f'The provided key does not match the expected pattern: {key}') | be919ec7c038eac4bbfebc779cb1e05ef068dabb | 22,854 |
import click
from pathlib import Path
def _cbk_opt_inventory(ctx: click.Context, param, value):
    """Click option callback: read the inventory file into a list of lines."""
    try:
        contents = Path(value).read_text()
    except Exception as exc:
        ctx.fail(f"Unable to load inventory file '{value}': {str(exc)}")
    else:
        return contents.splitlines()
def _get_leaf(node):
"""
Gets innermost definition of Typedef or Struct-/UnionMember.
Other nodes pass through.
"""
while getattr(node, 'definition', None):
node = node.definition
return node | a1430722e4efaeb433c371046814556cc93e786b | 22,861 |
import pkg_resources
def pkg_req(text):
    """Parse a requirement string, tolerating any failure.

    :param str|None text: Text to parse
    :return pkg_resources.Requirement|None: parsed requirement, or None when
        ``text`` is falsy or not a valid requirement
    """
    if not text:
        return None
    try:
        return pkg_resources.Requirement(text)
    except Exception:
        return None
def backends_mapping(custom_backend):
    """Create 2 separate backends keyed by their service path:

    - "/echo"   -> custom_backend("echo")
    - "/quotes" -> custom_backend("quotes")
    """
    return {f"/{name}": custom_backend(name) for name in ("echo", "quotes")}
def crud_url_name(model, action, prefix=None):
    """Build the URL name '<prefix><app_label>_<model>_<action>' for ``model``."""
    if prefix is None:
        prefix = ""
    app_label = model._meta.app_label
    return f"{prefix}{app_label}_{model.__name__.lower()}_{action}"
import logging
def split_list(source_list, splitter_algo: str, splitter_args, randomizer):
    """Split items out of ``source_list`` (which is MODIFIED in place).

    :param source_list: list to split; the selected items are removed from it
    :param splitter_algo: splitting algorithm; only 'randomFraction' is supported
    :param splitter_args: arguments for the algorithm, e.g. {'fraction': '0.25'}
    :param randomizer: random.Random-like source used for sampling
    :return: list of the elements removed from ``source_list``
    """
    if splitter_algo != 'randomFraction':
        logging.error(f"Unknown validation algorithm '{splitter_algo}', not splitting list.")
        return []
    count = round(len(source_list) * float(splitter_args['fraction']))
    chosen = randomizer.sample(range(0, len(source_list)), count)
    removed = []
    # Delete from the highest index down so earlier removals don't shift
    # the positions of later ones.
    for position in sorted(chosen, reverse=True):
        removed.append(source_list.pop(position))
    return removed
def geoIsPointInPoly(loc, poly):
    """
    Determine if a point is inside a polygon via ray casting.  Points that
    coincide with a polygon vertex are considered "inside"; other perimeter
    points follow the ray-casting parity rule.

    Parameters
    ----------
    loc: list
        The coordinate of the point, in [lat, lon] format
    poly: list of lists
        The polygon, in [[lat, lon], [lat, lon], ..., [lat, lon]] format

    Returns
    -------
    boolean
        True when the point lies inside the polygon
    """
    if loc in poly:
        return True
    y, x = loc[0], loc[1]
    inside = False
    prev = len(poly) - 1
    for cur in range(len(poly)):
        yi, xi = poly[cur][0], poly[cur][1]
        yj, xj = poly[prev][0], poly[prev][1]
        # Edge crosses the horizontal line through the point; test which side.
        crosses = (yi > y) != (yj > y)
        if crosses and x < (xj - xi) * (y - yi) / float(yj - yi) + xi:
            inside = not inside
        prev = cur
    return inside
def q2mat(q):
    """
    Generate a left rotation matrix from a normalized quaternion.

    Parameters
        q: The normalized quaternion (list of 4 components)
    Returns
        u: The 3x3 rotation matrix (2-dimensional list)
    """
    a, b, c, d = q[0], q[1], q[2], q[3]
    return [
        [a * a + b * b - c * c - d * d,
         2.0 * (b * c - a * d),
         2.0 * (b * d + a * c)],
        [2.0 * (c * b + a * d),
         a * a - b * b + c * c - d * d,
         2.0 * (c * d - a * b)],
        [2.0 * (d * b - a * c),
         2.0 * (d * c + a * b),
         a * a - b * b - c * c + d * d],
    ]
def obj_to_dict(obj):
    """Snapshot an object's public attributes into a dict.

    Walks ``dir(obj)`` and keeps every attribute whose name does not start
    with an underscore (private and dunder members are skipped; bound
    methods are included, since ``dir`` lists them too).  The use case is
    passing an object's data across the app/task barrier in a form a
    JSON-based serializer can handle.

    Args:
        obj: An opaque object
    Returns:
        dict: attribute name -> value for the public attributes of ``obj``
    """
    result = {}
    for name in dir(obj):
        if name.startswith('_'):
            continue
        result[name] = getattr(obj, name)
    return result
def selected_list(element):
    """Return the list of selected indexes from an element dict.

    Uses the explicit 'selected' list when present; otherwise parses the
    comma-separated 'value' string into ints.
    """
    if 'selected' in element:
        return element['selected']
    return [int(token.strip()) for token in element['value'].split(',')]
def check_inputs(input1, input2):
    """
    Validate that ``input1`` is a list of numbers, ``input2`` is a number,
    and ``input2`` occurs in ``input1``.  Raises TypeError when any
    condition fails.

    >>> check_inputs([1, 2.0, 3.0, 4], 4)
    'Input validated'
    >>> check_inputs([], 1)
    Traceback (most recent call last):
    ...
    TypeError: input2 not in input1
    >>> check_inputs(1, 1)
    Traceback (most recent call last):
    ...
    TypeError: input1 is not the correct type
    >>> check_inputs([1, 2, 'hi'], 4)
    Traceback (most recent call last):
    ...
    TypeError: The element at index 2 is not numeric
    >>> check_inputs([1.0, 2.0, 3.0], 'hello')
    Traceback (most recent call last):
    ...
    TypeError: input2 is not the correct type
    """
    if not isinstance(input1, list):
        raise TypeError('input1 is not the correct type')
    for position, item in enumerate(input1):
        if not isinstance(item, (int, float)):
            raise TypeError('The element at index ' + str(position) + ' is not numeric')
    # Deliberately exact type check (not isinstance), so bools are rejected.
    if type(input2) not in (float, int):
        raise TypeError('input2 is not the correct type')
    if input2 not in input1:
        raise TypeError('input2 not in input1')
    return 'Input validated'
import struct
def format_oath_code(response: bytes, digits: int = 6) -> str:
    """Format an OATH code from a hash response.

    Applies RFC 4226 dynamic truncation: the low nibble of the last byte
    selects a 4-byte window, whose top bit is masked off, and the result is
    reduced modulo 10**digits and zero-padded.
    """
    offset = response[-1] & 0xF
    truncated = struct.unpack_from(">I", response, offset)[0] & 0x7FFFFFFF
    return str(truncated % 10 ** digits).rjust(digits, "0")
def select_candidates(candidate_data, number):
    """Pick the ``number`` candidates with the lowest predicted bandgap.

    :param candidate_data: DataFrame with a 'bandgap_pred' column
    :param number: how many rows to keep
    :return: DataFrame of the top candidates, lowest bandgap first
    """
    return candidate_data.sort_values(by='bandgap_pred').head(number)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.