content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---|
import re
import json
def _DeviceSpecsToTFCRunTarget(device_specs):
"""Convert device specs to TFC run target format."""
groups = []
for spec in device_specs:
attrs = []
for match in re.finditer(r'([^\s:]+):(\S+)', spec):
key = match.group(1)
value = match.group(2)
attrs.append({'name': key, 'value': value})
groups.append({'run_targets': [{'name': '*', 'device_attributes': attrs}]})
obj = {'host': {'groups': groups}}
return json.dumps(obj) | 7d983f591237e015b8c4cc5ebd8eb17d5e934425 | 25,483 |
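A quick usage sketch for the function above; the device-spec string is a hypothetical example of the whitespace-separated `key:value` format the regex expects.

specs = ['device_serial:ABC123 sim_state:READY']
print(_DeviceSpecsToTFCRunTarget(specs))
# -> {"host": {"groups": [{"run_targets": [{"name": "*", "device_attributes":
#    [{"name": "device_serial", "value": "ABC123"},
#     {"name": "sim_state", "value": "READY"}]}]}]}}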
def save_v1_timecodes_to_file(filepath, timecodes, videos_fps, default_fps=10 ** 10):
"""
:param filepath: path of the file for saving
:param timecodes: timecodes in format
[[start0, end0, fps0], [start1, end1, fps1], ... [start_i, end_i, fps_i]]
:param videos_fps: float fps of video
:param default_fps: fps of uncovered pieces
    :return: the (closed) file object to which the timecodes were saved
"""
with open(filepath, "w") as file:
file.write("# timecode format v1\n")
file.write(f"assume {default_fps}\n")
for elem in timecodes:
elem = [int(elem[0] * videos_fps), int(elem[1] * videos_fps), elem[2]]
elem = [str(n) for n in elem]
file.write(",".join(elem) + "\n")
return file | c873f9d0d4f2e4cdbfa83e2c5c2d5b48d3ca2713 | 25,488 |
def blocks(text):
"""Split the text into blocks deliminated by a blank line."""
return text.split("\n\n") | bda99561d35b729203fb7fe945c23147c62ebc24 | 25,495 |
def get_lang_start_ind(doc_wordlens, doc_names):
"""Takes in a Pandas DataFrame containing the word length histograms
for ALL documents, doc_wordlens, and a dictionary that maps each column
name (keys) to a string that describes each document (values). Returns
a list of each unique language in the document description, doc_langs,
and the corresponding starting row index for each language, doc_lrsinds.
"""
doc_langs = []
doc_lrsinds = []
for ci, cn in enumerate(doc_wordlens.columns):
lang = doc_names[cn].split("_")[0]
if lang not in doc_langs:
doc_lrsinds.append(ci)
doc_langs.append(lang)
return (doc_langs, doc_lrsinds) | 24cdee2c7527f25f50dc4feb75f3958723d6684b | 25,499 |
def convert_string(x):
"""
    Convert the string to lower case and strip all characters not in [a-z0-9 _-]
:param str x: the string to convert
:return: the converted string
:rtype: str
"""
# we define the things to keep this way, just for clarity and in case we want to add other things.
wanted = set()
# lower case letters
wanted.update(set(range(97, 123)))
# numbers
wanted.update(set(range(48, 58)))
# - and _
wanted.update({45, 95})
# space
wanted.add(32)
s = ''
for c in x:
if ord(c) in wanted:
s += c
elif 65 <= ord(c) <= 90:
s += chr(ord(c) + 32)
return s | e43a5da3815aac5a59bbb91a97727e257e831f14 | 25,504 |
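A minimal check of the filtering above: upper-case letters are lower-cased and anything outside [a-z0-9 _-] is dropped.

print(convert_string('Hello, World! (v2)'))  # -> 'hello world v2'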
def string_to_list(s):
"""
Convert argument string (of potentially a list of values) to a list of strings
:param s: str
:return: list[str]
"""
if s is not None:
s = [c.strip() for c in s.split(',')]
return s | a5fd5b7349f3450805157e2de88a1a1b15974390 | 25,506 |
def normalize_repr(v):
"""
Return dictionary repr sorted by keys, leave others unchanged
>>> normalize_repr({1:2,3:4,5:6,7:8})
'{1: 2, 3: 4, 5: 6, 7: 8}'
>>> normalize_repr('foo')
"'foo'"
"""
if isinstance(v, dict):
items = [(repr(k), repr(v)) for k, v in list(v.items())]
items.sort()
return "{" + ", ".join([
"%s: %s" % itm for itm in items]) + "}"
return repr(v) | aa6f5576ab5478013a850bc69e808a9a5f6958e1 | 25,517 |
def _HasReservation(topic):
"""Returns whether the topic has a reservation set."""
if topic.reservationConfig is None:
return False
return bool(topic.reservationConfig.throughputReservation) | ba10b5a0b2899a66a708d4fe746300b77e31235b | 25,524 |
def build_srcdict(gta, prop):
"""Build a dictionary that maps from source name to the value of a source property
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
prop : str
The name of the property we are mapping
Returns
-------
odict : dict
Dictionary that maps from source name to the value of the specified property
"""
o = {}
for s in gta.roi.sources:
o[s.name] = s[prop]
return o | daa069bde5642f947f212d50173db5b99be2eed4 | 25,527 |
import re
def cqlstr(string):
"""Makes a string safe to use in Cassandra CQL commands
Args:
string: The string to use in CQL
Returns:
str: A safe string replacement
"""
return re.sub('[-:.]', '_', string) | e505d855e374109edee0a1d5e76ff0cdeab64581 | 25,529 |
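For instance, the substitution above turns dotted or hyphenated identifiers into underscore-safe names:

print(cqlstr('my-keyspace.events:v1'))  # -> 'my_keyspace_events_v1'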
def _qualname(obj):
"""Get the fully-qualified name of an object (including module)."""
return obj.__module__ + '.' + obj.__qualname__ | 34c251612104afff79b2b6cd3580a4a939cd01d2 | 25,531 |
def parse_article(article):
"""
Parse article preview on newspage to extract title, link and time
Parameters
----------
article : bs4.tag
article tag to be parsed
Returns
-------
title, link, time : Tuple
a tuple containing title, link and time of the article
"""
title = article.a.text
link = article.a.get("href")
time = " ".join([x.text for x in article.find_all("span")])
return title, link, time | cb00db924478e6e916caf5f6420ff7f742246737 | 25,536 |
def join_provenances(provenance1, provenance2):
"""
Given two provenances (lists of id strings) join them together
"""
# Use a dict to join them
joined = dict(
(p, True) for p in provenance1
)
joined.update(
(p, True) for p in provenance2
)
return list(joined.keys()) | 3736c809f0cb76e5c31b8082f8bb9c3b9f594857 | 25,540 |
def get_workspace() -> str:
"""
get_workspace returns the TorchX notebook workspace fsspec path.
"""
return "memory://torchx-workspace/" | 396b3f3444357d8beb2bad0e1462bee9e663cd8d | 25,543 |
def impurity_decrease(y, membership, membership_true, membership_false, criterion):
"""
A general function that calculates decrease in impurity.
Parameters
----------
y : array-like of shape (n_samples,)
An array of labels.
membership : array-like of shape (n_samples,)
The old membership of each label.
membership_true : array-like of shape (n_samples,)
The new membership of each label.
membership_false : array-like of shape (n_samples,)
The complement of new membership of each label.
criterion: callable
The impurity function
Returns
-------
float : decrease of impurity measured by given criterion
"""
information_gain_ = criterion(y, membership) \
- (membership_true.sum() / membership.sum()) * criterion(y, membership_true) \
- (membership_false.sum() / membership.sum()) * criterion(y, membership_false)
return information_gain_ | 3f01757bbd32b7c711ba0ed11e0824620f71b055 | 25,545 |
def down_sample(x, sample_rate, k=2):
    """ Performs down sampling on the audio signal. It takes
    every kth sample of the signal and returns the resulting
    audio signal and the resulting sample rate.
    :param x: the audio signal of shape C x N, where C
    is the number of channels, and N is the number of
    samples
    :param sample_rate: the sample rate of the signal
    :param k: the number of every k samples to return
    :return: a tuple of sample rate and the audio signal
    down-sampled to include every kth sample. """
    if len(x.shape) < 2:
        return sample_rate / k, x[::k]
    return sample_rate / k, x[:, ::k] | c688acabd77289f074a0a95eb1a315edb58568d0 | 25,551 |
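A short sketch of calling the function above with NumPy arrays; the signal values are made up.

import numpy as np

mono = np.arange(8)
print(down_sample(mono, 16000, k=2))    # (8000.0, array([0, 2, 4, 6]))

stereo = np.arange(8).reshape(2, 4)     # 2 channels x 4 samples
print(down_sample(stereo, 16000, k=2))  # (8000.0, array([[0, 2], [4, 6]]))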
def _check_for_setattr(instance):
"""
Check if there's any setattr method inside an instance. If so, return True.
"""
module = instance.get_parent_until()
try:
stmts = module.used_names['setattr']
except KeyError:
return False
return any(instance.start_pos < stmt.start_pos < instance.end_pos
for stmt in stmts) | 9675a9236ff3de158e0f0149981bcd63f9beedd8 | 25,552 |
def blob_exists(storage_client, bucket_name, filename):
"""Checks if a file exists in the bucket."""
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(filename)
    return blob.exists() | 4f5fa78328401930ce6399a5cea6cdcecc10a173 | 25,553 |
def get_int_from_prompt(msg, default):
"""
Return integer from prompt input
Args:
:msg: (str) Message to print
:default: (int) Default value
Returns:
:value: (int) Integer from prompt
"""
while True:
value = input(msg)
if not value:
return default
else:
try:
value = int(value)
return value
except ValueError:
print("Invalid input, try again...") | ea5c9988a25e646e81e966a10d07dffa6cd93eb2 | 25,555 |
import torch
def a2c_policy_loss(logps: torch.Tensor, advs: torch.Tensor) -> torch.Tensor:
"""
Loss function for an A2C policy. $-(logp(\pi(a|s)) * A_t)$
Args:
- logps (torch.Tensor): Log-probabilities of selected actions.
- advs (torch.Tensor): Advantage estimates of selected actions.
Returns:
- a2c_loss (torch.Tensor): A2C loss term.
"""
a2c_loss = -(logps * advs).mean()
return a2c_loss | 294ae812a3f1d0363fb0ac4f292113e9db521c51 | 25,557 |
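A toy call with made-up log-probabilities and advantages, just to show the shapes involved:

logps = torch.tensor([-0.5, -1.2, -0.8])
advs = torch.tensor([1.0, -0.5, 2.0])
print(a2c_policy_loss(logps, advs))  # tensor(0.5000)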
import random
def augment_volume(wav, rate_lower=0.7, rate_upper=1.3):
"""
Increase or decrease a waveform's volume by a randomly selected rate.
:param wav: a waveform.
:param rate_lower: lower bound of rate
:param rate_upper: upper bound of rate
:return:
"""
return wav * random.uniform(rate_lower, rate_upper) | 5b0934a20423d744a8a5d63ad46846c0e14444d9 | 25,558 |
def do_something(x):
"""
Do something so we have something to test.
>>> do_something(3)
16
>>> do_something(7)
24
"""
return (x+5)*2 | d3185823bb098929f1330e7f477beff04dc2eced | 25,559 |
import imp
def LoadExtraSrc(path_to_file):
"""Attempts to load an extra source file, and overrides global values.
If the extra source file is loaded successfully, then it will use the new
module to override some global values, such as gclient spec data.
Args:
path_to_file: File path.
Returns:
The loaded module object, or None if none was imported.
"""
try:
global GCLIENT_SPEC_DATA
global GCLIENT_SPEC_ANDROID
extra_src = imp.load_source('data', path_to_file)
GCLIENT_SPEC_DATA = extra_src.GetGClientSpec()
GCLIENT_SPEC_ANDROID = extra_src.GetGClientSpecExtraParams()
return extra_src
except ImportError:
return None | 32ec066cd79d928528385aae3cc88272f4ccec32 | 25,563 |
def get_top_k_results(sorted_data):
"""
Compute the top K precisions.
Args:
sorted_data: A numpy array of sorted data.
Returns:
A list of top K precisions.
"""
results = []
for k in [10, 20, 50, 100, 200]:
results.append("P@" + str(k) + ": " + str(sorted_data[:k][:, -1].sum()))
return results | 700b8fff9ded8b46bb45e3054714a7422b03bba6 | 25,567 |
def _ylab(to_plot):
"""Returns the y-label for the plot given the type of plot.
Parameters
----------
to_plot : string
Type of thing to plot. Can be 'pmf', 'cdf', 'fid', or 'wern'.
Returns
-------
string
The y-label for the plot.
"""
labels_dict = {
'pmf' : "$\\Pr(T_n = t)$",
'cdf' : "$\\Pr(T_n \\leq t)$",
'fid' : "$F_n(t)$",
'wern' : "$W_n(t)$"
}
return labels_dict[to_plot] | 58b7217269bbf2f75cd0c378896ead0cb3bcc1be | 25,574 |
import yaml
def _ordereddict_representer(dumper, data):
"""
Generate a YAML representation for Python
``collections.OrderedDict`` objects. This converts the ordered
dictionary into a YAML mapping node, preserving the ordering of
the dictionary.
:param dumper: A YAML dumper.
:type dumper: ``yaml.Dumper``
:param data: The data to represent.
:type data: ``collections.OrderedDictionary``
:returns: A mapping node, with keys in the specified order.
:rtype: ``yaml.MappingNode``
"""
return yaml.MappingNode(
u'tag:yaml.org,2002:map',
[
(dumper.represent_data(key), dumper.represent_data(value))
for key, value in data.items()
]
) | 32c77b72e9610bb8688e7690997b181a90003461 | 25,575 |
def get_in_shape(in_data):
"""Get shapes of input datas.
Parameters
----------
in_data: Tensor
input datas.
Returns
-------
list of shape
The shapes of input datas.
"""
return [d.shape for d in in_data] | ae54409d425189c33fe9fe1bdb0487cc854f9510 | 25,579 |
def _normalize(options):
"""Return correct kwargs for setup() from provided options-dict.
"""
retval = {
key.replace("-", "_"): value for key, value in options.items()
}
# Classifiers
value = retval.pop("classifiers", None)
if value and isinstance(value, str):
classifiers = value.splitlines()
while "" in classifiers:
classifiers.remove("")
retval["classifiers"] = classifiers
# Long description from file
description_file = retval.pop("long_description_file", None)
if description_file:
try:
with open(description_file) as fdesc:
retval["long_description"] = fdesc.read()
except IOError:
retval["long_description"] = "Read the accompanying {}".format(
description_file
)
return retval | 174bf546559d38606e2e55270398c28b52218f7d | 25,580 |
def sanitize_for_path(value, replace=' '):
    """Replace potentially illegal characters from a path."""
    # Map illegal path characters to visually similar full-width
    # homoglyphs (a reconstruction; the original mapping was garbled).
    il_text = '<>"?\\/*:'
    hm_text = '＜＞˝？＼／＊：'
    for i, j in zip(il_text, hm_text):
        value = value.replace(i, j)
    return value | a885cf2b801ab182033a2a436ea2069ab03e754d | 25,584 |
import time
def timeit(function):
"""
Decorator to measure time take to execute a function
"""
    def wrapper(*args, **kwargs):
"""
Wrapper definition for the function
"""
start_time = time.process_time()
        output = function(*args, **kwargs)
print(
"Module: {} took: {}s".format(
function.__name__,
time.process_time() - start_time))
return output
return wrapper | c099166f12536bd253fcaed9a14a6f14f0825661 | 25,596 |
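Typical decorator usage; `slow_add` is an invented function for illustration.

@timeit
def slow_add(a, b):
    return sum(range(a)) + b

print(slow_add(10**6, 5))  # prints the timing line, then the result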
def CMDdisconnected(parser, args):
"""Lists disconnected slaves."""
_, args, buildbot = parser.parse_args(args)
if args:
parser.error('Unrecognized parameters: %s' % ' '.join(args))
for slave in buildbot.slaves:
if not slave.connected:
print(slave.name)
return 0 | 9466d40e2cfb59c7a916978f3f296fb6b6734bda | 25,598 |
def normalize_data(data, maxv, minv):
"""
Normalizes the data given the maximum and minimum values of each variable.
Parameters
----------
data: numpy n x m
Numpy array comprising n realisations (instances) of m variables.
maxv, minv: numpy m
The maximum and minimum values of each variable. One and zero, resp.
if the variable is categorical.
Returns
-------
data: numpy n x m
The normalized data.
"""
data = data.copy()
for v in range(data.shape[1]):
if maxv[v] != minv[v]:
data[:, v] = (data[:, v] - minv[v])/(maxv[v] - minv[v])
return data | 41873142cd9ba0d25d12c2e89783d8097b2fd9e0 | 25,600 |
from typing import List
import random
def generate_random_sequences(length: int, number: int, alphabet: str) -> List[str]:
"""Generate random sequences of particular length."""
return [
"".join([random.choice(alphabet) for _ in range(length)]) for _ in range(number)
] | eec4be8e90441e7476f0eed8162759cf8aeea843 | 25,603 |
def verify_workflow(workflow):
"""Verifies if the workflow can be executed, and specifies where the
error occurs and why.
"""
result = workflow.verify()
return result | d2158949f9088692aed85fc52725bd9938f1130b | 25,615 |
def oui_ou_non(question):
    """Ask a question until the player answers O for yes (oui) or N for no (non).
    The function returns True if the answer was yes."""
    # An infinite loop that we only leave via `return ...'
    # once we have an acceptable answer:
    while True:
        reponse = input(question).upper()
        if reponse in ("O", "N"):
            return reponse == 'O' | 30d88d363062d2b6454dda2f526b94bc813f297b | 25,619 |
def _is_namespace_param(namespace: str) -> bool:
"""Returns whether a dataset namespace is a parameter"""
return namespace.lower().startswith("param") | c6e4a847d0d5d60bd670cd2a1004f83d4b89324d | 25,620 |
def common_filenames(file_list1, file_list2):
"""
Find elements common in 2 lists
:param file_list1: a list to compare
:param file_list2: a list to compare
    :return: set of common items
"""
return set(file_list1).intersection(file_list2) | 29ecd33dd09a33ec42bcad796a96e184377273ef | 25,622 |
def insertion_sort_recursive(integers):
"""Performs insertion sort recursively."""
integers_clone = list(integers)
def helper(arr, n):
if n > 0:
helper(arr, n-1)
            while n > 0 and arr[n] < arr[n-1]:
arr[n], arr[n-1] = arr[n-1], arr[n]
n -= 1
helper(integers_clone, len(integers_clone) - 1)
return integers_clone | 7045b07c93a00970d9df10880e4460a0ecc8118b | 25,623 |
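For example:

print(insertion_sort_recursive([5, 2, 4, 1, 3]))  # -> [1, 2, 3, 4, 5]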
import random
def spread(topic):
"""
Return a fictional spread in bps, tight triangular
distribution in most cases, except for Fabrikam where
the spreads are more scattered, higher, and with a longer tail.
"""
if topic.startswith("Fabrikam"):
if " 1Y CDS Spread" in topic:
return random.triangular(140, 280, 180)
elif " 3Y CDS Spread" in topic:
return random.triangular(200, 400, 300)
else:
assert False
else:
if " 1Y CDS Spread" in topic:
return random.triangular(140, 150)
elif " 3Y CDS Spread" in topic:
return random.triangular(150, 160)
else:
assert False | 7c1f559c516396564ac618767f79630f6ce515b8 | 25,637 |
def create_dim(a, dim=''):
"""create dimension array for n-nested array
example:
>>> create_dim([[1,2],[3,4],[5,6,[7,8],]])
[['0-0', '0-1'], ['1-0', '1-1'], ['2-0', '2-1', ['2-2-0', '2-2-1']]]
>>> create_dim(5)
''
>>> create_dim([5,5])
['0', '1']
"""
if isinstance(a, list):
if dim:
prefix = dim + '-'
else:
prefix = ''
return([create_dim(a_, prefix + str(i)) for i, a_ in enumerate(a)])
else:
return(dim) | 2a5fbea0ad0a26c81d90551a1e907a31c6362192 | 25,638 |
import copy
def dict_path(from_d, to_d=None, l=None):
"""
Returns a dictionary with the path in which each of the keys is found
Parameters:
from_d : dict
Dictionary that contains all the keys, values
to_d : dict
Dictionary to which the results will be appended
Example:
dict_path({'level1':{'level2':{'level3':'value'}}})
Returns
{'level1': [],
'level2': ['level1'],
'level3': ['level1', 'level2']
}
"""
    if to_d is None:
        to_d = {}
    if l is None:
        l = []
    for k, v in list(from_d.items()):
if isinstance(v,dict):
to_d[k]=l
_l=copy.deepcopy(l)
_l.append(k)
to_d=dict_path(from_d[k],to_d,_l)
else:
to_d[k]=l
    return to_d | a80363e99deb199111c9e4b3d4bcd9d3c65d4a67 | 25,640 |
def keep_intersection_of_columns(train, test):
""" Remove the columns from test and train set that are not in
both datasets.
params
------
train: pd.DataFrame containing the train set.
test: pd.DataFrame containing the test set.
return
------
train and test where train.columns==test.columns by
keeping only columns that were present in both datasets.
"""
shared_cols = list(set(train.columns).intersection(set(test.columns)))
return train[shared_cols], test[shared_cols] | 750c17d874b7cfad3eb7e48b84323aa25fd251da | 25,643 |
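A small pandas sketch with made-up frames; note both returned frames end up with the same (set-ordered) columns.

import pandas as pd

train = pd.DataFrame({'a': [1], 'b': [2], 'c': [3]})
test = pd.DataFrame({'b': [4], 'c': [5], 'd': [6]})
train, test = keep_intersection_of_columns(train, test)
print(sorted(train.columns), sorted(test.columns))  # ['b', 'c'] ['b', 'c']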
def rst_heading(value, arg):
"""Provides an underline for restructured text heading.
Syntax::
{{ value|rst_heading:"=" }}
Results in:
``value``
``=====``
"""
return ''.join([value, '\n', arg*len(value)]) | 5e6379dccd5c15b24e0688b8f95d181a7e8b1783 | 25,644 |
def summary_candidate_ranking_info(top_ks, found_info, data_size):
"""Get a string for summarizing the candidate ranking results
Parameters
----------
top_ks : list of int
Options for top-k evaluation, e.g. [1, 3, ...].
found_info : dict
Storing the count of correct predictions
data_size : int
Size for the dataset
Returns
-------
string : str
String summarizing the evaluation results
"""
string = '[strict]'
for k in top_ks:
string += ' acc@{:d}: {:.4f}'.format(k, found_info['top_{:d}'.format(k)] / data_size)
string += ' gfound {:.4f}\n'.format(found_info['ground'] / data_size)
string += '[molvs]'
for k in top_ks:
string += ' acc@{:d}: {:.4f}'.format(
k, found_info['top_{:d}_sanitized'.format(k)] / data_size)
string += ' gfound {:.4f}\n'.format(found_info['ground_sanitized'] / data_size)
return string | 877cbeaf6be01a0be8daf54ccdba88338c08343b | 25,651 |
async def eval_issue_1(serialized_citation):
"""
Return 1 if the `npl_publn_id` is in the `when`field, else None
See: https://github.com/cverluise/SciCit/issues/1
:param serialized_citation: dict
:return: int or None
"""
if "when" in serialized_citation.keys():
return (
1
if str(serialized_citation["npl_publn_id"]) in serialized_citation["when"]
else None
) | 2bf50fe83c59a99181dcbbf9a16854d93cd0210f | 25,655 |
def _require_positive_y(y):
"""Make targets strictly positive"""
offset = abs(y.min()) + 1
y = y + offset
return y | fa38ed8cc729e185ce97a6f63abd3a39bebcf6d9 | 25,657 |
def list_to_ranges(s):
"""
>>> list_to_ranges([])
''
>>> list_to_ranges([1])
'1'
>>> list_to_ranges([1,2])
'1-2'
>>> list_to_ranges([1,2,3])
'1-3'
>>> list_to_ranges([1,2,3,5])
'1-3,5'
>>> list_to_ranges([1,2,3,5,6,7])
'1-3,5-7'
>>> list_to_ranges(range(1,4001))
'1-4000'
"""
def f():
if last_start == last_end:
return str(last_start)
else:
return "%d-%d" % (last_start, last_end)
last_start = None
last_end = None
r = []
for i in sorted(s):
if last_end is not None and i == last_end + 1:
last_end += 1
else:
if last_start is not None:
r += [f()]
last_start = i
last_end = i
if last_start is not None:
r += [f()]
return ",".join(r) | 1adccba785970a025e6474631a4b8d4f21dd35ca | 25,659 |
def commit_diff(c):
"""Return the set of changed files.
Args:
c (git.Commit)
Returns:
set[str]: a set of file paths (relative to the git repo's root directory).
"""
changed = set()
def add_path(blob):
if blob is not None:
changed.add(blob.path)
prev_c = c.parents[0]
for x in c.diff(prev_c):
add_path(x.a_blob)
add_path(x.b_blob)
return changed | de6d7a2a1dfbadec2c4237259670118b7538ce81 | 25,661 |
import re
def calc_query_pos_from_cigar(cigar, strand):
"""Uses the CIGAR string to determine the query position of a read
The cigar arg is a string like the following: 86M65S
The strand arg is a boolean, True for forward strand and False for
reverse
Returns pair of ints for query start, end positions
"""
    cigar_ops = [[int(op[0]), op[1]] for op in re.findall(r'(\d+)([A-Za-z])', cigar)]
order_ops = cigar_ops
if not strand: # - strand
order_ops = order_ops[::-1]
qs_pos = 0
qe_pos = 0
q_len = 0
    for op_position in range(len(order_ops)):
        op_len = order_ops[op_position][0]
        op_type = order_ops[op_position][1]
if op_position == 0 and ( op_type == 'H' or op_type == 'S' ):
qs_pos += op_len
qe_pos += op_len
q_len += op_len
elif op_type == 'H' or op_type == 'S':
q_len += op_len
elif op_type == 'M' or op_type == 'I' or op_type == 'X':
qe_pos += op_len
q_len += op_len
return qs_pos, qe_pos | a3a5366b52aefbf628a92155193684f91c60c208 | 25,666 |
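Two quick checks of the behavior above: soft clips consume query bases, and on the reverse strand the operations are walked in reverse.

print(calc_query_pos_from_cigar('86M65S', True))   # (0, 86)
print(calc_query_pos_from_cigar('86M65S', False))  # (65, 151)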
def mesh_error(mesh1, mesh2):
"""Error (intersection over union) of the two meshes"""
intersection = mesh1.intersection(mesh2)
union = mesh1.union(mesh2)
error = intersection.volume/union.volume
return error | f6fb92b950020a7e5f0945179839838f5a613d44 | 25,667 |
from typing import List
def split_filter(string: str, delim: str) -> List[str]:
"""Split the given string with the given delimiter.
If the given string is empty, an empty list is returned.
:param string: String to split.
:param delim: Delimiter character.
"""
if not string:
return []
return string.split(delim) | 743b29818fb13b90e2f9eff9ddd7c778dbc1b3dc | 25,668 |
from typing import Tuple
def index_to_tuple(index: int, size: int) -> Tuple[int, int]:
"""
Returns a tuple to indicate 2d-array for the given index
:param index:
:param size:
:return:
"""
assert 0 <= index < size * size, "Out of bound"
return int(index / size), int(index % size) | d3f5af92671bf5680dd2328f0d0b1f4a53615964 | 25,669 |
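For example, with a 3x3 grid:

print(index_to_tuple(5, 3))  # (1, 2)
print(index_to_tuple(0, 3))  # (0, 0)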
def find_maxProfit(weights: "list[int]", profits: "list[int]", capacity: "int",
greedy_method: "str" = "max") -> float:
"""Fractional Knapsack Problem
Find maximum profit using greedy method
Args:
weights (List[int]): list/array of weights
profits (List[int]): list/array of profits
capacity (int): total capacity of knapsack
greedy_method (str):
"min " - find maximum profit by considering minimum values first
"max" - find maximum profit by considering maximum values first
"optimal" - find maximum profit by considering profit/weight
Returns:
float: Maximum profit
"""
if len(weights) != len(profits):
print("Please provide correct values for profits and weights")
return -1
# make items/objects
items = [{"weight": w, "profit": p} for w, p in zip(weights, profits)]
# sort the items
if greedy_method == "min":
        items = sorted(items, key=lambda x: x['weight'])
elif greedy_method == "max":
items = sorted(items, key=lambda x: x['weight'], reverse=True)
elif greedy_method == "optimal":
items = sorted(
items, key=lambda x: x['profit'] / x['weight'], reverse=True)
else:
raise Exception("please provide correct value for 'greedy_method' ")
cur_weight, total_profit = 0, 0
for i in range(len(weights)):
if (cur_weight + items[i]['weight']) <= capacity:
cur_weight += items[i]['weight']
total_profit += items[i]['profit']
else:
remains = capacity - cur_weight
total_profit += items[i]['profit'] * (remains / items[i]['weight'])
break
return total_profit | 75d12d63877bd792704fab00caf6690ff0e13f31 | 25,671 |
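The classic fractional-knapsack instance, using the profit/weight ordering:

weights = [10, 20, 30]
profits = [60, 100, 120]
print(find_maxProfit(weights, profits, 50, 'optimal'))  # 240.0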
def replace_at(word, line, index):
""" Replace the text in-line.
The text in line is replaced (not inserted) with the word. The
replacement starts at the provided index. The result is cliped to
the input length
Arguments
---------
word : str
The text to copy into the line.
line : str
The line where the copy takes place.
index : int
The index to start coping.
Returns
-------
result : str
line of text with the text replaced.
"""
word_length = len(word)
result = line[:index] + word + line[(index + word_length):]
return result[:len(line)] | 1587e97e4886d75d509ec6558aedd66759028b06 | 25,673 |
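For example:

print(replace_at('cat', 'the dog sat', 4))  # -> 'the cat sat'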
def _normalize_string(text):
"""Trims the text, removes all the spaces from text and replaces every sequence of lines with a single line.
Args:
text (str): The schema definition.
Returns:
str: The normalized text.
"""
text = text.strip()
result = ""
whitespaces = [" ", "\t"]
in_comment = False
in_string = False
string_char = None
for c in text:
if c == "#":
in_comment = True
if c == "\n":
in_comment = False
if c == '"' or c == "'":
if in_string and c == string_char:
in_string = False
elif not in_string and not in_comment:
in_string = True
string_char = c
if not in_comment and not in_string and c in whitespaces:
continue
if c == "\n" and result.endswith("\n"):
continue
result += c
return result | 0bc387e7563a4c961a9fe189547c5146337899bc | 25,680 |
def normalize_rgb(rgb):
"""
Normalize rgb to 0~1 range
:param rgb: the rgb values to be normalized, could be a tuple or list of tuples
:return:
"""
if isinstance(rgb, tuple):
return tuple([float(a)/255 for a in rgb])
elif isinstance(rgb, list):
norm_rgb = []
for item in rgb:
norm_rgb.append(normalize_rgb(item))
return norm_rgb
else:
raise NotImplementedError('Data type: {} not understood'.format(type(rgb))) | b2eecc75cdae5d26714768a26d888bad18adb548 | 25,685 |
def justReturn(inval):
"""
Really, just return the input.
Parameters
----------
input : anything
Returns
-------
input : anything
Just return whatever you sent in.
"""
return inval | 9e7ae43cf4aa2456e67cbe12f08e01a6fbc682a9 | 25,687 |
def find_dialogue_event(dialogue_event_name, dialogue_event_defs):
""" Find a dialogue event by name in a list of dialogue event definitions.
:param dialogue_event_name: the name of the dialogue event to look for.
:param dialogue_event_defs: a list of dialogue event definitions.
:return: the dialogue_event_def with matching name, or None.
"""
for dialogue_event_def in dialogue_event_defs:
if dialogue_event_def.name == dialogue_event_name:
return dialogue_event_def
return None | 114ec4a2ee426d789ebcf76eb46756194e108d19 | 25,688 |
def split_by_pred(pred, iterable, constructor=list):
"""Sort elements of `iterable` into two lists based on predicate `pred`.
Returns a tuple (l1, l2), where
* l1: list of elements in `iterable` for which pred(elem) == True
* l2: list of elements in `iterable` for which pred(elem) == False
"""
pred_true = constructor()
pred_false = constructor()
for item in iterable:
if pred(item):
pred_true.append(item)
else:
pred_false.append(item)
return pred_true, pred_false | dc0c43cd5d34869566f283d92038aaa4a53d5c62 | 25,689 |
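For instance, partitioning a list by parity:

evens, odds = split_by_pred(lambda n: n % 2 == 0, [1, 2, 3, 4, 5])
print(evens, odds)  # [2, 4] [1, 3, 5]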
from typing import Dict
def _map_dtypes(sql_dtypes: Dict[str, str]) -> Dict[str, type]:
"""
Create mapping from SQL data types to Python data types.
:param sql_dtypes: A mapping from the column names in a SQL table
to their respective SQL data types.
Example: {"ct_id": int(10) unsigned NOT NULL AUTO_INCREMENT}
:type sql_dtypes: Dict[str, str]
:return: A mapping from the column names in a SQL table
to their respective Python data types. Example: {"ct_id": int}
:rtype: Dict[str, type]
"""
types: Dict[str, type] = {}
for key, val in sql_dtypes.items():
if "int" in val:
types[key] = int
elif any(dtype in val for dtype in ("float", "double", "decimal", "numeric")):
types[key] = float
else:
types[key] = str
return types | 70360c323f9193f49de40acf37ddaa58010467f8 | 25,692 |
from typing import Sequence
from typing import Any
from typing import Counter
def sequence_equals(a: Sequence[Any], b: Sequence[Any]) -> bool: # pylint: disable=invalid-name
"""
Returns a value indicating whether two sequences contain the same elements.
More specifically, returns ``True`` if the two sequences are of the same size, contain the same distinct elements,
and each element has equal appearance frequency on both collections.
This method runs in time proportional to the size of the arguments and uses extra space that is also proportional
to the size of the arguments.
:param Sequence[Any] a: one collection
:param Sequence[Any] b: the other collection
:return: ``True`` if ``a`` and ``b`` contain the same elements, otherwise ``False``
:rtype: bool
"""
if len(a) != len(b):
return False
if a is b:
return True
counter_a: Counter = Counter(a)
counter_b: Counter = Counter(b)
return counter_a == counter_b | bb4bc84e7be1491f49f7e3212e34d755298178ea | 25,694 |
from bs4 import BeautifulSoup
def process_sitemap(s):
"""
:param s: Sitemap content in xml format
:return: A list of URLs of all the web sites listed in site map
"""
soup = BeautifulSoup(s, features='html.parser')
result = []
for loc in soup.findAll('loc'):
result.append(loc.text)
return result | a5bc184794284cba83705f418833093f1f7f7976 | 25,698 |
import json
def read_class_names(class_names_path: str):
"""Reads class names from text file.
Supports .txt and .json.
Args:
class_names_path: `str`, path to json/txt file containing classes.
Text file should contain one class name per line.
Json file should contain only one dictionary, `Mapping[int, str]`
"""
names = {}
if class_names_path.endswith('.txt'):
with open(class_names_path, 'r') as data:
for idx, name in enumerate(data):
names[idx] = name.strip('\n')
elif class_names_path.endswith('.json'):
with open(class_names_path) as f:
names = json.load(f)
if type(list(names.keys())[0]) == str and type(list(names.values())[0]) == int:
names = dict((v,k) for k,v in names.items())
else:
raise NotImplementedError('File type is not .txt or .json, path %s' %class_names_path)
if type(list(names.keys())[0]) != int:
raise ValueError('Loaded dict %s has wrong key type %s' %(
class_names_path, type(list(names.keys())[0])))
if type(list(names.values())[0]) != str:
raise ValueError('Loaded dict %s has wrong value type %s' %(
class_names_path, type(list(names.values())[0])))
return names | 8808507206c491a297a1ad04d4a30f58b3977ca9 | 25,703 |
import math
def factorial(x):
"""Returns the factorial of x"""
return math.factorial(x) | ad881ed82c3bc40fc726b15597563774286ba681 | 25,705 |
def pd_rolling_mean(series, window_size):
"""
Compute rolling mean on a Series
with the given window_size
and return only non-None rows,
i.e. starning from row number window_size - 1
and until the end.
"""
rolling_mean = series.rolling(window_size).mean()
return rolling_mean[window_size - 1:] | 1f2b03d29b61c1f2d1bf1b98dfa92a9e852667a2 | 25,706 |
import itertools
def flatten(iterable):
"""Flatten the input iterable.
>>> list(flatten([[0, 1], [2, 3]]))
[0, 1, 2, 3]
>>> list(flatten([[0, 1], [2, 3, 4, 5]]))
[0, 1, 2, 3, 4, 5]
"""
return itertools.chain.from_iterable(iterable) | 6860f65582952819ae56178cf97cd2eb2133bbf1 | 25,710 |
def build_url(url, store_name, key):
"""This method combine the different parts of the urls to build the url to
acces the REST-API.
Parameters
----------
url : str
the base url
store_name : str
the name of the voldemort store
key : str
the url part which represents the key or keys
Returns
-------
str
the combined url of the REST-API
"""
return "%s/%s/%s" % (url, store_name, key) | 6cf0c8e86721b139907dda491d92e413a913ae38 | 25,711 |
def interpret_numbers(user_range):
"""
:param user_range: A string specifying a range of numbers. Eg.
interpret_numbers('4-6')==[4,5,6]
interpret_numbers('4,6')==[4,6]
interpret_numbers('4,6-9')==[4,6,7,8,9]
    :return: A list of integers, or None if the input is not numeric
"""
if all(d in '0123456789-,' for d in user_range):
numbers_and_ranges = user_range.split(',')
        numbers = []
        for part in numbers_and_ranges:
            if '-' not in part:
                numbers.append(int(part))
            else:
                start, end = part.split('-', 1)
                numbers.extend(range(int(start), int(end) + 1))
        return numbers
else:
return None | dc3a156bdb392e8a54edf95fc4182dfd5965010a | 25,716 |
from datetime import datetime
def same_date(date1: datetime, date2: datetime) -> bool:
"""Check whether two given datetime object are in the same date"""
return date1.day == date2.day and date1.month == date2.month and date1.year == date2.year | 27abc8d14dfdc7e02f004696ceed028aef27aa0e | 25,720 |
def get_targ_pairs(start_cond, end_cond):
"""Return boolean mask for paired target conditions.
This function returns a mask indicating which start/end target conditions
in the provided lists come from a paired set. For example, the entry for a
a target condition 'T1 -> T5' will have a corresponding value of True if
'T5 -> T1' also exists in the data.
Inputs:
start_cond - List of strings indicating starting conditions
end_cond - List of strings indicating ending conditions
Returns:
mask - List of boolean indicating whether each of the input elements is
part of a pair
"""
# Define target pair and flipped target pair conditions
cond = [''.join([s, e]) for s, e in zip(start_cond, end_cond)]
    flip_cond = [''.join([e, s]) for s, e in zip(start_cond, end_cond)]
# If the flipped version of a target condition appears in the set of unique
# target conditions, then the target has a matching condition
uni_cond = set(cond)
    mask = [c in uni_cond for c in flip_cond]
return mask | aa353dc9f5c17f210e1f82669d7fef24a43aa035 | 25,739 |
def fls(val: int, v6: bool) -> int:
"""Find last set - returns the index, counting from 0 (from the right) of the
most significant set bit in `val`."""
# if b is zero, there is no first set bit
if val == 0:
return 0
# gradually set all bits right of MSB
# this technique is called 'bit smearing'
    # if ipv6 (128-bit values), the largest smear shift is 2**7 = 128,
    # otherwise (32-bit IPv4) it's 2**5 = 32
    max_power_of_2 = 7 if v6 else 5
n = val | val >> 1
for i in range(1, max_power_of_2+1):
n |= n >> 2**i
# increment diff by one so that there's only
# one set bit which is just before original MSB
n += 1
# shift it so it's in the original position
n >>= 1
# figure out the ordinal of the bit from LSB
pos = 0
while (n & 1) == 0:
n >>= 1
pos += 1
return pos | 913127b72e3cab96423d5a7fcee8b5f5f1fb3f19 | 25,741 |
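Two sanity checks (bit indices count from 0, from the right):

print(fls(0b1010, v6=False))  # 3
print(fls(1, v6=False))       # 0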
def _canonize_validator(current_validator):
"""
Convert current_validator to a new list and return it.
If current_validator is None return an empty list.
If current_validator is a list, return a copy of it.
If current_validator is another type of iterable, return a list version of it.
If current_validator is a single value, return a one-list containing it.
"""
if not current_validator:
return []
if isinstance(current_validator, (list, tuple)):
current_validator = list(current_validator)
else:
current_validator = [current_validator]
return current_validator | 32c3df654e048c3551a1e665ff773b57e59524d6 | 25,744 |
def ra_as_hours(ra_degrees, seconds_decimal_places=2):
""" Takes Right Ascension degrees as float, returns RA string. TESTS OK 2020-10-24.
:param ra_degrees: Right Ascension in degrees, limited to 0 through 360. [float]
:param seconds_decimal_places: number of places at end of RA string (no period if zero). [int]
:return: RA in hours/hex format. [string, or None if outside RA range]
"""
if (ra_degrees < 0) | (ra_degrees > 360):
return None
seconds_decimal_places = int(max(0, seconds_decimal_places)) # ensure int and non-negative.
total_ra_seconds = ra_degrees * (3600 / 15)
int_hours = int(total_ra_seconds // 3600)
remaining_seconds = total_ra_seconds - 3600 * int_hours
int_minutes = int(remaining_seconds // 60)
remaining_seconds -= 60 * int_minutes
if seconds_decimal_places > 0:
seconds, fract_seconds = divmod(remaining_seconds, 1)
int_fract_seconds = int(round(fract_seconds * 10 ** seconds_decimal_places))
else:
seconds, fract_seconds, int_fract_seconds = round(remaining_seconds), 0, 0
int_seconds = int(seconds)
if seconds_decimal_places > 0:
if int_fract_seconds >= 10 ** seconds_decimal_places:
int_fract_seconds -= 10 ** seconds_decimal_places
int_seconds += 1
if int_seconds >= 60:
int_seconds -= 60
int_minutes += 1
if int_minutes >= 60:
int_minutes -= 60
int_hours += 1
if int_hours >= 24:
int_hours -= 24
if seconds_decimal_places > 0:
format_string = '{0:02d}:{1:02d}:{2:02d}.{3:0' + str(int(seconds_decimal_places)) + 'd}'
else:
format_string = '{0:02d}:{1:02d}:{2:02d}'
ra_string = format_string.format(int_hours, int_minutes, int_seconds, int_fract_seconds)
return ra_string | e23cce78633cdb4d182babe095cf13b351ddf68f | 25,747 |
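For example:

print(ra_as_hours(180.0))                            # '12:00:00.00'
print(ra_as_hours(0.123, seconds_decimal_places=0))  # '00:00:30'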
def add(x: int, y: int):
"""A function to add stuff.
:param x: A number, x
:param y: A number, y
:return: A number, x + y
"""
return x + y | 3358800af03e094463b22296b393f6e935bf154c | 25,752 |
def to_title(text):
    """returns a string formatted as a title"""
    return text.replace("_", " ").capitalize() | 13336936174445f61d5209ce1fabd19d7ae66fa2 | 25,753 |
def _read_count_normalize(X):
"""Read depth normalization by sample. Assumes samples are columns and guides are rows."""
return (X / X.sum(axis=0)) * 1e6 | 799b5b7b4c207a6e89da68775c8f708acd1feb7f | 25,754 |
def get_key_value_from_tokens(tokens):
""" Converts a list of tokens into a single key/value pair.
:param tokens: The tokens, as strings.
:type tokens: [str]
:returns: (key, value)
:rtype: (string, string)
"""
key_tokens = []
value_tokens = []
found_equals_sign = False
for token in tokens:
# Mark and skip the equals sign.
if token == "=":
found_equals_sign = True
continue
if not found_equals_sign:
key_tokens.append(token)
else:
value_tokens.append(token)
# Combine the tokens into a string
if len(key_tokens) == 0:
key = None
else:
key = "".join(key_tokens)
if len(value_tokens) == 0:
value = None
else:
value = "".join(value_tokens)
return (key, value) | 3af0003992a07fb8daf70c17cbaa3a414a59a9e0 | 25,763 |
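For example:

print(get_key_value_from_tokens(['max', '_retries', '=', '3']))  # ('max_retries', '3')
print(get_key_value_from_tokens(['flag', '=']))                  # ('flag', None)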
import requests
import json
def number_of_subscribers(subreddit):
"""api call to reddit to get the number of subscribers
"""
base_url = 'https://www.reddit.com/r/'
headers = {
'User-Agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) \
Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)'
}
# grab info about all users
url = base_url + '{}/about.json'.format(subreddit)
response = requests.get(url, headers=headers)
resp = json.loads(response.text)
try:
# grab the info about the users' tasks
data = resp.get('data')
subscribers = data.get('subscribers')
    except Exception:
return 0
if subscribers is None:
return 0
return int(subscribers) | 05964f0f8a8b3c1901b5ad7667fbbeb19f25ff78 | 25,766 |
def parse_squad(dataset):
"""
Parses SQUAD database into more readable format. In this case I only care
about question/answers pairs in order to make a seq2seq model that would
generate questions out of a paragraph.
Inputs:
dataset: squad dataset in json format
Returns:
squad_json: parsed squad dataset in json format
"""
total_topics = 0
total_questions = 0
squad_json = []
# Iterate through every topic in the dataset
for topic in dataset:
total_topics += 1
# Iterate through every text passage in the topic
for passage in topic['paragraphs']:
# Iterate through every question/answer pairs in the passage
for qas in passage['qas']:
total_questions += 1
text_question_pair = {}
# Append the title
text_question_pair['topic'] = topic['title']
# Append the text paragraph
text_question_pair['paragraph'] = passage['context']
# Append the question
text_question_pair['question'] = qas['question']
# Iterate through available answers
answers = []
for answer in qas['answers']:
answers.append(answer['text'])
# And append them all together
text_question_pair['answers'] = answers
# Append found dictionary to the full parsed dataset array
squad_json.append(text_question_pair)
print('Found ' + str(total_topics) + ' topics in total.')
print('Found ' + str(total_questions) + ' questions in total.')
return squad_json | 15971b1bd8dd241af5e458fafe363b8859303e4f | 25,767 |
def nbi_growth(nbi, nvi, nb, nb0, nv, C, f, R, g, c0, alpha, e, pv, eta):
"""
Single clone growth rate for bacteria.
Inputs:
nbi : 1D vector in time of bacteria clone sizes
nvi : 1D vector in time of corresponding phage clone sizes
nb : 1D vector in time of total bacteria population size
nb0 : 1D vector in time of number of bacteria without spacers
nv : 1D vector in time of total phage population size
C : 1D vector in time of total nutrients
    f, g, c0, R, alpha, e, pv, eta : simulation parameters
    Output:
    s : 1D vector of bacterial clone growth rate per bacterial generation (not multiplied by population size)
"""
F = f*g*c0
r = R*g*c0
s = g*C - F - r - alpha*pv*(nv - e*nvi) + alpha*eta*nb0*nvi*(1-pv)/nbi
return s/(g*c0) | b647bdbdda973407854e59ab98b9d290d00a2961 | 25,773 |
import json
def load_ee_params(filepath):
"""
Loads parameters for a posteriori error estimation for the PINN from the json file provided via param filepath.
returns
- K: as used for trapezoidal rule
- mu: smoothing parameter for delta function
- L_f: Lipschitz constant or spectral abscissa
- delta_mean: average deviation of approximated ODE/PDE from target ODE/PDE
"""
with open(filepath, "r") as jsonfile:
data = json.load(jsonfile)
jsonfile.close()
return float(data['K']), float(data['mu']), float(data['L_f']), float(data['delta_mean']) | 9c226d39e1c14d66ff1da1ea4afd99a5a9498e7a | 25,777 |
import math
def euclidean_distance(xyz1, xyz2):
"""
Simple function for calculating euclidean distance between two points.
"""
dist = [(a - b)**2 for a,b in zip(xyz1, xyz2)]
return math.sqrt(sum(dist)) | eb14ec300a4e4eab65a725b8d6b013f33ca09ae5 | 25,778 |
import fnmatch
import click
def _match_pattern(what, items, pattern):
"""
Match given pattern against given items.
Parameters
----------
what: str
Describes what is filterd.
items: Iterable[str]
Items to be filtered
    pattern: str
        Comma separated patterns to match. Can contain glob patterns.
"""
result = set()
for part in pattern.split(","):
found = set(fnmatch.filter(items, part.strip()))
if not found:
raise click.UsageError(
"Could not find {what} {part}".format(what=what, part=part)
)
result |= found
return result | 9688e8677d2206876d93e36abe17a336ea2be92a | 25,788 |
def get_shipping_voucher_discount(voucher, total_price, shipping_price):
"""Calculate discount value for a voucher of shipping type."""
voucher.validate_min_amount_spent(total_price)
return voucher.get_discount_amount_for(shipping_price) | be3538f219d219d0ec7c4d343d0dee5a62573bfc | 25,792 |
def _define_tabledict_keys(header, fields, separator):
"""
Define the keys for the tabledict dictionary.
Note: this function is only used by parse_table_from_file().
:param header: header string.
:param fields: header content string.
:param separator: separator character (char).
:return: tabledict (dictionary), keylist (ordered list with dictionary key names).
"""
tabledict = {}
keylist = []
if not header:
# get the dictionary keys from the header of the file
for key in fields:
# first line defines the header, whose elements will be used as dictionary keys
if key == '':
continue
if key.endswith('\n'):
key = key[:-1]
tabledict[key] = []
keylist.append(key)
else:
# get the dictionary keys from the provided header
keys = header.split(separator)
for key in keys:
if key == '':
continue
if key.endswith('\n'):
key = key[:-1]
tabledict[key] = []
keylist.append(key)
return tabledict, keylist | 6c41aa138597ca5b0915df0409381ea3caa17d94 | 25,793 |
def pv_efficiency(eta_PVref, beta, NOCT, NOCT_ref, NOCT_sol, T_amb, I):
"""
Calculates time resolved PV efficiency [-]
:param eta_PVref: Reference PV efficiency under NOCT [-]
:param beta: Temperature coefficient [-]
:param NOCT: Nominal operating cell temperature [deg C]
:param NOCT_ref: Reference temperature [deg C]
:param NOCT_sol: Reference irradiance [W/m2]
:param T_amb: Ambient temperature [deg C]
:param I: Irradiance on panel [W/m2]. 8760 time series
:return: Time resolved PV efficiency [-], 8760 entries
"""
horizon = len(T_amb)
etapv = [0.0] * horizon
for i in range(0, horizon):
Tpv = T_amb[i] + ((NOCT - NOCT_ref) / NOCT_sol) * I[i]
etapv[i] = eta_PVref * (1 - beta * (Tpv - 25))
return etapv | 7b9c34811c0a17d734f1b707789fa188ab15014b | 25,798 |
def ip_exists(ip_address, client):
"""Determines whether an IP address already exists in database
Args:
ip_address
client: pymongo client used to connect to mongodb database
Return:
exists: boolean indicating whether an IP address exists in database
"""
user_collection = client.db.user
exists = bool(user_collection.find_one({'ipAddress': ip_address}))
return exists | 91ef98dbda81cb7272ec5641e80d5176cb0c3af6 | 25,805 |
def _get_subgroup(file: str) -> str:
"""Function that will pull the subgroup substring out of the filepath
Parameters
file : str
filepath to the ERSA file
Returns
returns a substring that has the subgroup. Ex "sub1"
"""
return file[-4:] | a7e8a396b27a98223c2fd3595a3922bf1bc73662 | 25,807 |
def getfield(f):
"""convert values from cgi.Field objects to plain values."""
if isinstance(f, list):
return [getfield(x) for x in f]
else:
return f.value | d49f997213b4ca0825d40f890cfcb05979da3c22 | 25,809 |
import sympy
def _jrc(a, n):
"""Get the Jacobi recurrence relation coefficients."""
return (
sympy.Rational((a + 2 * n + 1) * (a + 2 * n + 2), 2 * (n + 1) * (a + n + 1)),
sympy.Rational(a * a * (a + 2 * n + 1), 2 * (n + 1) * (a + n + 1) * (a + 2 * n)),
sympy.Rational(n * (a + n) * (a + 2 * n + 2), (n + 1) * (a + n + 1) * (a + 2 * n))
) | c11527f9b568924cf3b84b3ac36e1641b3ed022e | 25,812 |
async def get_story_data(session, story_id, story_rank):
"""
Gets the given story data - title and url
"""
url = 'https://hacker-news.firebaseio.com/v0/item/{}.json'.format(story_id)
async with session.get(url) as response:
result_data = await response.json()
story_url = ""
if "url" in result_data: # The url key might not be in the results data
story_url = result_data['url']
return story_rank, result_data['title'], story_url | cbac4d05915a82ab11854b9365acddb9c42944bd | 25,816 |
def color_string(guess: str, target: str) -> str:
"""
Returns Wordle colors for guess given target.
"""
c_string = ""
for pos, letter in enumerate(guess):
if target[pos] == letter:
c_string += "g"
elif letter in target:
c_string += "y"
else:
c_string += "b"
return c_string | f5caa33850d50e6d3c98532d77da5a740a255d10 | 25,821 |
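For example, guessing "crate" against the target "trace":

print(color_string('crate', 'trace'))  # -> 'yggyg'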
import requests
def get_uniprot(accession,
uniprot_url='https://www.uniprot.org/uniprot/{0}.txt'):
"""
Retrieve Uniprot Annotation file from Uniprot ID e.g. Q15858
"""
try:
results = requests.get(
uniprot_url.format(accession.strip()), allow_redirects=True
)
    except requests.exceptions.RequestException:
raise ValueError('no Uniprot results retrieved for {0}'.format(accession))
if results:
return results.content.decode("utf-8")
else:
raise ValueError('no Uniprot results retrieved for {0}'.format(accession)) | 852d801c7110e14d0e33d188828bd4c839194589 | 25,835 |
import json
import io
def parse_header_json(
header,
key_mod=lambda x: x,
value_mod=lambda x: x
):
"""
Parse an HTTP header returning a dict where the headers are the keys and
the values are the values
Parameters
----------
    header : str
        HTTP header to parse, as a string containing json: a list of dicts
        with 'h' (name) and 'v' (value) keys
key_mod : callable, optional
Function mapping str to str that modifies the header names
value_mod : callable, optional
Function mapping str to str that modifies the header values
Returns
-------
data : dict
dict with header names as keys and header values as values
"""
if not isinstance(header, str):
raise ValueError("header has type '%s'- expected str" % type(header))
try:
header_json_parsed = json.load(io.StringIO(header))
except ValueError:
return None
return {
key_mod(_header['h']): value_mod(_header['v'])
for _header in header_json_parsed
} | 2c6f80a21150f74ce864715dd1b24e48faf74333 | 25,838 |
def in_3d_box(box, coords):
"""
Check if point is in a box
Args:
box (tuple): ((x0, x1), (y0, y1), (z0, z1)).
coords (tuple): (x, y, z).
Returns
bool
"""
cx = coords[0] >= box[0][0] and coords[0] <= box[0][1]
cy = coords[1] >= box[1][0] and coords[1] <= box[1][1]
cz = coords[2] >= box[2][0] and coords[2] <= box[2][1]
return cx and cy and cz | 4580e67c89b02565b0ac4d1b5c1d11dd5396f74a | 25,840 |
def _jwt_decode_handler_no_defaults(token): # pylint: disable=unused-argument
"""
Accepts anything as a token and returns a fake JWT payload with no defaults.
"""
return {} | 0d24d14c41ad427cfbfb7796b64ca6d2fb8830a6 | 25,841 |
import time
def timestamp_decorator(func):
"""Decorator that stamps the time a function takes to execute."""
def wrapper(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
print(f' Finished in {end-start:.3} secs')
return wrapper | c517b56646e0ab6f0f89effb61d68373a36327f7 | 25,844 |
def bool_not(x):
"""Implement `bool_not`."""
return not x | b97c8ee15bf48729b9110f5d69ad760e3cd611c2 | 25,851 |
def hexStr2Bytes(hexStr: str) -> bytes:
"""
Convert an hexadecimal string in bytes
:param hexStr: The hexadecimal string
:type hexStr: str
:return: The bytes of the hexadecimal string
:rtype: bytes
"""
return bytes.fromhex(hexStr) | 720ae814a2252db7497fc5850eb02a262b32aa0c | 25,856 |
def convert_window_size(ws):
"""
This function converts the shorthand input window size
and returns an integer of the same value (i.e. "100kb" == int(100000))
Args:
ws: window size (bp/kb/mb)
Returns: Integer of window size
"""
window_size = None
if "bp" in ws:
window_size = int(ws.strip("bp"))*100
elif "kb" in ws:
window_size = int(ws.strip("kb"))*1000
elif "mb" in ws:
window_size = int(ws.strip("mb"))*10000
return window_size | c6c93cc78ec260862fbe1e91dddaf74394aa58ea | 25,858 |
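With the corrected multipliers:

print(convert_window_size('100bp'))  # 100
print(convert_window_size('100kb'))  # 100000
print(convert_window_size('2mb'))    # 2000000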