content (string, 39-14.9k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
---|---|---|
def format_time(seconds, n=5):
"""Format seconds to std time.
note:
Args:
seconds (int): seconds.
n:precision (D,h,m,s,ms)
Returns:
str: .
Example:
seconds = 123456.7
format_time(seconds)
#output
1D10h17m36s700ms
format_time(seconds, n=2)
#output
1D10h
"""
days = int(seconds / 3600/ 24)
seconds = seconds - days * 3600 * 24
hours = int(seconds / 3600)
seconds = seconds - hours * 3600
minutes = int(seconds / 60)
seconds = seconds - minutes * 60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = round(seconds * 1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= n:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= n:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= n:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= n:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f | 7e1c67a178cb407835d0ff28b4c756e246e14f2f | 15,161 |
def equals(version:str, releases:dict) -> list:
"""
Get a specific release
Parameters
----------
version : str
desired version
releases : dict
dictionary of all releases in one package
Returns
-------
list
desired release content
"""
vx = version.replace("==", "").replace("(", '').replace(")", '').replace(" ", '')
r = []
try:
remote = releases[f'{vx}']
for i in remote:
r.append(i)
r.append(vx)
except KeyError:
return ["Error"]
return r | 2617995aa6b669140dbf18d9b2b0b52a2d176308 | 15,162 |
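# Illustrative usage of equals(); the release data below is a made-up example, not from the source:
releases = {"1.0.0": [{"filename": "pkg-1.0.0.tar.gz"}], "1.0.1": [{"filename": "pkg-1.0.1.tar.gz"}]}
equals("==1.0.0", releases)   # [{'filename': 'pkg-1.0.0.tar.gz'}, '1.0.0']
equals("==9.9.9", releases)   # ['Error'] (unknown version)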
def LinearlyScaled(value, maximum, minimum=0.0, offset=0.0):
"""Returns a value scaled linearly between 0 and 1.
Args:
value (float): the value to be scaled.
maximum (float): the maximum value to consider. Must be strictly
positive and finite (i.e., it can't be zero nor infinity).
minimum (float): the minimum value to consider. Defaults to 0.0.
offset (float): constant added to the scaled value. Defaults to 0.0.
Returns:
A ``float`` between 0 and 1 (plus ``offset``). When the value is at or
below the minimum, we return 1. When the value is greater than the
maximum, we return 0. Intermediate values are scaled linearly.
"""
delta = (maximum - max(minimum, min(maximum, value))) / (maximum - minimum)
return delta + offset | 6ea0654915b77295f442df7e98a6911da3421ace | 15,163 |
def sort_by_absolute_val(df, column):
"""Sort df column by descending order in terms of the absolute value."""
df = df.reindex(df[column]
.abs()
.sort_values(ascending=False)
.index)
return df | bf9fdba373cd93c3b385154710b9b310e2a26ac8 | 15,166 |
def to_label(row):
"""Convert a Region or (chrom, start, end) tuple to a region label."""
return "{}:{}-{}".format(row.chromosome, row.start + 1, row.end) | bf2f8e51301e7f157c8398f0a85c57d4df1619ed | 15,171 |
def bool_check(arg, config, default):
"""Return bool from `arg` or `config` str, or `default` bool.
Arguments from the parser and configuration file are represented as strings.
Arguments that represent boolean values need string compares, and then
return the actual boolean object.
Args:
arg (str):
config (str):
default (bool):
Returns:
True, False, or default (bool):
Raises:
TypeError
"""
if arg:
if arg == 'True':
return True
elif arg == 'False':
return False
else:
raise TypeError
elif config:
if config == 'True':
return True
elif config == 'False':
return False
else:
raise TypeError
else:
return default | 1b0e7f0bf82bbb535ae05fda24ede06f4e50b54c | 15,172 |
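# Illustrative usage of bool_check() (values invented for this sketch): the parser argument wins
# over the configuration value, and empty strings fall through to the default.
bool_check('True', 'False', False)   # True  (arg takes precedence)
bool_check('', 'False', True)        # False (falls back to config)
bool_check('', '', True)             # True  (falls back to default)
bool_check('yes', '', True)          # raises TypeError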
def compute_component_suffstats(x, mean, S, N, p_mu, p_k, p_nu, p_L):
"""
Update mean, covariance, number of samples and maximum a posteriori for mean and covariance.
Arguments:
:np.ndarray x: sample to add
:np.ndarray mean: mean of samples already in the cluster
:np.ndarray S: covariance of samples already in the cluster
:int N: number of samples already in the cluster
:np.ndarray p_mu: NIG Normal mean parameter
:double p_k: NIG Normal std parameter
:int p_nu: NIG Gamma df parameter
:np.ndarray p_L: NIG Gamma scale matrix
Returns:
:np.ndarray: updated mean
:np.ndarray: updated covariance
:int N: updated number of samples
:np.ndarray: mean (maximum a posteriori)
:np.ndarray: covariance (maximum a posteriori)
"""
new_mean = (mean*N+x)/(N+1)
new_S = (S + N*mean.T@mean + x.T@x) - new_mean.T@new_mean*(N+1)
new_N = N+1
new_mu = ((p_mu*p_k + new_N*new_mean)/(p_k + new_N))[0]
new_sigma = (p_L + new_S + p_k*new_N*((new_mean - p_mu).T@(new_mean - p_mu))/(p_k + new_N))/(p_nu + new_N - x.shape[-1] - 1)
return new_mean, new_S, new_N, new_mu, new_sigma | f7f8d0512f57e6a627255f339e0425b8f4c66bd0 | 15,175 |
def get_week_day(integer):
"""
Getting weekday given an integer
"""
if integer == 0:
return "Monday"
if integer == 1:
return "Tuesday"
if integer == 2:
return "Wednesday"
if integer == 3:
return "Thursday"
if integer == 4:
return "Friday"
if integer == 5:
return "Saturday"
if integer == 6:
return "Sunday" | 5bd0431e8598d56e99f970da738e532630665163 | 15,180 |
import base64
import six
def deserialize(obj):
"""Deserialize the given object
:param obj: string representation of the encoded object
:return: decoded object (its type is unicode string)
"""
result = base64.urlsafe_b64decode(obj)
# this workaround is needed because in case of python 3 the
# urlsafe_b64decode method returns string of 'bytes' class
if six.PY3:
result = result.decode()
return result | a76a6f396b5e0992d4c942d4ede73e16594b7173 | 15,181 |
import yaml
def ydump(data, *args, sort_keys=False, **kwargs):
"""
Create YAML output string for data object. If data is an OrderedDict, original key ordering
is preserved in internal call to yaml.dump().
:param data:
:type data: dict or Bunch
:param args: Additional args passed on to yaml.dump()
:param sort_keys: defaults to False
:type sort_keys: bool
:param kwargs: Further keyword args are passed on to yaml.dump()
:return: YAML string representation of data
:rtype: str
"""
return yaml.dump(data, *args, sort_keys=sort_keys, **kwargs) | 4ebf80264a2755bcbbbc620e83064d69f3c76a57 | 15,183 |
import requests
def push(command: str, f: str = "root", a: str = "cyberlab"):
"""
Push a command to the repository
:param command: command to push to the repository
:param f: destination folder (default: root)
:param a: author of the command (default: cyberlab)
:return:
Message indicating success or error
"""
url = 'http://adeaee94f5b244075afbab05906b0697-63726918.eu-central-1.elb.amazonaws.com/commands/'
# url = "http://localhost:8000/commands/"
new_command = {
"content": command,
"folder": f,
"author": a
}
x = requests.post(url, json=new_command)
if x.status_code == 200:
return "Command successfully pushed"
else:
return "Error!" | dd3b857738ceb23a9c68da6c2e82d47abc19eb12 | 15,185 |
def filename_with_size(media, size):
"""Returns the filename with size, e.g. IMG1234.jpg, IMG1234-small.jpg"""
# Strip any non-ascii characters.
filename = media.filename.encode("utf-8").decode("ascii", "ignore")
if size == 'original':
return filename
return ("-%s." % size).join(filename.rsplit(".", 1)) | fc77d0b234c1a69fc1d61793cc5e07cf2ae25864 | 15,189 |
from sympy.ntheory.modular import crt
def _decipher_rsa_crt(i, d, factors):
"""Decipher RSA using chinese remainder theorem from the information
of the relatively-prime factors of the modulus.
Parameters
==========
i : integer
Ciphertext
d : integer
The exponent component
factors : list of relatively-prime integers
The integers given must be coprime and the product must equal
the modulus component of the original RSA key.
Examples
========
How to decrypt RSA with CRT:
>>> from sympy.crypto.crypto import rsa_public_key, rsa_private_key
>>> primes = [61, 53]
>>> e = 17
>>> args = primes + [e]
>>> puk = rsa_public_key(*args)
>>> prk = rsa_private_key(*args)
>>> from sympy.crypto.crypto import encipher_rsa, _decipher_rsa_crt
>>> msg = 65
>>> crt_primes = primes
>>> encrypted = encipher_rsa(msg, puk)
>>> decrypted = _decipher_rsa_crt(encrypted, prk[1], primes)
>>> decrypted
65
"""
moduluses = [pow(i, d, p) for p in factors]
result = crt(factors, moduluses)
if not result:
raise ValueError("CRT failed")
return result[0] | 17200b88d545d8c3d16a191e6e607389619694a9 | 15,190 |
def get_item(dictionary, key):
"""Return value from dictionary.
Args:
dictionary (dict): Dictionary to retrieve value from.
key (str): Key to perform lookup.
Returns:
Value of key in dictionary.
"""
return dictionary.get(key) | d79fac838e31ff8eb8ac14b7f7f5abec5073f46c | 15,191 |
def _should_save_report_msg(msg):
"""Returns True if the given ForwardMsg should be serialized into
a shared report.
We serialize report & session metadata and deltas, but not transient
events such as upload progress.
"""
msg_type = msg.WhichOneof("type")
return msg_type == "initialize" or msg_type == "new_report" or msg_type == "delta" | 8e78580aee25ab4a6d1bf40a774a05031231c2e4 | 15,193 |
def get_X_Y(lockin):
"""
Get X and Y (Measure)
args:
lockin (pyvisa.resources.gpib.GPIBInstrument): SRS830
returns:
(tuple): X, Y
"""
X, Y = lockin.query("SNAP? 1,2").split("\n")[0].split(",")
X, Y = float(X), float(Y)
return X, Y | 3d56151042682f86350a499ab639852fc6387887 | 15,198 |
import functools
import warnings
def deprecated(message = "Function %s is now deprecated"):
"""
Decorator that marks a certain function or method as
deprecated so that whenever such function is called
an output message warns the developer about the
deprecation (as an incentive to migrate).
:type message: String
:param message: The message template to be used in the
output operation of the error.
:rtype: Decorator
:return: The decorator that should be used to wrap a
function and mark it as deprecated (send warning).
"""
def decorator(function):
name = function.__name__ if hasattr(function, "__name__") else None
@functools.wraps(function)
def interceptor(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning)
warnings.warn(
message % name,
category = DeprecationWarning,
stacklevel = 2
)
warnings.simplefilter("default", DeprecationWarning)
return function(*args, **kwargs)
return interceptor
return decorator | 9c4812653bc74414bed3839e7654f09a114865a0 | 15,200 |
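# Illustrative usage of the deprecated() decorator (function name and message are hypothetical):
@deprecated("Function %s is deprecated, use the new API instead")
def old_parser(data):
    return data
old_parser([1, 2])  # emits a DeprecationWarning mentioning 'old_parser', then returns [1, 2]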
import struct
def combine_u32_registers(registers):
""" combine two registers for 32-bit int output """
raw = struct.pack('>HH', registers[0], registers[1])
return struct.unpack('>I', raw)[0] | 67f245a9fada01a693a3cb46536a0df7baba433a | 15,202 |
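# Example for combine_u32_registers(): two 16-bit register values combined into one unsigned
# 32-bit integer (big-endian word order).
combine_u32_registers([0x0001, 0x0002])  # 65538 (0x00010002)
combine_u32_registers([0xFFFF, 0xFFFF])  # 4294967295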
import webbrowser
import time
def get_oauth_pin(oauth_url, open_browser=True):
"""Prompt the user for the OAuth PIN.
By default, a browser will open the authorization page. If `open_browser`
is false, the authorization URL will just be printed instead.
"""
print('Opening: {}\n'.format(oauth_url))
if open_browser:
print("""
In the web browser window that opens please choose to Allow
access. Copy the PIN number that appears on the next page and paste or
type it here:
""")
try:
r = webbrowser.open(oauth_url)
time.sleep(2)
if not r:
raise Exception()
except:
print("""
Uh, I couldn't open a browser on your computer. Please go here to get
your PIN:
""" + oauth_url)
else:
print("""
Please go to the following URL, authorize the app, and copy the PIN:
""" + oauth_url)
return input('Please enter the PIN: ').strip() | 74d7c691cfaa42d717e017fe227b73669ffe3a6d | 15,204 |
def exception_models_to_message(exceptions: list) -> str:
"""Formats a list of exception models into a single string """
message = ""
for exception in exceptions:
if message: message += "\n\n"
message += f"Code: {exception.code}" \
f"\nMessage: {exception.message}" \
f"\nSeverity: {exception.severity}" \
f"\nData: {exception.data}"
return message | 56f40ab1fe0d1a03abeaa08d1f9898c7abdb0552 | 15,205 |
from typing import Dict
def _json_list2dict(d:Dict)->Dict:
"""
Loop through all fields and, whenever a list is met, convert it into a dict.
The converted output uses the index within the list as the key.
Conversion is applied recursively down to the last level.
Parameters
----------
d : Dict
initial dict to convert
Returns
-------
Dict
converted dict
"""
for key, val in d.items():
# convert list 2 dict with key as the index if it contains a container
if isinstance(val, list) \
and len(val) > 0 \
and isinstance(val[0], (list, dict)):
val = {str(k):v for k, v in enumerate(val)}
# recursion (even for the newly converted list)
if isinstance(val, dict):
val = _json_list2dict(val)
d[key] = val
return d | 246387cc4a9c9b384f7c4ddc29d17cd916875146 | 15,209 |
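# Example for _json_list2dict(): lists of containers become index-keyed dicts, while lists of
# scalars are left untouched (input chosen for illustration).
_json_list2dict({"a": [{"x": 1}, {"y": 2}], "b": [1, 2, 3]})
# {'a': {'0': {'x': 1}, '1': {'y': 2}}, 'b': [1, 2, 3]}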
import re
def clean_license_name(license_name):
"""Remove the word ``license`` from the license
:param str license_name: Receives the license name
:return str: Return a string without the word ``license``
"""
return re.subn(r'(.*)\s+license', r'\1', license_name, flags=re.IGNORECASE)[0] | 970d933911b69ba9a1f33a768bc68032334d41c3 | 15,213 |
from typing import List
def linspace(start: float, stop: float, n: int) -> List[float]:
"""Returns a List of linearly spaced numbers."""
if n <= 0:
return []
elif n == 1:
return [start]
else:
step = (stop - start)/(n - 1)
return [start + step*i for i in range(n)] | 0ba6eb029f96cb662c48c8c8b997a73e2f5a9bc9 | 15,218 |
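# Example for linspace(): both endpoints are included and the spacing is (stop - start)/(n - 1).
linspace(0.0, 1.0, 5)   # [0.0, 0.25, 0.5, 0.75, 1.0]
linspace(2.0, 2.0, 1)   # [2.0]
linspace(0.0, 1.0, 0)   # []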
def ellipsis_after(text, length):
""" Truncates text and adds ellipses at the end. Does not truncate words in the middle. """
if not text or len(text) <= length:
return text
else:
return text[:length].rsplit(' ', 1)[0]+u"\u2026" | 9b15c5e8f63caec0a7327ae1ce872bac932208ab | 15,222 |
def order_columns(dataset, col_names, feature_to_predict):
"""
Method is responsible for putting the 'feature_to_predict' name as the first column of the dataset
and sorting the other column names.
Parameters
----------
dataset
col_names = column names to be sorted
feature_to_predict = feature to be predicted
Returns
-------
Dataset which has 'feature_to_predict' as the first column of the dataset.
"""
col_names.remove(feature_to_predict)
col_names.sort()
sorted_column_list = [feature_to_predict]
for col_name in col_names:
if col_name != feature_to_predict:
sorted_column_list.append(col_name)
dataset = dataset.reindex(sorted_column_list, axis=1)
return dataset | 05cb77502defa2648c6cc9279cf130d07ffd9bb7 | 15,223 |
import typing
def merge_reports(reports: typing.Iterable[dict]) -> dict:
"""Merge the size reports in reports.
:param reports: The reports to merge.
:return: the merged report.
"""
final_report = dict()
for report in reports:
for k, v in report.items():
final_report[k] = final_report.get(k, 0) + v
return final_report | 7045d0e7f8047678a5bdb90c885717bc917cb46a | 15,228 |
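# Example for merge_reports(): per-key sizes are summed across reports and missing keys default
# to 0 (the section names here are invented for this sketch).
merge_reports([{"text": 120, "data": 40}, {"text": 30, "bss": 8}])
# {'text': 150, 'data': 40, 'bss': 8}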
import hmac
import hashlib
def mailgun_signature(timestamp, token, webhook_signing_key):
"""Generates a Mailgun webhook signature"""
# https://documentation.mailgun.com/en/latest/user_manual.html#securing-webhooks
return hmac.new(
key=webhook_signing_key.encode('ascii'),
msg='{timestamp}{token}'.format(timestamp=timestamp, token=token).encode('ascii'),
digestmod=hashlib.sha256).hexdigest() | 64d549e53a7c8f6842d176caa501335a0bc1ca61 | 15,232 |
import copy
def create_annotation_data(save_data):
"""Returns the final annotation JSON which is a cleane dup version of the save JSON"""
#annotation_data = save_data
annotation_data = {}
annotation_data["image"] = copy.deepcopy(save_data["image"])
annotation_data["grid_cells"] = copy.deepcopy(save_data["grid_cells"])
# remove ids from corners in PV modules
try:
for p in annotation_data["grid_cells"]:
corners = p["corners"]
for corner in corners:
del corner["id"]
except KeyError:
pass
return annotation_data | ee8ceb51cac98b7ac795c9bfefd525d23168d6d1 | 15,233 |
def _parse_data_row(row, columns, counters, **options):
"""Parse table data row.
If a cell has multiple tags within it then each will be seperated
by `sep` character.
Parameters
----------
row : BeautifulSoup Tag object
A <tr> tag from the html, with data in at least one cell.
columns : list
The list of column headers for the table.
counters : dict
Counters used for propagating multirow data.
sep : string, optional (default='')
Separator between multiple tags in a cell.
Returns
-------
row_processed : list
The processed row.
"""
sep = options.pop('sep', '')
cells = row.find_all(['th', 'td'])
cell_cursor = 0
row_processed = []
for col in columns:
# Check if values to propagate
if counters[col][0] > 0:
cell_value = counters[col][1]
counters[col][0] -= 1
# If not propagate, get from cell
elif cell_cursor < len(cells):
cell = cells[cell_cursor]
rowspan = int(cell.attrs.pop('rowspan', 1))
cell_value = sep.join(cell.stripped_strings)
if rowspan > 1:
counters[col] = [rowspan - 1, cell_value]
cell_cursor += 1
# If cursor is out of index range, assume the remaining cells are missing
else:
cell_value = None
row_processed.append(cell_value)
return row_processed | 500c9634b8110575d3c7800a0a1f2616fa08ac03 | 15,234 |
def add_argument(parser, flag, type=None, **kwargs):
"""Wrapper to add arguments to an argument parser. Fixes argparse's
behavior with type=bool. For a bool flag 'test', this adds options '--test'
which by default sets test to on, and additionally supports '--test true',
'--test false' and so on. Finally, 'test' can also be turned off by simply
specifying '--notest'.
"""
def str2bool(v):
return v.lower() in ('true', 't', '1')
if flag.startswith('-'):
raise ValueError('Flags should not have the preceding - symbols, -- will be added automatically.')
if type == bool:
parser.add_argument('--' + flag, type=str2bool, nargs='?', const=True, **kwargs)
parser.add_argument('--no' + flag, action='store_false', dest=flag)
else:
parser.add_argument('--' + flag, type=type, **kwargs) | d802615f737c806be97962f18fdd7e8057512f96 | 15,239 |
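# Illustrative usage of add_argument() (flag names and defaults are made up for this sketch):
# bool flags get --flag / --noflag handling, other types pass straight through to argparse.
import argparse
parser = argparse.ArgumentParser()
add_argument(parser, 'test', type=bool, default=False)
add_argument(parser, 'epochs', type=int, default=10)
parser.parse_args(['--test', '--epochs', '20'])  # test=True, epochs=20
parser.parse_args(['--notest'])                  # test=False, epochs=10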
def group_fasta(fa):
"""
Parse a FASTA file and map each header line to its sequence.
return:
{>id: ATCG..., }
"""
ids = []
seqs = []
seq = ''
with open(fa, 'r') as fo:
n = 0
while True:
line = fo.readline().strip('\n')
if line.startswith('>'):
ids.append(line)
if seq:
seqs.append(seq)
seq = ''
else:
if line:
seq += line
if line == '':
seqs.append(seq)
break
n += 1
seqmap = dict(zip(ids, seqs))
return seqmap | 821746f46a0458c99e34f38f175fc36dc3550a9c | 15,240 |
import time
def convert_from_html_time(html_time):
"""Converts times sent through html forms to dates suitable for the database
html_time - Time of the format 9:00 AM
returns number of minutes since 12:00 AM
"""
parsed = time.strptime(html_time, "%I:%M %p")
return parsed.tm_hour * 60 + parsed.tm_min | 89f8fe3ee1be7abb55cb523c73dc130cc38825d7 | 15,242 |
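# Example for convert_from_html_time(): "%I:%M %p" strings become minutes past midnight.
convert_from_html_time("9:30 AM")   # 570
convert_from_html_time("12:05 AM")  # 5
convert_from_html_time("1:00 PM")   # 780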
def init(runtime_id, name):
"""
Intializes the data source.
Returns the initial state of the data source.
"""
return {} | b90f6bb131df8c1e47bd8fead95273613b59ac3a | 15,244 |
def identity(x):
"""Returns whatever is passed."""
return x | e18cfe924da5195d2608f1808b17f142e23a83da | 15,245 |
import shutil
def binary_available() -> bool:
"""Returns True if the Radix CLI binary (rx) is
available in $PATH, otherwise returns False.
"""
return shutil.which("rx") is not None | fa814b20b3fe47096dfed64bde3f9cb33e9b711d | 15,250 |
def model_pred_on_gen_batch(model, b_gen, b_idx=0):
"""
Predict on model for single batch returned from a data generator.
Returns predictions as well as corresponding targets.
"""
# predict on model
X,y = b_gen.__getitem__(b_idx)
pred = model.predict_on_batch(X)
return pred, y | 1a23903f088fc61b96060ab913ddbc554d5584a6 | 15,252 |
import time
def mk_epoch_from_utc(date_time):
"""
Convert UTC timestamp to integer seconds since epoch.
Using code should set
os.environ['TZ'] = 'UTC'
"""
pattern = '%Y%m%d-%H%M%S'
return int(time.mktime(time.strptime(date_time, pattern))) | 070cc8b6ad23e9b590b35d014d7accdab5649a16 | 15,254 |
def is_background(sample):
"""Return ``True`` if given sample is background and ``False`` otherwise."""
background_relations = sample.resolwe.relation.filter(
type='compare',
label='background',
entity=sample.id,
position='background'
)
return len(background_relations) > 0 | ba2bac31991798d1cfb1c00a27aaeb4dbc79a73b | 15,260 |
def reverse_list_iterative(head):
"""
Reverse a singly linked list by iterative method
:param head: head node of given linked list
:type head: ListNode
:return: head node of reversed linked list
:rtype: ListNode
"""
curr = head
prev = None
while curr is not None:
tmp = curr.next
curr.next = prev
prev = curr
curr = tmp
return prev | c6795a6bc3356955a939396feafc1fb7d745333f | 15,265 |
import decimal
def RichardsonExtrapolation(fh, fhn, n, k):
"""Compute the Richardson extrapolation based on two approximations of order k
where the finite difference parameter h is used in fh and h/n in fhn.
Inputs:
fh: Approximation using h
fhn: Approximation using h/n
n: divisor of h
k: original order of approximation
Returns:
Richardson estimate of order k+1"""
n = decimal.Decimal(n)
k = decimal.Decimal(k)
numerator = decimal.Decimal(n**k * decimal.Decimal(fhn) - decimal.Decimal(fh))
denominator = decimal.Decimal(n**k - decimal.Decimal(1.0))
return float(numerator/denominator) | 899514b887020980a3bceb4a0dcfd0abcffd1063 | 15,268 |
def hex_str_to_int(value: str) -> int:
"""
convert a hexadecimal string to integer
'0x1b1b' -> 6939
"""
return int(value, 16) | 6c7187fb94528a0342b28e4633d03ba797d9d354 | 15,270 |
import json
def open_map_file(difficulty):
"""This function opens the map file listed in the info.dat file for the specificed difficulty level."""
with open('./temporary/info.dat', 'rb') as i:
info = json.load(i)
for x in info['_difficultyBeatmapSets']:
if x['_beatmapCharacteristicName'].casefold() == 'Standard'.casefold():
for y in x['_difficultyBeatmaps']:
if y['_difficulty'].casefold() == difficulty.casefold():
file_name = y['_beatmapFilename']
with open(f"./temporary/{file_name}", 'rb') as f:
map_file = json.load(f)
return map_file | 301716cd4c75f2843a75d04c6e0118bcbd76e065 | 15,275 |
def make_integer(value):
"""Returns a number in a string format like "10,000" as an integer."""
return int(value.replace(",", "")) | 9999c3476afa70366275402b2d327a9e42dfe4d7 | 15,278 |
def images_rescale(arFrames):
"""
Rescale array of images (rgb 0-255) to [-1.0, 1.0]
:param arFrames: Array of frames.
:return: Array of rescaled frames.
"""
ar_fFrames = arFrames / 127.5
ar_fFrames -= 1.
return ar_fFrames | 63f823b529a39300e15245e2326edf3b1497edb5 | 15,279 |
def model_ids(models):
"""Generates a list of model ids from a list of model objects."""
return [m.pk for m in models] | a938434df3805c77c0446ab3f4fec4161fbaa39e | 15,282 |
def is_numeric(obj: str) -> bool:
""" Returns True if argument can be coerced to type 'float' """
try:
float(obj)
return True
except ValueError:
return False | b75ac54e7cd29406f6ef02882b89d1ab884a09c0 | 15,289 |
def get_or_make_group(ec2, name, vpc_id=None, quiet=False):
"""
Get the EC2 security group of the given name,
creating it if it doesn't exist
"""
groups = ec2.security_groups.all()
groups = [g for g in groups if g.group_name == name and g.vpc_id == vpc_id]
if len(groups) > 0:
return groups[0]
else:
if not quiet:
print("Creating security group " + name)
vpc_id = vpc_id if vpc_id is not None else ''
sg = ec2.create_security_group(
GroupName=name,
Description='AbStar cluster group',
VpcId=vpc_id)
return sg | 48b9c0a3fef99763e05e60bc6f2186f52f841031 | 15,292 |
import inspect
def kwargs_sep(fcn, kwargs):
"""Used to separate kwargs for multiple different functions
Args:
fcn: function
kwargs: dict of keyword args
Returns:
dict for fcn keywords contained in kwargs"""
#list of fcn argument names
fcn_args = [key for key, val in inspect.signature(fcn).parameters.items()]
#dict of kwargs for fcn
fcn_kwargs = {key: kwargs[key] for key in kwargs if key in fcn_args}
return fcn_kwargs | 016e8ab7e423e9c8f896bfd18213d323d58a129b | 15,294 |
def guess_extension_from_headers(h):
"""
Given headers from an ArXiV e-print response, try and guess what the file
extension should be.
Based on: https://arxiv.org/help/mimetypes
"""
if h.get("content-type") == "application/pdf":
return ".pdf"
if (
h.get("content-encoding") == "x-gzip"
and h.get("content-type") == "application/postscript"
):
return ".ps.gz"
if (
h.get("content-encoding") == "x-gzip"
and h.get("content-type") == "application/x-eprint-tar"
):
return ".tar.gz"
if (
h.get("content-encoding") == "x-gzip"
and h.get("content-type") == "application/x-eprint"
):
return ".tex.gz"
if (
h.get("content-encoding") == "x-gzip"
and h.get("content-type") == "application/x-dvi"
):
return ".dvi.gz"
return None | 5f9fabaf20311f6f2c564b613d50d28d07b3dfdd | 15,297 |
def fast_exponentiation(base, exp, n):
"""
Iteratively finds the result of the expression (base**exp) mod n
"""
bin_exp = bin(exp)[2:]
output = 1
for i in bin_exp:
output = (output ** 2) % n
if i == "1":
output = (output*base) % n
return output | c646a9226487fcac947160ee6d56a6a51b3106a9 | 15,299 |
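# Example for fast_exponentiation(): square-and-multiply over the bits of the exponent;
# the result agrees with Python's built-in three-argument pow().
fast_exponentiation(4, 13, 497)   # 445
pow(4, 13, 497)                   # 445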
def average_bounding_box(box):
"""Average list of 4 bounding box coordinates to a midpoint."""
lng = 0
lat = 0
for i in range(len(box[0])):
lng += box[0][i][0]
lat += box[0][i][1]
lat /= 4
lng /= 4
return [lng,lat] | ca934edcab7b658aaeb263509c5dfe048e912873 | 15,300 |
import statistics
def _skew(values):
""" This function calculates the skew value of the list
of values which represents the difference between the mean
and median which also corresponds to the skewness.
Using the following formula ,
(1/((n-1)*(n-2)))*(sum over i { ((values[i]-mean(values))/(std_dev))**3) }
n -> number of values
std_dev -> standard deviation of all values
For documentation of this function refer to SKEW function
available in Google Sheets
Args:
values : Type-list of numbers could be floating points
Returns:
floating point number representing the skew value of values
"""
std_dev = statistics.pstdev(values)
mean = statistics.mean(values)
size = len(values)
# If there is no deviation we assume to not have any skewness
if std_dev == 0:
return 0
# If there are <=2 entries we assume to not have any skewness
if size <= 2:
return 0
# Summation of skewness of each element
skew_value = 0
for x in values:
skew_of_x = (x - mean) / std_dev
skew_of_x = (skew_of_x)**3
skew_value += skew_of_x
#Normalizing skewness with the size of data
skew_value = (skew_value * size) / ((size - 1) * (size - 2))
return skew_value | bf564cc138a9e7a2196793fb4b27b00ed6ab4599 | 15,308 |
def decode(value, encoding='utf-8'):
"""Decode given bytes value to unicode with given encoding
:param bytes value: the value to decode
:param str encoding: selected encoding
:return: str; value decoded to unicode string if input is a bytes, original value otherwise
>>> from pyams_utils.unicode import decode
>>> decode(b'Cha\\xc3\\xaene accentu\\xc3\\xa9e')
'Chaîne accentuée'
>>> decode(b'Cha\\xeene accentu\\xe9e', 'latin1')
'Chaîne accentuée'
"""
return value.decode(encoding) if isinstance(value, bytes) else value | 8cef7b6d0367d5bff48f5c9b9cb2b5d7c4a883d9 | 15,309 |
from typing import Dict
def values_from_bucket(bucket: Dict[str, float]) -> set:
"""Get set of price formatted values specified by min, max and interval.
Args:
bucket: dict containing min, max and interval values
Returns:
Formatted set of values from min to max by interval
"""
rng = [int(100 * bucket[_k]) for _k in ('min', 'max', 'interval')]
rng[1] += rng[2] # make stop inclusive
return {_x / 100 for _x in range(*rng)} | 848af64d3396cc77c3fa109821d72eb80961eec0 | 15,310 |
def resolve(obj):
"""
Helper function.
Check whether the given object is callable. If yes, return its return
value, otherwise return the object itself.
"""
return obj() if callable(obj) else obj | 80172ffce9c5f5fe8699980ad86a98794b4d436c | 15,314 |
def get_player_input(player_char, char_list):
"""Get a players move until it is a valid move on the board with no piece currently there."""
while True:
#Get user input
player_move = int(input(player_char + ": Where would you like to place your piece (1-9): "))
#Move is on board
if player_move > 0 and player_move < 10:
#Move is an empty spot
if char_list[player_move - 1] == '_':
return player_move
else:
print("That spot has already been chosen. Try again.")
else:
print("That is not a spot on the board. Try again.") | 2e2d1ac0b8e4fe6a2467a378dfb9e1821525cddb | 15,319 |
import hashlib
def get_checksum(requirements: str):
"""Determine the hash from the contents of the requirements."""
hash_ = hashlib.md5()
hash_.update(requirements.encode("utf-8"))
return hash_.hexdigest() | 59896edc566725cc1ced4b9e247c5949f36f0fd8 | 15,321 |
def getfilename(path):
"""This function extracts the file name without file path or extension
Args:
path (file):
full path and file (including extension of file)
Returns:
name of file as string
"""
return path.split('\\').pop().split('/').pop().rsplit('.', 1)[0] | a2df0c39836f4f04003e412e9b6a8b8f4e1c7081 | 15,327 |
def changed(old,new,delta,relative=True):
""" Tests if a number changed significantly
-) delta is the maximum change allowed
-) relative decides if the delta given indicates relative changes (if True) or absolute change (if False)
"""
delta = abs(delta)
epsilon = 1.0
if old > epsilon:
if relative:
notChanged = (new <= (1+delta)*old) and (new >= (1-delta)*old)
else:
notChanged = (new <= old+delta) and (new >= old-delta)
elif old < -epsilon:
if relative:
notChanged = (new >= (1+delta)*old) and (new <= (1-delta)*old)
else:
notChanged = (new >= old-delta) and (new <= old+delta)
else:
notChanged = (new >= old-epsilon) and (new <= epsilon+old)
return not notChanged | 50399926dbf3a652b73ab2045e55da6e8f9a14e5 | 15,330 |
from pathlib import Path
def is_relative_to(self:Path, *other):
"""Return True if the path is relative to another path or False.
"""
try:
self.relative_to(*other)
return True
except ValueError:
return False | 6715c8e8fa9fcd71f74c40be61e5681c4e95cf73 | 15,333 |
import socket
from contextlib import closing
def check_socket(host: str, port: int) -> bool:
"""Check if given port can be listened to"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
try:
sock.bind((host, port))
sock.listen(1)
return True
except socket.error:
return False | 3683f22bfdb4a548e38c389f8fa21dbd07851745 | 15,334 |
def _is_header_line(line):
"""Determines if line represents the headers for the nodetool status
output.
"""
return line.startswith('-- ') | 9cd4d5b3180266f5dc5f9048678b6e9637efa516 | 15,337 |
def _get_fetch_names(fetches):
"""Get a flattened list of the names in run() call fetches.
Args:
fetches: Fetches of the `Session.run()` call. It maybe a Tensor, an
Operation or a Variable. It may also be nested lists, tuples or
dicts. See doc of `Session.run()` for more details.
Returns:
(list of str) A flattened list of fetch names from `fetches`.
"""
lines = []
if isinstance(fetches, (list, tuple)):
for fetch in fetches:
lines.extend(_get_fetch_names(fetch))
elif isinstance(fetches, dict):
for key in fetches:
lines.extend(_get_fetch_names(fetches[key]))
else:
# This ought to be a Tensor, an Operation or a Variable, for which the name
# attribute should be available. (Bottom-out condition of the recursion.)
lines.append(fetches.name)
return lines | c2e8a08ec059cf3e0e7fda89b16a1d4e5aeb7dae | 15,340 |
import math
def distance_two_points(point_a, point_b):
"""
Calculate distance of two points.
"""
distance = math.sqrt(pow((point_a.pos_x - point_b.pos_x), 2) + pow((point_a.pos_y - point_b.pos_y), 2))
return distance | 01b6357315b90bf9d6141547776f33b48294bfa2 | 15,343 |
def hex_to_RGB(hexstr):
"""
Convert hex to rgb.
"""
hexstr = hexstr.strip('#')
r, g, b = int(hexstr[:2], 16), int(hexstr[2:4], 16), int(hexstr[4:], 16)
return (r, g, b) | 7084907ff084229fd2b4656aa301ca72a80d2fab | 15,350 |
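# Example for hex_to_RGB(): a leading '#' is stripped and each pair of hex digits becomes one channel.
hex_to_RGB("#ff8800")  # (255, 136, 0)
hex_to_RGB("00ff00")   # (0, 255, 0)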
def assert_interrupt_signal(library, session, mode, status_id):
"""Asserts the specified interrupt or signal.
Corresponds to viAssertIntrSignal function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
mode : constants.AssertSignalInterrupt
How to assert the interrupt.
status_id : int
Status value to be presented during an interrupt acknowledge cycle.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viAssertIntrSignal(session, mode, status_id) | c6035238e75364f591364b49f89bc4204d62200c | 15,351 |
def get_method_attr(method, cls, attr_name, default = False):
"""Look up an attribute on a method/ function.
If the attribute isn't found there, looking it up in the
method's class, if any.
"""
Missing = object()
value = getattr(method, attr_name, Missing)
if value is Missing and cls is not None:
value = getattr(cls, attr_name, Missing)
if value is Missing:
return default
return value | 4c5bb4383e22f6b63d78f4b0b5266225b0e7c1d2 | 15,357 |
def type_length(expr_type):
"""
Counts the number of parameters of a predicate. E.g.
type_length(e) = 1
type_length(<e, t>) = 2
type_length(<e, <e,t>>) = 3
"""
acc_first, acc_second = 0, 0
if expr_type is None:
return 0
if 'first' not in expr_type.__dict__ \
and 'second' not in expr_type.__dict__:
return 1
if 'first' in expr_type.__dict__:
acc_first = type_length(expr_type.first)
if 'second' in expr_type.__dict__:
acc_second = type_length(expr_type.second)
return acc_first + acc_second | 50c92a5079ff922e806c343d116145bd01c9b57e | 15,358 |
def permutationinverse(perm):
"""
Function generating inverse of the permutation
Parameters
----------
perm : 1D array
Returns
-------
inverse : 1D array
permutation inverse of the input
"""
inverse = [0] * len(perm)
for i, p in enumerate(perm):
inverse[p] = i
return inverse | c5a62a7e3acbebaebebbcf0f23dc1989a17f1c75 | 15,360 |
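# Example for permutationinverse(): if perm maps position i to perm[i], the inverse maps perm[i] back to i.
permutationinverse([2, 0, 1])  # [1, 2, 0]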
def _zeros_like_scala(x):
"""Returns 0 which has the same dtype as x where x is a scalar."""
return 0 | 405cd960b68b0794bfd813a4e3bd59890f05b315 | 15,363 |
def extract_arg_default(arg_str):
""" Default strategy for extracting raw argument value. """
args_str_split = arg_str.split(" ", 1)
if len(args_str_split)==1:
args_str_split.append("")
return args_str_split | 44f139591695ebf015b88cf343c90f7b2d29aff0 | 15,365 |
def group_words(words, size=2, empty=""):
"""Generates pairs (tuples) of consequtive words.
Returns a generator object for a sequence of tuples.
If the length of the input is not divisible by three, the last tuple will
have the last len(words) % size spots filled with the value of empty.
"""
n = int(size)
if n < 2:
raise ValueError("size must be larger than 1.")
it = iter(words)
grp = []
try:
grp = [next(it)]
except StopIteration:
return []
for w in it:
grp.append(w)
if len(grp) == size:
yield tuple(grp)
grp = []
if grp != []:
grp += [empty] * (size - len(grp))
yield tuple(grp) | b484915e84796621a8aba8ac9d8060a33ef05805 | 15,368 |
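# Example for group_words(): a trailing incomplete group is padded with `empty`.
list(group_words(["a", "b", "c", "d", "e"], size=2))
# [('a', 'b'), ('c', 'd'), ('e', '')]
list(group_words(["a", "b", "c"], size=3, empty="-"))
# [('a', 'b', 'c')]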
def readFromFile(filePath):
"""
Read from the specifide file path.
Parameters
----------
filePath : str
The path to the file to be read from.
Returns
-------
contents : str
The contents of the read file.
"""
with open(filePath, "r") as timeCheckFile:
contents = timeCheckFile.read()
return contents | 4d07a67f1378fe7bc7e061c17c56677cdadce107 | 15,371 |
def get_num_bits_different(hash1, hash2):
"""Calculate number of bits different between two hashes.
>>> get_num_bits_different(0x4bd1, 0x4bd1)
0
>>> get_num_bits_different(0x4bd1, 0x5bd2)
3
>>> get_num_bits_different(0x0000, 0xffff)
16
"""
return bin(hash1 ^ hash2).count('1') | 9023085206e3601c0be9a5cf5531024a4415fe97 | 15,374 |
import csv
def read_headers(file_name):
"""
Generate headers list from file
"""
with open(file_name) as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
# Skip the header
headers = next(reader)
return headers | 852c5ced337dd00c38a6fc191f58450b41018e52 | 15,377 |
def get_final_dx_groups(final_exam):
"""This function separates the final exam data into diagnosis groups.
Each dataframe that is returned follows a group of patients based on their
progression during the study. cn_cn means a group that was 'CN' at baseline
and 'CN' at final exam, while mci_ad means a group that was 'MCI' at baseline
and 'AD' at final exam. This function only contains data for the final exam
Returns a tuple containing the following items:
cn_cn_f, cn_mci_f, cn_ad_f, mci_cn_f, mci_mci_f, mci_ad_f, ad_cn_f, ad_mci_f, ad_ad_f
"""
# filter the data
cn_cn_f = final_exam[(final_exam.DX == 'CN') & (final_exam.DX_bl2 == 'CN')]
cn_mci_f = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'CN')]
cn_ad_f = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'CN')]
mci_cn_f = final_exam[(final_exam.DX == 'CN') & (final_exam.DX_bl2 == 'MCI')]
mci_mci_f = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'MCI')]
mci_ad_f = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'MCI')]
ad_cn_f = final_exam[(final_exam.DX == 'CN') & (final_exam.DX_bl2 == 'AD')]
ad_mci_f = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'AD')]
ad_ad_f = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'AD')]
return (cn_cn_f, cn_mci_f, cn_ad_f, mci_cn_f, mci_mci_f, mci_ad_f, ad_cn_f, ad_mci_f, ad_ad_f) | d8fd5c4d4903b9e4618de75ff734d9037b15b2da | 15,379 |
from typing import List
from typing import Tuple
def tabulate(rows: List[Tuple[str, str]]) -> List[str]:
"""Utility function for printing a two-column table as text with whitespace padding.
Args:
rows (List[Tuple[str, str]]): Rows of table as tuples of (left cell, right cell)
Returns:
Rows of table formatted as strings with whitespace padding
"""
left_max = max(len(row[0]) for row in rows)
out = []
for left, right in rows:
padding = (left_max + 1 - len(left)) * " "
out.append(left + padding + right)
return out | 838540161e06544f81bdf4ea78b0973f75949fb3 | 15,381 |
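# Example for tabulate(): the right column starts one space past the longest left cell.
tabulate([("name", "Alice"), ("id", "42")])
# ['name Alice', 'id   42']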
def hit(row, column, fleet):
"""
This method returns a tuple (fleet, ship) where ship is the ship from the fleet that receives a hit
by the shot at the square represented by row and column, and fleet is the fleet resulting from this hit.
:param row: int
:param column: int
:param fleet: list
:returns fleet, ship: tuple
"""
for i in range(len(fleet)):
for j in range(fleet[i][3]):
# if horizontal
if fleet[i][2]:
if row == fleet[i][0] and column == fleet[i][1] + j:
fleet[i][4].add((row, column))
return fleet, fleet[i]
# if vertical
else:
if row == fleet[i][0] + j and column == fleet[i][1]:
fleet[i][4].add((row, column))
return fleet, fleet[i] | 673994265cec6e0167b764b25eba1006e16c7257 | 15,382 |
def get_instance_name(inst):
"""Get the name of an instance, or None if it doesn't have one.
The name is the value for the metadata tag "Name," if it exists.
"""
if inst.get('Tags'):
for tag in inst['Tags']:
if tag['Key'] == 'Name':
return tag['Value']
return None | 1fa24ac813f0245a08585c0a776a9ada27099393 | 15,388 |
from typing import Iterable
from typing import List
from typing import Tuple
def get_eigenvalues_with_degeneracies(evals: Iterable[float]) -> List[Tuple[float, int]]:
"""
Given a set of sorted eigenvalues (possibly including degenerate eigenvalues), return a list of
(eigenvalue, degeneracy) pairs, with eigenvalues represented as floats rounded to 3dp.
"""
cur = None
count = 0
result = []
for e in evals:
e = round(e, 3)
if e == cur:
count += 1
else:
if cur is not None:
result.append((cur, count))
cur = e
count = 1
if count > 0:
result.append((cur, count))
return result | 37b0244b15eda2159f79e4de8da8a86e2b7e6352 | 15,397 |
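# Example for get_eigenvalues_with_degeneracies(): consecutive eigenvalues that round to the same
# 3-dp value are collapsed into (value, multiplicity) pairs.
get_eigenvalues_with_degeneracies([1.0001, 0.9999, 2.5])
# [(1.0, 2), (2.5, 1)]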
def directly_follows(trace):
"""
Get the directly-follows relations given a list of activities
Parameters
--------------
trace
List of activities
Returns
--------------
rel
Directly-follows relations inside the trace
"""
return set((trace[i], trace[i+1]) for i in range(len(trace)-1)) | 1e93d16a3bd002aa29b43309dc0a32360e2046db | 15,399 |
import time
def wait(sentry, timeout=900):
"""Waits for all units to be active/idle."""
def check_status():
status = sentry.get_status()
for service_name in sentry.service_names:
service = status.get(service_name, {})
for unit_name, unit in service.items():
if not unit['agent-status']:
return False
if not unit['workload-status']:
return False
if unit['agent-status'].get('current') != 'idle':
return False
if unit['workload-status'].get('current') != 'active':
return False
return True
t0 = time.time()
while time.time() - t0 < timeout:
if check_status():
return
time.sleep(1)
raise TimeoutError() | a0072f863f5afbde2c0b3b749974fa38f2cf5c55 | 15,401 |
def _get2DOverlapBox(box1, box2):
"""Get 2D box where the two boxes overlap"""
result = {
'left': max(box1['left'], box2['left']),
'top': max(box1['top'], box2['top']),
'right': min(box1['right'], box2['right']),
'bottom': min(box1['bottom'], box2['bottom']),
}
# ensure that right>=left and bottom>=top
result['right'] = max(result['left'], result['right'])
result['bottom'] = max(result['top'], result['bottom'])
return result | 5c382ee3a651c24e812f43fdac9f82949361cf96 | 15,407 |
def squares_won(microboard, player):
"""
Return the number of squares won by the player on the microboard.
"""
return sum(map(lambda line: line.count(player), microboard.export_grid())) | ecbef6669badfb9ef597194892126ae452f01b2c | 15,409 |
def least_distance_only(annotation, new_annotations):
"""Condition function to keep only smallest distance annotation per image
Args:
annotation: Current annotation being examined
new_annotations: Dict of new annotations kept by image_name
Returns: True if annotation image is not yet in new_annotations or distance
is less than previous value. False otherwise.
"""
image_name = annotation["image_name"]
if image_name in new_annotations:
return annotation["distance"] < new_annotations[image_name]["distance"]
else:
return True | d9feeaac699a2527f43c96c65a6d61f4bfe1a2a9 | 15,410 |
import math
def nernst_potential(ion_conc_out, ion_conc_in, charge, T,
constants=None, units=None, backend=math):
"""
Calculates the Nernst potential using the Nernst equation for a particular
ion.
Parameters
----------
ion_conc_out: float with unit
Extracellular concentration of ion
ion_conc_in: float with unit
Intracellular concentration of ion
charge: integer
Charge of the ion
T: float with unit
Absolute temperature
constants: object (optional, default: None)
constant attributes accessed:
F - Faraday constant
R - Ideal Gas constant
units: object (optional, default: None)
unit attributes: coulomb, joule, kelvin, mol
backend: module (optional, default: math)
module used to calculate log using `log` method, can be substituted
with sympy to get symbolic answers
Returns
-------
Membrane potential
"""
if constants is None:
F = 96485.33289
R = 8.3144598
if units is not None:
F *= units.coulomb / units.mol
R *= units.joule / units.kelvin / units.mol
else:
F = constants.Faraday_constant
R = constants.molar_gas_constant
return (R * T) / (charge * F) * backend.log(ion_conc_out / ion_conc_in) | 2a2171df2b6fc6789f7a8d4add044b70404f49ed | 15,425 |
def _get_ch_type_mapping(fro='mne', to='bids'):
"""Map between BIDS and MNE nomenclatures for channel types.
Parameters
----------
fro : str
Mapping from nomenclature of `fro`. Can be 'mne', 'bids'
to : str
Mapping to nomenclature of `to`. Can be 'mne', 'bids'
Returns
-------
mapping : dict
Dictionary mapping from one nomenclature of channel types to another.
If a key is not present, a default value will be returned that depends
on the `fro` and `to` parameters.
Notes
-----
For the mapping from BIDS to MNE, MEG channel types are ignored for now.
Furthermore, this is not a one-to-one mapping: Incomplete and partially
one-to-many/many-to-one.
Bio channels are supported in mne-python and are converted to MISC
because there is no "Bio" supported channel in BIDS.
"""
if fro == 'mne' and to == 'bids':
mapping = dict(eeg='EEG', misc='MISC', stim='TRIG', emg='EMG',
ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG',
resp='RESP', bio='MISC', dbs='DBS',
# MEG channels
meggradaxial='MEGGRADAXIAL', megmag='MEGMAG',
megrefgradaxial='MEGREFGRADAXIAL',
meggradplanar='MEGGRADPLANAR', megrefmag='MEGREFMAG',
ias='MEGOTHER', syst='MEGOTHER', exci='MEGOTHER')
elif fro == 'bids' and to == 'mne':
mapping = dict(EEG='eeg', MISC='misc', TRIG='stim', EMG='emg',
ECOG='ecog', SEEG='seeg', EOG='eog', ECG='ecg',
RESP='resp',
# No MEG channels for now
# Many to one mapping
VEOG='eog', HEOG='eog', DBS='dbs')
else:
raise ValueError('Only two types of mappings are currently supported: '
'from mne to bids, or from bids to mne. However, '
'you specified from "{}" to "{}"'.format(fro, to))
return mapping | b90bd99da3ad0a59fcf3fcd38f04670ba5ed16b0 | 15,430 |
def get_deid_field_dict(item):
"""
Return a dictionary with custom fields from the DeID Upload metadata.
:param item: the item with data.
:returns: a dictionary of key-value pairs.
"""
deid = item.get('meta', {}).get('deidUpload', {})
if not isinstance(deid, dict):
deid = {}
result = {}
for k, v in deid.items():
result['CustomField.%s' % k] = str(v).replace('|', ' ')
return result | 4de64e7dc205687c193a70c4e4dcfb6d30d436c3 | 15,435 |
def _split_constraints(constraints, type_):
"""Split list of constraints in two list.
The first list contains all constraints of type and the second the rest.
"""
filtered = [c for c in constraints if c["type"] == type_]
rest = [c for c in constraints if c["type"] != type_]
return filtered, rest | 28a54eb323dc61ef69fd57a01eacb26689290385 | 15,436 |
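# Example for _split_constraints() (constraint dicts invented for this sketch): constraints of the
# requested type come first, everything else second.
cons = [{"type": "eq", "fun": "f1"}, {"type": "ineq", "fun": "f2"}, {"type": "eq", "fun": "f3"}]
_split_constraints(cons, "eq")
# ([{'type': 'eq', 'fun': 'f1'}, {'type': 'eq', 'fun': 'f3'}], [{'type': 'ineq', 'fun': 'f2'}])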
def scrub(txt):
"""Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
return txt.replace(' ','_').replace('-', '_').lower() | 32546e1fa96337b351ef4dabc020d65659f8f2bd | 15,437 |
import functools
def cached_property(inputs=None):
"""Returns a cached proeprty that is calculated once.
If inputs are specified, then if those properties change the propery is
recalculated.
Usage is as follows; given a class, you can declare a cached property with
the `@cached_property` decorator:
```python
class Swallow:
def __init__(self, laden):
self.mass = 5
self.laden = laden
@cached_property(['mass', 'laden'])
def air_speed(self):
mass = self.mass + (16 if self.laden else 0)
time.sleep(100) # must sleep after flying
return mass / 400
```
you can do the following:
```
s = Swallow(laden=False)
s.air_speed # will be slow first time
s.air_speed # returns instantly using cache
s.laden = True # invalidate cache
s.air_speed # will recalculate
```
i.e. the `air_speed` will be lazily recalculated if `self.mass` or
`self.laden` change.
Parameters:
inputs - dependencies which should be checked for changes
to determine whether to recalculate the property. If None then
this property is only lazily calculated once.
"""
# Handle defaults
inputs = inputs or []
# Wrap property method
def smart_cached_property(func):
@functools.wraps(func)
def get(self):
# Determine whether we can use the cached value
input_values = dict((k, getattr(self, k)) for k in inputs)
try:
# Pull from cache if possible
x = self._property_cache[func]
if input_values == self._property_input_cache[func]:
return x
except AttributeError:
# We haven't created the property cache yet
self._property_cache = {}
self._property_input_cache = {}
except KeyError:
# Input cache has been invalidated
pass
# Recalculate value
x = self._property_cache[func] = func(self)
self._property_input_cache[func] = input_values
return x
return property(get)
return smart_cached_property | 25c8291e4ba798727b6cf14161393085b269cc3f | 15,441 |
def binarize(y, thres=3):
"""Given threshold, binarize the ratings.
"""
y[y< thres] = 0
y[y>=thres] = 1
return y | 385a10a652aa3a89874ee65520511f1554295ffe | 15,443 |
from typing import List
def _make_array(parts: List[str]) -> str:
"""
Utility to format an array of strings for passing to command line.
:param parts: List of strings.
:return: Formatted string.
"""
return "\"" + ",".join(parts) + "\"" | 3c364ee9b483274c2aad1f8df6afcebaabd09ed1 | 15,454 |
def make_snake_case(text: str) -> str:
"""
A very basic way to convert some text into snake case.
Strips out any non a-z, 0-9 characters.
:param text:
:return: a string which snake cases the text provided
"""
chars = 'abcdefghijklmnopqrstuvwxyz1234567890_'
unclean = text.lower().strip().replace(' ', '_')
return ''.join(e for e in unclean if e in chars) | cd4c46918da543f3f537020cf1e084c54182229e | 15,459 |
def annalistuser_create_values(
coll_id="testcoll", user_id="testuser",
user_name="Test User",
user_uri="mailto:[email protected]",
user_permissions=["VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", "ADMIN"]
):
"""
Values used when creating a user record
"""
d = (
{ 'annal:type': "annal:User"
, 'rdfs:label': user_name
, 'rdfs:comment': "User %s: permissions for %s in collection %s"%(user_id, user_name, coll_id)
, 'annal:user_uri': user_uri
, 'annal:user_permission': user_permissions
})
return d | e28b27e0d716123efd4738d976385ef5f703154e | 15,464 |
def identity(x):
"""Simple identity
"""
return x | abebf1ea2ef1579164a9ead43348c10fbfc1a43e | 15,469 |
from typing import Mapping
def is_upload(association):
"""
Indicates whether the association corresponds to an upload object.
"""
upload_keys = set([
'created_at', 'id', 'job_id', 'updated_at', 'upload_content_type',
'upload_file_name', 'upload_file_size', 'upload_updated_at'
])
association_value = association.value
return (isinstance(association_value, Mapping)
and
association_value.keys() == upload_keys) | d2adf1ea3077b2021a448f7f5ecbb28ab342b3cc | 15,470 |
def lenprefix(data, nbytes=2):
"""Prefix `data` with its length, in `nbytes` big-endian bytes.
If `data` is a string, it is first converted to bytes as UTF-8.
"""
assert type(data) in (str, bytes)
if type(data) is str:
data = bytes(data, "utf8")
return len(data).to_bytes(nbytes, "big") + data | b59e9aff1c7500fdcfa1482012ee9bab16c04022 | 15,471 |
from typing import List
def _create_path_from_parts(path_parts: List[str]) -> str:
"""Creates the typical path format from a list of the individual parts
Args:
path_parts (List[str]): list containing the parts of a path
Ex.: ['home', 'usr', 'dir1']
Returns:
str: Concatenation of the parts to a path format
Ex..: home/usr/dir
"""
return '/'.join(path_parts) | 4e8f38f98c2d5da9db5d4607b49c0a70cc8c9c33 | 15,474 |
def hamming_distance(w1: str, w2: str)->int:
"""Compute the hamming distance between the given strings"""
d = 0
for i in range(len(w1)):
if i >= len(w2):
return d + len(w1) - len(w2)
if w1[i] != w2[i]:
d += 1
return d | 3eda58c4c56a1b8fbb79719051c650884a396f68 | 15,476 |
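# Example for hamming_distance(): counts positions where the two strings differ.
hamming_distance("karolin", "kathrin")  # 3
hamming_distance("abcd", "ab")          # 2 (leftover characters in the longer first string count)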