def dict2cfgString(dictionary, separator="\n", assigner="="):
"""
Converts a dictionary into a string
Parameters
----------
dictionary : dict
The dictionary to be transformed.
separator : str, optional
The character to be used to separate individual
entries. The default is "\n".
assigner: str, optional
The character to represent the assignment from the key
to the value. The default is "=".
Returns
-------
str
"""
return "{}".format(separator).join([f"{k}{assigner}{v}" for k, v in dictionary.items()]) | 12c1f3b102429c22d1bb15714631908be41a63f2 | 22,010 |
def convert_indentation(indentation):
"""
Converts integer- or string- indentation into string indentation for prepending.
:param int | str indentation:
:return: str
"""
return " " * indentation if isinstance(indentation, int) else indentation | 97b223e0b1d3a210e0d3cc875d6b7bd4d4486d73 | 22,015 |
def intersect(l0, l1):
"""Given two lists return the intersection."""
    return [e for e in l0 if e in l1]

import re
def seems_like_section_name(line):
"""Check whether `line` starts with 'Para' or ends with ':', ignoring case and whitespace."""
return bool(
re.search(r'(^[^a-záéíóúü0-9]*para\b|:\s*$)', line, re.IGNORECASE)
    )
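# Illustrative usage (added example): a leading "Para" or a trailing colon marks a section name.
# >>> seems_like_section_name("  PARA empezar")
# True
# >>> seems_like_section_name("Ingredients:")
# True
# >>> seems_like_section_name("la paradoja")
# False
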
import copy
def get_recurs_class(g, derivLink):
"""Find the recurs_class property in the contents.
Return its value and the dictionary with recurs_value removed."""
recursClass = 0
if derivLink['content'] is None or len(derivLink['content']) <= 0:
return 0, derivLink
newDerivLink = copy.deepcopy(derivLink)
for iObj in range(len(newDerivLink['content']))[::-1]:
obj = newDerivLink['content'][iObj]
if obj['name'] == 'recurs_class':
try:
recursClass = int(obj['value'])
except ValueError:
g.raise_error('Incorrect recurs_class value: ' +
obj['value'])
newDerivLink['content'].pop(iObj)
    return recursClass, newDerivLink

def city_state(city, state, population=0):
"""Return a string representing a city-state pair."""
output_string = city.title() + ", " + state.title()
if population:
output_string += ' - population ' + str(population)
    return output_string
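# Illustrative usage (added example):
# >>> city_state("new york", "new york", population=8336817)
# 'New York, New York - population 8336817'
# >>> city_state("reykjavik", "iceland")
# 'Reykjavik, Iceland'
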
import random
def coin_flip(p=0.5):
"""Simulate a coin flip."""
    # random.random() < p is True with probability p, so p is the
    # probability of the flip coming up True
    return random.random() < p
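# Quick sanity check (added example): over many flips the observed frequency of True
# should be close to p; the exact value varies from run to run.
# >>> sum(coin_flip(0.25) for _ in range(100000)) / 100000  # doctest: +SKIP
# 0.25  # approximately
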
def getBoundingBox(veclist):
"""Calculate bounding box (pair of vectors with minimum and maximum
coordinates).
>>> getBoundingBox([(0,0,0), (1,1,2), (0.5,0.5,0.5)])
((0, 0, 0), (1, 1, 2))"""
if not veclist:
# assume 3 dimensions if veclist is empty
return (0,0,0), (0,0,0)
# find bounding box
dim = len(veclist[0])
return (
tuple((min(vec[i] for vec in veclist) for i in range(dim))),
        tuple((max(vec[i] for vec in veclist) for i in range(dim))))

def feed_options_str(feed_options):
"""Convert a FeedOptions dict of values into an appropriate string value.
Amazon docs for VAT upload with details:
https://m.media-amazon.com/images/G/01/B2B/DeveloperGuide/vat_calculation_service__dev_guide_H383rf73k4hsu1TYRH139kk134yzs.pdf
(section 6.4)
Example:
feed_options = {
"shippingid": "283845474",
"totalAmount": 3.25,
"totalvatamount": 1.23,
"invoicenumber": "INT-3431-XJE3",
"documenttype": "CreditNote",
"transactionid": "amzn:crow:429491192ksjfhe39s",
}
        >>> print(feed_options_str(feed_options))
        "metadata:shippingid=283845474;metadata:totalAmount=3.25;metadata:totalvatamount=1.23;
        metadata:invoicenumber=INT-3431-XJE3;metadata:documenttype=CreditNote;
        metadata:transactionid=amzn:crow:429491192ksjfhe39s"
"""
if not feed_options:
return None
if not isinstance(feed_options, dict):
raise ValueError("`feed_options` should be a dict or None")
output = []
for key, val in feed_options.items():
outval = val
if outval is True or outval is False:
# Convert literal `True` or `False` to strings `"true"` and `"false"`
outval = str(outval).lower()
output.append(f"metadata:{key}={outval}")
return ";".join(output) | 995b2927efb94cd92733b1058f423e863ca9c6e2 | 22,034 |
from collections import OrderedDict
def create_param_dict(param_file_name, outputPrefix):
"""
Create a dictionary with the parameters and file with posterior density
:param param_file_name: original parameter file
:param outputPrefix: prefix given in ABCtoolbox config file for estimation
:return: param_dict: ordered dict with the parameters and file with posterior density
"""
    param_dict = OrderedDict()
    with open(param_file_name, "r") as param_file:
        for line in param_file:
            if "=" in line:
                param_dict[line.split("=")[0].strip()] = '{}model0_MarginalPosteriorDensities_Obs0.txt'.format(outputPrefix)
    return param_dict

def isempty(line):
    """Checks if a line is empty (contains only whitespace such as spaces, tabs or newlines)."""
    # str.strip() removes all whitespace characters, including "\r"
    return len(line.strip()) == 0

def filter_by_gender(df, male):
"""Filters the data by gender.
Args:
df: DataFrame.
male: True if male, False otherwise.
Returns:
DataFrame.
"""
gender = 1 if male else 0
    return df[df.male == gender]

def get_matched_dyads(
dyads,
d0_key="doctype",
d1_key="doctype",
d0_values=["foxnews"],
d1_values=["foxnews"],
):
"""Filter which returns dyads which match the specified conditions.
Args:
dyads (list): list of 2-item tuples. Each item is a dictionary (document).
d0_key (str)
d1_key (str)
d0_values (list): any values to match
d1_values (list): any values to match
Returns:
matched_dyads (list): filtered version of dyads
"""
matched_dyads = []
for dyad in dyads:
d0 = dyad[0]
d1 = dyad[1]
if d0[d0_key] in d0_values and d1[d1_key] in d1_values:
matched_dyads.append(dyad)
    return matched_dyads

def clean_consecutive_duplicates(
move_data, subset=None, keep='first', inplace=False
):
"""
    Removes consecutive duplicate rows of the Dataframe, optionally
    considering only a subset of the columns.
    Parameters
    ----------
    move_data : dataframe
        The input trajectory data
    subset : Array of Strings, optional, default None
        Specifies column label or sequence of labels, considered for
        identifying duplicates. By default all columns are used.
    keep : 'first', 'last', optional, default 'first'
        If keep is set to 'first', all the duplicates except for
        the first occurrence will be dropped.
        On the other hand if set to 'last', all duplicates except for
        the last occurrence will be dropped.
        (Note: unlike pandas.DataFrame.drop_duplicates, keep=False is
        not supported here; any value other than 'first' behaves as 'last'.)
inplace : boolean, optional, default False
if set to true the original dataframe will be altered,
the duplicates will be dropped in place,
otherwise a copy will be returned.
Returns
-------
dataframe or None
The filtered trajectories points without consecutive duplicates.
"""
if keep == 'first':
n = 1
else:
n = -1
if subset is None:
filter_ = (move_data.shift(n) != move_data).any(axis=1)
else:
filter_ = (move_data[subset].shift(n) != move_data[subset]).any(axis=1)
    return move_data.drop(index=move_data[~filter_].index, inplace=inplace)

from pathlib import Path
def hmm_data_exists(file_path: Path) -> bool:
"""
Checks if HMM data exists in the local data path.
:param file_path: Path to where `profiles.hmm` should be
:return: True if both the `hmm` directory and `profiles.hmm` exist, else False
"""
    return file_path.parent.is_dir() and file_path.is_file()

def get_program_number(txt_row):
""" Checks if the current line of text contains a program
definition.
Args:
txt_row (string): text line to check.
Returns:
An integer number. If program number cannot be found, or is
invalid in some way, a large negative number is returned.
"""
num = -9999
len_txt = len(txt_row)
if (len_txt < 2):
return num
if (txt_row[0] == "O"):
numeric_part = txt_row[1:len_txt]
try:
conv_num = int(numeric_part)
num = conv_num
except ValueError:
pass
    return num

def write_dfs_to_filepaths(dfs, filepaths):
"""
    Accepts a list of pandas dataframes - dfs
    and a parallel list of filepaths - pathlib Path objects.
    Writes the dataframes to the filepaths as CSVs with no index.
    Returns the number of files written (an integer).
"""
n = 0
for df, filepath in zip(dfs, filepaths):
if not filepath.exists():
filepath.parent.mkdir(parents=True, exist_ok=True)
df.to_csv(filepath, index=False)
n += 1
    return n

def gnome_sort(lst: list) -> list:
"""
Pure implementation of the gnome sort algorithm in Python
Take some mutable ordered collection with heterogeneous comparable items inside as
arguments, return the same collection ordered by ascending.
Examples:
>>> gnome_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> gnome_sort([])
[]
>>> gnome_sort([-2, -5, -45])
[-45, -5, -2]
>>> "".join(gnome_sort(list(set("Gnomes are stupid!"))))
' !Gadeimnoprstu'
"""
if len(lst) <= 1:
return lst
i = 1
while i < len(lst):
if lst[i - 1] <= lst[i]:
i += 1
else:
lst[i - 1], lst[i] = lst[i], lst[i - 1]
i -= 1
if i == 0:
i = 1
    return lst

def get_numeric_trace_attribute_value(trace, trace_attribute):
"""
Get the value of a numeric trace attribute from a given trace
Parameters
------------
    trace
        Trace of the log
    trace_attribute
        Name of the numeric trace attribute to extract
Returns
------------
value
Value of the numeric trace attribute for the given trace
"""
if trace_attribute in trace.attributes:
return trace.attributes[trace_attribute]
raise Exception("at least a trace without trace attribute: " + trace_attribute) | fd757861972dce8d9624efa13773bd4624cf9ace | 22,058 |
def get_in_out_tensors(graph):
"""
Get the input and output tensors from the TensorFlow graph 'graph'.
"""
# Get the graph nodes that perform computation on tensors
ops = graph.get_operations()
# Initialize input and output tensors
inputs = []
outputs_set = set(ops)
# Process operations
    for op in ops:
        # The input nodes are ops that take no inputs (constants excluded)
        if len(op.inputs) == 0 and op.type != 'Const':
            inputs.append(op)
        # The output nodes are ops whose result feeds no other op: remove the
        # producer of every consumed tensor from the candidate output set
        else:
            for input_tensor in op.inputs:
                if input_tensor.op in outputs_set:
                    outputs_set.remove(input_tensor.op)
outputs = list(outputs_set)
    return inputs, outputs

import re
def _match_channel_pattern(channel_name):
"""Returns a regex match against the expected channel name format.
The returned match object contains three named groups: source, detector,
and wavelength. If no match is found, a ValueError is raised.
Parameters
----------
channel_name : str
The name of the channel.
"""
rgx = r'^S(?P<source>\d+)_D(?P<detector>\d+) (?P<wavelength>\d+)$'
match = re.fullmatch(rgx, channel_name)
if match is None:
msg = f'channel name does not match expected pattern: {channel_name}'
raise ValueError(msg)
    return match
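# Illustrative usage (added example): names look like "S<source>_D<detector> <wavelength>".
# >>> m = _match_channel_pattern('S3_D12 760')
# >>> m.group('source'), m.group('detector'), m.group('wavelength')
# ('3', '12', '760')
# >>> _match_channel_pattern('bad name')  # doctest: +IGNORE_EXCEPTION_DETAIL
# Traceback (most recent call last):
# ValueError: channel name does not match expected pattern: bad name
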
from datetime import datetime
import pytz
def with_gmt_offset(timezones, now=None):
"""
    Given a list of timezones (either strings or timezone objects),
    return a list of choices with
    * values equal to what was passed in
    * display strings formatted with GMT offsets and without
      underscores. For example: "GMT-05:00 America/New York"
* sorted by their timezone offset
"""
now = now or datetime.utcnow()
_choices = []
for tz in timezones:
tz_str = str(tz)
delta = pytz.timezone(tz_str).utcoffset(now)
display = "GMT{sign}{gmt_diff} {timezone}".format(
sign='+' if delta == abs(delta) else '-',
gmt_diff=str(abs(delta)).zfill(8)[:-3],
timezone=tz_str.replace('_', ' ')
)
_choices.append((delta, tz, display))
_choices.sort(key=lambda x: x[0])
choices = [(one, two) for zero, one, two in _choices]
    return choices

def apply_activation_forward(forward_pass):
"""Decorator that ensures that a layer's activation function is applied after the layer during forward
propagation.
"""
def wrapper(*args):
output = forward_pass(args[0], args[1])
if args[0].activation:
return args[0].activation.forward_propagation(output)
else:
return output
    return wrapper

def get_option_name(name): # type: (str) -> str
"""Return a command-line option name from the given option name."""
if name == 'targets':
name = 'target'
    return f'--{name.replace("_", "-")}'
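# Illustrative usage (added example):
# >>> get_option_name('log_file')
# '--log-file'
# >>> get_option_name('targets')  # special-cased to the singular form
# '--target'
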
def get_latest_file_url(files: dict, starts_with: str, file_extension: str) -> str:
"""
Get the url to a file which should start and have a specific file extension.
Parameters
----------
files : dict
Keys are the filenames and the values are the urls.
starts_with : str
Start of the filename.
file_extension : str
End of the filename. For example the file extension.
Returns
-------
str
URL of the most recent file.
"""
    filtered_files = {
        fname: link
        for fname, link in files.items()
        if fname.startswith(starts_with)
        and file_extension in fname
        and "volume" not in fname
    }
if len(filtered_files) == 0:
return ""
newest_file = sorted(filtered_files.keys())[-1]
link = files.get(newest_file, "")
    return link

def _extract_from_subworkflow(vs, step):
"""Remove internal variable names when moving from sub-workflow to main.
"""
substep_ids = set([x.name for x in step.workflow])
out = []
for var in vs:
internal = False
parts = var["id"].split("/")
if len(parts) > 1:
if parts[0] in substep_ids:
internal = True
if not internal:
var.pop("source", None)
out.append(var)
    return out

def chebyshev_distance(a, b):
"""
Calculate the Chebyshev distance of two vectors.
"""
    return max(abs(x - y) for x, y in zip(a, b))
def sort(li):
"""
    Performs a mini radix sort on the candidate documents using stable sorts:
    first on document id, then on document ranking, then on window length,
    and finally on the number of present words. As sorted() is stable, ties
    at each stage preserve the ordering established by the earlier keys.
"""
#first sort on document id
li = sorted(li,key=lambda x: x[0])
#then sort on document ranking
li = sorted(li,key=lambda x: x[1], reverse=True)
#sort on window length
li = sorted(li,key=lambda x: x[3])
#then sort on number of present words
li = sorted(li,key=lambda x: x[2], reverse=True)
    return li

def lammps_equilibrated_npt(job):
"""Check if the lammps equilibration step has run and passed is_equilibrated for the job."""
    return job.isfile("equilibrated_npt.restart")

def _flatten(d):
""" Pack a hierarchical dictionary of variables into a list
Sorting is important as it ensures the function is called with
the inputs in the same order each time!
"""
l = []
# This sorting is important!
for (k,v) in sorted(d.items(), key=lambda t: t[0]):
if isinstance(v, dict):
lv = _flatten(v)
for v2 in lv:
l.append(v2)
else:
l.append(v)
    return l

def get_logging_options_string(args):
""" This function extracts the flags and options specified for logging options
added with add_logging_options. Presumably, this is used in "process-all"
scripts where we need to pass the logging options to the "process" script.
Args:
args (namespace): a namespace with the arguments added by add_logging_options
Returns:
string: a string containing all logging flags and options
"""
args_dict = vars(args)
# first, pull out the text arguments
logging_options = ['log_file', 'logging_level', 'file_logging_level',
'stdout_logging_level', 'stderr_logging_level']
# create a new dictionary mapping from the flag to the value
logging_flags_and_vals = {'--{}'.format(o.replace('_', '-')) : args_dict[o]
for o in logging_options if len(args_dict[o]) > 0}
s = ' '.join("{} {}".format(k,v) for k,v in logging_flags_and_vals.items())
# and check the flags
if args.log_stdout:
s = "--log-stdout {}".format(s)
if args.no_log_stderr:
s = "--no-log-stderr {}".format(s)
    return s

def to_bytes(text, encoding='utf-8'):
"""Make sure text is bytes type."""
if not text:
return text
if not isinstance(text, bytes):
text = text.encode(encoding)
    return text

def email_get_unread(imap, from_email_address):
"""Returns (status, list of UIDs) of unread emails from a sending email address.
"""
search = '(UNSEEN UNFLAGGED FROM "{}")'.format(from_email_address)
status, response = imap.search(None, search)
if status != 'OK':
return status, response
# Return status and list of unread email UIDs.
    return status, response[0].split()

def _lambda_risk_mapper(risk_level: int) -> str:
"""Helper methods
Parameters
----------
risk_level: int
number from range 0-4 represents risk factor for given vault
Returns
-------
string:
text representation of risk
"""
mappings = {0: "Non Eligible", 1: "Least", 2: "Low", 3: "Medium", 4: "High"}
    return mappings.get(risk_level, "Non Eligible")

import math
def torsion_angle(c1, c2, c3, c4):
"""
float <- torsion_angle(a, b, c, d)
returns the torsion angle in degrees between 3D pts a,b,c,d
"""
v1 = (c1[0]-c2[0], c1[1]-c2[1], c1[2]-c2[2])
v2 = (c2[0]-c3[0], c2[1]-c3[1], c2[2]-c3[2])
v3 = (c3[0]-c4[0], c3[1]-c4[1], c3[2]-c4[2])
p = (v2[1]*v1[2] - v1[1]*v2[2],
v1[0]*v2[2] - v2[0]*v1[2],
v2[0]*v1[1] - v1[0]*v2[1])
q = (v3[1]*v2[2] - v2[1]*v3[2],
v2[0]*v3[2] - v3[0]*v2[2],
v3[0]*v2[1] - v2[0]*v3[1])
n = 1.0 / math.sqrt( p[0]*p[0] + p[1]*p[1] + p[2]*p[2] )
p = (p[0]*n, p[1]*n, p[2]*n )
n = 1.0 / math.sqrt( q[0]*q[0] + q[1]*q[1] + q[2]*q[2] )
q = (q[0]*n, q[1]*n, q[2]*n )
xtheta = p[0]*q[0] + p[1]*q[1] + p[2]*q[2]
if xtheta > 1.0: xtheta = 1.0
if xtheta < -1.0: xtheta = -1.0
    theta = math.acos(xtheta) * 57.29578  # 57.29578 ~= 180/pi: radians -> degrees
absth = math.fabs(theta)
if absth < 0.001:
return 0.0
elif math.fabs(absth - 180.0) < 0.001:
return 180.0
s = v1[0]*q[0] + v1[1]*q[1] + v1[2]*q[2]
if s < 0.0:
theta = 360.0 - theta
if theta > 180.0:
theta = theta - 360.0
    return theta
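# Illustrative usage (added example): four points tracing a right-angled zig-zag
# give a torsion of 90 degrees (up to floating-point rounding).
# >>> round(torsion_angle((0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 1)), 4)
# 90.0
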
def accumulate(instructions: list) -> dict:
"""
Read and execute instructions until an infinite loop if found:
acc +12 -> add 12 to 'acc'
jmp +48 -> jump to the instruction located at 'index' + 48
nop -56 -> do nothing & go to the next instruction
if an instruction has already been executed, stop and return the current 'acc' value
"""
acc = 0
index = 0
    seen = set()
while index < len(instructions):
if index in seen:
return {'acc': acc, 'infinite': True}
        seen.add(index)
op, val = instructions[index].split(' ')
if op == 'acc':
acc += int(val)
elif op == 'jmp':
index += int(val)
continue
index += 1
    return {'acc': acc, 'infinite': False}
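# Illustrative usage (added example), using the well-known Advent of Code 2020 day 8
# sample program, which loops forever with acc == 5:
# >>> program = ['nop +0', 'acc +1', 'jmp +4', 'acc +3', 'jmp -3',
# ...            'acc -99', 'acc +1', 'jmp -4', 'acc +6']
# >>> accumulate(program)
# {'acc': 5, 'infinite': True}
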
def build_sampler( sampler_class, pe_method, force_method, T = 1.0e-4, \
dt = 1.0e-1, traj_len = 100, absxmax = 1.0e2, dt_max = None, min_rate = 0.6, \
max_rate = 0.7, gaussianprior_std = None ):
"""Builds a sampling.Sampler class object of type sampler_class.
Args:
sampler_class : Sampler class from module sampling. Eg. sampling.Hmc
pe_method : A method for evaluating the potential energy.
force_method : A method for evaluating the forces.
    T (float) : Dimensionless temperature of the system: T=1/beta. (Default 1.0e-4).
    dt (float) : Initial time step (or step size). This will be updated algorithmically, but a
        good starting point saves time. (Default 1.0e-1).
traj_len (int) : The number of time steps in a single trajectory. (Default 100).
absxmax (single float or numpy array of floats, with length 1 or length numdim) : During the
main calculation, the sampler is restricted to a region x in [-absxmax,absxmax].
(Default: 1.0e2).
dt_max (float) : maximum step size (time step). (Default: median(absxmax), which is set in
module sampling.)
min_rate (float) : minimum acceptance rate of trajectories. Used for setting step size (time
step). (Default: 0.6. The optimal acceptance rate for HMC on a multivariate Gaussian is 0.65
http://www.mcmchandbook.net/HandbookChapter5.pdf, section 5.4.4.3).
max_rate (float) : maximum acceptance rate of trajectories. Used for setting step size (time
step). (Default 0.7. The optimal acceptance rate for HMC on a multivariate Gaussian is 0.65
http://www.mcmchandbook.net/HandbookChapter5.pdf, section 5.4.4.3).
gaussianprior_std (single float or numpy array of floats, with length 1 or length numdim) : If
this is set to a real value then an additional term is applied to (H)MC acceptance/rejection,
such that the target distribution is proportional to a multivariate Gaussian with this
standard deviation for each dimension. (Default: None.)
Return:
sampling.Sampler class object of type sampler_class.
"""
sampler = sampler_class( pe_method, force_method, dt, traj_len, absxmax, \
dt_max, 1.0/T, min_rate, max_rate, gaussianprior_std )
    return sampler

import time
from datetime import datetime
def SecondsToZuluTS(secs=None):
"""Returns Zulu TS from unix time seconds.
If secs is not provided will convert the current time.
"""
    # secs=0 (the epoch) is a valid timestamp, so test against None explicitly
    if secs is None:
        secs = int(time.time())
    return datetime.utcfromtimestamp(secs).strftime("%Y-%m-%dT%H:%M:%SZ")

def _LookupMeOrUsername(cnxn, username, services, user_id):
"""Handle the 'me' syntax or lookup a user's user ID."""
if username.lower() == 'me':
return user_id
    return services.user.LookupUserID(cnxn, username)

import hashlib
import ctypes
def size_t_hash(key):
"""Hash the key using size_t.
Args:
key (str): The key to hash.
Returns:
str: The hashed key.
"""
hash_digest = hashlib.blake2b(key.encode()).hexdigest() # pylint: disable=no-member
    return '%u' % ctypes.c_size_t(int(hash_digest, 16)).value

def check_scenarios(scenes):
"""
Make sure all scenarios have unique case insensitive names
"""
assert len(scenes) == len(dict((k.lower(), v) for k, v in scenes))
    return scenes

def fix_tract(t):
"""Clean up census tract names.
:param t: Series of string tract names
:returns: Series of cleaned tract names
"""
    if isinstance(t, str):
        return t
    return str(t).rstrip("0").rstrip(".")

def moving_avg_features(df):
"""
Function to calculate the exponential moving averages and moving averages over different intervals of days
Input: Dataframe
Output: Dataframe with new moving average features
"""
df['EMA_9'] = df['Close'].ewm(9).mean().shift()
df['EMA_9'] = df['EMA_9'].fillna(0)
df['SMA_5'] = df['Close'].rolling(5).mean().shift()
df['SMA_5'] = df['SMA_5'].fillna(0)
df['SMA_10'] = df['Close'].rolling(10).mean().shift()
df['SMA_10'] = df['SMA_10'].fillna(0)
df['SMA_15'] = df['Close'].rolling(15).mean().shift()
df['SMA_15'] = df['SMA_15'].fillna(0)
df['SMA_30'] = df['Close'].rolling(30).mean().shift()
df['SMA_30'] = df['SMA_30'].fillna(0)
    return df

def _old_style_nesting(vocab):
"""Detect old-style nesting (``dict[str, List[Tuple[str, Field]]]``)."""
return isinstance(vocab, dict) and \
        any(isinstance(v, list) for v in vocab.values())

from datetime import datetime
def convert_date(date_string):
"""Convert the date_string to dd-mm-YYYY format."""
date = datetime.strptime(date_string, "%d.%m.%Y")
    return date.strftime('%d-%m-%Y')
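# Illustrative usage (added example):
# >>> convert_date("25.12.2023")
# '25-12-2023'
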
def squeeze_whitespace(text):
"""Remove extra whitespace, newline and tab characters from text."""
    return ' '.join(text.split())

def iso_string_to_sql_date_sqlite(x: str) -> str:
"""
Provides SQLite SQL to convert a column to a ``DATE``, just by taking the
date fields (without any timezone conversion). The argument ``x`` is the
SQL expression to be converted (such as a column name).
"""
return f"DATE(SUBSTR({x}, 1, 10))" | 349c560589d4f03938538f74dbc188577ac63a2d | 22,143 |
from pathlib import Path
def save_NN_sequential(model, model_name):
"""
Saving a Neural Network as h5 file
:param model: sequential model
:param model_name: name to save the model
:return: True
"""
    file_name = 'Model_' + model_name
    file_path = Path().joinpath('Pickles', file_name + ".h5")
    model.save(file_path)
    print("The file", file_path, "was saved.")
    return True

def get_second_validate_param(tel_num):
"""
Assemble param for get_second_validate
:param tel_num: Tel number
:return: Param in dict
"""
param_dict = dict()
param_dict['act'] = '1'
param_dict['source'] = 'wsyytpop'
param_dict['telno'] = tel_num
param_dict['password'] = ''
param_dict['validcode'] = ''
param_dict['authLevel'] = ''
param_dict['decode'] = '1'
param_dict['iscb'] = '1'
    return param_dict

def typeless_equals(entity1, entity2, check_class, check_instance):
"""
Checks if entities are equal. The check is different whether
entities are classes or instances, which is specified in
corresponding parameters. If neither checks are specified,
True is returned
"""
if check_class:
return isinstance(entity1, entity2)
if check_instance:
return entity1 == entity2
    return True

def node_name(node):
"""Return lxml node name without the namespace prefix."""
try:
return node.tag.split('}')[-1]
except AttributeError:
        pass

def get_most_read_or_features(
amb_sams: list,
counts: dict) -> list:
"""
Get the samples that have the most
counts (reads of features).
Parameters
----------
amb_sams : list
Sample IDs.
counts : dict
Count per Sample ID.
Returns
-------
cur_best_samples : list
Selected samples (with max reads/features).
"""
cur_counts = dict((amb_sam, counts[amb_sam]) for amb_sam in amb_sams)
count_max = max(cur_counts.values())
cur_best_samples = [amb_sam for amb_sam in amb_sams
if cur_counts[amb_sam] == count_max]
    return cur_best_samples

def find_index_unsafe(val, bin_edges):
"""Find bin index of `val` within binning defined by `bin_edges`.
Validity of `val` and `bin_edges` is not checked.
Parameters
----------
val : scalar
Assumed to be within range of `bin_edges` (including lower and upper
bin edges)
bin_edges : array
Returns
-------
index
See also
--------
find_index : includes bounds checking and handling of special cases
"""
# Initialize to point to left-most edge
left_edge_idx = 0
# Initialize to point to right-most edge
right_edge_idx = len(bin_edges) - 1
while left_edge_idx < right_edge_idx:
# See where value falls w.r.t. an edge ~midway between left and right edges
# ``>> 1``: integer division by 2 (i.e., divide w/ truncation)
test_edge_idx = (left_edge_idx + right_edge_idx) >> 1
# ``>=``: bin left edges are inclusive
if val >= bin_edges[test_edge_idx]:
left_edge_idx = test_edge_idx + 1
else:
right_edge_idx = test_edge_idx
# break condition of while loop is that left_edge_idx points to the
# right edge of the bin that `val` is inside of; that is one more than
# that _bin's_ index
    return left_edge_idx - 1
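# Illustrative usage (added example): binary search for the bin containing the value.
# >>> find_index_unsafe(2.5, [0, 1, 2, 3, 4])
# 2
# >>> find_index_unsafe(0, [0, 1, 2, 3, 4])   # left edges are inclusive
# 0
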
import time
def func_to_time(x):
"""This is sleeps for x seconds for timing tests."""
time.sleep(x)
    return 'Slept for {0} second(s)'.format(x)

def to_float_str(
val):
"""to_float_str
convert the float to a string with 2 decimal points of
precision
:param val: float to change to a 2-decimal string
"""
return str("%0.2f" % float(val)) | c15c4e43e788ab416170f21413906fc2d218b345 | 22,160 |
def limits2slice(limits):
"""
Create a set of slice objects given an array of min, max limits.
Parameters
----------
limits: tuple, (ndarray, ndarray)
Two tuple consisting of array of the minimum and maximum indices.
Returns
-------
slices : list
List of slice objects which return points between limits
See Also
--------
find_limits : Find the minimum and maximum limits from a list of points.
slice2limits : Find a minimum and maximum limits for a list of slices.
"""
mins, maxs = limits
    return tuple([slice(i, j + 1) for i, j in zip(mins, maxs)])
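# Illustrative usage (added example, assuming numpy arrays of min/max indices):
# >>> import numpy as np
# >>> limits2slice((np.array([1, 2]), np.array([3, 5])))
# (slice(1, 4, None), slice(2, 6, None))
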
def has_clockwise_numbering(coords):
""" tests if a polygon has clockwise vertex numbering
approach: Sum over the edges, (x2 − x1)(y2 + y1). If the result is positive the curve is clockwise.
from:
https://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order
:param coords: the list of (x,y) coordinates representing the polygon to be tested
:return: true if the polygon has been given in clockwise numbering
"""
total_sum = 0.0
p1 = coords[-1]
for p2 in coords:
x1, y1 = p1
x2, y2 = p2
total_sum += (x2 - x1) * (y2 + y1)
p1 = p2
    return total_sum > 0
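# Illustrative usage (added example): a unit square traversed origin -> up -> right -> down
# is clockwise; the reversed traversal is counter-clockwise.
# >>> has_clockwise_numbering([(0, 0), (0, 1), (1, 1), (1, 0)])
# True
# >>> has_clockwise_numbering([(1, 0), (1, 1), (0, 1), (0, 0)])
# False
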
def convert_to_float(value: str) -> float:
"""
Get the float value from, for example, "R9 323.46".
"""
    return float(value[1:].replace(' ', ''))

def sift(iterable, predicate):
"""
Sift an iterable into two lists, those which pass the predicate and those who don't.
:param iterable:
:param predicate:
:return: (True-list, False-list)
:rtype: tuple[list, list]
"""
t_list = []
f_list = []
for obj in iterable:
(t_list if predicate(obj) else f_list).append(obj)
    return (t_list, f_list)
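# Illustrative usage (added example): split numbers into evens and odds.
# >>> sift(range(10), lambda n: n % 2 == 0)
# ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
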
def compare_datetime(date, span):
"""
Compare information within datetime object with a span
Parameters
----------
date: datetime
Datetime to compare.
span: Span
Span to compare.
Returns
-------
bool
True if match.
"""
    return span.text in str(date)

def reduce_list(data_set):
""" Reduce duplicate items in a list and preserve order """
seen = set()
return [item for item in data_set if
            item not in seen and not seen.add(item)]

def validate_knot(knot):
""" Confirm a knot is in the range [0, 1]
Parameters
----------
knot : float
Parameter to verify
Returns
-------
bool
Whether or not the knot is valid
"""
    return (0.0 <= knot <= 1.0)

def _get_all_deps(*, deps, split_deps_keys = []):
"""Returns a list of all dependencies from a Label list and optional split attribute keys.
Args:
deps: Label list of (split) dependencies to traverse.
split_deps_keys: (optional) List of split attribute keys to use on split deps.
Returns:
List of all dependencies. If split_deps_keys is not provided, return deps.
"""
if type(deps) == "list":
return deps
if not split_deps_keys:
return deps.values()
all_deps = []
for split_deps_key in split_deps_keys:
all_deps += deps[split_deps_key]
    return all_deps

def get_time_string(codetime):
"""
Utility function that takes the codetime and
converts this to a human readable String.
Args:
codetime (`float`):
Code execution time in seconds (usually the difference of two time.time() calls)
Returns:
`str`: A string indicating the total execution time
"""
if codetime < 60.0:
retstr = 'Execution time: {0:.2f}s'.format(codetime)
elif codetime / 60.0 < 60.0:
mns = int(codetime / 60.0)
scs = codetime - 60.0 * mns
retstr = 'Execution time: {0:d}m {1:.2f}s'.format(mns, scs)
else:
hrs = int(codetime / 3600.0)
mns = int(60.0 * (codetime / 3600.0 - hrs))
scs = codetime - 60.0 * mns - 3600.0 * hrs
retstr = 'Execution time: {0:d}h {1:d}m {2:.2f}s'.format(hrs, mns, scs)
    return retstr
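# Illustrative usage (added example):
# >>> get_time_string(42.5)
# 'Execution time: 42.50s'
# >>> get_time_string(3725.5)
# 'Execution time: 1h 2m 5.50s'
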
def remove_duplicates(lst):
"""
This function removes all duplicate object from a list.
:param lst: A list.
:return: The same list, with all its elements appearing just once.
"""
if len(lst) == 1:
return lst
    return [i for n, i in enumerate(lst) if i not in lst[:n]]

def aln_abuts_unknown_bases(tx, fasta):
"""
Do any exons in this alignment immediately touch Ns?
:param tx: a GenePredTranscript object
:param fasta: pyfasta Fasta object for genome
:return: boolean
"""
chrom = tx.chromosome
for exon in tx.exon_intervals:
if exon.start == 0: # we are at the edge of the contig
left_base = None
else:
left_base = fasta[chrom][exon.start - 1]
if exon.stop >= len(fasta[chrom]): # we are at the edge of the contig
right_base = None
else:
right_base = fasta[chrom][exon.stop]
if left_base == 'N' or right_base == 'N':
return True
    return False

import json
import hashlib
def get_md5(obj, trans_func=None):
"""get a object md5, if this obj is not supported by `json.dumps` please provide a trains_func.
Args:
obj (object): obj to get md5
trans_func (function, optional): use this to trans obj to str. Defaults to None.
"""
if trans_func is None:
trans_func = json.dumps
obj_str = trans_func(obj)
hl = hashlib.md5()
hl.update(obj_str.encode(encoding='utf-8'))
    return hl.hexdigest()

import re
def extract_courts(s: str) -> list:
"""
Extract a list of court numbers listed in the statute's text.
Args:
s (str): The text of the statute that lists the court numbers.
Returns:
(list): A list court numbers, all cleaned up.
"""
my_s = re.sub(r'[^0-9\s]', '', s)
my_s = re.sub(r'\s{2,}', ' ', my_s)
courts = my_s.strip().split(' ')
result = []
for court in courts:
if court not in result:
result.append(court)
    return sorted(result)
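# Illustrative usage (added example): strips everything but digits and whitespace,
# de-duplicates, and sorts (as strings).
# >>> extract_courts("1, 3 and 2, 3")
# ['1', '2', '3']
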
def fa_attachment(extension):
"""
Add fontawesome icon if found. Else return normal extension as string.
:param extension: file extension
:return: matching fontawesome icon as string
"""
if extension == 'pdf':
return "<i class='fa fa-file-pdf-o fa-lg'></i>"
elif extension == 'jpg' or extension == 'png':
return "<i class='fa fa-picture-o fa-lg'></i>"
elif extension == 'doc' or extension == 'docx':
return "<i class='fa fa-file-word-o fa-lg'></i>"
elif extension == 'xls' or extension == 'xlsx':
return "<i class='fa fa-file-excel-o fa-lg'></i>"
elif extension == 'extern':
return "<i class='fa fa-external-link'></i>"
elif extension == 'zip':
return "<i class='fa fa-file-archive-o fa-lg'></i>"
else:
        return extension

def get_worksheet_keys(data_dict, result_info_key):
"""Gets sorted keys from the dict, ignoring result_info_key and 'meta' key
Args:
data_dict: dict to pull keys from
Returns:
list of keys in the dict other than the result_info_key
"""
keys = set(data_dict.keys())
keys.remove(result_info_key)
if 'meta' in keys:
keys.remove('meta')
    return sorted(keys)

def add_options(click_options):
"""
Decorator that adds multiple Click options to the decorated function.
The list is reversed because of the way Click processes options.
Note: This function has its origins in the
https://github.com/pywbem/pywbemtools project (Apache 2.0 license)
Parameters:
click_options (list): List of `click.option` objects.
"""
def _add_options(func):
"""
Apply the Click options to the function in reversed order.
"""
for option in reversed(click_options):
func = option(func)
return func
    return _add_options

import itertools
def _expand(the_set, expand_fn):
"""Returns a concatenation of the expanded sets.
I.e.
Returns a set of all elements returned by the expand_fn function for all
elements in the_set.
E.g.
With expand_fn = lambda x: (10*x, 100*x) and the_set = set([1, 2, 3])
this function returns set([10, 100, 20, 200, 30, 300])
Args:
the_set: A set of elements.
expand_fn: Function returning an interable given some element in the_set.
Returns: a concatenation of the expanded sets.
"""
    return set(itertools.chain(*[expand_fn(x) for x in the_set]))

import re
def ingredients_from_food(food):
"""
Extract ingredients from food description
:param food: A string, value from the Food column
:return: A list of ingredients (strings) or empty list
"Салат "Папарать-Кветка"(Говядина,ветчина,помидоры,огурцы)" -> ["говядина", "ветчина", "помидоры", "огурцы"]
"Капуста тушеная" -> []
"""
re_ingr = re.compile(r'\(([^\)]+)\)')
ingr = re_ingr.findall(food)
if ingr:
ingr_parts = ingr[0].split(',')
ingr_parts = [p.strip().lower() for p in ingr_parts]
return ingr_parts
else:
        return []

import json
def json_content(directory, file_name):
"""
This function gets the content of a json file and
returns as dictionary.
:param directory: String
The folder where the json file is located
:param file_name: String
The name of the json file
:return: dict
The content of the json file as python
dictionary
"""
    with open(directory + file_name + '.json', "r") as f:
        contents = json.load(f)
    return contents

def get_cai_ptr(X):
"""
Function gets the pointer from an object that supports the
__cuda_array_interface__. Raises TypeError if `X` does not support it.
"""
if hasattr(X, '__cuda_array_interface__'):
return X.__cuda_array_interface__['data'][0]
else:
raise TypeError("X must support `__cuda_array_interface__`") | 467e4437b34693e37f3f90e822899dc3a548710e | 22,209 |
def validate_float(s):
"""Convert s to float or raise a ValueError."""
try:
return float(s)
except ValueError:
        raise ValueError('Could not convert {0!r} to float'.format(s))

def show_element(elem):
""" Output whole a element as it is. """
    return elem

import re
import warnings
def parse_psp_name(psp_name):
"""
Parse the name of vasp's psp
Parameter
psp_name: str
The name of vasp's psp, e.g. GGA, LDA, potpaw_LDA
Return
psp_name_norm: str
The normalized psp name
"""
psp_name = psp_name.upper()
    psp_name_list = re.split(r'[._\-=+*\s]', psp_name)
flag_us = False
for psp_name_i in psp_name_list:
if "US" in psp_name_i:
flag_us = True
break
if "LDA" in psp_name_list:
if "52" in psp_name_list:
psp_name_norm = "POT_LDA_PAW_52"
elif "54" in psp_name_list:
psp_name_norm = "POT_LDA_PAW_54"
elif flag_us:
psp_name_norm = "POT_LDA_US"
else:
psp_name_norm = "POT_LDA_PAW"
elif "PBE" in psp_name_list:
if "52" in psp_name_list:
psp_name_norm = "POT_GGA_PAW_PBE_52"
elif "54" in psp_name_list:
psp_name_norm = "POT_GGA_PAW_PBE_54"
else:
psp_name_norm = "POT_GGA_PAW_PBE"
elif "GGA" in psp_name_list:
if flag_us:
psp_name_norm = "POT_GGA_US_PW91"
else:
psp_name_norm = "POT_GGA_PAW_PW91"
else:
warnings.warn("{} is not a proper name of vasp's pseudopotential, please ref \
https://github.com/PhasesResearchLab/dfttk/blob/master/docs/Configuration.md. \
This folder will be ignored.".format(psp_name))
psp_name_norm = None
    return psp_name_norm

def _format_list_items(list_items):
"""Generate an indented string out of a list of items."""
list_string = ''
if list_items:
for item in list_items:
list_string = list_string + " '" + item + "',\n"
list_string = "[\n {}\n]".format(list_string.strip()[:-1])
else:
list_string = '[]'
    return list_string

import torch
import math
def importance_sampling_cross_validation(logp):
"""Compute the importance-sampling cross validation (ISCV) estimate.
The ISCV estimates the holdout log-likelihood from just an approximation to
the posterior predictive log-likelihoods on the training data.
### References:
[1]: Alan E. Gelfand, Dipak K. Dey, Hong Chang.
Model determination using predictive distributions with implementation via
sampling-based methods.
Technical report No. 462, Department of Statistics,
Stanford university, 1992.
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.860.3702&rep=rep1&type=pdf
[2]: Aki Vehtari, Andrew Gelman, Jonah Gabry.
Practical Bayesian model evaluation using leave-one-out cross-validation and
WAIC.
arXiv:1507.04544
https://arxiv.org/pdf/1507.04544.pdf
[3]: Sumio Watanabe. Mathematical Theory of Bayesian Statistics.
CRC Press. 2018
https://www.crcpress.com/Mathematical-Theory-of-Bayesian-Statistics/Watanabe/p/book/9781482238068
Args:
logp: Tensor, shape (B,M,...), containing log p(y_i | x_i, theta_j)
for i=1,..,B instances and j=1,...,M models.
Returns:
iscv_logp: Tensor, (...), the ISCV estimate of the holdout log-likelihood.
      iscv_logp_sem: Tensor, (...), the standard error of the mean of `iscv_logp`.
"""
logse = torch.logsumexp(-logp, 1)
iscv_logp = logse.mean(0)
iscv_logp_var = logse.std(0)
m = int(logp.shape[1])
iscv_logp -= math.log(m)
iscv_logp = -iscv_logp
iscv_logp_sem = (iscv_logp_var / float(m)).sqrt()
    return iscv_logp, iscv_logp_sem

import re
def convert_character(text : str):
"""
Convert consecutive full-size numbers to half-size numbers.
Convert a single half-size number into a full-size number.
Convert half-size English characters to full-size ones.
Parameters
----------
text : str
input text
Returns
----------
output : str
converted text
"""
list_text = list(text)
half_nums = re.findall('[0-9]+', text)
    full_nums = re.findall('[０-９]+', text)  # full-width digits (U+FF10..U+FF19)
c_half_nums = []
for half_num in half_nums:
if len(half_num) == 1:
c_half_nums.append(half_num)
c_full_nums = []
for full_num in full_nums:
if len(full_num) > 1:
c_full_nums.append(full_num)
#half to full
for c_half_num in c_half_nums:
index = text.find(c_half_num)
convert = c_half_num.translate(str.maketrans({chr(0x0021 + i): chr(0xFF01 + i) for i in range(94)}))
list_text[index] = convert
#full to half
for c_full_num in c_full_nums:
index = text.find(c_full_num)
converts = c_full_num.translate(str.maketrans({chr(0xFF01 + i): chr(0x21 + i) for i in range(94)}))
for i, convert in enumerate(converts):
list_text[index + i] = convert
output = "".join(list_text)
    return output

import time
def retry(func, exc=Exception, tries=3, delay=1):
"""
Call ``func()`` up to ``tries`` times, exiting only if the function
returns without an exception. If the function raises an exception on
the final try that exception is raised.
If given, ``exc`` can be either an `Exception` or a tuple of `Exception`s
in which only those exceptions result in a retry, and all other exceptions
are raised. ``delay`` is the time in seconds between each retry, and
doubles after each retry.
"""
while True:
try:
return func()
except exc:
tries -= 1
if tries == 0:
raise
time.sleep(delay)
            delay *= 2
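# Illustrative usage (added example): a function that fails twice, then succeeds.
# >>> calls = {'n': 0}
# >>> def flaky():
# ...     calls['n'] += 1
# ...     if calls['n'] < 3:
# ...         raise IOError('transient failure')
# ...     return 'ok'
# >>> retry(flaky, exc=IOError, tries=5, delay=0.01)
# 'ok'
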
import json
def _get_entry_count(doi_file):
"""
    Given a file path, will return the number of entries in that file. If the file reading fails, returns None.
"""
try:
with open(doi_file) as f:
content = json.load(f)
return len(content)
    except Exception:
        return None

from typing import Dict
from typing import Any
def as_lta_record(catalog_record: Dict[str, Any]) -> Dict[str, Any]:
"""Cherry pick keys from a File Catalog record to include in Bundle metadata."""
# As created by the nersc_verifier component...
# ---------------------------------------------
# "uuid": bundle["uuid"],
# "logical_name": hpss_path,
# "checksum": bundle["checksum"],
# "locations": [
# {
# "site": "NERSC",
# "path": hpss_path,
# "hpss": True,
# "online": False,
# }
# ],
# "file_size": bundle["size"],
# # note: 'lta' is an application-private metadata field
# "lta": bundle,
KEYS = ['checksum', 'file_size', 'logical_name', 'meta_modify_date', 'uuid']
lta_record = {k: catalog_record[k] for k in KEYS}
    return lta_record

import decimal
import math
def factorPR(n: int) -> int:
"""Return a factor of n using the Pollard Rho method.
The return value is 1, if n is prime, and a non-trivial factor,
otherwise. Note: This method will occasionally fail to find a
non-trivial factor when one exists.
Examples:
>>> factorPR(2017*2027*12353948231) # product of primes
2017
>>> factorPR(8) == factorPR(4) == 2 # fails
False
"""
numsteps = 2 * int(decimal.Decimal(n).sqrt().sqrt())
for slow in [2, 3, 4, 6]:
fast = slow
for _ in range(numsteps):
slow = (slow * slow + 1) % n
fast = (fast * fast + 1) % n
fast = (fast * fast + 1) % n
g = math.gcd(fast - slow, n)
if g != 1:
if g == n:
break
else:
return g
    return 1

from typing import List
from typing import Any
def _get_lemmas(synsets: List[Any]) -> List[str]:
"""
Return all the lemma names associated with a list of synsets.
"""
    return [lemma_name for synset in synsets for lemma_name in synset.lemma_names()]

def normalize(x):
"""
Normalize input data.
Args:
x (NDarray):
Returns:
Normalized NDarray
"""
    return (x-x.min())/(x.max()-x.min())  # Normalize (0.0-1.0)

def rsplit(_str, seps):
"""
Splits _str by the first sep in seps that is found from the right side.
Returns a tuple without the separator.
"""
    for idx, ch in enumerate(reversed(_str)):
        if ch in seps:
            # Using an absolute index fixes the idx == 0 case, where the
            # original slice `_str[-0:]` returned the whole string as the tail.
            pos = len(_str) - idx - 1  # absolute index of the separator
            return _str[:pos], _str[pos + 1:]
def compare(numA, numB):
"""
compare(numA, numB):
Compares two numbers. Returns:
1, if the first number is greater than the second,
0, if they are equal,
-1, if the first number is smaller than the second.
Parameters
----------
numA: integer or float
numB: integer of float
Returns
-------
integer
"""
if numA > numB:
return 1
if numA < numB:
return -1
if numA == numB:
        return 0

def ratio_col(df, df_cols): # Tested [Y]
"""
    This function computes the ratio between two columns and returns it as a Series.
    Note that the numerator column of the input dataframe is overwritten in place with the computed ratio.
    Args
        df (pd.DataFrame): Dataframe containing the columns to compute a ratio.
        df_cols (tuple): A tuple containing the names of columns in the Dataframe to use in computing the ratio.
            Format is (<numerator>, <denominator>)
    Return
        (pd.Series) The ratio of the two columns specified in the df_cols argument
"""
df[df_cols[0]] = df[df_cols[0]].div(df[df_cols[1]].values, axis=0)
    return df[df_cols[0]]

def clean_hanging_newline(t):
"""
Many editors will silently add a newline to the final line of a
document (I'm looking at you, Vim). This function fixes this common
problem at the risk of removing a hanging newline in the rare cases
where the user actually intends it.
"""
if t and t[-1] == "\n":
return t[:-1]
    return t

def has_video_search_args(request):
"""
Returns whether the object has any video search filter args in it
"""
search_kws = (
"q",
"location",
"channel_ids",
"collection_id",
"tag_ids",
"date",)
for kw in search_kws:
if getattr(request, kw, None) is not None:
            return True
    return False

def _parse_one_level_list(dat):
"""Get list information from queued message
Args:
dat (bytes): received message data
Returns:
list: list of information
"""
results = []
count = int.from_bytes(dat[:2], 'big')
for i in range(count):
base = 2 + 32 * i
results.append(dat[base:base + 32])
    return results

def parse_predecessor_ids(predecessor_ids_string):
"""Parses a comma seperated list of task IDs
Args:
predecessor_ids_string: [string] comma separated task IDs
Returns:
List of task IDs as integers
"""
predecessor_ids = []
predecessor_ids_strings = predecessor_ids_string.split(',')
for predecessor_id_string in predecessor_ids_strings:
if predecessor_id_string != '':
predecessor_ids.append(int(predecessor_id_string))
    return predecessor_ids

import socket
def _receive_devices(port: int, sock: socket.socket, devices: list) -> list:
"""
After sending the device discovery request, collect all available
devices
Args:
port (int): Local port
sock (socket.socket): Socket object
devices (list): List of available devices.
[(device_name: str, ip: str)]
Returns:
list: Updated list of available devices.
[(device_name: str, ip: str)]
"""
try:
while True:
data, _ = sock.recvfrom(port)
split_data = data.decode("utf-8").split(":")
if len(split_data) == 3:
devices.append((split_data[0].strip(), split_data[2].strip()))
else:
raise ValueError(data)
except (socket.timeout, OSError):
pass
    return devices

def extract_gif_param(proc: str):
"""
Extracts the parameter for an animated GIF, currently just the frame
display duration in milliseconds, from a string that ends with an
integer, in parentheses.
"""
a = proc.strip(")").split("(")
assert len(a) == 2
    return int(a[1])

def R(a0,a1,a2,a3,a4,T):
"""
Troscompt et al (2009) coefficients using Faure et al (2004) equation:
log10(R) = sum(a_n T^{-n/6})
where n=0..4, R is presumably cm^3 s^-1
"""
    return a0 + a1*T**(-1./6.) + a2*T**(-2./6.) + a3*T**(-3./6.) + a4*T**(-4./6.)

def permission_check(check):
"""
Class decorator for subclasses of PublicTask to sprinkle in re-usable
permission checks::
@permission_check(user_id_matches)
class MyTask(PublicTask):
def run_public(self, user_id):
pass
"""
def decorator(cls):
cls.check_permission = staticmethod(check)
return cls
    return decorator

import math
def calcFrag(values):
"""Given a set of parsed values that were generated by macs2 predictd,
see get_size fn, this fn calculates the estimated fragment size and
the sd.
**IMPORTANT: this fn is a python translation of the R code in
chilin2/modules/macs2_fragment/qc.py -- stat_frag_std
RETURNS: (estimated frag, sd)
"""
#calculate the estimated frag size: xmax
ymax = max(values['ycorr'])
i_ymax = values['ycorr'].index(ymax)
xmax = values['xcorr'][i_ymax]
#print(ymax, xmax)
#find expected
p_expect=sum([x* p/100.0 for (x,p) in zip(values['x'],values['positive'])])
m_expect=sum([x* m/100.0 for (x,m) in zip(values['x'],values['minus'])])
#print(p_expect, m_expect)
#calc sd
p_sd = math.sqrt(sum([((x - p_expect)**2)* p/100.0 \
for (x,p) in zip(values['x'],values['positive'])]))
m_sd = math.sqrt(sum([((x - m_expect)**2)* m/100.0 \
for (x,m) in zip(values['x'],values['minus'])]))
#print(p_sd, m_sd)
#FINAL avg std error
avg_sd = (p_sd + m_sd) /2.0
    return (xmax, avg_sd)

def solution(integers):
"""
Finds the two entries that sum to 2020 and returns their product.
Raises `ValueError` if there is no solution.
"""
inverse = set()
for n in integers:
if 2020 - n in inverse:
return n * (2020 - n)
inverse.add(n)
    raise ValueError('no solution found')
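# Illustrative usage (added example), the well-known Advent of Code 2020 day 1 sample:
# >>> solution([1721, 979, 366, 299, 675, 1456])  # 1721 + 299 == 2020
# 514579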