content | sha1 | id
---|---|---
def sum_naturals(n):
"""Sum the first N natural numbers
>>> sum_naturals(5)
15
"""
total = 0
k = 1
while k <= n:
total += k
k += 1
return total | 4c59057cd82083d615c72a59f682dd218a657ea0 | 706,250 |
def sort_list_files(list_patches, list_masks):
"""
Sorts a list of patches and masks depending on their id.
:param list_patches: List of name of patches in the folder, that we want to sort.
:param list_masks: List of name of masks in the folder, that we want to sort.
:return: List of sorted lists, respectively of patches and masks.
"""
    # assumes the numeric id is the single character at index 1 of each name
    return sorted(list_patches, key=lambda x: int(x[1])), sorted(list_masks, key=lambda x: int(x[1])) | 91557475bf145862ea88ad9f86cef82135eddd6c | 706,251 |
def xor(*args):
"""True if exactly one of the arguments of the iterable is True.
>>> xor(0,1,0,)
True
>>> xor(1,2,3,)
False
>>> xor(False, False, False)
False
>>> xor("kalimera", "kalinuxta")
False
>>> xor("", "a", "")
True
>>> xor("", "", "")
False
"""
return sum([bool(i) for i in args]) == 1 | 86bbe0350dd18a2508120cec9672661e1aa56ce0 | 706,252 |
import copy
def dfa2nfa(dfa):
"""Copy DFA to an NFA, so remove determinism restriction."""
nfa = copy.deepcopy(dfa)
nfa.transitions._deterministic = False
nfa.automaton_type = 'Non-Deterministic Finite Automaton'
return nfa | eed8e651a51e71599a38288665604add3d8a0a3d | 706,253 |
from typing import List
def parse_text(text):
"""
    Parse raw text format playlists; each line must contain a single
    track, with artist and title separated by a single dash, e.g. Queen - Bohemian Rhapsody.
:param str text:
:return: A list of tracks
"""
tracks: List[tuple] = []
for line in text.split("\n"):
line = line.strip()
if not line or line.startswith("#"):
continue
parts = line.split("-", 1)
if len(parts) != 2:
continue
artist, track = list(map(str.strip, parts))
if not artist or not track or (artist, track) in tracks:
continue
tracks.append((artist, track))
return tracks | 1307d7ced966aa388e570456964c5921ac54ccca | 706,257 |
from typing import List
def ordered_list_item_to_percentage(ordered_list: List[str], item: str) -> int:
"""Determine the percentage of an item in an ordered list.
When using this utility for fan speeds, do not include "off"
Given the list: ["low", "medium", "high", "very_high"], this
    function will return the following when the item is passed
in:
low: 25
medium: 50
high: 75
very_high: 100
"""
if item not in ordered_list:
raise ValueError
list_len = len(ordered_list)
list_position = ordered_list.index(item) + 1
return (list_position * 100) // list_len | 2aa1b0574664e53da6080ae4bc99d1f3c93fad96 | 706,260 |
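A brief usage sketch for the record above, using a hypothetical fan-speed list mirroring the docstring:

speeds = ["low", "medium", "high", "very_high"]
assert ordered_list_item_to_percentage(speeds, "medium") == 50
assert ordered_list_item_to_percentage(speeds, "very_high") == 100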
def dirty(graph):
"""
    Return a dict mapping each dirty node in the graph (any node marked
    for build or test) to its attribute dict.
    """
return {n: v for n, v in graph.node.items() if v.get('build') or v.get('test')} | 06835b52d7741716f1c67d951c0ab74758f476b4 | 706,261 |
def _ibp_sub(lhs, rhs):
"""Propagation of IBP bounds through a substraction.
Args:
lhs: Lefthand side of substraction.
rhs: Righthand side of substraction.
Returns:
out_bounds: IntervalBound.
"""
return lhs - rhs | 45ed06feea14275ddd64e1ec60727123db52a5cd | 706,262 |
def get_type_name_value(obj):
"""
Returns object type name from LLDB value.
It returns type name with asterisk if object is a pointer.
:param lldb.SBValue obj: LLDB value object.
:return: Object type name from LLDB value.
:rtype: str | None
"""
return None if obj is None else obj.GetTypeName() | c87a5acf7d8ef794eab97c90b82bbd9574fb0e2b | 706,267 |
def capacity_rule(mod, g, p):
"""
The capacity of projects of the *gen_ret_bin* capacity type is a
pre-specified number for each of the project's operational periods
    multiplied by (1 minus the binary retirement variable).
"""
return mod.gen_ret_bin_capacity_mw[g, p] \
* (1 - mod.GenRetBin_Retire[g, p]) | ba4ccad8d620da084912a65a80793f54fb84b374 | 706,271 |
def _get_prefixed_values(data, prefix):
"""Collect lines which start with prefix; with trimming"""
matches = []
for line in data.splitlines():
line = line.strip()
if line.startswith(prefix):
match = line[len(prefix):]
match = match.strip()
matches.append(match)
return matches | d0fe7ff11321ccbf06397963a303f0e79181ebba | 706,273 |
def bytes_to_msg(seq, standard="utf-8"):
"""Decode bytes to text."""
return seq.decode(standard) | 5664d97b3fec5d119daa2171bcb431ca5a4b5f33 | 706,274 |
def bonferroni_correction(pvals):
"""
Bonferroni correction.
Reference: http://en.wikipedia.org/wiki/Bonferroni_correction
"""
n = len(pvals)
    return [min(x * n, 1.0) for x in pvals] | f57ffd6b77a0a74a61904334604d1cb0eb08f8ff | 706,275 |
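A minimal sanity check for the record above, with made-up p-values; rounding avoids float-representation noise:

pvals = [0.01, 0.04, 0.5]
corrected = bonferroni_correction(pvals)
assert [round(p, 6) for p in corrected] == [0.03, 0.12, 1.0]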
def normalize_v(v):
""" Normalize velocity to [-1, 1].
Ref: https://github.com/microsoft/AirSim-Drone-Racing-VAE-Imitation/blob/e651be52ff8274c9f595e88b13fe42d51302403d/racing_utils/dataset_utils.py#L20 """
# normalization of velocities from whatever to [-1, 1] range
v_x_range = [-1, 7]
v_y_range = [-3, 3]
v_z_range = [-3, 3]
v_yaw_range = [-1, 1]
if len(v.shape) == 1:
# means that it's a 1D vector of velocities
v[0] = 2.0 * (v[0] - v_x_range[0]) / (v_x_range[1] - v_x_range[0]) - 1.0
v[1] = 2.0 * (v[1] - v_y_range[0]) / (v_y_range[1] - v_y_range[0]) - 1.0
v[2] = 2.0 * (v[2] - v_z_range[0]) / (v_z_range[1] - v_z_range[0]) - 1.0
v[3] = 2.0 * (v[3] - v_yaw_range[0]) / (v_yaw_range[1] - v_yaw_range[0]) - 1.0
elif len(v.shape) == 2:
# means that it's a 2D vector of velocities
v[:, 0] = 2.0 * (v[:, 0] - v_x_range[0]) / (v_x_range[1] - v_x_range[0]) - 1.0
v[:, 1] = 2.0 * (v[:, 1] - v_y_range[0]) / (v_y_range[1] - v_y_range[0]) - 1.0
v[:, 2] = 2.0 * (v[:, 2] - v_z_range[0]) / (v_z_range[1] - v_z_range[0]) - 1.0
v[:, 3] = 2.0 * (v[:, 3] - v_yaw_range[0]) / (v_yaw_range[1] - v_yaw_range[0]) - 1.0
else:
raise Exception('Error in data format of V shape: {}'.format(v.shape))
return v
# Note: The version used in Shuang's code base is below, which should be equivalent to the above version.
# self.targets[:, 0] = 2. * (self.targets[:, 0] + 1.) / (7. + 1.) - 1.
# self.targets[:, 1] = 2. * (self.targets[:, 1] + 3.) / (3. + 3.) - 1.
# self.targets[:, 2] = 2. * (self.targets[:, 2] + 3.) / (3. + 3.) - 1.
# self.targets[:, 3] = 2. * (self.targets[:, 3] + 1.) / (1. + 1.) - 1. | cd47c8d3498e677a1f566b64199224f23a4b5896 | 706,278 |
def second_smallest(numbers):
"""Find second smallest element of numbers."""
m1, m2 = float('inf'), float('inf')
for x in numbers:
if x <= m1:
m1, m2 = x, m1
elif x < m2:
m2 = x
return m2 | 0ca7b297da68651e4a8b56377e08f09d4d82cfb7 | 706,279 |
import hmac
import hashlib
def hmac_sha512(key: bytes, data: bytes) -> bytes:
"""
Return the SHA512 HMAC for the byte sequence ``data`` generated with the
secret key ``key``.
Corresponds directly to the "HMAC-SHA512(Key = ..., Data = ...)" function
in BIP32
(https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).
:param key: The secret key used for HMAC calculation.
:param data: The data for which an HMAC should be calculated.
:return: A byte sequence containing the HMAC of ``data`` generated with the
secret key ``key``.
"""
h = hmac.new(key, data, hashlib.sha512)
return h.digest() | 64850ea2d5e921138d8e0ebc2d021f8eaf5a7357 | 706,282 |
def validate(data):
"""Validates incoming data
Args:
data(dict): the incoming data
Returns:
True if the data is valid
Raises:
ValueError: the data is not valid
"""
if not isinstance(data, dict):
raise ValueError("data should be dict")
if "text" not in data or not isinstance(data["text"],
str) or len(data["text"]) < 1:
raise ValueError("text field is required and should not be empty")
if "markdown" in data and not isinstance(data["markdown"], bool):
raise ValueError("markdown field should be bool")
if "attachments" in data:
if not isinstance(data["attachments"], list):
raise ValueError("attachments field should be list")
for attachment in data["attachments"]:
if "text" not in attachment and "title" not in attachment:
raise ValueError("text or title is required in attachment")
return True | ae8b7e74bd7607a7c8f5079014a0f5e3af5bc011 | 706,283 |
def stripExtra(name):
"""This function removes paranthesis from a string
*Can later be implemented for other uses like removing other characters from string
Args:
name (string): character's name
Returns:
    string: character's name without parentheses
"""
startIndexPer=name.find('(')
start = 0
if(startIndexPer!=-1):
start = startIndexPer
if(start==0):
return name
else:
return name[0:start-1] | fd9b8c2d6f513f06d8b1df067520c7f05cff023d | 706,284 |
def voltage(raw_value, v_min=0, v_max=10, res=32760, gain=1):
"""Converts a raw value to a voltage measurement.
``V = raw_value / res * (v_max - v_min) * gain``
"""
return (float(raw_value) / res * (v_max - v_min) * gain, "V") | b4ea7d2521e1fa856a21b98ace2a9490f8a3b043 | 706,287 |
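A worked example for the record above, using a hypothetical ADC reading at half of full scale:

reading = voltage(16380, v_min=0, v_max=10, res=32760)
assert reading == (5.0, "V")  # 16380/32760 = 0.5 of a 10 V span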
def extract_characteristics_from_string(species_string):
"""
Species are named for the SBML as species_name_dot_characteristic1_dot_characteristic2
So this transforms them into a set
Parameters:
species_string (str) = species string in MobsPy for SBML format (with _dot_ instead of .)
"""
return set(species_string.split('_dot_')) | abfcc0d3e425e8f43d776a02254a04b0e85dc6d1 | 706,288 |
def _diff_bearings(bearings, bearing_thresh=40):
"""
Identify kinked nodes (nodes that change direction of an edge) by diffing
Args:
bearings (list(tuple)): containing (start_node, end_node, bearing)
bearing_thresh (int): threshold for identifying kinked nodes (range 0, 360)
Returns:
list[str] of kinked nodes
"""
kinked_nodes = []
# diff bearings
nodes = [b[0] for b in bearings]
bearings_comp = [b[2] for b in bearings]
bearing_diff = [y - x for x, y in zip(bearings_comp, bearings_comp[1:])]
node2bearing_diff = list(zip(nodes[1:-1], bearing_diff))
# id nodes to remove
for n in node2bearing_diff:
        # controlling for wraparound: reduce the diff mod 360 so the true
        # angular difference (at most 180 degrees) is what gets compared
        d = abs(n[1]) % 360
        if min(d, 360 - d) > bearing_thresh:
kinked_nodes.append(n[0])
return kinked_nodes | a29c3cdd009065d7a73dd993ae66f81853d5e2bc | 706,289 |
def _convert_steplist_to_string(step_data):
"""Converts list of step data into a single string.
Parameters
----------
step_data : list
List of step data
Returns
-------
str
A space delimited string where every 6th value is followed by a newline.
"""
text = ''
for i, datum in enumerate(step_data):
if i == 0:
text += f'\n{datum}\n'
else:
if i%6 == 0:
text += f'{datum}\n'
else:
text += f'{datum} '
return text | 112495edbafc3db39946d7abeefff6466e2dff94 | 706,290 |
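Tracing the record above on a small made-up step list shows the layout: the first value gets its own line, then six values per line:

text = _convert_steplist_to_string(list(range(8)))
assert text == '\n0\n1 2 3 4 5 6\n7 '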
def format_time(data, year):
"""Format any time variables in US.
Parameters
----------
data : pd.DataFrame
Data without time formatting.
year : int
The `year` of the wave being processed.
Returns
-------
data : pd.DataFrame
Data with time formatting.
"""
    # See TODO messages at the top of the file.
    # There's some weird overlap in the pidp data: there's essentially a gap in
    # September 2008 with no one in it from BHPS, which makes transition models fail.
    # The following 2 (commented-out) lines are a crude workaround.
    # if self.year <= 2008:
    #     self.year += 1
data["time"] = year
return data | 858d7e48143a16e644d4f1241cd8918385dc7c5f | 706,295 |
def plot(plot, x, y, **kwargs):
"""
Adds series to plot. By default this is displayed as continuous line.
Refer to matplotlib.pyplot.plot() help for more info. X and y coordinates
are expected to be in user's data units.
Args:
plot: matplotlib.pyplot
Plot to which series should be added.
x: (float,)
Collection of x-coordinates in user units.
y: (float,)
Collection of y-coordinates in user units.
title: str
Series legend.
"""
# add series
return plot.plot(x, y, **kwargs) | 1e861243a87b61461fb49dcadf19ec9099fa5a1f | 706,296 |
def interval_to_errors(value, low_bound, hi_bound):
"""
Convert error intervals to errors
:param value: central value
:param low_bound: interval low bound
:param hi_bound: interval high bound
:return: (error minus, error plus)
"""
error_plus = hi_bound - value
error_minus = value - low_bound
return error_minus, error_plus | ffee403968ddf5fd976df79a90bdbb62474ede11 | 706,297 |
import re
def _newline_to_ret_token(instring):
"""Replaces newlines with the !RET token.
"""
return re.sub(r'\n', '!RET', instring) | 4fcf60025f79811e99151019a479da04f25ba47c | 706,300 |
def _ComputeLineCounts(old_lines, chunks):
"""Compute the length of the old and new sides of a diff.
Args:
old_lines: List of lines representing the original file.
chunks: List of chunks as returned by patching.ParsePatchToChunks().
Returns:
A tuple (old_len, new_len) representing len(old_lines) and
len(new_lines), where new_lines is the list representing the
result of applying the patch chunks to old_lines, however, without
actually computing new_lines.
"""
old_len = len(old_lines)
new_len = old_len
if chunks:
(_, old_b), (_, new_b), old_lines, _ = chunks[-1]
new_len += new_b - old_b
return old_len, new_len | ba99714016b69d87f260c8e7b8793468a2f7b04d | 706,301 |
def _read_int(file_handle, data_size):
"""
Read a signed integer of defined data_size from file.
:param file_handle: The file handle to read from at current position
:param data_size: The data size in bytes of the integer to read
:returns: The integer read and decoded
"""
return int.from_bytes(file_handle.read(data_size), byteorder="little", signed=True) | 4d2a7e82e9daa828c0e5b180250834f2fa9977d5 | 706,302 |
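A quick check of the record above, using an in-memory buffer in place of a real file handle:

import io
assert _read_int(io.BytesIO(b"\xff\xff"), 2) == -1   # two's-complement -1
assert _read_int(io.BytesIO(b"\x2a\x00"), 2) == 42   # little-endian 42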
def list_check(lst):
"""Are all items in lst a list?
>>> list_check([[1], [2, 3]])
True
>>> list_check([[1], "nope"])
False
"""
t = [1 if isinstance(x, list) else 0 for x in lst]
return len(lst) == sum(t) | 9e2c55cb6e15f89ff2b73a78d5f15310d3cac672 | 706,304 |
import json
def all_cells_run(event_str: str, expected_count: int) -> bool:
"""Wait for an event signalling all cells have run.
`execution_count` should equal number of nonempty cells.
"""
try:
event = json.loads(event_str)
msg_type = event["msg_type"]
content = event["content"]
execution_count = content["execution_count"]
status = content["status"]
except (TypeError, KeyError):
return False
return all(
(
msg_type == "execute_reply",
execution_count == expected_count,
status == "ok",
)
) | c3e1bb23f38ffdd09d4cc2ea3326d40b7cf54034 | 706,306 |
import json
def jsonify(value):
"""
Convert a value into a JSON string that can be used for JSONB queries in
Postgres.
If a string happens to contain the character U+0000, which cannot be
represented in a PostgreSQL value, remove the escape sequence representing
that character, effectively stripping out that character from all strings.
"""
return json.dumps(value, ensure_ascii=False).replace("\\u0000", "") | 7fff497b302822f8f79f0e68b2576c26458df99c | 706,308 |
def getAsciiFileExtension(proxyType):
"""
The file extension used for ASCII (non-compiled) proxy source files
for the proxies of specified type.
"""
return '.proxy' if proxyType == 'Proxymeshes' else '.mhclo' | cb2b27956b3066d58c7b39efb511b6335b7f2ad6 | 706,310 |
def col_index_list(info, key, value):
"""Given a list of dicts 'info', return a list of indices corresponding to
columns in which info[key] == value. Use to build lists of default columns,
non-exportable columns, etc."""
index_list = list()
    if info is not None:
for i in range(0, len(info)):
if info[i].get(key) == value:
index_list.append(i)
return index_list | af46b03c2fe5bce2ceb7305fd670ce1f0f52ae38 | 706,313 |
def profiling_csv(stage, phases, durations):
"""
Dumps the profiling information into a CSV format.
For example, with
stage: `x`
phases: ['a', 'b', 'c']
durations: [1.42, 2.0, 3.4445]
The output will be:
```
x,a,1.42
x,b,2.0
x,c,3.444
```
"""
assert all(hasattr(p, "name") for p in phases), "expected to have name attribute."
return "\n".join(
[f"{stage},{p.name},{round(t, 3)}" for (p, t) in zip(phases, durations)]
) | d40ee5601aa201904741870ce75c4b5bfde0f9bc | 706,315 |
def fib(n):
"""Returns the nth Fibonacci number."""
if n == 0:
return 1
elif n == 1:
return 1
else:
return fib(n - 1) + fib(n - 2) | 397d5714f45491dde68c13379fe2a6acafe55002 | 706,318 |
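The recursive fib above recomputes subproblems exponentially; a memoized variant (a sketch, not part of the original record) keeps the same 1-based convention while running in linear time:

from functools import lru_cache

@lru_cache(maxsize=None)
def fib_cached(n):
    """Same recurrence as fib() above, with memoization."""
    if n < 2:
        return 1
    return fib_cached(n - 1) + fib_cached(n - 2)

assert fib_cached(30) == fib(30) == 1346269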
def split_sample(labels):
"""
Split the 'Sample' column of a DataFrame into a list.
Parameters
----------
labels: DataFrame
The Dataframe should contain a 'Sample' column for splitting.
Returns
-------
DataFrame
Updated DataFrame has 'Sample' column with a list of strings.
"""
sample_names = labels["Sample"].str.split(" ", n=1, expand=False)
labels['Sample'] = sample_names
return labels | 483f1b78e07a2156aa3e48ae6c1f5ce41f5e60fe | 706,320 |
def escape_env_var(varname):
"""
Convert a string to a form suitable for use as an environment variable.
The result will be all uppercase, and will have all invalid characters
replaced by an underscore.
The result will match the following regex: [a-zA-Z_][a-zA-Z0-9_]*
Example:
"my.private.registry/cat/image" will become
"MY_PRIVATE_REGISTRY_CAT_IMAGE"
"""
varname = list(varname.upper())
if not varname[0].isalpha():
varname[0] = "_"
for i, c in enumerate(varname):
if not c.isalnum() and c != "_":
varname[i] = "_"
return "".join(varname) | c1e57ff3b9648e93a540202f00d0325f91bccde1 | 706,323 |
def is_in(a_list):
"""Returns a *function* that checks if its argument is in list.
Avoids recalculation of list at every comparison."""
def check(arg): return arg in a_list
return check | 34afbc269c164f0e095b1cbbf4e9576bafc7a9e1 | 706,324 |
def get_decay_fn(initial_val, final_val, start, stop):
"""
Returns function handle to use in torch.optim.lr_scheduler.LambdaLR.
The returned function supplies the multiplier to decay a value linearly.
"""
assert stop > start
def decay_fn(counter):
if counter <= start:
return 1
if counter >= stop:
return final_val / initial_val
time_range = stop - start
return 1 - (counter - start) * (1 - final_val / initial_val) / time_range
assert decay_fn(start) * initial_val == initial_val
assert decay_fn(stop) * initial_val == final_val
return decay_fn | d84c0f0305d239834429d83ba4bd5c6d6e945b69 | 706,328 |
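A usage sketch for the record above, checking the multiplier at the boundaries and midpoint of a hypothetical schedule:

decay_fn = get_decay_fn(initial_val=1.0, final_val=0.1, start=0, stop=100)
assert decay_fn(0) == 1
assert decay_fn(100) == 0.1
assert abs(decay_fn(50) - 0.55) < 1e-9  # halfway between 1.0 and 0.1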
def _vars_to_add(new_query_variables, current_query_variables):
"""
Return list of dicts representing Query Variables not yet persisted
Keyword Parameters:
new_query_variables -- Dict, representing a new inventory of Query
Variables, to be associated with a DWSupport Query
current_query_variables -- Dict, representing the Query Variables
currently associated with the 'new_query_variables' Query mapped
by tuple(table_name, column_name)
>>> from pprint import pprint
>>> test_new_vars = { 'great_fact': ['measure_a', 'measure_b']
... ,'useful_dim': ['field_one']
... ,'occasionally_useful_dim': ['field_two']}
>>> persisted_vars = { ('great_fact', 'measure_a'): object() #fake
... ,('useful_dim', 'field_one'): object()#objects
... ,('useful_dim', 'field_two'): object()}
>>> out = _vars_to_add(test_new_vars, persisted_vars)
>>> pprint(out) # check detected additions
{'great_fact': ['measure_b'], 'occasionally_useful_dim': ['field_two']}
"""
additional_fields_by_table_name = {} # Values to return
# detect additions
for new_variable_table_name, table_columns in new_query_variables.items():
for column_name in table_columns:
key = (new_variable_table_name, column_name) #table+column tuple
if key not in current_query_variables:
# New Query Variable - add variable name to table's list
table_variables = additional_fields_by_table_name.setdefault(
new_variable_table_name
,list()) #default to new, empty list (if none exists yet)
table_variables.append(column_name)
return additional_fields_by_table_name | fd5ea2209b374ab9987a05c139ba1f28805f3eff | 706,329 |
def Ak(Y2d, H, k):
"""
Calculate Ak for Sk(x)
Parameters
----------
Y2d : list
    list of second-derivative values of y
H : list
list of h values from spline
k : int
index from Y2d and H
Returns
-------
float
Ak from cubic spline
"""
return (Y2d[k] - Y2d[k - 1]) / (6 * H[k - 1]) | baea453b9c7b023b78c1827dc23bacbd8fd6b057 | 706,330 |
def cycle_list_next(vlist, current_val):
"""Return the next element of *current_val* from *vlist*, if
approaching the list boundary, starts from begining.
"""
return vlist[(vlist.index(current_val) + 1) % len(vlist)] | 48e2ac31178f51f981eb6a27ecf2b35d44b893b4 | 706,331 |
def get_secondary_connections(network, user):
"""
Finds all the secondary connections (i.e. connections of connections)
of a given user.
Arguments:
network: the gamer network data structure.
user: a string containing the name of the user.
Returns:
A list containing the secondary connections (connections of connections).
- If the user is not in the network, returns None.
- If a user has no primary connections to begin with,
returns an empty list.
NOTE:
It is OK if a user's list of secondary connections includes the user
himself/herself. It is also OK if the list contains a user's primary
connection that is a secondary connection as well.
"""
if user not in network:
return None
if network[user][0] == []:
return []
return [person
for group in
[network[connection][0] for connection in network[user][0]]
for person in group] | 4e53f6e43f2fb132932381370efa4b3a3cd4793c | 706,333 |
def get_regression_function(model, model_code):
"""
    Method which returns the prediction function for a trained regression model
    :param model: trained model object
    :param model_code: code identifying the model type (unused here)
    :return: regression predictor function
"""
return model.predict | fca4a0767b1e741952534baf59ac07cece2c9342 | 706,334 |
def RefundablePayrollTaxCredit(was_plus_sey_p, was_plus_sey_s,
RPTC_c, RPTC_rt,
rptc_p, rptc_s, rptc):
"""
Computes refundable payroll tax credit amounts.
"""
rptc_p = min(was_plus_sey_p * RPTC_rt, RPTC_c)
rptc_s = min(was_plus_sey_s * RPTC_rt, RPTC_c)
rptc = rptc_p + rptc_s
return (rptc_p, rptc_s, rptc) | e282139921045fe8e286abbde6bb4ae44151a50d | 706,339 |
def wavelength_to_energy(wavelength):
"""
Converts wavelength (A) to photon energy (keV)
"""
return 12.39842/wavelength | 4e2d11f2de8ed4890df5d885801cd492644817d8 | 706,343 |
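A worked example for the record above: Cu K-alpha radiation at roughly 1.5406 A corresponds to about 8.05 keV:

assert abs(wavelength_to_energy(1.5406) - 8.048) < 1e-3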
def totaled_no_review_url(cc, sql_time_specification): # pragma: no cover
"""Counts the number of commits with no review url in a given timeframe
Args:
cc(cursor)
sql_time_specification(str): a sql command to limit the dates of the
returned results
Return:
count(int): a count of all commits with no review_url
    results(list): a list of lists with all commits that have no review URL,
        in the format [review_url, git_timestamp, git_subject, git_hash]
"""
cc.execute("""SELECT git_commit.review_url, git_commit.timestamp,
git_commit.subject, git_commit.hash
FROM git_commit
WHERE git_commit.review_url = ''
AND %s""" % sql_time_specification)
result = cc.fetchall()
count = len(result)
formatted_data = []
for data in result:
subject = data[2]
formatted_data.append([data[0], data[1].strftime("%Y-%m-%d %H:%M:%S"),
subject.replace('-', ' '), data[3]])
results = sorted(formatted_data, key=lambda x: x[1], reverse=True)
return count, results | 027f49b13316ecb36eed3e7dde880848b261e3b4 | 706,348 |
import random
def random_point_of_triangle(vertices):
"""Compute a random point of the triangle with given vertices"""
p, q, r = vertices
pq = q-p
pr = r-p
while True:
x = random.random()
y = random.random()
if x + y <= 1:
return p + pq*x + pr*y | ba3bf9183ddae4a16561a06b6f2455ce0ede6c8f | 706,350 |
import time
def get_minutes(hour:str) -> int:
""" Get total number of minutes from time in %H:%M .
Args:
hour (str): String containing time in 24 hour %H:%M format
Returns:
int: Returns total number of minutes
"""
t = time.strptime(hour, '%H:%M')
minutes = t[3] * 60 + t[4]
return minutes | 069835bdb6b0919d6206e0379a1933986ad2d5bd | 706,351 |
def snr2Ivar(flux, snr):
"""
Estimate the inverse variance given flux and S/N.
Parameters
----------
flux : scalar or array of float
        Flux of the object.
snr : scalar or array of float
Signal to noise ratio
"""
return 1.0 / ((flux / snr) ** 2.0) | 91c76cd942a8f37f57a227ccb35cf4968a16193b | 706,353 |
def preprocess_dataframe(data):
"""Helper method to preprocess the dataframe.
Creates new columns for year,month,recalls and percentage change.
Limits the date range for the experiment (these data are trustworthy)."""
data['recalls'] = data['doc_count'] + 1
data.drop(columns=['product', 'Unnamed: 0', 'key', 'key_as_string', 'doc_count'], inplace=True)
data = data.resample("M").sum()
mask = (data.index > '2007-05-31') & (data.index < '2019-09-30')
data = data.loc[mask]
data['pct'] = data['recalls'].pct_change()
return data | f6670cac1319108c88ee9ee409ce0ecdd1eca746 | 706,356 |
def is_solution(x:int, y:int) -> bool:
"""Returns try if (x, y) is a solution."""
# x and y are the values in a sequence of 15 terms of the following form:
# xxxxyxxxxxyxxxx
# x must be a positive integer
if x <= 0:
return False
# y must be a negative integer
if y >= 0:
return False
# a run of 6 consecutive terms must be positive
if 5 * x + y <= 0:
return False
# a run of 11 consecutive terms must be negative
if 9 * x + 2 * y >= 0:
return False
    # x must be <= 16 or y must be >= -16
return x <= 16 or y >= -16 | 5e620fc390f6a79fd25d00c8c8b51d0af788d48c | 706,357 |
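A brute-force scan (illustrative only) over a small window confirms solutions exist; (3, -14) is one valid pair:

solutions = [(x, y) for x in range(1, 20) for y in range(-30, 0) if is_solution(x, y)]
assert (3, -14) in solutions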
def default_context(plugin, context):
"""
Return the default context for plugins rendered with a template, which
simply is a single variable named ``plugin`` containing the plugin
instance.
"""
return {"plugin": plugin} | 5f7a88c02b6c11a150197e50a5be1847cba422b0 | 706,359 |
import torch
def make_positions(tensor, padding_idx, left_pad):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
max_pos = padding_idx + 1 + tensor.size(1)
device = tensor.get_device()
buf_name = f'range_buf_{device}'
if not hasattr(make_positions, buf_name):
setattr(make_positions, buf_name, tensor.new())
setattr(make_positions, buf_name, getattr(make_positions, buf_name).type_as(tensor))
if getattr(make_positions, buf_name).numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=getattr(make_positions, buf_name))
mask = tensor.ne(padding_idx)
positions = getattr(make_positions, buf_name)[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
new_tensor = tensor.clone()
return new_tensor.masked_scatter_(mask, positions[mask]).long() | 8e65c68daae2e40710c777d6e74f048b8b0ad547 | 706,360 |
def _type_convert(new_type, obj):
"""
    Convert `obj` to type `new_type`.
"""
return new_type(obj) | fc47c100508d41caa7ffc786746b58e3d6f684e2 | 706,363 |
def _process_labels(labels, label_smoothing):
"""Pre-process a binary label tensor, maybe applying smoothing.
Parameters
----------
labels : tensor-like
Tensor of 0's and 1's.
label_smoothing : float or None
Float in [0, 1]. When 0, no smoothing occurs. When positive, the binary
ground truth labels `y_true` are squeezed toward 0.5, with larger values
of `label_smoothing` leading to label values closer to 0.5.
Returns
-------
torch.Tensor
The processed labels.
"""
assert label_smoothing is not None
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels | 5a71ded8ac9d3ef4b389542814a170f35ef18fdd | 706,364 |
import importlib
def import_by_path(path):
"""
Given a dotted/colon path, like project.module:ClassName.callable,
returns the object at the end of the path.
"""
module_path, object_path = path.split(":", 1)
target = importlib.import_module(module_path)
for bit in object_path.split("."):
target = getattr(target, bit)
return target | 939b3426f36b3a188f7a48e21551807d42cfa254 | 706,372 |
from typing import Optional
def _exceeded_threshold(number_of_retries: int, maximum_retries: Optional[int]) -> bool:
"""Return True if the number of retries has been exceeded.
Args:
number_of_retries: The number of retry attempts made already.
        maximum_retries: The maximum number of retry attempts to make, or
            None to retry forever.
Returns:
True if the maximum number of retry attempts have already been
made.
"""
if maximum_retries is None:
# Retry forever.
return False
return number_of_retries >= maximum_retries | c434e1e752856f9160d40e25ac20dde0583e50a6 | 706,373 |
import torch
def bert_text_preparation(text, tokenizer):
"""Preparing the input for BERT
    Takes a string argument and performs pre-processing like adding
    special tokens, tokenization, tokens to ids, and tokens to segment
    ids. All tokens are mapped to segment id = 1.
    Args:
        text (str): Text to be converted
        tokenizer (obj): Tokenizer object to convert text into
            BERT-readable tokens and ids
Returns:
list: List of BERT-readable tokens
obj: Torch tensor with token ids
obj: Torch tensor segment ids
"""
marked_text = "[CLS] " + text + " [SEP]"
tokenized_text = tokenizer.tokenize(marked_text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
segments_ids = [1]*len(indexed_tokens)
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
return tokenized_text, tokens_tensor, segments_tensors | f9b3de4062fd0cc554e51bd02c750daea0a8250c | 706,376 |
from typing import Union
from datetime import datetime
import pytz
def api_timestamp_to_datetime(api_dt: Union[str, dict]):
"""Convertes the datetime string returned by the API to python datetime object"""
"""
Somehow this string is formatted with 7 digits for 'microsecond' resolution, so crop the last digit (and trailing Z)
The cropped string will be written into api_dt_str_mod
"""
api_dt_str_mod = None
if isinstance(api_dt, str):
api_dt_str_mod = api_dt[:-2]
elif isinstance(api_dt, dict):
api_dt_str_mod = api_dt["dateTime"][:-2]
else:
        raise TypeError(f"Unsupported type for api_dt: {type(api_dt)}")
dt = datetime.strptime(api_dt_str_mod, "%Y-%m-%dT%H:%M:%S.%f")
dt = pytz.utc.localize(dt)
return dt | 26f4828a19d17c883a8658eb594853158d70fbcf | 706,381 |
def __discount_PF(i, n):
"""
Present worth factor
Factor: (P/F, i, N)
    Formula: P = F(1+i)^(-N)
:param i:
:param n:
:return:
Cash Flow:
F
|
|
--------------
|
P
"""
return (1 + i) ** (-n) | b6e7424647921b945a524a22d844925573b6490a | 706,384 |
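A worked example for the record above: $1000 received 10 periods from now, discounted at 5% per period, is worth about $613.91 today:

present = 1000 * __discount_PF(0.05, 10)
assert abs(present - 613.91) < 0.01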
def FTCS(Uo, diffX, diffY=None):
"""Return the numerical solution of dependent variable in the model eq.
This routine uses the explicit Forward Time/Central Space method
to obtain the solution of the 1D or 2D diffusion equation.
Call signature:
FTCS(Uo, diffX, diffY)
Parameters
----------
Uo: ndarray[float], =1d, 2d
The dependent variable at time level, n within the entire domain.
diffX : float
Diffusion number for x-component of the parabolic/diffusion
equation.
diffY : float, Default=None for 1-D applications
Diffusion number for y-component of the parabolic/diffusion
equation.
Returns
-------
U: ndarray[float], =1d, 2d
The dependent variable at time level, n+1 within the entire domain.
"""
shapeU = Uo.shape # Obtain Dimension
U = Uo.copy() # Initialize U
if len(shapeU) == 1:
U[1:-1] = (
Uo[1:-1] + diffX*(Uo[2:] - 2.0*Uo[1:-1] + Uo[0:-2])
)
elif len(shapeU) == 2:
U[1:-1, 1:-1] = (
Uo[1:-1, 1:-1]
+ diffX*(Uo[2:, 1:-1] - 2.0*Uo[1:-1, 1:-1] + Uo[0:-2, 1:-1])
+ diffY*(Uo[1:-1, 2:] - 2.0*Uo[1:-1, 1:-1] + Uo[1:-1, 0:-2])
)
return U | 4b02749f3f50a2cff74abb75146159289d42b99e | 706,387 |
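A minimal 1D usage sketch for the record above (assuming NumPy arrays); with diffusion number 0.25 the explicit scheme is stable, and a unit spike spreads to its neighbours in one step:

import numpy as np

Uo = np.zeros(7)
Uo[3] = 1.0
U = FTCS(Uo, diffX=0.25)
assert np.allclose(U[2:5], [0.25, 0.5, 0.25])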
import re
def get_filenames(filename):
"""
Return list of unique file references within a passed file.
"""
try:
with open(filename, 'r', encoding='utf8') as file:
words = re.split("[\n\\, \-!?;'//]", file.read())
#files = filter(str.endswith(('csv', 'zip')), words)
files = set(filter(lambda s: s.endswith(('.csv', '.zip', '.pdf', '.txt', '.tsv', '.cfg', '.ini')), words))
return list(files)
except Exception as e:
print(e)
return [] | a1d8c396245cfc682ecc37edb3e673f87939b6fa | 706,390 |
import requests
def get_html_content_in_text(url):
"""
Grab all the content in webpage url and return it's content in text.
Arguments:
url -- a webpage url string.
Returns:
r.text -- the content of webpage in text.
"""
r = requests.get(url)
return r.text | fd8ddc992f34c186051ca8985ffb110c50004970 | 706,393 |
def inherits_from(obj, parent):
"""
Takes an object and tries to determine if it inherits at *any*
distance from parent.
Args:
obj (any): Object to analyze. This may be either an instance
or a class.
parent (any): Can be either instance, class or python path to class.
Returns:
inherits_from (bool): If `parent` is a parent to `obj` or not.
Notes:
What differs this function from e.g. `isinstance()` is that `obj`
may be both an instance and a class, and parent may be an
instance, a class, or the python path to a class (counting from
the evennia root directory).
"""
if callable(obj):
# this is a class
obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj.mro()]
else:
obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj.__class__.mro()]
if isinstance(parent, str):
# a given string path, for direct matching
parent_path = parent
elif callable(parent):
# this is a class
parent_path = "%s.%s" % (parent.__module__, parent.__name__)
else:
parent_path = "%s.%s" % (parent.__class__.__module__, parent.__class__.__name__)
return any(1 for obj_path in obj_paths if obj_path == parent_path) | 9d7e0665b4e4fe2a3f7c136436a2502c8b72527c | 706,396 |
def name(model):
"""A repeatable way to get the formatted model name."""
return model.__name__.replace('_', '').lower() | 3d9ca275bfbfff6d734f49a47459761c559d906e | 706,397 |
import socket
def get_hostname(ipv) -> str:
"""
Get hostname from IPv4 and IPv6.
:param ipv: ip address
:return: hostname
"""
return socket.gethostbyaddr(ipv)[0] | e7d660dc3c5e30def646e56fa628099e997145be | 706,399 |
def esc_quotes(strng):
""" Return the input string with single and double quotes escaped out.
"""
return strng.replace('"', '\\"').replace("'", "\\'") | 25956257e06901d4f59088dd2c17ddd5ea620407 | 706,403 |
def to_numeric(arg):
"""
Converts a string either to int or to float.
This is important, because e.g. {"!==": [{"+": "0"}, 0.0]}
"""
if isinstance(arg, str):
if '.' in arg:
return float(arg)
else:
return int(arg)
return arg | e82746e1c5c84b57e59086030ff7b1e93c89a8ec | 706,405 |
def purelin(n):
"""
Linear
"""
return n | 493c4ae481702194fe32eec44e589e5d15614b99 | 706,407 |
import random
def shuffle_sequence(sequence: str) -> str:
"""Shuffle the given sequence.
Randomly shuffle a sequence, maintaining the same composition.
Args:
sequence: input sequence to shuffle
Returns:
tmp_seq: shuffled sequence
"""
tmp_seq: str = ""
while len(sequence) > 0:
max_num = len(sequence)
rand_num = random.randrange(max_num)
tmp_char = sequence[rand_num]
tmp_seq += tmp_char
tmp_str_1 = sequence[:rand_num]
tmp_str_2 = sequence[rand_num + 1:]
sequence = tmp_str_1 + tmp_str_2
return tmp_seq | 9e833aed9e5a17aeb419a77176713e76566d2d06 | 706,414 |
import torch
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return torch.nn.functional.relu
if activation == "gelu":
return torch.nn.functional.gelu
if activation == "glu":
return torch.nn.functional.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") | ecc690e9b9ec6148b6ea8df4bd08ff2d0c1c322e | 706,416 |
import math
def inverse_gamma(data, alpha=0.1, beta=0.1):
"""
Inverse gamma distributions
:param data: Data value
:param alpha: alpha value
:param beta: beta value
:return: Inverse gamma distributiion
"""
    # pdf: beta^alpha / Gamma(alpha) * data^(-alpha-1) * exp(-beta/data)
    return (pow(beta, alpha) / math.gamma(alpha)) *\
        pow(data, -alpha - 1) * math.exp(-beta/data) | c13f5e4a05e111ae0082b7e69ef5b31498d2c221 | 706,425 |
import statistics
def linear_regression(xs, ys):
"""
Computes linear regression coefficients
https://en.wikipedia.org/wiki/Simple_linear_regression
    Returns a and b coefficients of the function f(x) = a * x + b
"""
x_mean = statistics.mean(xs)
y_mean = statistics.mean(ys)
num, den = 0.0, 0.0
for x, y in zip(xs, ys):
num += (x - x_mean) * (y - y_mean)
den += (x - x_mean) * (x - x_mean)
a = num / den
b = y_mean - a * x_mean
return a, b | 6b6ecbd31262e5fe61f9cf7793d741a874327598 | 706,426 |
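A quick check of the record above on made-up points lying exactly on y = 2x + 1:

xs = [1, 2, 3, 4]
ys = [3, 5, 7, 9]
a, b = linear_regression(xs, ys)
assert abs(a - 2.0) < 1e-9 and abs(b - 1.0) < 1e-9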
def getCubePixels(cubeImages):
"""
Returns a list containing the raw pixels from the `bpy.types.Image` images
in the list `cubeImages`. Factoring this functionality out into its own
function is useful for performance profiling.
"""
return [face.pixels[:] for face in cubeImages] | cdb2ba02ce9466e1b92a683dbea409e66b60c8da | 706,430 |
import torch
def shuffle_tensor(input):
"""
Returns a new tensor whose elements correspond to a randomly shuffled version of the the elements of the input.
Args:
input (`torch.Tensor`): input tensor.
Returns:
(`torch.Tensor`): output tensor.
"""
return input[torch.randperm(input.nelement())] | e7c3ff4180123de1fe6322296ba08863de9766a4 | 706,433 |
import re
def proper_units(text: str) -> str:
"""
Function for changing units to a better form.
Args:
text (str): text to check.
Returns:
str: reformatted text with better units.
"""
conv = {
r"degK": r"K",
r"degC": r"$^{\circ}$C",
r"degrees\_celsius": r"$^{\circ}$C",
r"degrees\_north": r"$^{\circ}$N",
r"degrees\_east": r"$^{\circ}$E",
r"degrees\_west": r"$^{\circ}$W",
r"I metric": r"$\mathcal{I}$--metric",
}
regex = re.compile(
"|".join(
re.escape(key) for key in sorted(conv.keys(), key=lambda item: -len(item))
)
)
return regex.sub(lambda match: conv[match.group()], text) | 5113d227db1a75ec8fa407c5f9edd5a897960d82 | 706,440 |
def does_algorithm_implementation_have_capabilities_to_execute_parameter(parameter_kisao_id, algorithm_specs):
""" Determine if an implementation of an algorithm has the capabilities to execute a model langugae
Args:
parameter_kisao_id (:obj:`str`): KiSAO id for an algorithm parameter
algorithm_specs (:obj:`dict` with schema ``https://api.biosimulators.org/openapi.json#/components/schemas/Algorithm``):
specifications of the implementation of an algorithm
Returns:
:obj:`bool`: whether the implementation of the algorithm has the capabilities to execute the SED parameter
"""
for parameter_specs in algorithm_specs['parameters']:
if parameter_specs['kisaoId']['id'] == parameter_kisao_id:
return True
return False | 653712ae621bd014547e04009243cefe4c9eb8e1 | 706,441 |
def has_remove_arg(args):
"""
Checks if remove argument exists
:param args: Argument list
:return: True if remove argument is found, False otherwise
"""
if "remove" in args:
return True
return False | 9b07fe70cecfbdf6e6e2274e5b3e715f903331c7 | 706,442 |
def convert_to_boolean(value):
"""Turn strings to bools if they look like them
Truthy things should be True
>>> for truthy in ['true', 'on', 'yes', '1']:
... assert convert_to_boolean(truthy) == True
Falsey things should be False
>>> for falsey in ['false', 'off', 'no', '0']:
... assert convert_to_boolean(falsey) == False
Other things should be unchanged
>>> for value in ['falsey', 'other', True, 0]:
... assert convert_to_boolean(value) == value
"""
if isinstance(value, str):
if value.lower() in ['t', 'true', 'on', 'yes', '1']:
return True
elif value.lower() in ['f', 'false', 'off', 'no', '0']:
return False
return value | 7cbf7a8fd601904c7aa8b685f6a3b3f5eaaa5c51 | 706,443 |
def getSampleBandPoints(image, region, **kwargs):
"""
    Function to perform sampling of an image over a given region, using ee.Image.sample(image, region, **kwargs)
Args:
image (ee.Image): an image to sample
region (ee.Geometry): the geometry over which to sample
Returns:
An ee.FeatureCollection of sampled points along with coordinates
"""
dargs = {
'numPixels': 1000,
'region': region
}
dargs.update(kwargs)
sample = image.sample(**dargs)
return sample | 4cfbc3c180b805abe52c718f81cc16c409693922 | 706,444 |
def get_text(cell):
""" get stripped text from a BeautifulSoup td object"""
return ''.join([x.strip() + ' ' for x in cell.findAll(text=True)]).strip() | 08037cbe5d2058206de029417f03d211d350820f | 706,447 |
import math
def ToMercPosition(lat_deg, num_tiles):
"""Calculate position of a given latitude on qt grid.
LOD is log2(num_tiles)
Args:
lat_deg: (float) Latitude in degrees.
num_tiles: (integer) Number of tiles in the qt grid.
Returns:
Floating point position of latitude in tiles relative to equator.
"""
lat_rad = lat_deg / 180.0 * math.pi
y_merc = math.log(math.tan(lat_rad / 2.0 + math.pi / 4.0))
return num_tiles / 2.0 * (1 + y_merc / math.pi) | 1ae7e7b2da9ec3ee20756ef7ffa13d99485aaea7 | 706,450 |
def check_output_filepath(filepath):
"""
Check and return an appropriate output_filepath parameter.
Ensures the file is a csv file. Ensures a value is set. If
a value is not set or is not a csv, it will return a
default value.
:param filepath: string filepath name
:returns: a string representing a filepath location.
"""
if filepath.endswith('.csv'):
return filepath
return "clean_rules_report.csv" | 63fcf697dbde9a62cc39311b4d234955520f6394 | 706,451 |
def _single_value_set(target_list, value):
"""
Return true if this constraint has only one value and it is
this one.
"""
return len(target_list) == 1 and target_list[0] == value | 472ebe1aa9726c70642423d05fa55723496e9bc5 | 706,452 |
def get_positive_input(message, float_parse=False, allow_zero=False):
""" Obtains and returns a positive int from the user.
Preconditions:
message: non-empty string
float_parse: bool defaulted to False
allow_zero: bool defaulted to False
Parameters:
message: The message that is printed when obtaining the input.
float_parse: Whether to parse input to float or int
allow_zero: Whether to allow zero as an input
Postconditions:
num: The valid inputted number.
"""
# use ternary operator to determine the sign to use
sign = ">=" if allow_zero else ">"
# try to parse input to either a float or int
try:
if float_parse:
num = float(input("(must be " + sign + " 0), " + message).strip())
else:
num = int(input("(must be " + sign + " 0), " + message).strip())
# raise a ValueError if input was invalid
if (not allow_zero) and (num <= 0):
raise ValueError()
elif num < 0:
raise ValueError()
return num
# catch any ValueErrors.
except ValueError:
print("Not a valid input.")
# recurse the method until proper input was found
return get_positive_input(message, float_parse, allow_zero) | 17982ff069907464c70df7b6efb1f42d3811962e | 706,453 |
def flights_preclean(df):
"""
Input: Raw dataframe of Flights table.
Output: Cleaned flights table:
- Remove cancelled rows, made available in new dataframe "df_can"
- Drop columns ['Unnamed: 0', 'branded_code_share',
'mkt_carrier', 'cancelled', 'cancellation_code', 'flights', 'air_time',
'first_dep_time', 'total_add_gtime', 'longest_add_gtime', 'no_name']
- Fill null values in delay columns
- Drop remaining null values
"""
global df_can
df_can = df[df.cancelled == 1].copy()
print("Removed cancelled flights - now available in dataframe 'df_can'")
df = df[df.cancelled == 0]
df = df.drop(columns=['Unnamed: 0', 'branded_code_share',
'mkt_carrier', 'cancelled', 'cancellation_code', 'flights', 'air_time',
'first_dep_time', 'total_add_gtime', 'longest_add_gtime', 'no_name'])
for col in ['carrier_delay', 'weather_delay', 'nas_delay', 'security_delay', 'late_aircraft_delay']:
df[col] = df[col].fillna(value=0)
df = df.dropna()
return df | 61dcfa6afd6ec7dd0abb5525187938d6ab978996 | 706,454 |
import math
def distance(s1, s2):
""" Euclidean distance between two sequences. Supports different lengths.
If the two series differ in length, compare the last element of the shortest series
to the remaining elements in the longer series. This is compatible with Euclidean
distance being used as an upper bound for DTW.
:param s1: Sequence of numbers
:param s2: Sequence of numbers
:return: Euclidean distance
"""
n = min(len(s1), len(s2))
ub = 0
for v1, v2 in zip(s1, s2):
ub += (v1 - v2)**2
# If the two series differ in length, compare the last element of the shortest series
# to the remaining elements in the longer series
if len(s1) > len(s2):
v2 = s2[n - 1]
for v1 in s1[n:]:
ub += (v1 - v2)**2
elif len(s1) < len(s2):
v1 = s1[n-1]
for v2 in s2[n:]:
ub += (v1 - v2)**2
return math.sqrt(ub) | 61c308da89b98b4bbde1bba690c86559fd5e1400 | 706,460 |
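Two illustrative calls for the record above: the classic 3-4-5 triangle, and a length mismatch where the shorter series' last element is compared against the overhang:

assert distance([0, 0], [3, 4]) == 5.0
assert distance([0, 0], [3, 4, 4]) == math.sqrt(9 + 16 + 16)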
def arithmetic_series(a: int, n: int, d: int = 1) -> int:
"""Returns the sum of the arithmetic sequence with parameters a, n, d.
a: The first term in the sequence
n: The total number of terms in the sequence
d: The difference between any two terms in the sequence
"""
return n * (2 * a + (n - 1) * d) // 2 | 168f0b07cbe6275ddb54c1a1390b41a0f340b0a6 | 706,463 |
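Two worked examples for the record above, including Gauss's classic 1 + 2 + ... + 100:

assert arithmetic_series(a=1, n=100, d=1) == 5050
assert arithmetic_series(a=2, n=10, d=2) == 110  # 2 + 4 + ... + 20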
def find_splits(array1: list, array2: list) -> list:
"""Find the split points of the given array of events"""
keys = set()
for event in array1:
keys.add(event["temporalRange"][0])
keys.add(event["temporalRange"][1])
for event in array2:
keys.add(event["temporalRange"][0])
keys.add(event["temporalRange"][1])
return list(sorted(keys)) | c52f696caddf35fa050621e7668eec06686cee14 | 706,467 |
def is_word(s):
""" String `s` counts as a word if it has at least one letter. """
for c in s:
if c.isalpha(): return True
return False | 524ed5cc506769bd8634a46d346617344485e5f7 | 706,470 |
def Bern_to_Fierz_nunu(C,ddll):
"""From semileptonic Bern basis to Fierz semileptonic basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
ind = ddll.replace('l_','').replace('nu_','')
return {
'F' + ind + 'nu': C['nu1' + ind],
'F' + ind + 'nup': C['nu1p' + ind],
} | 4f08f79d6614c8929c3f42096fac71b04bfe7b4b | 706,472 |
def location_parser(selected_variables, column):
"""
Parse the location variable by creating a list of tuples.
Remove the hyphen between the start/stop positions. Convert all elements to
integers and create a list of tuples.
Parameters:
selected_variables (dataframe): The dataframe containing the location of
the variables contained in the cps_selected_variables file
column (character): The name of the column containing the start/stop positions
Returns:
selected_fields: A list of tuples containing the start/stop positions
"""
fields = []
for field in selected_variables[column]:
field = field.split('-')
field = [int(i) for i in field]
fields.append(field)
return fields | 106f669269276c37652e92e62eb8c2c52dfe7637 | 706,476 |
import torch
import math
def get_qmf_bank(h, n_band):
"""
Modulates an input protoype filter into a bank of
cosine modulated filters
Parameters
----------
h: torch.Tensor
prototype filter
n_band: int
number of sub-bands
"""
k = torch.arange(n_band).reshape(-1, 1)
N = h.shape[-1]
t = torch.arange(-(N // 2), N // 2 + 1)
p = (-1)**k * math.pi / 4
mod = torch.cos((2 * k + 1) * math.pi / (2 * n_band) * t + p)
hk = 2 * h * mod
return hk | 87e8cf3b0d85a6717cce9dc09f7a0a3e3581e498 | 706,477 |
def action_to_upper(action):
"""
action to upper receives an action in pddl_action_representation, and returns it in upper case.
:param action: A action in PddlActionRepresentation
:return: PddlActionRepresentation: The action in upper case
"""
if action:
action.name = action.name.upper()
        action.types = [t.upper() for t in action.types]
action.predicates = [pred.upper() for pred in action.predicates]
action.requirements = [req.upper() for req in action.requirements]
action.action = action.action.upper()
return action | e9266ad79d60a58bf61d6ce81284fa2accbb0b8d | 706,478 |
def _get_param_combinations(lists):
"""Recursive function which generates a list of all possible parameter values"""
if len(lists) == 1:
list_p_1 = [[e] for e in lists[0]]
return list_p_1
list_p_n_minus_1 = _get_param_combinations(lists[1:])
list_p_1 = [[e] for e in lists[0]]
list_p_n = [p_1 + p_n_minus_1 for p_1 in list_p_1 for p_n_minus_1 in list_p_n_minus_1]
return list_p_n | b4903bea79aebeabf3123f03de986058a06a21f4 | 706,479 |
def sanitize_tag(tag: str) -> str:
"""Clean tag by replacing empty spaces with underscore.
Parameters
----------
tag: str
Returns
-------
str
Cleaned tag
Examples
--------
>>> sanitize_tag(" Machine Learning ")
"Machine_Learning"
"""
return tag.strip().replace(" ", "_") | 40ac78846f03e8b57b5660dd246c8a15fed8e008 | 706,480 |
def smoothed_epmi(matrix, alpha=0.75):
"""
Performs smoothed epmi.
See smoothed_ppmi for more info.
Derived from this:
#(w,c) / #(TOT)
--------------
(#(w) / #(TOT)) * (#(c)^a / #(TOT)^a)
==>
#(w,c) / #(TOT)
--------------
(#(w) * #(c)^a) / #(TOT)^(a+1))
==>
#(w,c)
----------
(#(w) * #(c)^a) / #(TOT)^a
==>
#(w,c) * #(TOT)^a
----------
#(w) * #(c)^a
"""
row_sum = matrix.sum(axis=1)
col_sum = matrix.sum(axis=0).power(alpha)
total = row_sum.sum(axis=0).power(alpha)[0, 0]
inv_col_sum = 1 / col_sum # shape (1,n)
inv_row_sum = 1 / row_sum # shape (n,1)
inv_col_sum = inv_col_sum * total
mat = matrix * inv_row_sum
mat = mat * inv_col_sum
return mat | e2f72c4169aee2f394445f42e4835f1b55f347c9 | 706,482 |
def concat(l1, l2):
""" Join two possibly None lists """
if l1 is None:
return l2
if l2 is None:
return l1
return l1 + l2 | 9e87bead7eedc4c47f665808b9e0222437bc01b5 | 706,484 |
def imthresh(im, thresh):
"""
Sets pixels in image below threshold value to 0
Args:
im (ndarray): image
thresh (float): threshold
Returns:
ndarray: thresholded image
"""
thresh_im = im.copy()
thresh_im[thresh_im < thresh] = 0
return thresh_im | 180dc1eba6320c21273e50e4cf7b3f28c786b839 | 706,486 |