content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k)
---|---|---
def find_root_visual(conn):
"""Find the xcffib.xproto.VISUALTYPE corresponding to the root visual"""
default_screen = conn.setup.roots[conn.pref_screen]
for i in default_screen.allowed_depths:
for v in i.visuals:
if v.visual_id == default_screen.root_visual:
return v | 930bb700bdcb141aba9fc4370d244b588357f8f1 | 703,099 |
import re
def creole_slugify(value):
"""Convert the given string to a slug consistent with heading IDs used
by our creole parser.
>>> creole_slugify("Only 20%!")
"only-20"
"""
if not value:
return value
# Only keep alphanumeric and space characters.
value = re.sub(r"[^a-zA-Z0-9 ]+", "", value)
    # Replace runs of whitespace and hyphens with a single hyphen.
    value = re.sub(r'[-\s]+', '-', value)
return value.lower() | fe621981715372c4a9c03179d862b745551d87f2 | 703,100 |
def previous_line(view, sr):
"""sr should be a Region covering the entire hard line"""
if sr.begin() == 0:
return None
else:
return view.full_line(sr.begin() - 1) | b6c668044d57983d2b66a7ae567b59126031cf9f | 703,101 |
def predecessor_to_forwarding(predecessor, source):
"""
Compute a forwarding table from a predecessor list.
"""
# Create variable to return (forwarding-table dictionary)
FT = {}
# Loop over all nodes that AREN'T the source
for (key, value) in predecessor.items():
if (key != source):
# Add all node pairs, where the immediate predecessor is the source
if (value[0] == source):
FT[key] = (value[0], key)
# If immediate predecessor is not source, follow predecessors
# until source is found, and isolate that link, to add to the dict
else:
newKey = key
newValue = value[0]
while (newValue != source):
newKey = newValue
newValue = predecessor[newKey][0]
FT[key] = (newValue, newKey)
return FT | af0d241fc2b8447581ea582d756a4a3735220815 | 703,102 |
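A quick worked sketch may help; the predecessor structure here is hypothetical, inferred from the value[0] accesses above:
# Hypothetical predecessor list for source 'A'; each value is a tuple whose
# first element is the immediate predecessor on the shortest path.
predecessor = {'B': ('A', 1), 'C': ('B', 2), 'D': ('C', 3)}
print(predecessor_to_forwarding(predecessor, 'A'))
# {'B': ('A', 'B'), 'C': ('A', 'B'), 'D': ('A', 'B')} -- every destination
# forwards over the first-hop link (A, B).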
def async_wraps(cls, wrapped_cls, attr_name):
"""Similar to wraps, but for async wrappers of non-async functions."""
def decorator(func):
func.__name__ = attr_name
func.__qualname__ = ".".join((cls.__qualname__, attr_name))
func.__doc__ = """Like :meth:`~{}.{}.{}`, but async.
""".format(
wrapped_cls.__module__, wrapped_cls.__qualname__, attr_name
)
return func
return decorator | c93fb2a52bfcb3edc9cbf0442138daa1ecf84dda | 703,103 |
def _get_trigger(model, machine, trigger_name, *args, **kwargs):
"""Convenience function added to the model to trigger events by name.
Args:
model (object): Model with assigned event trigger.
machine (Machine): The machine containing the evaluated events.
trigger_name (str): Name of the trigger to be called.
*args: Variable length argument list which is passed to the triggered event.
        **kwargs: Arbitrary keyword arguments which are passed to the triggered event.
    Returns:
        bool: True if a transition has been conducted or the trigger event has been queued.
"""
try:
return machine.events[trigger_name].trigger(model, *args, **kwargs)
except KeyError:
pass
raise AttributeError("Do not know event named '%s'." % trigger_name) | 1f16e62480f0caf661dc144d6dd92feae9426e96 | 703,104 |
import argparse
def _get_parser():
"""Return :class:`argparse.ArgumentParser`."""
parser = argparse.ArgumentParser(
description=__doc__
)
parser.add_argument(
'-f', '--force', dest='force',
help='Force installation of packages.',
action='store_true',
)
parser.add_argument(
'--settings', dest='settings',
help=("""
The Python path to a settings module, e.g.
"myproject.settings.main". If this isn't provided, the
DJANGO_SETTINGS_MODULE environment variable will be
used.
"""),
)
return parser | d8de14332326e48f3b5afde60d133350e0ac2908 | 703,105 |
import os
def os_cwd():
"""Returns the pathlib object for the current working directory
Examples:
>>> os_cwd()\n
'/home/cooluser/pyplay'
"""
return os.getcwd() | 785477619ea629be3e7862889164471ae9e5de88 | 703,106 |
import pandas
def from_timestamp_to_datetime(timestamp, unit='ms'):
"""
:param timestamp: timestamp in unix format.
:param unit: measurement unit used in the timestamp.
:return: the timestamp in date_time format.
"""
return pandas.to_datetime(timestamp, unit=unit) | 48046c91956a9a7195203a84871241d84dd6cd10 | 703,107 |
from pathlib import Path
import inspect
def caller_module() -> Path:
"""Returns the name of the file containing the module from which this
function was called. This ignores all modules located directly inside the
parent directory of the current file (tsfile/*)."""
this_file_parent = Path(__file__).parent
for frame in inspect.stack():
if frame.filename.startswith('<'):
continue
path = Path(frame.filename)
if path.parent == this_file_parent:
continue
return path
raise RuntimeError("Failed to determine the caller module.") | a7c01b9b747ef838315f6c41fc01aa2c70ccb1da | 703,108 |
def qgrams_to_char(s: list) -> str:
"""Converts a list of q-grams to a string.
Parameters
----------
s : list
List of q-grams.
Returns
-------
A string from q-grams.
"""
if len(s) == 1:
return s[0]
return "".join([s[0]] + [s[i][-1] for i in range(1, len(s))]) | cc7dc5eb4d5c9e3e5f751cf7c2190e68c3ba11bd | 703,109 |
def traverse(coord, np_mask, coord_str):
"""Edge case: if pixel value in mask is 0 at coord, then
bounding box has captured the extreme points in this corner
of the image
"""
x, y = coord
if np_mask[y, x] == 0.0:
return (x, y)
height, width = np_mask.shape
store_x, store_y = 0, 0
# Get extreme y coord. Traverse up or down until 0 is found
if coord_str == "coord1" or coord_str == "coord2":
for new_y in range(y, -1, -1):
if np_mask[new_y, x] == 0.0:
store_y = new_y
break
else:
for new_y in range(y, height):
if np_mask[new_y, x] == 0.0:
store_y = new_y
break
# Get extreme x coord. Traverse left or right until 0 is found
if coord_str == "coord1" or coord_str == "coord3":
for new_x in range(x, -1, -1):
if np_mask[y, new_x] == 0.0:
store_x = new_x
break
else:
for new_x in range(x, width):
if np_mask[y, new_x] == 0.0:
store_x = new_x
break
return (store_x, store_y) | e46889c358deeb2cd26fdba0a6e2eff03034d016 | 703,110 |
def iterate_module_func(m, module, func, converged):
"""Call function func() in specified module (if available) and use the result to
adjust model convergence status. If func doesn't exist or returns None, convergence
status will not be changed."""
module_converged = None
iter_func = getattr(module, func, None)
if iter_func is not None:
module_converged = iter_func(m)
if module_converged is None:
# module is not taking a stand on whether the model has converged
return converged
else:
return converged and module_converged | f7221e003dcc627f6e19a9b4961e62d7d98b87e3 | 703,113 |
import re
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
    # Only the first line matters; the docstring can be long.
    line = doc.split('\n', 1)[0]  # get the first line
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret | 4b08f36678247df6119e594ff9859f697f2e8d23 | 703,114 |
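A usage sketch against the docstring forms mentioned above:
print(arguments_from_docstring("min(iterable[, key=func])"))
# ['iterable', 'key']
print(arguments_from_docstring(
    "Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])"))
# ['self', 'ncall_me', 'resume', 'nsplit']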
def _check_mod_11_2(numeric_string: str) -> bool:
"""
Validate numeric_string for its MOD-11-2 checksum.
Any "-" in the numeric_string are ignored.
The last digit of numeric_string is assumed to be the checksum, 0-9 or X.
See ISO/IEC 7064:2003 and
https://support.orcid.org/knowledgebase/articles/116780-structure-of-the-orcid-identifier
"""
# Strip -
nums = numeric_string.replace("-", "")
total = 0
# skip last (check)digit
for num in nums[:-1]:
digit = int(num)
total = (total + digit) * 2
remainder = total % 11
result = (12 - remainder) % 11
if result == 10:
checkdigit = "X"
else:
checkdigit = str(result)
# Compare against last digit or X
return nums[-1].upper() == checkdigit | 685a9e8085000248290c9e482a115c99942c51d1 | 703,115 |
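As a sanity check, the sample identifier from the ORCID documentation validates, while a corrupted check digit does not:
print(_check_mod_11_2("0000-0002-1825-0097"))  # True
print(_check_mod_11_2("0000-0002-1825-0098"))  # False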
import json
def get_stored_username():
"""get stored username if available"""
file_name = 'chapter_10/remember.json'
try:
with open(file_name) as f_o:
usern = json.load(f_o)
except FileNotFoundError:
return None
else:
return usern | 6abe0542a882fe01f63c5392d112514595a167d7 | 703,116 |
def drawbox(img,bbox):
"""
skeleton function which draws bbox on an img
:param img:
:param bbox:
:return:
"""
return img | c1f56b17d3f78333f7c6e9cd5dcda0b0d792040c | 703,117 |
import os
def getFileName(prompt):
"""Prompts the user for a valid file which it returns.
"""
while True:
fileName = input(prompt+" ")
if os.path.exists(fileName):
return fileName
else:
print("File not found! Make sure that the file is inside this directory.") | f1814ce49e79923a8c6e935d78b7362a2fba2f15 | 703,118 |
def gen_cookie(username, hash_password):
"""
Build secure cookie content as a string containing:
- content length (excluding length itself)
- role_name
    - first 15 chars of the hash password
Part of hash password is there for 2 main reasons:
1/ If the cookie secret key is stolen, the attacker will be able to
build a valid secure cookie, so in this situation having a
       non-predictable variable - part of the hash password - which can be
       checked on the server side, making the attack less effective unless
       the attacker knows the role's password.
2/ We don't want to see the full hash password stored in a cookie.
"""
content = "%s:%s" % (username, hash_password[:15])
return "%s:%s" % (len(content) + 1, content) | 44b0ec056bd7002aea7b63fb57901dc1180cb45c | 703,119 |
def str_to_state(str_state):
""" Reads a sequence of 9 digits and returns the corresponding state. """
assert len(str_state) == 9 and sorted(str_state) == list('012345678')
return tuple(int(c) for c in str_state) | 8e9e8c2b70f86aa4798f9be14d39c43404336e7a | 703,120 |
import argparse
def parse_args():
"""parsing and configuration"""
parser = argparse.ArgumentParser(description="Generate ImageNet10 using BigGAN")
# for training generative model
parser.add_argument('--gan_type', type=str, default='BigGAN', help='The type of GAN')
parser.add_argument('--dataset', type=str, default='ImageNet', help='The name of dataset')
parser.add_argument('--mode', type=str, default='evaluate', choices=['evaluate', 'reconstruct'])
parser.add_argument('--save_dir', type=str, default='./generative/models/ImageNet/BigGAN',
help='Directory name to save the model')
# for calculating local lipschitz constant (would more efficient if replace partition by parallel computing)
parser.add_argument('--radius', type=float, default=0.5, help='latent space ball radius')
parser.add_argument('--n_samples', type=int, default=500, help='number of natural samples')
parser.add_argument('--n_neighbors', type=int, default=100, help='number of neighboring points')
parser.add_argument('--n_parts', type=int, default=10, help='number of partitions for neighbors')
# for reconstructing dataset
parser.add_argument('--seed', type=int, default=141, help='manual seed number')
parser.add_argument('--train_parts', type=int, default=500, help='number of partitions for training set')
parser.add_argument('--test_parts', type=int, default=100, help='number of partitions for test set')
parser.add_argument('--train_size', type=int, default=100, help='number of training samples')
parser.add_argument('--test_size', type=int, default=100, help='number of testing samples')
return parser.parse_args() | c1255653398aa6b1f964cf8ebbb8a1583bae6431 | 703,121 |
def get_collection_table_name(node, intermine_model):
"""
Get the table name for this collection
:param node:
:param intermine_model:
:return: (table-name, reference-column-name).
table-name will be null if there isn't a collection table for this node
"""
if 'reverse-reference' in node:
referenced_path = '%s.%s' % (node['referenced-type'], node['reverse-reference'])
referenced_node = intermine_model.get(referenced_path)
# If the referenced node is an attribute then there will be no table
if referenced_node is not None and referenced_node['flavour'] != 'collection':
return None, None
part1 = node['name'].lower()
if 'reverse-reference' in node:
reverse_reference_name = node['reverse-reference'].lower()
else:
# reverse_reference_name = node['referenced-type'].lower()
reverse_reference_name = node['class'].lower()
part2 = reverse_reference_name
# The first name in the alphabet is the first part of the table name
if part1 > part2:
part1, part2 = part2, part1
return part1 + part2, reverse_reference_name | 06aff0058450263131ad27afbe136dd63eaf4320 | 703,122 |
def _readdir(DIR):
"""Implementation of perl readdir in scalar context"""
try:
result = (DIR[0])[DIR[1]]
DIR[1] += 1
return result
except IndexError:
return None | 0ebb237de9ea32fd11f7c6fc6e8a365420c65655 | 703,123 |
def compute_normalization(data):
"""
Write a function to take in a dataset and compute the means, and stds.
Return 6 elements: mean of s_t, std of s_t, mean of (s_t+1 - s_t), std of (s_t+1 - s_t), mean of actions, std of actions
"""
l = []
for a in [data['observations'], (data['next_observations'] - data['observations']), data['actions']]:
l.append(a.mean(axis=0))
l.append(a.std(axis=0))
return l | 521995f2611f66dc11bbe5cc3d3d40e04c97ff19 | 703,124 |
def dictize(aniter, mode, initial=None):
"""iter must contain (key,value) pairs. mode is a string, one of: replace, keep,
tally, sum, append, or a custom function that takes two arguments.
replace: default dict behavior. New value overwrites old if key exists. This
is essentially a pass-thru.
keep: Noop if kv already exists in dict.
tally: Ignore value, count how many times each key occurs.
sum: Each key contains a sum of the (presumably summable) values that arrive
with that key.
append: each key contains a list of the items that arrived with that key.
add: each key contains a set of the items that arrived with that key.
Custom func: The first argument is the existing key value. This function
won't be called if the key doesn't exist. The second is the newly arrived value.
The return value will replace the existing value in the internal dict
    initial optional argument: function that gets called the first time a key
    occurs. Its parameter is the value; its return value is placed in the dict.
    Use it to specify a default value."""
data = {}
modes = "replace keep tally sum append add".split(' ')
funcs = [lambda e, n: n,
lambda e, n: e,
lambda e, n=None: e+1,
lambda e, n: e+n,
lambda e, n: e+[n],
lambda e, n: e.union([n])]
inits = [lambda v: v,
lambda v: v,
lambda v: 1,
lambda v: v,
lambda v: [v],
lambda v: set([v])]
if mode in modes:
modei = modes.index(mode)
func = funcs[modei]
if not initial:
initial = inits[modei]
else:
assert hasattr(mode, '__call__'), '2nd argument must be a function or\
one of: %s' % ' '.join(modes)
func = mode
if not initial:
initial = lambda x: x
for (k, v) in aniter:
if k in data:
data[k] = func(data[k], v)
else:
data[k] = initial(v)
return data | c56a2ad83ec9a45e87caa7def33c6b51f63655cb | 703,125 |
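A few illustrative calls (not from the original source) showing how the built-in modes accumulate values:
pairs = [('a', 1), ('b', 2), ('a', 3)]
print(dictize(pairs, 'append'))  # {'a': [1, 3], 'b': [2]}
print(dictize(pairs, 'sum'))     # {'a': 4, 'b': 2}
print(dictize(pairs, 'tally'))   # {'a': 2, 'b': 1}
# A custom merge function: keep the larger of the two values.
print(dictize(pairs, max))       # {'a': 3, 'b': 2}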
def find_replace_line_endings(fp_readlines):
"""
    Special find-and-replace function that cleans up line endings
    in the file by replacing a trailing '<' (a stray tag-start
    character) with 'lt'.
"""
clean = []
for line in fp_readlines:
if line.endswith("=<\n"):
line = line.replace("<\n", "lt\n")
clean.append(line)
return "".join(clean) | 1fa3703b6d244d6b51c17c95a8cd71e48d5ebc9d | 703,126 |
def read_txt_file(file_path, n_num=-1, code_type='utf-8'):
"""
    read .txt files, get all text or the first n_num lines
    :param file_path: string, the path of this file
    :param n_num: int, the number of lines (split by \n) to read; -1 means all text
:param code_type: string, the code of this file
:return: string, text
"""
with open(file_path, 'r', encoding=code_type) as f:
if n_num <= 0:
text = f.read().strip()
else: # n_num > 0
text = '\n'.join([f.readline() for _ in range(n_num)])
return text | 9c55d370d8e8610965e0f8c4b1bed85e6adcdc5b | 703,128 |
import math
def pow(x, y):
"""Return the logarithm of x with base y (default to e)"""
if x <= 0 and not isinstance(y, int):
raise ValueError(f"Exponent must be an integer if negative base. Received base {x} with exponent {y}.")
return math.pow(x, y) | 6ceb9957be3805c44db7c6b00e170db7ce27354f | 703,129 |
def sum_while_same(xs, x):
"""Sum points for same date representation"""
if not xs:
return [x]
if xs[-1][0] == x[0]:
return xs[:-1] + [(xs[-1][0], xs[-1][1] + x[1])]
else:
return xs + [x] | 646fc1873b582b3d9825cc69816e97de6f91bf3b | 703,130 |
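The function is shaped as a reducer; a minimal usage sketch with functools.reduce:
from functools import reduce
# Collapse consecutive points that share the same date key.
points = [('2020-01', 1), ('2020-01', 2), ('2020-02', 5)]
print(reduce(sum_while_same, points, []))
# [('2020-01', 3), ('2020-02', 5)]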
import torch
def l1_loss(pred_traj, pred_traj_gt):
"""
Input:
    :param pred_traj: Tensor of shape (batch, seq_len). Predicted trajectory along one dimension.
    :param pred_traj_gt: Tensor of shape (batch, seq_len).
    Ground truth predictions along one dimension.
:return: l1 loss |pred_traj - pred_traj_gt|
"""
return torch.sum(torch.abs(pred_traj - pred_traj_gt), dim=-1, keepdim=True) | 3fb4dd2b7fc85e8f32610065078aa3dc98d728d5 | 703,131 |
import time
def splitting_division_semi(f, group, table, sample_indices, splitting_fold):
"""Saving indices of each splitting group to a list that will be fed later to the deep learning model.
    Specific to the semi_resampling strategy, since only training and validation need to be split."""
t0 = time.time()
indices_train = [i for i in sample_indices if f[group][table][i][splitting_fold] == 0.0]
indices_val = [i for i in sample_indices if f[group][table][i][splitting_fold] == 1.0]
print(time.time() - t0)
return indices_train, indices_val | 5179caf3ec3205fc56f0e1c6b77687cd15842556 | 703,132 |
def get_short_description(dict, value):
"""Get layout class based on value."""
return dict.get(value, {}).get('short_description') | f5885f10ce009db925d552f8a0b0f40ab1ccaa2a | 703,133 |
def _is_subexpansion_optional(query_metadata, parent_location, child_location):
"""Return True if child_location is the root of an optional subexpansion."""
child_optional_depth = query_metadata.get_location_info(child_location).optional_scopes_depth
parent_optional_depth = query_metadata.get_location_info(parent_location).optional_scopes_depth
return child_optional_depth > parent_optional_depth | 29391226258e75d434e07c291fe9590c1810d85b | 703,134 |
def _filter_labels(text, labels, allowed_labels):
"""Keep examples with approved labels.
:param text: list of text inputs.
:param labels: list of corresponding labels.
:param allowed_labels: list of approved label values.
:return: (final_text, final_labels). Filtered version of text and labels
"""
final_text, final_labels = [], []
    for t, label in zip(text, labels):
        if label in allowed_labels:
            final_text.append(t)
            final_labels.append(label)
return final_text, final_labels | e17ed7659acbdadc71a6b3f5b522af1e34d40370 | 703,135 |
import math
def Q(fastev,lagev,fastrc,lagrc):
"""Following Wuestefeld et al. 2010"""
omega = math.fabs((fastev - fastrc + 3645)%90 - 45) / 45
delta = lagrc / lagev
dnull = math.sqrt(delta**2 + (omega-1)**2) * math.sqrt(2)
dgood = math.sqrt((delta-1)**2 + omega**2) * math.sqrt(2)
if dnull < dgood:
return -(1 - dnull)
else:
return (1 - dgood) | fe975fb234297a6f25100bb85aa6639590a010d8 | 703,136 |
def array_check(lst):
"""
Function to check whether 1,2,3 exists in given array
"""
for i in range(len(lst)-2):
if lst[i] == 1 and lst[i+1] == 2 and lst[i+2] == 3:
return True
return False | ef60b52a9d7300fa458b503f49996aec0f0831ad | 703,139 |
from typing import List
from typing import Any
def list_difference(list_1: List[Any], list_2: List[Any]) -> List[Any]:
""" This Function that takes two lists as parameters
and returns a new list with the values that are in l1, but NOT in l2"""
differ_list = [values for values in list_1 if values not in list_2]
return differ_list | 3831d799bb40080b828ee90e2929f77ff8aeb7ba | 703,140 |
def format_feedback(feedback_row, study):
"""Updates the feedback dict with the new information."""
formatted_feedback_row = {
"success": {
study.get_single_field(field["field_id"]).field_name: field["field_value"]
for field in feedback_row["success"]
},
"failed": {
study.get_single_field(field["field_id"]).field_name: [
field["code"],
field["message"],
]
for field in feedback_row["failed"]
},
}
return formatted_feedback_row | 3d6d52d6a5340b13e81ca19ee28bff28b189cbc1 | 703,142 |
def correlation(self, column_a, column_b):
"""
Calculate correlation for two columns of current frame.
Parameters
----------
:param column_a: (str) The name of the column from which to compute the correlation.
:param column_b: (str) The name of the column from which to compute the correlation.
:return: (float) Pearson correlation coefficient of the two columns.
Notes
-----
This method applies only to columns containing numerical data.
Examples
--------
Consider Frame *my_frame*, which contains the data
<hide>
>>> s = [("idnum", int), ("x1", float), ("x2", float), ("x3", float), ("x4", float)]
>>> rows = [ [0, 1.0, 4.0, 0.0, -1.0], [1, 2.0, 3.0, 0.0, -1.0], [2, 3.0, 2.0, 1.0, -1.0], [3, 4.0, 1.0, 2.0, -1.0], [4, 5.0, 0.0, 2.0, -1.0]]
>>> my_frame = tc.frame.create(rows, s)
-etc-
</hide>
>>> my_frame.inspect()
[#] idnum x1 x2 x3 x4
===============================
[0] 0 1.0 4.0 0.0 -1.0
[1] 1 2.0 3.0 0.0 -1.0
[2] 2 3.0 2.0 1.0 -1.0
[3] 3 4.0 1.0 2.0 -1.0
[4] 4 5.0 0.0 2.0 -1.0
my_frame.correlation computes the common correlation coefficient (Pearson's) on the pair
of columns provided.
In this example, the *idnum* and most of the columns have trivial correlations: -1, 0, or +1.
Column *x3* provides a contrasting coefficient of 3 / sqrt(3) = 0.948683298051 .
>>> my_frame.correlation("x1", "x2")
-0.9999999999999998
>>> my_frame.correlation("x1", "x4")
nan
>>> my_frame.correlation("x2", "x3")
-0.9486832980505138
"""
return self._scala.correlation(column_a, column_b) | b8f1600e0b2968ca4013418b2fbfda0b13f5911a | 703,143 |
import numpy as np
def uv2spd_dir(u,v):
"""
converts u, v meteorological wind components to speed/direction
where u is velocity from N and v is velocity from E (90 deg)
usage spd, dir = uv2spd_dir(u, v)
"""
    spd = np.sqrt(u**2 + v**2)
    direction = np.arctan2(v, u) * 180.0 / np.pi
    return (spd, direction) | 21525616f97974ba7463fe137f07ef7e7a728fe1 | 703,145 |
from typing import List
from typing import Dict
def parse_idf(content: str) -> dict:
"""Parse an IDF file into a dictionary."""
sections = content.rstrip().split(';')
sub_sections: List[List[str]] = []
obj_dict: Dict[str, List[List[str]]] = {}
for sec in sections:
sec_lines = sec.splitlines()
_lines = []
for sl in sec_lines:
content = sl.split('!')[0]
if content != '':
_lines.append(content)
_lines = ' '.join(_lines).split(',')
clean_lines = [i.strip() for i in _lines]
sub_sections.append(clean_lines)
for ssec in sub_sections:
obj_dict[ssec[0].lower()] = []
for ssec in sub_sections:
obj_dict[ssec[0].lower()].append(ssec[1:])
return obj_dict | 9232964d590745482bb0552520b7ad42ebce8d94 | 703,146 |
import pkg_resources
def get_version():
"""Returns version"""
return pkg_resources.get_distribution("rosetta-cipher").version | 4a9e67434e8a1884e44dafaa30ffaf8c5ec5025d | 703,148 |
def good_fft_number(goal):
"""pick a number >= goal that has only factors of 2,3,5. FFT will be much
faster if I use such a number"""
assert goal < 1e5
choices = [2**a * 3**b * 5**c for a in range(17) for b in range(11)
for c in range(8)]
return min(x for x in choices if x >= goal) | e469a37f28869dca520aea3520fa0d763e9bb8ae | 703,149 |
def connect_to_ecs(env):
"""
Return boto connection to the ecs in the specified environment's region.
"""
rh = env.resource_handler.cast()
wrapper = rh.get_api_wrapper()
client = wrapper.get_boto3_client(
'ecs',
rh.serviceaccount,
rh.servicepasswd,
env.aws_region
)
return client | e4c0b7ad80c18fd6d2a90df6670ca9bfa6f1cbe3 | 703,150 |
def infotodict(seqinfo):
"""Heuristic evaluator for determining which runs belong where
allowed template fields - follow python string module:
item: index within category
subject: participant id
seqitem: run number during scanning
subindex: sub index within group
"""
info = {'test':[]}
return info | ea5c334452cbc3e5fd700c38f258578b26f9cf56 | 703,151 |
def rotated_array_search(input_list, number):
"""
Find the index by searching in a rotated sorted array
"""
high = len(input_list) - 1
low = 0
while low <= high:
mid = (low + high) // 2
if input_list[mid] == number:
return mid
elif input_list[mid] < number <= input_list[high]:
low = mid + 1
else:
if input_list[low] <= number:
high = mid - 1
else:
low = mid + 1
return -1 | c3deb50e608c58e5e11665d949a602cc661305ae | 703,152 |
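A few spot checks on a rotated sorted array (illustrative only):
arr = [6, 7, 8, 9, 10, 1, 2, 3, 4]
print(rotated_array_search(arr, 8))  # 2
print(rotated_array_search(arr, 1))  # 5
print(rotated_array_search(arr, 5))  # -1 (not present)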
from typing import Optional
def has_at_least_one_share_class(filing_json, filing_type) -> Optional[str]: # pylint: disable=too-many-branches
"""Ensure that share structure contain at least 1 class by the end of the alteration or IA Correction filing."""
if filing_type in filing_json['filing'] and 'shareStructure' in filing_json['filing'][filing_type]:
share_classes = filing_json['filing'][filing_type] \
.get('shareStructure', {}).get('shareClasses', [])
if len(share_classes) == 0:
return 'A company must have a minimum of one share class.'
return None | ce0e324edd1fb4e427aafdedcdecafc736fdb8d0 | 703,153 |
import builtins
def no_matplotlib(monkeypatch):
""" Mock an import error for matplotlib"""
import_orig = builtins.__import__
def mocked_import(name, globals, locals, fromlist, level):
""" """
if name == 'matplotlib.pyplot':
raise ImportError("This is a mocked import error")
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import) | 681ba8c0e70387e46ad7ed42ffb11ce8aa7f23bc | 703,154 |
import tqdm
def create_tqdm_reader(reader, max_reads=None):
"""Wrap an iterable in a tqdm progress bar.
Args:
reader: The iterable to wrap.
max_reads: Max number of items, if known in advance.
Returns:
The wrapped iterable.
"""
return tqdm.tqdm(reader, total=max_reads) | 29bd93b85ace167f5586ac275510e82f7bbe8223 | 703,155 |
def calc_U_slip_quasisteady(eps, E, x, mu):
"""
Slip velocity (quasi-steady limit)
"""
u_slip_quasisteady = -eps*E**2*x/(2*mu)
return u_slip_quasisteady | 0b5eebc9333b36cbd4b8179731e5998f7cb2739a | 703,156 |
import math
def get_sequence_of_considered_visits(max_num_considered_actions,
num_simulations):
"""Returns a sequence of visit counts considered by Sequential Halving.
Sequential Halving is a "pure exploration" algorithm for bandits, introduced
in "Almost Optimal Exploration in Multi-Armed Bandits":
http://proceedings.mlr.press/v28/karnin13.pdf
  The visit counts allow implementing Sequential Halving by selecting the best
action from the actions with the currently considered visit count.
Args:
max_num_considered_actions: The maximum number of considered actions.
The `max_num_considered_actions` can be smaller than the number of
actions.
num_simulations: The total simulation budget.
Returns:
A tuple with visit counts. Length `num_simulations`.
"""
if max_num_considered_actions <= 1:
return tuple(range(num_simulations))
log2max = int(math.ceil(math.log2(max_num_considered_actions)))
sequence = []
visits = [0] * max_num_considered_actions
num_considered = max_num_considered_actions
while len(sequence) < num_simulations:
num_extra_visits = max(1, int(num_simulations / (log2max * num_considered)))
for _ in range(num_extra_visits):
sequence.extend(visits[:num_considered])
for i in range(num_considered):
visits[i] += 1
# Halving the number of considered actions.
num_considered = max(2, num_considered // 2)
return tuple(sequence[:num_simulations]) | f0081ae5bfe25d6a3eaad9f032cfb88a403fbb45 | 703,157 |
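An illustrative run: with 4 candidate actions and a budget of 8 simulations, all 4 actions are considered at visit count 0 before halving to the top 2:
print(get_sequence_of_considered_visits(4, 8))
# (0, 0, 0, 0, 1, 1, 2, 2)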
def two_node_diff(a):
"""Calculate and return diffs over two nodes instead of one."""
N = len(a)
return a[2:] - a[:(N-2)] | a38abe787ef87c37104373b402fc020f85f8e3aa | 703,158 |
import datetime
import os
def make_savedir(parent_dir='./'):
"""
make directory to save results
Params
----------
parent_dir : str
parent directory to save results
"""
if parent_dir[-1] != '/':
parent_dir += '/'
orig_dirname = datetime.datetime.now().strftime('%y%m%d_%H%M')
orig_dirname = parent_dir + orig_dirname
dirname = orig_dirname
count = 1
while os.path.exists(dirname):
dirname = orig_dirname + str(count)
count += 1
os.mkdir(dirname)
return dirname | 583e45b184e7cbeee39f2371ab649a8921c19f6f | 703,159 |
def _get_timeout(payload_len):
"""Conservatively assume min 5 seconds or 3 seconds per 1MB."""
return max(3 * payload_len / 1024 / 1024, 5) | 70ef10f9c4630afafa0019057bdaa005eb39e7ad | 703,160 |
def _validate_int(
setting, value, option_parser, config_parser=None, config_section=None
) -> int:
"""Validate an integer setting."""
return int(value) | 9036b1b043bd2463cad4f26780d47e80aa404f73 | 703,161 |
def get_best_name(phenomena):
"""
Create a best_name field which takes the best name as defined by the preference order
:param phenomena: phenomena attributes in form [{"name":"standard_name","value":"time"},{"name":"---","value":"---"},{}...]
:return: best_name(string)
"""
preference_order = ["long_name","standard_name","title","name","short_name","var_id"]
attributes = phenomena["attributes"]
for name in preference_order:
best_name = [d['value'] for d in attributes if d['name'] == name]
if best_name:
return best_name[0]
return None | cd5f1153a22e161e96f48fdcc02f4c4dbbfdbc34 | 703,162 |
def get_global_evidence(a):
"""
PyMultiNest's Analyzer has a get_stats() method, but it's
a bit too sluggish if all we want is to get the global
evidence out. This is a hack around the issue.
"""
stats_file = open(a.stats_file)
lines = stats_file.readlines()
stats = {}
a._read_error_into_dict(lines[1], stats)
Z_str = 'Nested Importance Sampling Global Log-Evidence'
Z = stats[Z_str.lower()]
Zerr = stats[(Z_str + ' error').lower()]
stats['global evidence'] = Z
stats['global evidence error'] = Zerr
return stats | 8677908c4537927f168cd16ab1ddbfaf959f4546 | 703,163 |
def buildRecorderDicts(energyInterval, powerInterval, voltageInterval,
energyPowerMeter, triplexGroup, recordMode,
query_buffer_limit):
"""Helper function to construct dictionaries to be used by individuals to
add recorders to their own models.
Note that the returned dictionary will more or less be directly passed to
a genetic.individual object, and subsequently passed to the appropriate
method in glm.modGLM.
We could add custom table definitions in the future, but why?
"""
recorders = {
'energy': {'objType': 'recorder',
'properties': {'parent': energyPowerMeter,
'table': 'energy',
'interval': energyInterval,
'propList': ['measured_real_energy', ],
'limit': -1,
'mode': recordMode,
'query_buffer_limit': query_buffer_limit
}
},
'power': {'objType': 'recorder',
'properties': {'parent': energyPowerMeter,
'table': 'power',
'interval': powerInterval,
'propList': ['measured_real_power',
'measured_reactive_power'],
'limit': -1,
'mode': recordMode,
'query_buffer_limit': query_buffer_limit
}
},
'triplexVoltage': {'objType': 'recorder',
'properties': {'group': triplexGroup,
'propList': [
'measured_voltage_1.mag',
'measured_voltage_2.mag'],
'interval': voltageInterval,
'table': 'triplexVoltage',
'limit': -1,
'mode': recordMode,
'query_buffer_limit': query_buffer_limit
}
}
}
return recorders | def51273b940ef97d61228cfd54277a9ac053aea | 703,164 |
def sub(arg1, arg2):
"""
Function that subtracts two arguments.
"""
return arg1 - arg2 | fb3694bc0827f62befe67cb58c1edb9de1adb808 | 703,165 |
def query_for_data(driver):
"""Grab all relevant data on a jobs page.
Return:
------
job_titles: list
job_locations: list
posting_companies: list
dates: list
hrefs: list
"""
job_titles = driver.find_elements_by_xpath(
"//span[@itemprop='title']")
job_locations = driver.find_elements_by_xpath(
"//div[@itemprop='jobLocation']")
posting_companies = driver.find_elements_by_xpath(
"//span[@itemprop='name']")
dates = driver.find_elements_by_xpath(
"//time[@itemprop='datePosted']")
hrefs = driver.find_elements_by_xpath("//div//article//div//h2//a")
return job_titles, job_locations, posting_companies, dates, hrefs | d3a44ec2e66f9c8ba09dac45dc253c2dd67303c4 | 703,166 |
import typing
def split_line(line: str) -> typing.Tuple[str, str]:
"""
Separates the raw line string into two strings: (1) the command and (2) the
argument(s) string
:param line:
:return:
"""
index = line.find(' ')
if index == -1:
return line.lower(), ''
return line[:index].lower(), line[index:].strip() | 964877ebe0e63161f449a1d60542fbcab451de28 | 703,167 |
from typing import List
def matrix_transpose(mat: List[List]) -> List[List]:
"""
>>> matrix_transpose([[1, 2], [3, 4], [5, 6]])
[[1, 3, 5], [2, 4, 6]]
"""
if len(mat) == 0:
raise ValueError("Matrix is empty")
return [[mat[j][i] for j in range(len(mat))] for i in range(len(mat[0]))] | 34cf049779408f0b74a1e29cbd1f0a18a6d5dd23 | 703,168 |
def split_nvr_epoch(nvre: str):
"""Split nvre to N-V-R and E.
This function is backported from `kobo.rpmlib.split_nvr_epoch`.
@param nvre: E:N-V-R or N-V-R:E string
@type nvre: str
@return: (N-V-R, E)
@rtype: (str, str)
"""
if ":" in nvre:
if nvre.count(":") != 1:
raise ValueError("Invalid NVRE: %s" % nvre)
nvr, epoch = nvre.rsplit(":", 1)
if "-" in epoch:
if "-" not in nvr:
# switch nvr with epoch
nvr, epoch = epoch, nvr
else:
# it's probably N-E:V-R format, handle it after the split
nvr, epoch = nvre, ""
else:
nvr, epoch = nvre, ""
return (nvr, epoch) | 6b65a5dd4655b8d0d961952be41ab6a939cc24c8 | 703,171 |
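Illustrative calls covering the accepted layouts:
print(split_nvr_epoch("foo-1.0-1:2"))  # ('foo-1.0-1', '2')  N-V-R:E
print(split_nvr_epoch("2:foo-1.0-1"))  # ('foo-1.0-1', '2')  E:N-V-R
print(split_nvr_epoch("foo-1.0-1"))    # ('foo-1.0-1', '')   no epoch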
def findMergeNode(head1, head2):
"""
Go forward the lists every time till the end, and
then jumps to the beginning of the opposite list, and
so on. Advance each of the pointers by 1 every time,
until they meet. The number of nodes traveled from
head1 -> tail1 -> head2 -> intersection point and
head2 -> tail2-> head1 -> intersection point will be equal.
"""
node1 = head1
node2 = head2
while node1 != node2:
if node1.next:
node1 = node1.next
else:
node1 = head2
if node2.next:
node2 = node2.next
else:
node2 = head1
return node2.data | 01c24a3eda17a8063c94c92cff6d558025c03726 | 703,172 |
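A minimal self-contained check, assuming a simple singly linked Node class (hypothetical; the snippet only assumes .next and .data):
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

# Build 1 -> 2 -> 7 -> 8 and 5 -> 7 -> 8, merging at the node holding 7.
shared = Node(7)
shared.next = Node(8)
head1 = Node(1)
head1.next = Node(2)
head1.next.next = shared
head2 = Node(5)
head2.next = shared
print(findMergeNode(head1, head2))  # 7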
import functools
import os
def cached(producer):
"""Calls to a function wrapped with this decorator are cached using ``self.cache.lookup``"""
@functools.wraps(producer)
def wrapper(self):
return self.cache.lookup(data_path=os.path.join(self.category_name, producer.__name__),
plot=self, producer=lambda: producer(self))
return wrapper | 5d42bdba56d19cd9040becb8eb1acfe847752d98 | 703,173 |
from dateutil.parser import parse
def is_datetime_string(string: str) -> bool:
"""
Check if the string is date-like.
Parameters
----------
string : str
Returns
-------
is_date: bool
"""
try:
parse(string)
return True
except ValueError:
return False | ec26eab5d25c2b130efbf32304b2b79f8292a6e1 | 703,174 |
def is_valid_perioddata(data):
"""Check that a dictionary of period data has enough information
(based on key names) to set up stress periods.
Perlen must be explicitly input, or 3 of start_date_time, end_date_time,
nper and/or freq must be specified. This is analogous to the input
requirements for the pandas.date_range method for generating
time discretization
(https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.date_range.html)
"""
perlen = data.get('perlen') is not None
steady = data.get('steady', False)
if isinstance(steady, dict):
steady = steady.get(0)
if not steady:
included = [k for k in ['nper', 'start_date_time', 'end_date_time', 'freq']
if data.get(k) is not None]
has3 = len(included) >= 3
return perlen or has3
else:
nper = data.get('nper') is not None
return nper or perlen | d8c8f4646757177b7504181029cd39b5aa46d124 | 703,176 |
def get_instance_ips():
"""
Function to get instance IPs of your servers.
Sometimes you'd want to do this, if your IPs are
allocated dynamically, e.g. an auto-scaling group
within AWS.
    If you have only one system, you can simply use localhost or its IP.
    Returns: list of instance hostnames
"""
return [
# 'ip-172-31-22-63.ap-south-1.compute.internal', # master
# 'ip-172-31-21-222.ap-south-1.compute.internal', # replica_01
"ec2-3-111-120-49.ap-south-1.compute.amazonaws.com", # master
"ec2-13-232-208-97.ap-south-1.compute.amazonaws.com", # replica_01
] | 92b0cc79022d60b5cd11a050b14237fb397bda86 | 703,178 |
from typing import List
from typing import Union
def __parse_db(db_exp: str) -> List[Union[str, int]]:
"""
Parse a DB directive returning its contents as a list.
:param db_exp: DB directive command.
:return: Contents of the db as the directive.
"""
db_exp_ = db_exp.split(" ", 1)[1]
    db_params = [eval(param) for param in db_exp_.split(",")]
params: List[Union[str, int]] = []
for param_ in db_params:
if isinstance(param_, str):
params.extend(char for char in param_)
else:
params.append(param_) # TO-DO: Error regarding number overflows in DB.
return params | 26337261033d9f59238c727842b8c3cbe5be2d88 | 703,179 |
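A hypothetical DB directive parses like this (assuming comma-separated operands, as in typical assemblers):
# Strings are expanded character by character; numbers pass through as-is.
print(__parse_db('DB "Hi", 10, 0x0A'))
# ['H', 'i', 10, 10]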
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__)) | dfffc592e8e03095c91653327d673e424480dfcf | 703,180 |
def hcolor(data, thresholds):
""" Multicolor a graph according to thresholds
:param data: the data
:type data: list of tuples (info, value)
:param thresholds: dict of thresholds, format
{<threshold>: <color>,}
:type thresholds: dict
:return: the colored graph
:rtype: list of arrays
"""
ret = []
for info, value in data:
newval = []
minover = None
maxt = 0
for t in thresholds:
if maxt < t:
maxt = t
if value > t:
newval.append((t, thresholds[t]))
else:
if minover is None or minover > t:
minover = t
if minover is None:
minover = maxt
newval.append((value, thresholds[minover]))
ret.append((info, newval))
return ret | 00b18c204ea97a7f5d919122b08488c42a25e9da | 703,181 |
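A small example with made-up thresholds, showing how a value splits into colored segments:
thresholds = {50: 'yellow', 90: 'red'}
print(hcolor([('cpu', 75)], thresholds))
# [('cpu', [(50, 'yellow'), (75, 'red')])] -- the portion up to 50 takes the
# exceeded threshold's color; the remainder takes the next threshold's color.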
def euler_problem_6(n=100):
"""
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
"""
# looks like brute force gives you O(n) or O(n logn), which is not bad...
# but we can do better with mathematical insight.
def sum_of_integer_squares(k):
"""
Use the formula 1^2 + 2^2 + ... + n^2 = (n * (n+1) * (2n+1)) / 6.
"""
return (k * (k + 1) * (2 * k + 1)) / 6
def square_of_integer_sums(k):
"""
Use the formula 1 + 2 + ... + n = n (n+1) / 2.
"""
return (k * (k + 1) / 2) ** 2
# O(logn) basic operations
sqsum = square_of_integer_sums(n)
sumsq = sum_of_integer_squares(n)
return int(sqsum - sumsq) | 550d29deea17b3047bc869a134837d4f5c1baf95 | 703,182 |
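The docstring's worked example checks out, as does the n=100 case the problem asks for:
print(euler_problem_6(10))   # 2640
print(euler_problem_6(100))  # 25164150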
def _parse_http_429_5xx_retry_after(result=None, **ignored):
"""Return seconds to throttle"""
assert result is not None, """
The signature defines it with a default value None,
only because the its shape is already decided by the
IndividualCache's.__call__().
In actual code path, the result parameter here won't be None.
"""
response = result
lowercase_headers = {k.lower(): v for k, v in getattr(
# Historically, MSAL's HttpResponse does not always have headers
response, "headers", {}).items()}
if not (response.status_code == 429 or response.status_code >= 500
or "retry-after" in lowercase_headers):
return 0 # Quick exit
default = 60 # Recommended at the end of
# https://identitydivision.visualstudio.com/devex/_git/AuthLibrariesApiReview?version=GBdev&path=%2FService%20protection%2FIntial%20set%20of%20protection%20measures.md&_a=preview
    retry_after = lowercase_headers.get("retry-after", default)
try:
# AAD's retry_after uses integer format only
# https://stackoverflow.microsoft.com/questions/264931/264932
delay_seconds = int(retry_after)
except ValueError:
delay_seconds = default
return min(3600, delay_seconds) | 041a334af8b79e33f875251e189a4a60862f519a | 703,183 |
def inject_local_assets():
"""only ever run from forge-generate"""
return [
{'when': {'platform_is': 'ios'}, 'do': {'inject_local_assets': ()}},
{'when': {'platform_is': 'android'}, 'do': {'inject_local_assets': ()}},
] | 71d8df6950f4cd3e51f31fb660297916f0fa1ecc | 703,184 |
def freedom(L=5):
"""
DOES NOT DO ANYTHING
"""
return -1 | e612245ec6023f3710da63baedf9997c92f4e279 | 703,185 |
def get_num_classes(dataset_name: str) -> int:
"""
Get the number of supervised loss given a dataset name.
:param dataset_name: dataset name
:return: number of supervised class.
"""
if dataset_name == "cifar10":
return 10
elif "cifar100" in dataset_name:
return 100
else:
raise ValueError("Supported datasets are only original CIFAR10/100") | dc699aeaef87b1763c9986cda596b920156e2478 | 703,187 |
def get_bsse_section(natoms_a, natoms_b, mult_a=1, mult_b=1, charge_a=0, charge_b=0):
"""Get the &FORCE_EVAL/&BSSE section."""
bsse_section = {
'FORCE_EVAL': {
'BSSE' : {
'FRAGMENT': [{
'LIST': '1..{}'.format(natoms_a)
},
{
'LIST': '{}..{}'.format(natoms_a + 1, natoms_a + natoms_b)
}],
'CONFIGURATION': [
{ # A fragment with basis set A
'MULTIPLICITY': mult_a,
'CHARGE': charge_a,
'GLB_CONF': '1 0',
'SUB_CONF': '1 0',
},
{ # B fragment with basis set B
'MULTIPLICITY': mult_b,
'CHARGE': charge_b,
'GLB_CONF': '0 1',
'SUB_CONF': '0 1',
},
{ # A fragment with basis set A+B
'MULTIPLICITY': mult_a,
'CHARGE': charge_a,
'GLB_CONF': '1 1',
'SUB_CONF': '1 0',
},
{ # B fragment with basis set A+B
'MULTIPLICITY': mult_b,
'CHARGE': charge_b,
'GLB_CONF': '1 1',
'SUB_CONF': '0 1',
},
{ # A+B fragments with basis set A+B
'MULTIPLICITY': mult_a + mult_b - 1,
'CHARGE': charge_a + charge_b,
'GLB_CONF': '1 1',
'SUB_CONF': '1 1',
}
]
}
}
}
return bsse_section | 61c9398ed35eaaf2212c2c1a66e2cf43b9bbe029 | 703,189 |
import re
def load_placement(placement_file):
"""
Loads VPR placement file. Returns a tuple with the grid size and a dict
indexed by locations that contains top-level block names.
"""
RE_PLACEMENT = re.compile(
r"^\s*(?P<net>\S+)\s+(?P<x>[0-9]+)\s+(?P<y>[0-9]+)\s+(?P<z>[0-9]+)"
)
RE_GRID_SIZE = re.compile(
r"Array size:\s+(?P<x>[0-9]+)\s+x\s+(?P<y>[0-9]+)\s+logic blocks"
)
# Load the file
with open(placement_file, "r") as fp:
lines = fp.readlines()
# Parse
grid_size = None
placement = {}
for line in lines:
line = line.strip()
if line.startswith("#"):
continue
# Placement
match = RE_PLACEMENT.match(line)
if match is not None:
loc = (int(match.group("x")), int(match.group("y")))
placement[loc] = match.group("net")
# Grid size
match = RE_GRID_SIZE.match(line)
if match is not None:
grid_size = (int(match.group("x")), int(match.group("y")))
return grid_size, placement | 72b534b5c8597f4a42d02e041c69a8fc3c92e8f7 | 703,190 |
import os
import glob
def find_matching_geoloc_file(radiance_filename, myd03_dir):
"""
:param radiance_filename: the filename for the radiance .hdf, demarcated with "MYD02".
:param myd03_dir: root directory of MYD03 geolocational files
:return geoloc_filename: the path to the corresponding geolocational file, demarcated with "MYD03"
    The radiance (MYD02) and geolocational (MYD03) files share the same capture date
    (saved in the filename itself), yet can have different processing dates (also seen
    within the filename). A regex search on a partial match in the same directory
    provides the second filename and path.
"""
tail = os.path.basename(radiance_filename)
identifier = tail.split('A')[1].split('.')[1]
geoloc_filename = glob.glob(os.path.join(myd03_dir, '*D03*.{}.*.hdf'.format(identifier)))[0]
return geoloc_filename | 622a0c8c8900d86d97355c8f44c90f583a858b3d | 703,191 |
def license_path(licenses):
"""Get license path."""
# return license if there is exactly one license
return licenses[0] if len(licenses) == 1 else None | b8194e099c4516627edab6c4538e5dfcdc6600a3 | 703,192 |
import glob
import os
def get_next_run(exp_dir):
"""
get run id by looking at current exp_dir
"""
next_run = 0
files = glob.glob(os.path.join(exp_dir, "*.log"))
for file in files:
filename = os.path.basename(file)
run = filename.split('.')[0]
id = int(run[3:])
if id >= next_run:
next_run = id
next_run += 1
return 'Run{:03d}'.format(next_run) | e00f19adbca5c64dddf2b9ccbe6447d4356fa7a4 | 703,193 |
from datetime import datetime
def complete_month(year, month):
""" Return a string with the month number padded with zero if the month has
only one digit. It is also necessary to provide a year.
:param year:
:param month:
:return: Month number padded with zero.
:rtype: str
"""
return datetime(year, month, 1).strftime("%m") | 03915be101c0f418caa78ae6bc423273ad3af24c | 703,194 |
import glob
import os
def list_api(file_pattern):
"""
return list according to file_pattern
"""
file_list = [f for f in glob.glob(file_pattern)]
targets = []
for file in file_list:
targets.append(os.path.basename(file).split(".")[0])
return targets | 6984eeeddfffd124bd0b962d7e7d2ab955c09fcc | 703,195 |
import sys
import os
def get_lib_dir():
"""
Anaconda specific
"""
dirname = 'DLLs' if sys.platform == 'win32' else 'lib'
libdir = os.path.join(sys.prefix, dirname)
return libdir | c13277f73b35d64e37854872e6869a4cf36a9dc7 | 703,196 |
def scrub_email(address):
"""
Remove the local-part from an email address
for the sake of anonymity
:param address: <str>
:return: <str>
"""
if '@' in address:
domain = address.split('@')[1]
return 'user@{}'.format(domain)
else:
return address | 90b54f3a06f3fe50b354138113c27e980c01c59c | 703,197 |
def modify_res(res, cur):
""" Преобразует список-результат запроса в свисок словарей"""
result = list()
for ll in res:
temp_dict = dict()
for k, v in zip(cur.description, ll):
temp_dict[k[0]] = bytes(v) if type(v) is memoryview else v
result.append(temp_dict)
return result | 20654a49ab0980fc5d6539f7079818f1a89bab9b | 703,198 |
import argparse
def params_args(args=None):
"""
Parse command line arguments
:param args: command line arguments or None (default)
:return: dictionary of parameters
"""
# parameters of model and files
params = argparse.ArgumentParser(description='Run distribute model simnet bow.')
params.add_argument("--name", type=str, default="simnet_bow",
help="The name of current model")
params.add_argument("--train_files_path", type=str, default="train_data",
help="Data file(s) for training.")
params.add_argument("--test_files_path", type=str, default="test_data",
help="Data file(s) for validation or evaluation.")
params.add_argument("--log_path", type=str, default="result")
params.add_argument("--model_path", type=str, default="model")
# parameters of training
params.add_argument("-l", "--learning_rate", type=float, default=0.2,
help="Initial learning rate for training.")
params.add_argument("-b", "--batch_size", type=int, default=128,
help="Mini batch size for training.")
params.add_argument("-e", "--epochs", type=int, default=1,
help="Number of epochs for training.")
# customized parameters
params.add_argument('--dict_dim', type=int, default=1451594)
params.add_argument('--emb_dim', type=int, default=128)
params.add_argument('--hid_dim', type=int, default=128)
params.add_argument('--margin', type=float, default=0.1)
params.add_argument('--sample_rate', type=float, default=0.02)
# parameters of train method
params.add_argument("--is_pyreader_train", type=bool, default=False)
params.add_argument("--is_dataset_train", type=bool, default=False)
# parameters of distribute
params.add_argument("--is_local_cluster", type=bool, default=False)
params.add_argument("--is_sparse", type=bool, default=False)
params.add_argument('-r', "--role", type=str, required=False, choices=['TRAINER', 'PSERVER'])
params.add_argument("--endpoints", type=str, default="",
help='The pserver endpoints, like: 127.0.0.1:6000,127.0.0.1:6001')
params.add_argument('--current_endpoint', type=str, default='',
help='The path for model to store (default: 127.0.0.1:6000)')
params.add_argument('-i', "--current_id", type=int, default=0,
help="Specifies the number of the current role")
params.add_argument("--trainers", type=int, default=1,
help="Specify the number of nodes participating in the training")
params.add_argument("--is_first_trainer", type=bool, default=False)
params.add_argument("--pserver_ip", type=str, default="127.0.0.1")
params.add_argument("--pserver_endpoints", type=list, default=[])
params.add_argument("--pserver_ports", type=str, default="36001")
params.add_argument("--sync_mode", type=str,required=False,choices=['sync','half_async','async'])
params.add_argument("--cpu_num", type=int, default=2)
params = params.parse_args()
return params | 4d114f3249da104ce10d51b4bf8b84b70df98833 | 703,199 |
def symm_area(col, n):
"""
returns n + (n - 1) + ... + (n - col + 1)
i.e., the number of matrix elements below and including the diagonal and
from column 0 to column `col`
"""
return col * (2 * n - col + 1) // 2 | e5c7970ee2b612f4678952056be0358c968e06b3 | 703,200 |
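A quick numeric check of the closed form against direct summation:
n = 4
print(symm_area(3, n))               # 9 == 4 + 3 + 2
print(sum(n - c for c in range(3)))  # 9, agrees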
import os
def finder(scenario, multiplier, conserve_total):
"""yield what we can find."""
res = []
for dirname, _dirpath, filenames in os.walk("/i/0/cli"):
for fn in filenames:
res.append(
[f"{dirname}/{fn}", scenario, multiplier, conserve_total]
)
return res | d6d712e435140b94baca30ef9318a625efee7cc0 | 703,201 |
def find_device(p, tags):
"""
Find an audio device to read input from
"""
device_index = None
for i in range(p.get_device_count()):
devinfo = p.get_device_info_by_index(i)
print("Device %d: %s" % (i, devinfo["name"]))
for keyword in tags:
if keyword in devinfo["name"].lower():
print("Found an input: device %d - %s"%(i, devinfo["name"]))
device_index = i
return device_index
if device_index is None:
print("No preferred sound input found; using default input device.")
return device_index | 41428447dc39be8fa06ede59816a7aca9d5bffee | 703,202 |
def normalize_lons(l1, l2):
"""
An international date line safe way of returning a range of longitudes.
>>> normalize_lons(20, 30) # no IDL within the range
[(20, 30)]
>>> normalize_lons(-17, +17) # no IDL within the range
[(-17, 17)]
>>> normalize_lons(-178, +179)
[(-180, -178), (179, 180)]
>>> normalize_lons(178, -179)
[(-180, -179), (178, 180)]
>>> normalize_lons(179, -179)
[(-180, -179), (179, 180)]
>>> normalize_lons(177, -176)
[(-180, -176), (177, 180)]
"""
if l1 > l2: # exchange lons
l1, l2 = l2, l1
delta = l2 - l1
if l1 < 0 and l2 > 0 and delta > 180:
return [(-180, l1), (l2, 180)]
elif l1 > 0 and l2 > 180 and delta < 180:
return [(l1, 180), (-180, l2 - 360)]
elif l1 < -180 and l2 < 0 and delta < 180:
return [(l1 + 360, 180), (l2, -180)]
return [(l1, l2)] | c0d58aa7be8409d6337f0fa8b753f5ef30f531e5 | 703,203 |
def extract_list(p):
"""Check if there is a list after p"""
for sibling in p.next_siblings:
if sibling.name == 'ul':
return [li.text for li in sibling.find_all('li')]
if sibling.name == 'p':
return None | b78a2fb9c5d6eee6a11bb0a9317cdc872f8ff92c | 703,204 |
def find_true_link(s):
"""
Sometimes Google wraps our links inside sneaky tracking links, which often fail and slow us down
so remove them.
"""
# Convert "/url?q=<real_url>" to "<real_url>".
if s and s.startswith('/') and 'http' in s:
s = s[s.find('http'):]
return s | 4d9824f0f67c5e463833f40e7db8365d5312fe46 | 703,205 |
from typing import Tuple
def _unmerge_points(
board_points: Tuple[int, ...]
) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
"""Return player and opponent board positions starting from their respective ace points."""
player: Tuple[int, ...] = tuple(
map(
lambda n: 0 if n < 0 else n,
board_points,
)
)
opponent: Tuple[int, ...] = tuple(
map(
lambda n: 0 if n > 0 else -n,
board_points[::-1],
)
)
return player, opponent | 25965e023030266cc92e6b1456483204ad2c863a | 703,206 |
import json
def from_json_string(my_str):
"""Returns an object (Python data structure) represented by
    a JSON string.
    Arguments:
        my_str (str) -- JSON string
Returns:
obj -- object
"""
return json.loads(my_str) | cb013514b62456d6c628cf4ebea475b54851dfa4 | 703,208 |
from typing import Any
def to_str(object_: Any) -> str:
"""
>>> to_str(b"ass")
'ass'
>>> to_str("ass")
'ass'
>>> to_str(None)
''
>>> to_str({"op": "oppa"})
"{'op': 'oppa'}"
"""
if object_ is None:
return ""
if isinstance(object_, bytes):
return object_.decode("utf-8")
return str(object_) | 1e6606db7ad2f4dee219d84703cae49c2c6566fc | 703,209 |
def get_bse(da, da_peak_times):
"""
Takes an xarray DataArray containing veg_index values and calculates the vegetation
value base (bse) for each timeseries per-pixel. The base is calculated as the mean
value of two minimum values; the min of the slope to the left of peak of season, and
the min of the slope to the right of the peak of season. Users must provide an existing
peak of season (pos) data array, which can either be the max of the timeseries, or the
middle of season (mos) values.
Parameters
----------
da: xarray DataArray
A two-dimensional or multi-dimensional DataArray containing an array of veg_index
values.
da_peak_times: xarray DataArray
An xarray DataArray type with an x and y dimension (no time). Each pixel must be
the time (day of year) value calculated at either at peak of season (pos) or middle
of season (mos) prior.
Returns
-------
da_bse_values : xarray DataArray
An xarray DataArray type with an x and y dimension (no time). Each pixel is the
base (bse) veg_index value detected at across the timeseries at each pixel.
"""
# notify user
print('Beginning calculation of base (bse) values (times not possible).')
# get vos values (min val in each pixel timeseries)
print('> Calculating base (bse) values.')
# split timeseries into left and right slopes via provided peak/middle values
slope_l = da.where(da['time.dayofyear'] <= da_peak_times).min('time')
slope_r = da.where(da['time.dayofyear'] >= da_peak_times).min('time')
# get per pixel mean of both left and right slope min values
da_bse_values = (slope_l + slope_r) / 2
# convert type
da_bse_values = da_bse_values.astype('float32')
# rename
da_bse_values = da_bse_values.rename('bse_values')
# notify user
print('> Success!\n')
return da_bse_values | 3edaf6156bd9fdae15c3bf845eb3deb293489cfb | 703,210 |
def approx_2rd_deriv(f_x0,f_x0_minus_1h,f_x0_minus_2h,h):
"""Backwards numerical approximation of the second derivative of a function.
Args:
f_x0: Function evaluation at current timestep.
f_x0_minus_1h: Previous function evaluation.
f_x0_minus_2h: Function evaluations two timesteps ago.
h: Time inbetween function evaluations.
Returns:
The approximated value of the second derivative of f evaluated at x0
"""
    return (f_x0 - 2*f_x0_minus_1h + f_x0_minus_2h)/(h**2) | b5c93902bf39d32bf84db38476f8fab4772f5fc9 | 703,211 |
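A quadratic has constant second derivative 2, which the backward difference recovers (an illustrative check, not from the original):
f = lambda x: x**2  # f''(x) == 2 everywhere
h, x0 = 0.1, 1.0
print(approx_2rd_deriv(f(x0), f(x0 - h), f(x0 - 2*h), h))  # ~2.0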
def get_dict_key_by_value(val, dic):
"""
Return the first appeared key of a dictionary by given value.
Args:
val (Any): Value of the key.
dic (dict): Dictionary to be checked.
Returns:
Any, key of the given value.
"""
for d_key, d_val in dic.items():
if d_val == val:
return d_key
return None | d01522a61d7a0549ed54bfcb620da10857d67ae7 | 703,212 |
import json
def isJson(var=''):
""" Check json
>>> isJson(var='')
False
>>> isJson('')
False
>>> isJson('{}')
True
"""
result = True
try:
json.loads(var)
    except Exception:
result = False
return result | dc146fff1449df844ce0ac00607d77b5e2dc4370 | 703,213 |