content (string) | sha1 (string, 40 chars) | id (int64)
---|---|---
import math
def squeezenet1_0_fpn_feature_shape_fn(img_shape):
""" Takes an image_shape as an input to calculate the FPN output sizes
Ensure that img_shape is of the format (..., H, W)
Args
        img_shape : image shape as a torch.Tensor (not torch.Size); should have
            H, W as the last 2 axes
Returns
        C2_shape, P3_shape, P4_shape, P5_shape, P6_shape, P7_shape : as 6 (2,) tuples
"""
C0_shape = img_shape[-2:]
C1_shape = (math.floor((C0_shape[0] - 5) / 2), math.floor((C0_shape[1] - 5) / 2))
C2_shape = (math.ceil((C1_shape[0] - 1) / 2), math.ceil((C1_shape[1] - 1) / 2))
P3_shape = (math.ceil((C2_shape[0] - 1) / 2), math.ceil((C2_shape[1] - 1) / 2))
P4_shape = (math.ceil((P3_shape[0] - 1) / 2), math.ceil((P3_shape[1] - 1) / 2))
P5_shape = (math.ceil((P4_shape[0] - 1) / 2), math.ceil((P4_shape[1] - 1) / 2))
P6_shape = (math.ceil(P5_shape[0] / 2), math.ceil(P5_shape[1] / 2))
P7_shape = (math.ceil(P6_shape[0] / 2), math.ceil(P6_shape[1] / 2))
return C2_shape, P3_shape, P4_shape, P5_shape, P6_shape, P7_shape | d56fe3d834bcd9633727defe3ad9a27ea756ed40 | 705,239 |
def clip_alpha(aj, H, L):
"""
    clips alpha values that are greater
    than H or less than L
"""
if aj > H:
aj = H
if L > aj:
aj = L
return aj | d272e2703c1b6008fc4840e887ce842005dfad62 | 705,240 |
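A minimal usage sketch for the clamp above (values are illustrative, not from the source):

# clip_alpha keeps an SMO-style alpha inside the box [L, H]
assert clip_alpha(5.0, H=4.0, L=0.0) == 4.0   # clipped down to H
assert clip_alpha(-1.0, H=4.0, L=0.0) == 0.0  # clipped up to L
assert clip_alpha(2.5, H=4.0, L=0.0) == 2.5   # already inside the box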
import types
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__) | 37fca64ddaadfc8a6a24dce012af2143038cacd2 | 705,241 |
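An illustrative check (the `greet` function is hypothetical) that the copy is independent of the original:

def greet(name="world"):
    return "hello " + name

greet_copy = _copy_fn(greet)
greet_copy.__defaults__ = ("there",)  # mutate only the copy's defaults
assert greet() == "hello world"
assert greet_copy() == "hello there"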
import random
def get_codename():
"""Helper for generating a random codename to represent a voter
in the admin interface. To protect voting privacy of our voters,
we are using hashes to make it slightly more difficult to
reveal/infer who voted for who. On the admin interface, however,
instead of using a less human-friendly hash, we are representing
voters with randomly generated codenames, which still obscures
who they are but looks a bit better. The codename is always randomly
generated and does not stay the same like the hash, since we don't
need to use it to track anything.
"""
codename = random.choice(['Black', 'Blue', 'Red', 'Brown', 'Gray',
'Green', 'Yellow', 'Purple', 'White',
'Orange', 'Pink'])
codename += " " + random.choice(['Mamba', 'Raptor', 'Eagle',
'Hawk', 'Sparrow', 'Snake', 'Mosquito',
'Turkey', 'Opossum', 'Narwhal',
'Seabass','Octopus', 'Jellyfish',
'Armadillo', 'Lemur', 'Tiger',
'Whale', 'Elephant','Turtle',
'Dragon', 'Horse', 'Donkey', 'Coyote',
'Penguin', 'Fox', 'Mouse', 'Albatross',
'Mammoth', 'Tiger', 'Bear', 'Weasel'])
return codename | 6baf9cc8dd774f0d5541980d7a48be98cb4c66a0 | 705,242 |
def getTJstr(text, glyphs, simple, ordering):
""" Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
operator.
Notes:
The input string is converted to either 2 or 4 hex digits per character.
Args:
simple: no glyphs: 2-chars, use char codes as the glyph
glyphs: 2-chars, use glyphs instead of char codes (Symbol,
ZapfDingbats)
not simple: ordering < 0: 4-chars, use glyphs not char codes
ordering >=0: a CJK font! 4 chars, use char codes as glyphs
"""
if text.startswith("[<") and text.endswith(">]"): # already done
return text
if not bool(text):
return "[<>]"
if simple: # each char or its glyph is coded as a 2-byte hex
if glyphs is None: # not Symbol, not ZapfDingbats: use char code
otxt = "".join(["%02x" % ord(c) if ord(c) < 256 else "b7" for c in text])
else: # Symbol or ZapfDingbats: use glyphs
otxt = "".join(["%02x" % glyphs[ord(c)][0] if ord(c) < 256 else "b7" for c in text])
return "[<" + otxt + ">]"
# non-simple fonts: each char or its glyph is coded as 4-byte hex
if ordering < 0: # not a CJK font: use the glyphs
otxt = "".join(["%04x" % glyphs[ord(c)][0] for c in text])
else: # CJK: use the char codes
otxt = "".join(["%04x" % ord(c) for c in text])
return "[<" + otxt + ">]" | bd5b7abd1b5ceb0b273e99e30ecc248482ed7476 | 705,245 |
def check_skip(timestamp, filename):
    """
    Checks if a timestamp has been given and whether the timestamp corresponds
    to the given filename.
    Returns True if a timestamp was given but does not appear in the filename,
    and False otherwise.
    """
    return len(timestamp) > 0 and timestamp not in filename | 738043fb554f20b79fa3ac8861f9e60d0d697e5e | 705,246 |
def is_tabledap(url):
"""
Identify a dataset as an ERDDAP TableDAP dataset.
Parameters
----------
url (str) : URL to dataset
Returns
-------
bool
"""
return "tabledap" in url | 9f4650bc3a3bc0794637b042c1779a84d7c02779 | 705,247 |
def generate_timestamp(time_to_use, stamp_type="default"):
""" Genrate a text timestamp """
new_stamp = time_to_use.strftime("%Y%m%d-%H%M%S")
return new_stamp | 1b386ed7375b3158867d980796c764a627c68338 | 705,248 |
def choices_on_ballots(L, printing_wanted=False):
"""
Return a dict of the choices shown on ballot list L, with counts.
Args:
L (list): list of ballots
Returns:
C (dict): dict of distinct strings appearing in ballots in L,
each with count of number of occurrences.
    """
C = dict()
ballot_no = 0
for ballot in L:
ballot_no += 1
for choice in ballot:
            if printing_wanted and choice not in C:
print("Choice {} first seen in ballot {}"
.format(choice, ballot_no))
C[choice] = 1 + C.get(choice, 0)
return C | e489eef70ee0efd0f40f5163c2135a5549c8893e | 705,249 |
import random
def person_split(whole_data, train_names, valid_names, test_names):
"""Split data by person."""
random.seed(30)
random.shuffle(whole_data)
train_data = []
valid_data = []
test_data = []
for idx, data in enumerate(whole_data): # pylint: disable=unused-variable
if data["name"] in train_names:
train_data.append(data)
elif data["name"] in valid_names:
valid_data.append(data)
elif data["name"] in test_names:
test_data.append(data)
print("train_length:" + str(len(train_data)))
print("valid_length:" + str(len(valid_data)))
print("test_length:" + str(len(test_data)))
return train_data, valid_data, test_data | ef0475fbc515af1352401c576be27351cda81a35 | 705,250 |
import torch
def cal_area(group_xyz):
"""
Calculate Area of Triangle
:param group_xyz: [B, N, K, 3] / [B, N, G, K, 3]; K = 3
:return: [B, N, 1] / [B, N, G, 1]
"""
    pad_shape = group_xyz[..., 0, None].shape
    # Match the device/dtype of the input so this also works on GPU tensors.
    ones = torch.ones(pad_shape, device=group_xyz.device, dtype=group_xyz.dtype)
    det_xy = torch.det(torch.cat([group_xyz[..., 0, None], group_xyz[..., 1, None], ones], dim=-1))
    det_yz = torch.det(torch.cat([group_xyz[..., 1, None], group_xyz[..., 2, None], ones], dim=-1))
    det_zx = torch.det(torch.cat([group_xyz[..., 2, None], group_xyz[..., 0, None], ones], dim=-1))
    # Note: each determinant equals twice the corresponding projected signed
    # area, so this returns 2x the geometric triangle area.
    area = torch.sqrt(det_xy ** 2 + det_yz ** 2 + det_zx ** 2).unsqueeze(-1)
return area | bbafa626c1833b5bde81303b4038081dae7bc965 | 705,251 |
def get_jquery_min_js():
"""
Return the location of jquery.min.js. It's an entry point to adapt the path
when it changes in Django.
"""
return 'admin/js/vendor/jquery/jquery.min.js' | 86315a0992dc181435f6899b24eb93abc0a47941 | 705,252 |
def quicksort(inputArray):
"""input: array
output: new sorted array
features: stable
    efficiency: O(n^2) (worst case), O(n log(n)) (average and best case)
space complexity: O(n)
method:
Pick the last element in the array as the pivot.
Separate values into arrays based on whether they are
greater than, less than, or equal to the pivot.
Recursively sort the greater than and less than arrays.
    Return a new array merging the sorted arrays and the pivot.
"""
if len(inputArray) <= 1:
return inputArray
pivot = inputArray[-1]
lesser = []
greater = []
equal = []
for value in inputArray[:-1]:
if value > pivot:
greater.append(value)
elif value < pivot:
lesser.append(value)
elif value == pivot:
equal.append(value)
lesser = quicksort(lesser)
greater = quicksort(greater)
return lesser + equal + [pivot] + greater | 2a8036ba038f4f7a8e817175d9a810184911ce4b | 705,253 |
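A quick sanity check of the sort (example values are illustrative):

assert quicksort([5, 2, 9, 2, 7]) == [2, 2, 5, 7, 9]
assert quicksort([]) == []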
def get_paypal_currency_code(iso_currency_code):
"""
Function will map the currency code to paypal currency code
"""
if iso_currency_code == 124:
return 'CAD'
if iso_currency_code == 840:
return 'USD'
if iso_currency_code == 484:
return 'MXN'
return 'CAD' | af9579a6d12e44dd3263956eb41ece9eadeacaee | 705,254 |
import socket
def get_own_ip():
"""
returns own ip
original from:
https://stackoverflow.com/a/25850698/3990615
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 1)) # connect() for UDP doesn't send packets
local_ip_address = s.getsockname()[0]
return local_ip_address | 53195ee3880a9025ba525c120f2e4ccc0e676a93 | 705,255 |
import time
def chaperone(method):
"""
Wraps all write, read and query methods of the adapters; monitors and handles communication issues
:param method: (callable) method to be wrapped
:return: (callable) wrapped method
"""
def wrapped_method(self, *args, validator=None, **kwargs):
if not self.connected:
raise ConnectionError(f'Adapter is not connected for instrument at address {self.instrument.address}')
while self.busy: # wait for turn to talk to the instrument
time.sleep(0.05)
self.busy = True # block other methods from talking to the instrument
# Catch communication errors and either try to repeat communication or reset the connection
attempts = 0
reconnects = 0
while reconnects <= self.max_reconnects:
while attempts < self.max_attempts:
try:
response = method(self, *args, **kwargs)
if validator:
valid_response = validator(response)
else:
                        valid_response = (response is not None) and (response != '') and (response == response)  # NaN fails x == x
if not valid_response:
raise ValueError(f'invalid response, {response}, from {method.__name__} method')
elif attempts > 0 or reconnects > 0:
print('Resolved')
self.busy = False
return response
except BaseException as err:
print(f'Encountered {err} while trying to talk to {self.instrument.name}')
print('Trying again...')
attempts += 1
# repeats have maxed out, so try reconnecting with the instrument
print('Reconnecting...')
self.disconnect()
time.sleep(self.delay)
self.connect()
attempts = 0
reconnects += 1
# Getting here means that both repeats and reconnects have been maxed out
raise ConnectionError(f'Unable to communicate with {self.instrument.name}!')
wrapped_method.__doc__ = method.__doc__ # keep method doc string
return wrapped_method | ec224565208428c9daacdb2e3d15ae0dbb4ee9b1 | 705,256 |
import re
def preprocess_text(text, lower=True):
""" Prepsocess text.
"""
text = text.replace("ä", "äe").replace("ö", "oe").replace("ü", "ue").replace("ß", "ss")
# Remove punctuations and numbers
text = re.sub("[^a-zA-Z]+", " ", text)
# Single character removal
text = re.sub(r"\b[a-zA-Z]\b", "", text)
# Removing multiple spaces
text = re.sub(r"\s+", " ", text)
    if lower:
text = text.lower()
return text | fb0c982b8ce3dce2d78918dd8a6ce469a33c93eb | 705,257 |
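An illustrative call showing the umlaut transliteration and punctuation stripping (note the trailing space left by the final whitespace collapse):

print(preprocess_text("Die Größe ist 42!"))  # -> "die groesse ist "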
def separate_last_day(df_):
"""
takes a dataset which has the target and features built
and separates it into the last day
"""
# take the last period
last_period = df_.iloc[-1]
    # the last period is now a series, so its name will be the timestamp
training_data = df_.loc[df_.index < last_period.name]
return last_period, training_data | 0e7e7ea31a55c6f648e218b44845290689e344ab | 705,258 |
def cubic_spline_breaksToknots(bvec):
"""
Given breakpoints generated from _cubic_spline_breaks,
[x0, x0, x0, x0, x1, x2, ..., xN-2, xf, xf, xf, xf],
return the spline knots [x0, x1, ..., xN-1=xf].
    This function "undoes" _cubic_spline_breaks:
    knot_vec = cubic_spline_breaksToknots(_cubic_spline_breaks(knot_vec))
"""
return bvec[3:-3] | 15a73dea4b001e05bd67075ec21e15247db1f031 | 705,259 |
def tamper_nt_response(data, vars):
"""The connection is sometimes terminated if NTLM is successful, this prevents that"""
print("Tamper with NTLM response")
nt_response = vars["nt_response"]
fake_response = bytes([(nt_response[0] + 1 ) % 0xFF]) + nt_response[1:]
return data.replace(nt_response, fake_response) | cf2acad343f457b5ea5529d91653169d2093d500 | 705,260 |
def pascal_classes():
"""Get Pascal VOC classes
:return: mapping from class name to an integer
"""
return {
'aeroplane': 1, 'bicycle' : 2, 'bird' : 3, 'boat' : 4,
'bottle' : 5, 'bus' : 6, 'car' : 7, 'cat' : 8,
'chair' : 9, 'cow' : 10, 'diningtable': 11, 'dog' : 12,
'horse' : 13, 'motorbike': 14, 'person' : 15, 'potted-plant': 16,
'sheep' : 17, 'sofa' : 18, 'train' : 19, 'tv/monitor' : 20
} | e6f488df00075ed6977024466e0eebb995b98605 | 705,262 |
def filenames_per_batch (gen):
""" arg = name of the data generator (datagen.flow_from_dataframe) """
img_paths_per_batch=[]
batches_per_epoch = gen.samples // gen.batch_size + (gen.samples % gen.batch_size > 0)
for i in range(batches_per_epoch):
batch = next(gen)
current_index = ((gen.batch_index-1) * gen.batch_size)
if current_index < 0:
if gen.samples % gen.batch_size > 0:
current_index = max(0, gen.samples - gen.samples % gen.batch_size)
else:
current_index = max(0,gen.samples - gen.batch_size)
index_array = gen.index_array[current_index:current_index + gen.batch_size].tolist()
img_paths = [gen.filepaths[idx] for idx in index_array]
img_paths_per_batch.append(img_paths)
return img_paths_per_batch | 23ea9dfbbfe64fc51796af22c83a470847c9f698 | 705,264 |
def ethtype_to_int_priv_pubv(priv, pubv):
    """
    Convert priv and pubv to the decimal format supported by weidentity.
    :param priv: type: bytes
    :param pubv: type: hex string
    :return: dict with "priv" and "pubv" as decimal strings
    """
    private_key = int.from_bytes(priv, byteorder='big', signed=False)
    public_key = int(pubv, 16)  # parse the hex string directly; eval() was unsafe
    return {"priv": str(private_key), "pubv": str(public_key)} | 763a284015029a43257061818634b50d69417de5 | 705,265 |
import argparse
def build_arg_parse() -> argparse.ArgumentParser:
"""Builds the arguments parser."""
parser = argparse.ArgumentParser(
description="This script updates the python extension micro version based on the release or pre-release channel."
)
parser.add_argument(
"--release",
action="store_true",
help="Treats the current build as a release build.",
)
parser.add_argument(
"--build-id",
action="store",
type=int,
default=None,
help="If present, will be used as a micro version.",
required=False,
)
parser.add_argument(
"--for-publishing",
action="store_true",
help="Removes `-dev` or `-rc` suffix.",
)
return parser | 8151e3366c7a2acecb7f40cb05684af368dc9e1f | 705,266 |
def _SanitizeDoc(doc, leader):
"""Cleanup the doc string in several ways:
* Convert None to empty string
* Replace new line chars with doxygen comments
* Strip leading white space per line
"""
if doc is None:
return ''
return leader.join([line.lstrip() for line in doc.split('\n')]) | 7ca6f17296c9b23c05239092e28c8d6b4df7c725 | 705,267 |
def pop_execute_query_kwargs(keyword_arguments):
""" pop the optional execute query arguments from arbitrary kwargs;
return non-None query kwargs in a dict
"""
query_kwargs = {}
for key in ('transaction', 'isolate', 'pool'):
val = keyword_arguments.pop(key, None)
if val is not None:
query_kwargs[key] = val
return query_kwargs | d4ae2df3158660f62e21153d943922692f633b76 | 705,268 |
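A usage sketch with hypothetical keyword arguments:

kwargs = {'transaction': True, 'pool': None, 'other': 1}
query_kwargs = pop_execute_query_kwargs(kwargs)
assert query_kwargs == {'transaction': True}  # None values are dropped
assert kwargs == {'other': 1}                 # recognized keys are popped off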
def get_long_description(readme_file='README.md'):
"""Returns the long description of the package.
@return str -- Long description
"""
return "".join(open(readme_file, 'r').readlines()[2:]) | 604c57fce1f9b8c32df4b64dc9df4fe61120d680 | 705,269 |
def extract_versions():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
    versions = {}
    with open('lib/matplotlib/__init__.py') as fd:
        for line in fd.readlines():
            if line.startswith('__version__numpy__'):
                # In Python 3, exec() cannot rebind function locals, so
                # execute into an explicit namespace instead of relying
                # on `return locals()`.
                exec(line.strip(), {}, versions)
    return versions | b54733ffdae76206400e7203f792e3b809cf6c30 | 705,270 |
def lico2_ocp_Ramadass2004(sto):
"""
    Lithium Cobalt Oxide (LiCO2) Open Circuit Potential (OCP) as a function of the
    stoichiometry. The fit is taken from Ramadass 2004. Stretch is considered the
overhang area negative electrode / area positive electrode, in Ramadass 2002.
References
----------
.. [1] P. Ramadass, Bala Haran, Parthasarathy M. Gomadam, Ralph White, and Branko
N. Popov. "Development of First Principles Capacity Fade Model for Li-Ion Cells."
(2004)
Parameters
----------
sto : :class:`pybamm.Symbol`
        Stoichiometry of material (li-fraction)
"""
stretch = 1.13
sto = stretch * sto
u_eq = ((- 4.656 + 88.669 * (sto ** 2)
- 401.119 * (sto ** 4) + 342.909 * (sto ** 6)
- 462.471 * (sto ** 8) + 433.434 * (sto ** 10)) / (
- 1 + 18.933 * (sto ** 2) - 79.532 * (sto ** 4)
+ 37.311 * (sto ** 6) - 73.083 * (sto ** 8)
+ 95.96 * (sto ** 10))
)
return u_eq | 2c0902e1d1cdec9ac7626038e34092933665bf84 | 705,272 |
import doctest
def doctestobj(*args, **kwargs):
"""
Wrapper for doctest.run_docstring_examples that works in maya gui.
"""
return doctest.run_docstring_examples(*args, **kwargs) | 1efccd1a887636bbcf80e762f12934e7d03efe28 | 705,273 |
def _return_model_names_for_plots():
"""Returns models to be used for testing plots. Needs
- 1 model that has prediction interval ("theta")
- 1 model that does not have prediction interval ("lr_cds_dt")
- 1 model that has in-sample forecasts ("theta")
- 1 model that does not have in-sample forecasts ("lr_cds_dt")
"""
model_names = ["theta", "lr_cds_dt"]
return model_names | bd180134c5c74f4d1782384bc8e3b13abff8b125 | 705,274 |
import time
def find_workflow_component_figures(page):
""" Returns workflow component figure elements in `page`. """
time.sleep(0.5) # Pause for stable display.
root = page.root or page.browser
return root.find_elements_by_class_name('WorkflowComponentFigure') | 1a56a0a348803394c69478e3443cbe8c6cb0ce9c | 705,276 |
import configparser
def get_headers(path='.credentials/key.conf'):
"""Get the authentication key header for all requests"""
config = configparser.ConfigParser()
config.read(path)
headers = {
'Ocp-Apim-Subscription-Key': config['default']['primary']
}
return headers | d40c1b6246efb728040adc47b6180f50aa4dc3e8 | 705,277 |
def Sdif(M0, dM0M1, alpha):
"""
:math:`S(\\alpha)`, as defined in the paper, computed using `M0`,
`M0 - M1`, and `alpha`.
Parameters
----------
M0 : ndarray or matrix
A symmetric indefinite matrix to be shrunk.
dM0M1 : ndarray or matrix
M0 - M1, where M1 is a positive definite target matrix.
alpha : float
A shrinking parameter.
Returns
-------
    S(alpha) : ndarray or matrix
        The shrunk matrix, i.e. the convex combination :math:`(1-\\alpha) M_0 + \\alpha M_1`.
"""
return M0 - alpha * dM0M1 | 6463cb04d7dcfaad93358c7db38f4674a51654b2 | 705,278 |
import os
def env_world_size():
"""World size for distributed training.
Is set in torch.distributed.launch as args.nproc_per_node * args.nnodes.
For example, when running on 1 node with 4 GPUs per node, the world size is 4.
see: https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py"""
if 'WORLD_SIZE' in os.environ:
return int(os.environ['WORLD_SIZE'])
return 1 | 58587f8f4462fd18e156834214385f5dfebfaa2a | 705,279 |
from typing import Dict
def get_sanitized_bot_name(dict: Dict[str, int], name: str) -> str:
"""
Cut off at 31 characters and handle duplicates.
:param dict: Holds the list of names for duplicates
:param name: The name that is being sanitized
:return: A sanitized version of the name
"""
    # This doesn't work sometimes in continue_and_spawn because it doesn't understand the names already in the match
# which may be kept if the spawn IDs match. In that case it's the caller's responsibility to figure it out upstream.
name = name[:31]
base_name = name
count = 2
while name in dict:
name = f'{base_name[:27]} ({count})' # Truncate at 27 because we can have up to '(10)' appended
count += 1
dict[name] = 1
return name | 42d432610602b15b1206f0ce1bc007fdaef6b23f | 705,280 |
def query_left(tree, index):
"""Returns sum of values between 1-index inclusive.
Args:
tree: BIT
index: Last index to include to the sum
Returns:
Sum of values up to given index
"""
res = 0
while index:
res += tree[index]
index -= (index & -index)
return res | e293194c86ad1c53a005be290ba61ef2fff097c8 | 705,282 |
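A minimal sketch that builds a 1-indexed Fenwick tree over [3, 2, 5] with the standard point-update loop, then queries prefix sums:

values = [3, 2, 5]
tree = [0] * (len(values) + 1)
for i, v in enumerate(values, start=1):
    j = i
    while j < len(tree):  # propagate the update upward
        tree[j] += v
        j += j & -j
assert query_left(tree, 2) == 5    # 3 + 2
assert query_left(tree, 3) == 10   # 3 + 2 + 5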
def maxsum(sequence):
"""Return maximum sum."""
maxsofar, maxendinghere = 0, 0
for x in sequence:
# invariant: ``maxendinghere`` and ``maxsofar`` are accurate for ``x[0..i-1]``
maxendinghere = max(maxendinghere + x, 0)
maxsofar = max(maxsofar, maxendinghere)
return maxsofar | 884d8b5dd20a0a35ff79c64bc6151b0d8ae7f5a0 | 705,283 |
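A quick check using the classic example from Programming Pearls; an all-negative input yields 0 because the empty subarray is allowed:

assert maxsum([31, -41, 59, 26, -53, 58, 97, -93, -23, 84]) == 187
assert maxsum([-1, -2]) == 0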
def get_conserved_sequences(cur):
"""docstring for get_conserved_sequences"""
cur.execute("SELECT sureselect_probe_counts.'sureselect.seq', \
sureselect_probe_counts.cnt, \
sureselect_probe_counts.data_source, \
cons.cons \
FROM sureselect_probe_counts, cons \
WHERE sureselect_probe_counts.'sureselect.id' = cons.id \
AND data_source = 'conservation'")
return cur.fetchall() | e518d22b7ac2ba072c75a76f72114009eed6ce7c | 705,284 |
from typing import Any
def delete_empty_keys(data: Any):
"""Build dictionary copy sans empty fields"""
# Remove empty field from dict
# https://stackoverflow.com/questions/5844672/delete-an-element-from-a-dictionary#5844700
dic = data.dict()
    return {i: dic[i] for i in dic if dic[i] is not None} | db190b021bb00ae3870e205bc27bf28dd09e29c3 | 705,285 |
def find_language(article_content):
"""Given an article's xml content as string, returns the article's language"""
if article_content.Language is None:
return None
return article_content.Language.string | 4a228779992b156d01bc25501677556a5c9b7d39 | 705,286 |
import re
def geturls(str1):
"""returns the URIs in a string"""
    URLPAT = r'https?:[\w/\.:;+\-~\%#\$?=&,()]+|www\.[\w/\.:;+\-~\%#\$?=&,()]+|' + \
             r'ftp:[\w/\.:;+\-~\%#?=&,]+'
    return re.findall(URLPAT, str1) | 3d127a3c4250d7b013d9198e21cfb87f7909de8d | 705,287 |
def reduce_to(n):
"""processor to reduce list"""
def reduce(list):
        if len(list) < n:
            return list
else:
return list[0:n]
return reduce | b9a1fb6091ef9801957c6cc64cab5485091d6801 | 705,288 |
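A usage sketch of the processor factory; lists shorter than n pass through unchanged:

take3 = reduce_to(3)
assert take3([1, 2, 3, 4, 5]) == [1, 2, 3]
assert take3([1, 2]) == [1, 2]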
import os
def readfiles(meta):
"""
Reads in the files saved in datadir and saves them into a list
Parameters
-----------
meta
metadata object
Returns
----------
meta
metadata object but adds segment_list to metadata containing the sorted data fits files
Notes:
----------
History:
Written by Sebastian Zieba December 2021
"""
meta.segment_list = []
for fname in os.listdir(str(meta.datadir)):
if fname.endswith(meta.suffix + '.fits'):
meta.segment_list.append(str(meta.datadir) +'/'+ fname)
return meta | be62cd892e25f4b5cd5a1671b734bac6893c1df9 | 705,289 |
def read_file(file_name, encoding='utf-8'):
"""
    Read a text file.
:param encoding:
:param file_name:
:return:
"""
with open(file_name, 'rb') as f:
data = f.read()
if encoding is not None:
data = data.decode(encoding)
return data | 4e4a90512727b4b40d4968930479f226dc656acb | 705,290 |
import os
import json
def get_storcli_dall_show(c):
""" Gets data from json resource
:param c: unused
:return:
"""
del c
with open(os.path.join(os.path.dirname(__file__),
'../resources/storcli_dall_show.json')) as fp:
data = json.load(fp)
controller_list = []
for controller in data['Controllers']:
# Account for some crazy JSON schema
controller_list.append(
controller['Response Data']['Response Data']
)
return controller_list | 24b0ea1065443d17078971c6567989e05da0cd60 | 705,291 |
import os
import re
def generatePSSMProfile(fastas, outDir, blastpgp, db):
"""
Generate PSSM file by using the blastpgp program in NCBI blast-2.2.18 package.
Parameters
----------
    fastas : list
        the protein sequences, parsed from a fasta file as (name, sequence) pairs.
    outDir : string
        the directory to write the generated PSSM files to.
    blastpgp : string
        the path of the blastpgp program.
    db : string
        the uniref50 database, which is formatted by the 'formatdb' program in the blast package.
Returns
-------
a string:
A directory name, which include the predicted protein PSSM information.
"""
if os.path.exists(outDir) == False:
os.mkdir(outDir)
for i in fastas:
        name, sequence = re.sub(r'\|', '', i[0]), i[1]
with open(name + '.txt', 'w') as f:
f.write('>'+name+'\n'+sequence + '\n')
myCmd = blastpgp + ' -i ' + name + '.txt' + ' -d ' + db + ' -b 0 -v 5000 -j 3 -h 0.001 -Q ' + outDir + '/' + name +'.pssm'
print('Doing psiblast for protein: ' + name)
os.system(myCmd)
os.remove(name + '.txt')
return outDir | 2e7cc5f3fae9820d1ecdef1b3674c835bc73e640 | 705,292 |
def safe_encode(s, coding='utf-8', errors='surrogateescape'):
"""encode str to bytes, with round-tripping "invalid" bytes"""
return s.encode(coding, errors) | 1b1ba8439db8ec4c82c571197e5007e58f397c87 | 705,293 |
import collections
def extract_stats(output):
"""Extract stats from `git status` output
"""
lines = output.splitlines()
return collections.Counter([x.split()[0] for x in lines]) | 41d8aef4df3401ee8127ad0b72402ff9c54c41e3 | 705,294 |
def _expr_rshift_as_multiplication_of_reverse_order(lhs, rhs):
"""The multiply express will reverse order.
"""
return rhs * lhs | 4f245d8a8071cf4bcfc6543e0abae24cb1cdde9d | 705,296 |
import numpy
def CalculateHarmonicTopoIndex(mol):
"""
#################################################################
    Calculation of the harmonic topological index proposed by Narumi.
---->Hato
Usage:
result=CalculateHarmonicTopoIndex(mol)
Input: mol is a molecule object
Output: result is a numeric value
#################################################################
"""
deltas = [x.GetDegree() for x in mol.GetAtoms()]
while 0 in deltas:
deltas.remove(0)
deltas = numpy.array(deltas, "d")
nAtoms = mol.GetNumAtoms()
res = nAtoms / sum(1.0 / deltas)
return res | 65984702d49071f18089cdf17b1ba4a21b70357e | 705,297 |
def add_custom_encoder_arguments(group):
"""Define arguments for Custom encoder."""
group.add_argument(
"--enc-block-arch",
type=eval,
action="append",
default=None,
help="Encoder architecture definition by blocks",
)
group.add_argument(
"--enc-block-repeat",
default=0,
type=int,
help="Repeat N times the provided encoder blocks if N > 1",
)
group.add_argument(
"--custom-enc-input-layer",
type=str,
default="conv2d",
choices=["conv2d", "vgg2l", "linear", "embed"],
help="Custom encoder input layer type",
)
group.add_argument(
"--custom-enc-positional-encoding-type",
type=str,
default="abs_pos",
choices=["abs_pos", "scaled_abs_pos", "rel_pos"],
help="Custom encoder positional encoding layer type",
)
group.add_argument(
"--custom-enc-self-attn-type",
type=str,
default="self_attn",
choices=["self_attn", "rel_self_attn"],
help="Custom encoder self-attention type",
)
group.add_argument(
"--custom-enc-pw-activation-type",
type=str,
default="relu",
choices=["relu", "hardtanh", "selu", "swish"],
help="Custom encoder pointwise activation type",
)
group.add_argument(
"--custom-enc-conv-mod-activation-type",
type=str,
default="swish",
choices=["relu", "hardtanh", "selu", "swish"],
help="Custom encoder convolutional module activation type",
)
return group | f49a778b78351a08bdb411e8004d00da0ccd96a4 | 705,298 |
import argparse
def get_parser() -> argparse.ArgumentParser:
"""
Create a command line parser.
Returns:
argparse.ArgumentParser: Created parser
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--list",
required=False,
action="store_true",
help="list of all highways"
)
parser.add_argument(
"-r",
"--roadworks",
type=str,
required=False,
help="list of roadworks"
)
parser.add_argument(
"-p",
"--parking",
type=str,
required=False,
help="list of service areas"
)
parser.add_argument(
"-v",
"--version",
required=False,
action="store_true",
help="show the version"
)
return parser | c17895029da4e41a5bbc0e0cc9a689680fb2c80e | 705,299 |
def sign(x):
"""Returns sign of x"""
if x==0:
return 0
return x/abs(x) | 677dfd796b0ee354fbcaf78b58cf7a5a660446b5 | 705,300 |
from datetime import datetime
def handler_callback(callback, user):
"""
A method for handling callbacks
:param user: user object
:param callback: callback from telebot.types.CallbackQuery
    :return: datetime object if a valid date was picked, -1 if it is out of range, None for month navigation
"""
if callback == "prev" and user.curr_date.replace(day=1) >= user.min_date.replace(
day=1
):
user.curr_date = user.curr_date.replace(month=user.curr_date.month - 1)
return None
if callback == "next" and user.curr_date.replace(day=1) <= user.max_date.replace(
day=1
):
user.curr_date = user.curr_date.replace(month=user.curr_date.month + 1)
return None
if callback != "none":
entered_date = datetime.strptime(callback, "%Y,%m,%d")
if user.min_date <= entered_date <= user.max_date:
return entered_date
return -1 | 27eb78f77d77543fe02a984751078cee96d38b06 | 705,301 |
def echo_handler(completed_proc):
"""Immediately return ``completed_proc``."""
return completed_proc | 53f3ef51bf349ac5146014ef25b88326d5bc010e | 705,302 |
import random
def random_choice(choices):
"""returns a random choice
from a list of (choice, probability)"""
# sort by probability
choices = sorted(choices, key=lambda x:x[1])
roll = random.random()
    acc_prob = 0
    for choice, prob in choices:
        acc_prob += prob
        if roll <= acc_prob:
            return choice
    # Guard against floating-point rounding leaving the accumulated
    # probability just below the roll when the weights sum to 1.
    return choices[-1][0] | f477abe220fa9d87ee3692bed8c41973af4c637c | 705,303 |
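An illustrative draw (the probabilities are hypothetical); over many rolls the empirical frequency approaches the weight:

random.seed(0)
picks = [random_choice([('a', 0.7), ('b', 0.3)]) for _ in range(1000)]
print(picks.count('a') / 1000)  # roughly 0.7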
import torch
def convert_label_to_color(label, color_map):
"""Convert integer label to RGB image.
"""
n, h, w = label.shape
rgb = torch.index_select(color_map, 0, label.view(-1)).view(n, h, w, 3)
rgb = rgb.permute(0, 3, 1, 2)
return rgb | a37ec3ad382f88bdc9de8fbc2b4e2524213607c3 | 705,304 |
def is_collection(name):
"""compare with https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/user"""
return name in [
'assignedLicenses', 'assignedPlans', 'businessPhones', 'imAddresses',
'interests', 'provisionedPlans', 'proxyAddresses', 'responsibilities',
'schools', 'skills'
] | c2557f142f3ca066506256b273c9f65657079478 | 705,305 |
def in_nested_list(my_list, item):
"""
Determines if an item is in my_list, even if nested in a lower-level list.
"""
if item in my_list:
return True
else:
return any(in_nested_list(sublist, item) for sublist in my_list if
isinstance(sublist, list)) | 3daeaf89099bf19ba82eabfedd943adfb32fc146 | 705,306 |
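Two illustrative checks of the recursive membership test:

assert in_nested_list([1, [2, [3, 4]]], 3)
assert not in_nested_list([1, [2, [3, 4]]], 5)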
import sys
def get_exc_info(exception):
"""Get an exc_info tuple based on an exception instance."""
try:
raise exception
except:
return sys.exc_info() | 58378b68b7d6c65d0b62d6fd304c29798f65f1c2 | 705,307 |
def is_prebuffer() -> bool:
"""
Return whether audio is in pre-buffer (threadsafe).
Returns
-------
is_prebuffer : bool
Whether audio is in pre-buffer.
"""
is_prebuffer = bool(RPR.Audio_IsPreBuffer()) # type:ignore
return is_prebuffer | 8afa4979578be310fd71b22907c99bb747780454 | 705,308 |
def _get_interface_name_index(dbapi, host):
"""
Builds a dictionary of interfaces indexed by interface name.
"""
interfaces = {}
for iface in dbapi.iinterface_get_by_ihost(host.id):
interfaces[iface.ifname] = iface
return interfaces | 0217f6ef8d4e5e32d76a4fc0d66bf74aa45f8c36 | 705,309 |
import torch
def to_data(x):
"""Converts variable to numpy"""
if torch.cuda.is_available():
x = x.cpu()
return x.data.numpy() | b91f755d43fde06db1bd38158881eb2f84e43d10 | 705,310 |
import argparse
def get_arguments():
"""
Wrapper function to get the command line arguments. Inserting this piece of code
into its own function for conda compatibility.
"""
parser = argparse.ArgumentParser(
prog='KrakMeOpen',
usage='krakmeopen [--input FILE | --input_pickle FILE | --input_file_list FILE] [--output FILE | --output_pickle FILE] --names FILE --nodes FILE [--tax_id INT | --tax_id_file FILE] --kmer_tally_table FILE',
description='''
A Kraken2 downstream analysis toolkit. More specifically, calculate
a series of quality metrics for Kraken2 classifications.''',
epilog='''
The metrics are calculated on the clade-level. All kmers
from all reads that are classified to any of the nodes in the
clades rooted at the supplied tax IDs are aggregated, and metrics
are calculated on those aggregations.
Input is Kraken2 read-by-read classification files
(can be gzipped).
Output is a tab separated file containing the metrics.''')
# Input arguments
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument(
'--input',
metavar='FILE',
type=str,
help='Kraken2 read-by-read classifications file.')
input_group.add_argument(
'--input_pickle',
metavar='FILE',
type=str,
help='A pickle file containing kmer tallies, produced with --output_pickle')
input_group.add_argument(
'--input_file_list',
metavar='FILE',
type=str,
help='''A file containing file paths to multiple pickles, one per line.
Will calculate metrics on the sum of kmer counts from all pickles.''')
# Output arguments
output_group = parser.add_mutually_exclusive_group(required=True)
output_group.add_argument(
'--output',
metavar='FILE',
type=str,
help='The file to write the quality metrics output to.')
output_group.add_argument(
'--output_pickle',
metavar='FILE',
type=str,
help='''The pickle file to write kmer tallies to. Use this argument
to supress calculation of quality metrics and only output kmer
counts to a pickled file. Input the pickled file using
--input_pickle.''')
parser.add_argument(
'--kmer_tally_table',
metavar='FILE',
required=False,
help='File to output the complete kmer tally table for each tax ID to. Optional.')
# The taxonomy
parser.add_argument(
'--names',
metavar='FILE',
required=True,
help='NCBI style taxonomy names dump file (names.dmp). Required.')
parser.add_argument(
'--nodes',
metavar='FILE',
required=True,
help='NCBI style taxonomy nodes dump file (nodes.dmp). Required.')
# Supply relevant taxonomic ID on command line, or one or multiple taxonomic IDs
# through a text file.
tax_id_group = parser.add_mutually_exclusive_group(required=True)
tax_id_group.add_argument(
'--tax_id',
metavar='INT',
type=int,
help='A taxonomic ID for a clade that you wish to calculate quality metrics for.')
tax_id_group.add_argument(
'--tax_id_file',
metavar='FILE',
type=str,
help='''Supply multiple taxonomic IDs at once. A textfile with one
taxonomic ID per line. Calculate quality metrics for the clades
rooted at the taxonomic IDs in the file.''')
return parser.parse_args() | ad326fdab79874f33e8df005d5d5e470d23f8e42 | 705,311 |
def getManifestFsLayers(manifest):
""" returns hashes pointing to layers for manifest"""
return manifest["manifest"]["fsLayers"] | a3449c2828222c2b806df8621dd6a24375778ed2 | 705,312 |
def delchars(s, chars):
    """Returns a string for which all occurrences of characters in
    chars have been removed."""
    # Python 3 version: str.maketrans's third argument maps each listed
    # character to None (deletion). The original two-argument translate
    # with a 256-char identity table was Python 2 only. The first
    # parameter is renamed from `str` to avoid shadowing the builtin.
    return s.translate(str.maketrans('', '', chars)) | a220202a05e0ead7afa6226ef309c56940a1d153 | 705,313 |
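A quick check of the character-stripping helper (using the Python 3 form above):

assert delchars("hello, world!", ",!") == "hello world"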
def bytes2hex(bytes_array):
"""
Converts byte array (output of ``pickle.dumps()``) to spaced hexadecimal string representation.
Parameters
----------
bytes_array: bytes
Array of bytes to be converted.
Returns
-------
str
Hexadecimal representation of the byte array.
"""
s_hex = bytes_array.hex()
# Insert spaces between each hex number. It makes YAML file formatting better.
return " ".join([s_hex[n : n + 2] for n in range(0, len(s_hex), 2)]) | 19019ee1e3cd45d671f53e0ae4fd92b283c3b38d | 705,314 |
import os
def family_directory(fonts):
"""Get the path of font project directory."""
if fonts:
dirname = os.path.dirname(fonts[0])
if dirname == '':
dirname = '.'
return dirname | 91d1f880a01ba2de11e6570272d5748d1dea6d47 | 705,315 |
from pathlib import Path
def to_posix(d):
"""Convert the Path objects to string."""
if isinstance(d, dict):
for k, v in d.items():
d[k] = to_posix(v)
elif isinstance(d, list):
return [to_posix(x) for x in d]
elif isinstance(d, Path):
return d.as_posix()
return d | 91dbda7738308dd931b58d59dad8e04a277034ea | 705,316 |
def firstLetterCipher(ciphertext):
"""
Returns the first letters of each word in the ciphertext
Example:
    Cipher Text: Horses everytime look positive
Decoded text: Help """
return "".join([i[0] for i in ciphertext.split(" ")]) | 87f37d1a428bde43c07231ab2e5156c680c96f91 | 705,318 |
from pathlib import Path
from typing import Tuple
import re
def parse_samtools_flagstat(p: Path) -> Tuple[int, int]:
"""Parse total and mapped number of reads from Samtools flagstat file"""
total = 0
mapped = 0
with open(p) as fh:
for line in fh:
m = re.match(r'(\d+)', line)
if m:
if 'in total' in line:
total = int(m.group(1))
if ' mapped (' in line:
mapped = int(m.group(1))
return total, mapped | 60c6f9b227cefdea9877b05bb2fe66e4c82b4dd1 | 705,319 |
def app_files(proj_name):
"""Create a list with the project files
Args:
proj_name (str): the name of the project, where the code will be hosted
Returns:
files_list (list): list containing the file structure of the app
"""
files_list = [
"README.md",
"setup.py",
"setup.cfg",
f"{proj_name}/__init__.py",
f"{proj_name}/{proj_name}.py",
"tests/tests.py",
]
return files_list | 2c6cbf112c7939bea12672668c8a5db1656b6edd | 705,320 |
import torch
def log_safe(x):
"""The same as torch.log(x), but clamps the input to prevent NaNs."""
x = torch.as_tensor(x)
return torch.log(torch.min(x, torch.tensor(33e37).to(x))) | 98c73b316d22ebe9ef4b322b1ba984a734422e7a | 705,321 |
import requests
def resolve_s1_slc(identifier, download_url, project):
"""Resolve S1 SLC using ASF datapool (ASF or NGAP). Fallback to ESA."""
# determine best url and corresponding queue
vertex_url = "https://datapool.asf.alaska.edu/SLC/SA/{}.zip".format(
identifier)
r = requests.head(vertex_url, allow_redirects=True)
if r.status_code == 403:
url = r.url
queue = "{}-job_worker-small".format(project)
elif r.status_code == 404:
url = download_url
queue = "factotum-job_worker-scihub_throttled"
else:
raise RuntimeError("Got status code {} from {}: {}".format(
r.status_code, vertex_url, r.url))
return url, queue | cf489b0d65a83dee3f87887a080d67acd180b0b3 | 705,322 |
def make_anagram_dict(filename):
"""Takes a text file containing one word per line.
    Returns a dictionary:
    Key is a tuple of the word's letters, sorted alphabetically,
    Value is a list of all words that can be formed by those letters"""
    result = {}
    with open(filename) as fin:
        for line in fin:
            word = line.strip().lower()
            letters_in_word = tuple(sorted(word))
            if letters_in_word not in result:
                result[letters_in_word] = [word]
            else:
                result[letters_in_word].append(word)
    return result | c6c0ad29fdf63c91c2103cefc506ae36b64a40ec | 705,323 |
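A usage sketch with a hypothetical word-list file:

with open("words.txt", "w") as f:  # one word per line
    f.write("listen\nsilent\nenlist\n")
anagrams = make_anagram_dict("words.txt")
assert anagrams[tuple(sorted("listen"))] == ["listen", "silent", "enlist"]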
def group_property_types(row : str) -> str:
"""
This functions changes each row in the dataframe to have the one
of five options for building type:
- Residential
- Storage
- Retail
- Office
- Other
this was done to reduce the dimensionality down to the top building
types.
:param: row (str) : The row of the pandas series
:rvalue: str
:return: One of 5 building types.
"""
if row == 'Multifamily Housing' or\
row == 'Residence Hall/Dormitory' or\
row == 'Hotel' or\
row == 'Other - Lodging/Residential' or\
row == 'Residential Care Facility':
return 'Residential'
elif row == 'Non-Refrigerated Warehouse' or\
row == 'Self-Storage Facility' or\
row == 'Refrigerated Warehouse':
return 'Storage'
elif row == 'Financial Office' or\
row == 'Office':
return 'Office'
elif row == 'Restaurant' or\
row == 'Retail Store' or\
row == 'Enclosed Mall' or\
row == 'Other - Mall' or\
row == 'Strip Mall' or\
row == 'Personal Services (Health/Beauty, Dry Cleaning, etc.)' or\
row == 'Lifestyle Center' or\
row == 'Wholesale Club/Supercenter':
return 'Retail'
else:
return 'Other' | 44aa5d70baaa24b0c64b7464b093b59ff39d6d1c | 705,324 |
def write_simple_templates(n_rules, body_predicates=1, order=1):
"""Generate rule template of form C < A ^ B of varying size and order"""
text_list = []
const_term = "("
for i in range(order):
const_term += chr(ord('X') + i) + ","
const_term = const_term[:-1] + ")"
write_string = "{0} #1{1} :- #2{1}".format(n_rules, const_term)
if body_predicates > 1:
for i in range(body_predicates - 1):
write_string += ", #" + str(i + 3) + const_term
text_list.append(write_string)
return text_list | 3a911702be9751b0e674171ec961029f5b10a9e7 | 705,325 |
def iou(box1, box2, x1y1x2y2=True):
""" iou = intersection / union """
if x1y1x2y2:
# min and max of 2 boxes
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else: # (x, y, w, h)
mx = min(box1[0] - box1[2] / 2, box2[0] - box2[2] / 2)
Mx = max(box1[0] + box1[2] / 2, box2[0] + box2[2] / 2)
my = min(box1[1] - box1[3] / 2, box2[1] - box2[3] / 2)
My = max(box1[1] + box1[3] / 2, box2[1] + box2[3] / 2)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
    if cw <= 0 or ch <= 0:
        return 0.0
    area1 = w1 * h1
    area2 = w2 * h2
    carea = cw * ch  # intersection area (was mistyped as `corea`, leaving carea = 0)
    uarea = area1 + area2 - carea
return carea / uarea | 6ad0d3d7dd3a3031d28f8a0b9d0075ecf9362792 | 705,326 |
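A hand-checkable case: two unit squares overlapping in a 0.5 x 1 strip give intersection 0.5 and union 1.5:

print(iou([0, 0, 1, 1], [0.5, 0, 1.5, 1]))  # -> 0.333...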
def gen_string(**kwargs) -> str:
"""
Generates the string to put in the secrets file.
"""
return f"""\
apiVersion: v1
kind: Secret
metadata:
name: keys
namespace: {kwargs['namespace']}
type: Opaque
data:
github_client_secret: {kwargs.get('github_client_secret')}
""" | ed2702c171f20b9f036f07ec61e0a4d74424ba03 | 705,327 |
def get_props_from_row(row):
"""Return a dict of key/value pairs that are props, not links."""
    # Series.iteritems() was removed in pandas 2.0; .items() is the replacement.
    return {k: v for k, v in row.items() if "." not in k and v != ""} | a93dfbd1ef4dc87414492b7253b1ede4e4cc1888 | 705,328 |
def generate_url(resource, bucket_name, object_name, expire=3600):
"""Generate URL for bucket or object."""
client = resource.meta.client
url = client.generate_presigned_url(
"get_object",
Params={"Bucket": bucket_name, "Key": object_name},
ExpiresIn=expire,
)
return url | 8a74618d5cfcd39c8394577035b497ecb5835765 | 705,329 |
import math
def F7(x):
"""Easom function"""
s = -math.cos(x[0])*math.cos(x[1])*math.exp(-(x[0] - math.pi)**2 - (x[1]-math.pi)**2)
return s | a17060f046df9c02690e859e789b7ef2591d1a3c | 705,330 |
import torch
def zaxis_to_world(kpt: torch.Tensor):
"""Transform kpt from 2D+Z to 3D Real World Coordinates (RWC) for ITOP Dataset
    Args:
        kpt (torch.Tensor): Tensor containing keypoints to transform
    Returns:
        torch.Tensor: Converted keypoints
    """
    tmp = kpt.clone()
    tmp[..., 0] = (tmp[..., 0].clone() - 160) * 0.0035 * tmp[..., 2].clone()
    # Use the y pixel coordinate here; indexing channel 0 again would reuse
    # the already-converted x value.
    tmp[..., 1] = -(tmp[..., 1].clone() - 120) * 0.0035 * tmp[..., 2].clone()
    return tmp | d925382c62d370a991fa2dfd4c51cb43d051423e | 705,331 |
import numpy
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
>>> numpy.allclose(q, [-44, -14, 48, 28])
True
"""
x0, y0, z0, w0 = quaternion0
x1, y1, z1, w1 = quaternion1
return numpy.array((
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0,
-x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64) | bcc6973f169840400c86b5eaf673deb75444a63f | 705,332 |
from datetime import datetime
def _get_stop_as_datetime(event_json)->datetime:
"""Reads the stop timestamp of the event and returns it as a datetime
object.
Args:
event_json (json): The event encapsulated as json.
Returns
datetime: Timestamp of the stop of the event.
"""
name = event_json['info']['name']
payload_stop = 'meta.raw_payload.' + name + '-stop'
stop_timestamp_string = event_json['info'][payload_stop]['timestamp']
stop_date_string, stop_time_string = stop_timestamp_string.split('T')
stop_time_string, _ = stop_time_string.split('.')
date_and_time_string = stop_date_string + ' ' + stop_time_string
return datetime.strptime(date_and_time_string, '%Y-%m-%d %H:%M:%S') | 958915a568c66a04da3f44abecf0acca90181f43 | 705,334 |
from typing import Dict
def contents_append_notable_sequence_event_types(sequence, asset_sequence_id) -> Dict:
"""Appends a dictionary of filtered data to the base list for the context
Args:
sequence: sequence object
asset_sequence_id: asset sequence ID
    Returns:
        A content dict with the relevant notable sequence event types
"""
content = {
'eventType': sequence.get('eventType'),
'displayName': sequence.get('displayName'),
'count': sequence.get('count'),
'sequenceId': asset_sequence_id
}
return content | fca27e5242968fa0db3c9d450588d77e4b307d1e | 705,335 |
import string
import random
def gen_pass(length=8, no_numerical=False, punctuation=False):
"""Generate a random password
Parameters
----------
length : int
The length of the password
no_numerical : bool, optional
If true the password will be generated without 0-9
punctuation : bool, optional
If true the password will be generated with punctuation
Returns
-------
string
The generated password
"""
characters = [string.ascii_letters]
# Add user options to the character set
if not no_numerical:
characters.append(string.digits)
if punctuation:
characters.append(string.punctuation)
# Shuffle the character set
random.SystemRandom().shuffle(characters)
chars_left = length - (len(characters) - 1)
char_amounts = []
# Decide on number of characters per character set
for char_set in characters:
i = random.SystemRandom().randint(1, chars_left)
char_amounts.append(i)
chars_left -= i - 1
char_amounts[-1] += chars_left - 1
# Generate the password's characters
password = ''
for i, length in enumerate(char_amounts):
password +=''.join(random.SystemRandom().choice(characters[i]) for _ in range(length))
# Shuffle the password
password = list(password)
random.SystemRandom().shuffle(password)
password = ''.join(password)
return password | dc0ca0c228be11a5264870112e28f27817d4bbc8 | 705,336 |
def document_version_title(context):
"""Document version title"""
return context.title | 1589a76e8bb4b4a42018783b7dbead9efc91e21a | 705,337 |
def vertical(hfile):
"""Reads psipred output .ss2 file.
@param hfile psipred .ss2 file
@return secondary structure string.
"""
result = ''
for l in hfile:
if l.startswith('#'):
continue
if not l.strip():
continue
l_arr = l.strip().split()
result += l_arr[2]
return result | c118b61be6edf29b42a37108c5fe21a0e62b801a | 705,338 |
def decide_play(lst):
"""
This function will return the boolean to control whether user should continue the game.
----------------------------------------------------------------------------
:param lst: (list) a list stores the input alphabet.
:return: (bool) if the input character is alphabet and if only one character is in the string.
"""
if len(lst) == 4:
for char in lst:
if char.isalpha() and len(char) == 1:
pass
else:
return False
return True
else:
return False | 3062e1335eda572049b93a60a0981e905ff6ca0d | 705,339 |
def vocabfile_to_hashdict(vocabfile):
"""
A basic vocabulary hashing strategy just uses the line indices
of each vocabulary word to generate sequential hashes. Thus,
unique hashes are provided for each word in the vocabulary, and the
hash is trivially reversable for easy re-translation.
"""
hash_dict = {}
hash_index = 0
with open(vocabfile, "rb") as file:
for line in file:
line = line.decode('utf-8')
line = line.strip().replace('\n', '') # to prevent bad encoding
hash_dict[line] = hash_index
hash_index += 1
return hash_dict | f26515fbb406897f4f348436a8776fd2b86ce5e4 | 705,340 |
import struct
def pack(code, *args):
"""Original struct.pack with the decorator applied.
Will change the code according to the system's architecture.
"""
return struct.pack(code, *args) | 851e8db4d0e710edf2ea15503d92e76d352a2f05 | 705,341 |
import os
def getpath():
""" Generate filepath to the present file.
:return: filepath to the present file.
:rtype: str
"""
return os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) | 7feb3e0662a512231d6aa02afdb724555cb78ebb | 705,342 |
def _md_fix(text):
"""
sanitize text data that is to be displayed in a markdown code block
"""
return text.replace("```", "``[`][markdown parse fix]") | 2afcad61f4b29ae14c66e04c39413a9a94ae30f8 | 705,343 |
def inverse_hybrid_transform(value):
"""
Transform back from the IRAF-style hybrid log values.
This takes the hybrid log value and transforms it back to the
actual value. That value is returned. Unlike the hybrid_transform
function, this works on single values not a numpy array. That is because
one is not going to have a data array in the hybrid transformation form.
Parameters
----------
value : A real number to be transformed back from the hybrid
log scaling
Returns
-------
newvalue : The associated value that maps to the given hybrid
log value.
"""
if value < 0.:
workvalue = -1.0*value
sign = -1.0
else:
workvalue = value
sign = +1.0
if workvalue < 1.0:
newvalue = 10.*workvalue
else:
newvalue = 10.**workvalue
newvalue = sign * newvalue
return newvalue | 2b8db45901c6f762c970937058670c5c4c5457ea | 705,344 |
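A few illustrative round-trip values (the linear region maps back by multiplying by 10, the log region by exponentiation):

assert inverse_hybrid_transform(0.5) == 5.0     # linear region
assert inverse_hybrid_transform(2.0) == 100.0   # log region
assert inverse_hybrid_transform(-2.0) == -100.0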
def answers(provider):
"""Default answers data for copier"""
answers = {}
answers["class_name"] = "TemplateTestCharm"
# Note "TestCharm" can't be used, that's the name of the deafult unit test class
answers["charm_type"] = provider
return answers | 9ae26b4eceab5a40d9b342dcb510d3e6843ee640 | 705,345 |
def get_side_effects_from_sider(meddra_all_se_file):
"""
Get the most frequent side effects from SIDER
"""
pubchem_to_umls = {}
umls_to_name = {}
with open(meddra_all_se_file, 'r') as med_fd:
for line in med_fd:
fields = line.strip().split('\t')
pubchem = str(int(fields[1].split('CID')[1]))
concept_type = fields[3].upper()
umls_id = fields[4].upper()
umls_term = fields[5].lower()
if concept_type == 'PT':
pubchem_to_umls.setdefault(pubchem, set()).add(umls_id)
umls_to_name[umls_id] = umls_term
print('NUMBER OF PUBCHEM IDS ASSOCIATED WITH UMLS: {}'.format(len(pubchem_to_umls)))
return pubchem_to_umls, umls_to_name | 4fa012cd2a16e09f01d43ae66f99640f1e090e22 | 705,347 |
import os
def get_env_var(name, default_value = None):
"""Get the value of an environment variable, if defined"""
if name in os.environ:
return os.environ[name]
elif default_value is not None:
return default_value
else:
raise RuntimeError('Required environment variable %s not found' % name) | 0f0455ede0e025c9da9fd65769a1d4e52ae520fc | 705,348 |
def hexStringToRGB(hex):
"""
Converts hex color string to RGB values
:param hex: color string in format: #rrggbb or rrggbb with 8-bit values in hexadecimal system
:return: tuple containing RGB color values (from 0.0 to 1.0 each)
"""
temp = hex
length = len(hex)
if temp[0] == "#":
temp = hex[1:length]
if not len(temp) == 6:
return None
colorArr = bytearray.fromhex(temp)
return colorArr[0], colorArr[1], colorArr[2] | 7adcb7b247e6fe1aefa1713d754c828d1ac4a5b0 | 705,349 |
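Illustrative conversions, including the rejection of malformed input:

assert hexStringToRGB("#ff8000") == (255, 128, 0)
assert hexStringToRGB("ff8000") == (255, 128, 0)
assert hexStringToRGB("#xyz") is None  # wrong length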