content (stringlengths 39–14.9k) | sha1 (stringlengths 40) | id (int64 0–710k) |
---|---|---|
def phedex_url(api=''):
"""Return Phedex URL for given API name"""
return 'https://cmsweb.cern.ch/phedex/datasvc/json/prod/%s' % api | a642cd138d9be4945dcbd924c7b5c9892de36baa | 706,490 |
import torch
def format_attn(attention_tuples: tuple):
"""
Input: N tuples (N = layer num)
Each tuple item is Tensor of shape
Batch x num heads x from x to
Output: Tensor of shape layer x from x to
(averaged over heads)
"""
    # Combine tuples into one tensor (assumes batch size 1, so dim 0
    # becomes the layer dimension), then average over heads.
    return torch.cat(attention_tuples, dim=0).mean(dim=1) | 8d25d081992099835a21cdbefb406f378350f983 | 706,493 |
def _sp_sleep_for(t: int) -> str:
"""Return the subprocess cmd for sleeping for `t` seconds."""
return 'python -c "import time; time.sleep({})"'.format(t) | 20ac8022a2438ceb62123f534ba5911b7c560502 | 706,497 |
def is_serial_increased(old, new):
""" Return true if serial number was increased using RFC 1982 logic. """
old, new = (int(n) for n in [old, new])
diff = (new - old) % 2**32
    # RFC 1982: the serial increased iff the wrapped difference is in (0, 2**31).
    return 0 < diff < 2**31 | 44a33a1c7e8caebe3b74284002c7c4be6ac29b40 | 706,499 |
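A quick sanity check of the wraparound behaviour (hypothetical serial values, assuming is_serial_increased as defined above):

# Plain increase within the 32-bit window:
assert is_serial_increased(2020010100, 2020010101)
# Wraparound: 2**32 - 1 rolls over to a small value and still counts as increased.
assert is_serial_increased(2**32 - 1, 10)
# Moving backwards is not an increase.
assert not is_serial_increased(10, 5)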
import decimal
def split_amount(amount, splits, places=2):
"""Return list of ``splits`` amounts where sum of items equals ``amount``.
>>> from decimal import Decimal
>>> split_amount(Decimal('12'), 1)
[Decimal('12.00')]
>>> split_amount(Decimal('12'), 2)
[Decimal('6.00'), Decimal('6.00')]
Amounts have a max of ``places`` decimal places. Last amount in the list
may not be the same as others (will always be lower than or equal to
others).
>>> split_amount(Decimal('100'), 3)
[Decimal('33.34'), Decimal('33.34'), Decimal('33.32')]
>>> split_amount(Decimal('100'), 3, 4)
[Decimal('33.3334'), Decimal('33.3334'), Decimal('33.3332')]
>>> split_amount(Decimal('12'), 7) # Doctest: +ELLIPSIS
[Decimal('1.72'), ..., Decimal('1.72'), ..., Decimal('1.68')]
>>> split_amount(Decimal('12'), 17) # Doctest: +ELLIPSIS
[Decimal('0.71'), ..., Decimal('0.71'), Decimal('0.64')]
"""
one = decimal.Decimal(10) ** -places
amount = amount.quantize(one)
with decimal.localcontext() as decimal_context:
decimal_context.rounding = decimal.ROUND_UP
upper_split = (amount / splits).quantize(one)
splitted_amounts = [upper_split] * (splits - 1)
lower_split = amount - sum(splitted_amounts)
splitted_amounts.append(lower_split)
return splitted_amounts | 8c8a17ed9bbcab194550ea78a9b414f51ca5610d | 706,500 |
def format_alleles(variant):
"""Gets a string representation of the variant's alleles.
Args:
variant: nucleus.genomics.v1.Variant.
Returns:
A string ref_bases/alt1,alt2 etc.
"""
return '{}/{}'.format(variant.reference_bases, ','.join(
variant.alternate_bases)) | 775fe3e112ff0b7e73780600e0621a8695fa5ad0 | 706,502 |
def parse_dict(input_data):
"""Return a rules dict of the format:
{
'light red': [(1, 'bright white'), (2, 'muted yellow')],
'dark orange': [(3, 'bright white'), (4, 'muted yellow')],
'faded blue': [(0, 'bags')]
}
"""
bags = dict()
for line in input_data.split('\n'):
outer, inner = line.strip().split(' bags contain ')
inner = [i.split(' ') for i in inner.split(", ")]
if 'no' in inner[0]:
bags[outer] = [(0, 'bags')]
else:
bags[outer] = [(int(i[0]), ' '.join(i[1:3])) for i in inner]
return bags | a1aad66a16e4754c35c9b3518d5641096e393530 | 706,503 |
def map(v, ds, de, ts, te):
"""\
Map the value v, in range [ds, de] to
the corresponding value in range [ts, te]
"""
d1 = de - ds
d2 = te - ts
v2 = v - ds
r = v2 / d1
return ts + d2 * r | 2c2ba49b2acc283ca25b07c10b7ad717ad6a280d | 706,507 |
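A minimal usage sketch (illustrative values; note that this map shadows the built-in of the same name):

# 5 is halfway through [0, 10], so it maps to halfway through [0, 100].
assert map(5, 0, 10, 0, 100) == 50.0
# The target range may be inverted.
assert map(2, 0, 10, 100, 0) == 80.0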
def splitBinNum(binNum):
"""Split an alternate block number into latitude and longitude parts.
Args:
binNum (int): Alternative block number
Returns:
:tuple Tuple:
1. (int) Latitude portion of the alternate block number.
Example: ``614123`` => ``614``
2. (int) Longitude portion of the alternate block number.
Example: ``614123`` => ``123``
"""
    latBin = binNum // 1000
    longBin = binNum - (latBin * 1000)
return (latBin, longBin) | da9b9cc67d592e73da842f4b686c0d16985f3457 | 706,514 |
def split_in_pairs(s, padding = "0"):
"""
Takes a string and splits into an iterable of strings of two characters each.
Made to break up a hex string into octets, so default is to pad an odd length
string with a 0 in front. An alternative character may be specified as the
second argument.
"""
if not isinstance(padding, str) or len(padding) != 1:
raise TypeError("Padding must be a single character.")
s = padding + s if len(s) % 2 else s
v = iter(s)
return (a+b for a,b in zip(v,v)) | 8807448bb8125c80fa78ba32f887a54ba9bab1dd | 706,515 |
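Example of breaking a hex string into octets (illustrative input):

assert list(split_in_pairs("abcdef")) == ["ab", "cd", "ef"]
# Odd-length input is left-padded with "0" by default.
assert list(split_in_pairs("bcdef")) == ["0b", "cd", "ef"]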
def has_global(node, name):
"""
check whether node has name in its globals list
"""
return hasattr(node, "globals") and name in node.globals | 7a2ef301cb25cba242d8544e2c191a537f63bf19 | 706,516 |
from typing import Iterable
def negate_objective(objective):
"""Take the negative of the given objective (converts a gain into a loss and vice versa)."""
if isinstance(objective, Iterable):
        return list(map(negate_objective, objective))
else:
return -objective | e24877d00b7c84e04c0cb38b5facdba85694890f | 706,517 |
def parse_plot_set(plot_set_string):
"""
Given one of the string arguments to the --plot-sets option, parse out a
data structure representing which conditions ought to be compared against
each other, and what those comparison plots/tables should be called.
The syntax of a plot set is [title:]condition[,condition[,condition...]].
The first condition is the comparison baseline, when applicable.
Returns a tuple of a plot set title, or None if unspecified, and a list of
condition names.
"""
colon_pos = plot_set_string.find(':')
if colon_pos != -1:
# Pull out the title before the colon
title = plot_set_string[0:colon_pos]
# And the rest of the specifier after it
plot_set_string = plot_set_string[colon_pos + 1:]
else:
# No title given
title = None
# Return the title and condition list tuple
return (title, plot_set_string.split(',')) | 1df83681aa3110dfd9302bd7918f15dfbfa497ab | 706,518 |
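For instance (hypothetical condition names):

assert parse_plot_set("baseline,run1,run2") == (None, ["baseline", "run1", "run2"])
assert parse_plot_set("My Title:baseline,run1") == ("My Title", ["baseline", "run1"])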
def prior_min_field(field_name, field_value):
"""
    Creates the prior min field from the given field definition.
    :param field_name: prior name (initial field name)
    :param field_value: initial field properties
:return: name of the min field, updated field properties
"""
name = field_name
value = field_value.copy()
value.update({
'label': 'Min',
'required': False,
})
return name + '_min', value | 9f331ee58e699318e678d881c0028486b746c05c | 706,521 |
def mean_zero_unit_variance(arr, mean_vector=None, std_vector=None, samples_in='row'):
"""
Normalize input data to have zero mean and unit variance.
Return the normalized data, the mean, and the calculated standard
deviation which was used to normalize the data
[normalized, meanvec, stddev] = mean_zero_unit_variance(data)
or
    [normalized, meanvec, stddev] = mean_zero_unit_variance(data, mean_vector=provided_mean_vector)
etc.
"""
samplesIn = 1 if samples_in == 'col' else 0
dimsIn = int(not samplesIn)
nSamples = arr.shape[samplesIn]
nDims = arr.shape[dimsIn]
theshape = [1, 1]
theshape[dimsIn] = nDims
    if mean_vector is None:  # truth-testing a numpy array is ambiguous, so compare to None
        mean_vector = arr.mean(axis=samplesIn).reshape(theshape)
    if std_vector is None:
        std_vector = arr.std(axis=samplesIn).reshape(theshape)
# If you have a row with absolutely no information, you will divide by zero. Hence...
std_vector[std_vector < 1e-6] = 1
norma = (arr - mean_vector) / std_vector
return norma, mean_vector, std_vector | 38a1ca262362b3f04aed06f3f0d21836eca8d5ad | 706,523 |
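A minimal numpy sketch (assumes samples are rows, the default):

import numpy as np

data = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
norm, mu, sd = mean_zero_unit_variance(data)
assert np.allclose(norm.mean(axis=0), 0)
assert np.allclose(norm.std(axis=0), 1)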
def renorm_flux_lightcurve(flux, fluxerr, mu):
""" Normalise flux light curves with distance modulus."""
d = 10 ** (mu/5 + 1)
dsquared = d**2
norm = 1e18
# print('d**2', dsquared/norm)
fluxout = flux * dsquared / norm
fluxerrout = fluxerr * dsquared / norm
return fluxout, fluxerrout | 97f2606d54b106d2051983dfc29d942112e7a1e3 | 706,525 |
def is_valid_sudoku(board):
"""
Checks if an input sudoku board is valid
Algorithm:
    For all non-empty squares on the board, if the value at that square is a number,
    check whether that value already exists in that square's row, column,
    and minor square.
    If it does, return False.
"""
cols = [set() for _ in range(9)]
squares = [[set() for _ in range(3)] for x in range(3)]
for row in range(9):
rows = set()
for col in range(9):
if board[row][col] == ".":
continue
# Check row
if board[row][col] in rows:
return False
else:
rows.add(board[row][col])
# Check col
if board[row][col] in cols[col]:
return False
else:
cols[col].add(board[row][col])
# Check square
if board[row][col] in squares[row // 3][col // 3]:
return False
else:
squares[row // 3][col // 3].add(board[row][col])
return True | 001a02a47acbaa192215d985f3d743c42a9fb42b | 706,526 |
def no_transform(image):
"""Pass through the original image without transformation.
    Returns a tuple with None to maintain compatibility with processes that
    evaluate the transform.
"""
return (image, None) | 25b45a5c77d3c2864ebc7a046e0f47b2fafb067b | 706,528 |
def NodeToString(xml_node):
"""Returns an XML string.
Args:
xml_node: xml.dom.Node object
Returns:
String containing XML
"""
return xml_node.toxml() | 043072bbb40f33947febedf967679e3e39931834 | 706,534 |
def difference(data, interval):
""" difference dataset
parameters:
data: dataset to be differenced
interval: the interval between the two elements to be differenced.
return:
dataset: with the length = len(data) - interval
"""
return [data[i] - data[i - interval] for i in range(interval, len(data))] | 611f4ad36935000ae7dc16f76aef7cbb494b36ac | 706,535 |
def get_mongo_database(connection, database_name):
""" Access the database
Args:
connection (MongoClient): Mongo connection to the database
database_name (str): database to be accessed
Returns:
Database: the Database object
"""
try:
return connection.get_database(database_name)
    except Exception:  # avoid a bare except so KeyboardInterrupt/SystemExit propagate
        return None | 9299cbe0b697dec2e548fb5e26e2013214007575 | 706,538 |
def find_u_from_v(matrix, v, singular_value):
"""
Finds the u column vector of the U matrix in the SVD UΣV^T.
Parameters
----------
matrix : numpy.ndarray
Matrix for which the SVD is calculated
v : numpy.ndarray
A column vector of V matrix, it is the eigenvector of the Gramian of `matrix`.
singular_value : float
A singular value of `matrix` corresponding to the `v` vector.
Returns
-------
numpy.ndarray
u column vector of the U matrix in the SVD.
"""
return matrix @ v / singular_value | ef2871c86bf7ddc4c42446a54230068282ad85df | 706,539 |
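A sketch checking the u = Av/σ relation against numpy's own SVD (assumes numpy; using numpy's v keeps the sign convention consistent):

import numpy as np

matrix = np.array([[3.0, 0.0], [4.0, 5.0]])
u_np, s_np, vt_np = np.linalg.svd(matrix)
u0 = find_u_from_v(matrix, vt_np[0], s_np[0])
assert np.allclose(u0, u_np[:, 0])  # matches the first left-singular vector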
from typing import List
def double(items: List[str]) -> List[str]:
"""
Returns a new list that is the input list, repeated twice.
"""
return items + items | 9e4b6b9e84a80a9f5cbd512ca820274bb8cad924 | 706,540 |
def tolist(obj):
"""
Convert given `obj` to list.
If `obj` is not a list, return `[obj]`, else return `obj` itself.
"""
if not isinstance(obj, list):
return [obj]
return obj | f511f4ebb86977b2db8646e692abc9840c2ae2d1 | 706,547 |
def khinalug_input_normal(field, text):
"""
Prepare a string from one of the query fields for subsequent
processing: replace common shortcuts with valid Khinalug characters.
"""
if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
return text
text = text.replace('c1_', 'č̄')
text = text.replace('c1\'', 'č̣')
text = text.replace('7', 'ˁ')
text = text.replace('g1', 'ǧ')
text = text.replace('s1', 'š')
text = text.replace('z1', 'ž')
text = text.replace('c1', 'č')
text = text.replace('j1', 'ǯ')
text = text.replace('a1', 'ä')
text = text.replace('u1', 'ü')
text = text.replace('o1', 'ö')
text = text.replace('i1', 'ı')
text = text.replace('k_', 'k̄')
text = text.replace('t_', 't̄')
text = text.replace('q_', 'q̄')
text = text.replace('c_', 'c̄')
text = text.replace('c\'', 'c̣')
text = text.replace('k\'', 'ḳ')
text = text.replace('q\'', 'q̇')
text = text.replace('x\'', 'x̣')
text = text.replace('t\'', 'ṭ')
text = text.replace('h\'', 'ḥ')
return text | b9b9413ae461b6a03aa8c0db4396658dbe242c91 | 706,549 |
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal and keyword arguments to connect::
@receiver(signal_object, sender=sender)
def signal_receiver(sender, **kwargs):
...
"""
def _decorator(func):
signal.connect(func, **kwargs)
return func
return _decorator | dbbde0855b2a657adaff9fa688aa158053e46579 | 706,551 |
def unnormalise_x_given_lims(x_in, lims):
"""
Scales the input x (assumed to be between [-1, 1] for each dim)
to the lims of the problem
"""
# assert len(x_in) == len(lims)
r = lims[:, 1] - lims[:, 0]
x_orig = r * (x_in + 1) / 2 + lims[:, 0]
return x_orig | 1d4cd35f45ab8594e297eb64e152a481c01905cd | 706,553 |
def file2bytes(filename: str) -> bytes:
"""
Takes a filename and returns a byte string with the content of the file.
"""
with open(filename, 'rb') as f:
data = f.read()
return data | f917a265c17895c917c3c340041586bef0c34dac | 706,558 |
def mock_dataset(mocker, mock_mart, mart_datasets_response):
"""Returns an example dataset, built using a cached response."""
mocker.patch.object(mock_mart, 'get', return_value=mart_datasets_response)
return mock_mart.datasets['mmusculus_gene_ensembl'] | bb9a8b828f0ac5bfa59b3faee0f9bcc22c7d954e | 706,559 |
def info2lists(info, in_place=False):
"""
Return info with:
1) `packages` dict replaced by a 'packages' list with indexes removed
2) `releases` dict replaced by a 'releases' list with indexes removed
info2list(info2dicts(info)) == info
"""
if 'packages' not in info and 'releases' not in info:
return info
if in_place:
info_lists = info
else:
info_lists = info.copy()
packages = info.get('packages')
if packages:
info_lists['packages'] = list(packages.values())
releases = info.get('releases')
if releases:
info_lists['releases'] = list(releases.values())
return info_lists | 313fda757d386332e16a0a91bb4408fe3cb8c070 | 706,560 |
def is_chitoi(tiles):
"""
Returns True if the hand satisfies chitoitsu.
"""
unique_tiles = set(tiles)
return (len(unique_tiles) == 7 and
all([tiles.count(tile) == 2 for tile in unique_tiles])) | c04149174bb779cd07616d4f419fc86531ab95dd | 706,561 |
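For example (tile values are illustrative integers):

# Seven distinct pairs satisfy chiitoitsu.
assert is_chitoi([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7])
# A quad plus pairs does not: only six distinct tiles.
assert not is_chitoi([1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])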
def flop_gemm(n, k):
"""# of + and * for matmat of nxn matrix with nxk matrix, with accumulation
into the output."""
return 2*n**2*k | b217b725e2ac27a47bc717789458fd20b4aa56c1 | 706,564 |
def index() -> str:
"""Rest endpoint to test whether the server is correctly working
Returns:
str: The default message string
"""
return 'DeChainy server greets you :D' | ce0caeb9994924f8d6ea10462db2be48bbc126d0 | 706,565 |
from typing import AnyStr
from typing import List
import json
def load_json_samples(path: AnyStr) -> List[str]:
"""
Loads samples from a json file
:param path: Path to the target file
:return: List of samples
"""
with open(path, "r", encoding="utf-8") as file:
samples = json.load(file)
if isinstance(samples, list):
return samples
else:
        raise RuntimeError("File's content must be list-like") | b735e7265a31f6bc6d19381bfe9d0cbe26dcf170 | 706,566 |
import struct
import lzma
def decompress_lzma(data: bytes) -> bytes:
"""decompresses lzma-compressed data
:param data: compressed data
:type data: bytes
:raises _lzma.LZMAError: Compressed data ended before the end-of-stream marker was reached
:return: uncompressed data
:rtype: bytes
"""
props, dict_size = struct.unpack("<BI", data[:5])
lc = props % 9
props = props // 9
pb = props // 5
lp = props % 5
dec = lzma.LZMADecompressor(
format=lzma.FORMAT_RAW,
filters=[
{
"id": lzma.FILTER_LZMA1,
"dict_size": dict_size,
"lc": lc,
"lp": lp,
"pb": pb,
}
],
)
return dec.decompress(data[5:]) | 247c3d59d45f3f140d4f2c36a7500ff8a51e45b0 | 706,567 |
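A round-trip sketch: pack the 5-byte header by hand (props byte = (pb*5 + lp)*9 + lc) around a raw LZMA1 payload, reusing the struct and lzma imports above, then feed it back through decompress_lzma. Parameter values are illustrative:

lc, lp, pb, dict_size = 3, 0, 2, 1 << 16
filters = [{"id": lzma.FILTER_LZMA1, "dict_size": dict_size,
            "lc": lc, "lp": lp, "pb": pb}]
comp = lzma.LZMACompressor(format=lzma.FORMAT_RAW, filters=filters)
payload = comp.compress(b"hello world") + comp.flush()
header = struct.pack("<BI", (pb * 5 + lp) * 9 + lc, dict_size)
assert decompress_lzma(header + payload) == b"hello world"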
def get_case_number(caselist):
"""Get line number from file caselist."""
num = 0
with open(caselist, 'r') as casefile:
for line in casefile:
            if not line.strip().startswith('#'):
num = num + 1
return num | b1366d8e4a0e2c08da5265502d2dd2d72bf95c19 | 706,568 |
def splitDataSet(dataSet, index, value):
"""
划分数据集,取出index对应的值为value的数据
dataSet: 待划分的数据集
index: 划分数据集的特征
value: 需要返回的特征的值
"""
retDataSet = []
for featVec in dataSet:
if featVec[index] == value:
reducedFeatVec = featVec[:index]
reducedFeatVec.extend(featVec[index+1:])
retDataSet.append(reducedFeatVec)
    # return the rows whose index-th column equals value, with that column removed
return retDataSet | 814a54fe13d832e69d8df32af52d882d4a15c4ba | 706,570 |
def in_skill_product_response(handler_input):
"""Get the In-skill product response from monetization service."""
""" # type: (HandlerInput) -> Union[InSkillProductsResponse, Error] """
locale = handler_input.request_envelope.request.locale
ms = handler_input.service_client_factory.get_monetization_service()
return ms.get_in_skill_products(locale) | 9452ac1498ff0e6601df9fc419df0cfdd6b9171e | 706,572 |
def stat_mtime(stat):
"""Returns the mtime field from the results returned by os.stat()."""
return stat[8] | 1f7fec9a54a97bb63141d63db706b2885913dadb | 706,573 |
def calc_very_restricted_wage_distribution(df):
"""Compute per-period mean and std of wages for agents under two choice restrictions."""
return (
df.query("Policy == 'veryrestricted' and Choice == 'a' or Choice == 'b'")
.groupby(["Period"])["Wage"]
.describe()[["mean", "std"]]
) | 3ca8a2f0061e456a3158b4ee8a128a5a7439af3f | 706,574 |
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Take
blobs: list of blobs to return in addition to output blobs.
kwargs: Keys are input blob names and values are blob ndarrays.
For formatting inputs for Caffe, see Net.preprocess().
If None, input is taken from data layers.
start: optional name of layer at which to begin the forward pass
end: optional name of layer at which to finish the forward pass (inclusive)
Give
outs: {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = 0
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set([end] + blobs)
else:
end_ind = len(self.layers) - 1
outputs = set(self.outputs + blobs)
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in kwargs.items():
if blob.ndim != 4:
raise Exception('{} blob is not 4-d'.format(in_))
if blob.shape[0] != self.blobs[in_].num:
raise Exception('Input is not batch sized')
self.blobs[in_].data[...] = blob
self._forward(start_ind, end_ind)
# Unpack blobs to extract
return {out: self.blobs[out].data for out in outputs} | 790baa0fc8529e3cad45bd8236060bad591ab4a4 | 706,577 |
from datetime import datetime
def create_nav_btn(soup,date,text):
"""
Helper functions for month_calendar, generates a navigation button
for calendar
:param soup: BeautifulSoup parser of document
:param date: Date to create nav button
:param text: Text for button
"""
    nav_th = soup.new_tag('th', attrs={'colspan': '2'})
nav_th['class'] = 'month'
nav_a = soup.new_tag('a',href='/apps/orders/%s/%s' % (date.year,
date.month))
nav_a.string = text
if date > datetime.today():
nav_a['class'] = "btn btn-mini btn-info disabled"
nav_a['href'] = '#'
else:
nav_a['class'] = "btn btn-mini btn-info"
nav_th.insert(0,nav_a)
return nav_th | 6f49e5173980a9da01e4d92e2f5adfeb73a4a4d0 | 706,582 |
import re
def parse_name(content):
"""
Finds the name of the man page.
"""
# Create regular expression
name_regex = re.compile(r"^([\w\.-]*)")
# Get name of manual page
just_name = name_regex.search(content)
name_str = ""
if just_name is not None:
name_str = just_name.group(1)
return name_str | c3a1f32beb96d39d4490681bf90d54115597ffe5 | 706,584 |
def albanian_input_normal(field, text):
"""
Prepare a string from one of the query fields for subsequent
processing: replace common shortcuts with valid Albanian characters.
"""
if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
return text
text = text.replace('ё', 'ë')
text = text.replace('e:', 'ë')
return text | 6bd4e7a1e764feada04ae5e95465fb4d7cbb29fb | 706,585 |
import re
def extract_share_id_from_url(public_base_url: str) -> str:
"""
Extracts the Airtable share id from the provided URL.
:param public_base_url: The URL where the share id must be extracted from.
:raises ValueError: If the provided URL doesn't match the publicly shared
Airtable URL.
:return: The extracted share id.
"""
result = re.search(r"https:\/\/airtable.com\/shr(.*)$", public_base_url)
if not result:
raise ValueError(
f"Please provide a valid shared Airtable URL (e.g. "
f"https://airtable.com/shrxxxxxxxxxxxxxx)"
)
return f"shr{result.group(1)}" | 5aad99b5bf022a2b957f10fcb09793188051340c | 706,586 |
def get_all_ann_index(self):
""" Retrieves all annotation ids """
return list(self.ann_infos.keys()) | 4375c9dbc14bf50575c8a5e42ce0ae8749820dfb | 706,589 |
def get_quoted_text(text):
"""Method used to get quoted text.
If body/title text contains a quote, the first quote is considered as the text.
:param text: The replyable text
:return: The first quote in the text. If no quotes are found, then the entire text is returned
"""
lines = text.split('\n\n')
for line in lines:
if line.startswith('>'):
return line[1:]
return text | 3ac1801edcaf16af45d118918cb548f41d9a08fb | 706,591 |
def get_username_for_os(os):
"""Return username for a given os."""
usernames = {"alinux2": "ec2-user", "centos7": "centos", "ubuntu1804": "ubuntu", "ubuntu2004": "ubuntu"}
return usernames.get(os) | 579ebfa4e76b6660d28afcc010419f32d74aa98c | 706,592 |
from typing import List
def is_negative_spec(*specs: List[str]) -> bool:
""" Checks for negative values in a variable number of spec lists
Each spec list can have multiple strings. Each string within each
list will be searched for a '-' sign.
"""
for specset in specs:
if specset:
for spec in specset:
if '-' in spec:
return True
return False | 216e6db2e63a657ac95a31896b9b61329a10a3db | 706,593 |
from datetime import datetime
import json
import hashlib
def map_aircraft_to_record(aircrafts, message_now, device_id):
"""
Maps the `aircraft` entity to a BigQuery record and its unique id.
Returns `(unique_ids, records)`
"""
def copy_data(aircraft):
result = {
'hex': aircraft.get('hex'),
'squawk': aircraft.get('squawk'),
'flight': aircraft.get('flight'),
'lat': aircraft.get('lat'),
'lon': aircraft.get('lon'),
'nucp': aircraft.get('nucp'),
'seen_pos': aircraft.get('seen_pos'),
'altitude': aircraft.get('altitude'),
'vert_rate': aircraft.get('vert_rate'),
'track': aircraft.get('track'),
'speed': aircraft.get('speed'),
'messages': aircraft.get('messages'),
'seen': aircraft.get('seen'),
'rssi': aircraft.get('rssi'),
'device_id': device_id,
'timestamp': datetime.utcfromtimestamp(float(message_now)).isoformat()
}
result_json = json.dumps(result)
result_hash = hashlib.sha512(result_json.encode('utf-8')).hexdigest()
unique_id = f'{message_now}_{result_hash}'
result['created_at'] = datetime.now().isoformat()
return (unique_id, result)
    return zip(*map(copy_data, aircrafts)) | d423b87e2018486de076cc94a719038c53c54602 | 706,596 |
def sub(xs, ys):
"""
Computes xs - ys, such that elements in xs that occur in ys are removed.
@param xs: list
@param ys: list
@return: xs - ys
"""
return [x for x in xs if x not in ys] | 8911bb2c79919cae88463a95521cf051828038e8 | 706,597 |
def get_colors(k):
"""
Return k colors in a list. We choose from 7 different colors.
If k > 7 we choose colors more than once.
"""
base_colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k']
    # Cycle through the base colors, reusing them once all 7 are exhausted.
    return [base_colors[i % len(base_colors)] for i in range(k)] | 6c4a38eb394254f57d8be9fca47e0b44f51f5f04 | 706,599 |
def ConvertToTypeEnum(type_enum, airflow_executor_type):
"""Converts airflow executor type string to enum.
Args:
type_enum: AirflowExecutorTypeValueValuesEnum, executor type enum value.
airflow_executor_type: string, executor type string value.
Returns:
AirflowExecutorTypeValueValuesEnum: the executor type enum value.
"""
return type_enum(airflow_executor_type) | 04162b04719031ba6b96d981a7ffe8a82691bc31 | 706,602 |
def is_member(musicians, musician_name):
"""Return true if named musician is in musician list;
otherwise return false.
Parameters:
musicians (list): list of musicians and their instruments
musician_name (str): musician name
Returns:
bool: True if match is made; otherwise False.
"""
i = 0 # counter
while i < len(musicians): # guard against infinite loop
musician = musicians[i].split(', ')[0].lower()
if musician_name.lower() == musician:
return True # preferable to break statement
i += 1 # MUST INCREMENT
return False | 6ef5b9bbccb17d9b97a85e3af7789e059829184b | 706,608 |
def process_domain_assoc(url, domain_map):
"""
Replace domain name with a more fitting tag for that domain.
User defined. Mapping comes from provided config file
Mapping in yml file is as follows:
tag:
- url to map to tag
- ...
A small example domain_assoc.yml is included
"""
if not domain_map:
return url
for key in domain_map:
if url in domain_map[key]:
return key
return url | 29c0f81a4959d97cd91f839cbe511eb46872b5ec | 706,616 |
import random
def shuffled(iterable):
"""Randomly shuffle a copy of iterable."""
items = list(iterable)
random.shuffle(items)
return items | cd554d4a31e042dc1d2b4c7b246528a5184d558e | 706,617 |
def parse_discontinuous_phrase(phrase: str) -> str:
"""
Transform discontinuous phrase into a regular expression. Discontinuity is
interpreted as taking place at any whitespace outside of terms grouped by
parentheses. That is, the whitespace indicates that anything can be in between
the left side and right side.
    Example: x1 (x2 x3) becomes x1.+(x2 x3) -- the space inside the
    parentheses is preserved, while the top-level one becomes ".+".
"""
level = 0
parsed_phrase = ""
    for char in phrase:
if char == "(":
level += 1
elif char == ")":
level -= 1
elif char == " " and level == 0:
char = ".+"
parsed_phrase += char
return parsed_phrase | 58fe394a08931e7e79afc00b9bb0e8e9981f3c81 | 706,618 |
def getChildElementsListWithTagAttribValueMatch(parent, tag, attrib, value):
"""
This method takes a parent element as input and finds all the sub elements (children)
containing specified tag and an attribute with the specified value.
Returns a list of child elements.
Arguments:
parent = parent element
tag = tag value of the sub-element(child) to be searched for.
attrib = attribute name for the sub-element with above given tag should have.
value = attribute value that the sub-element with above given tag, attribute should have.
"""
child_elements = parent.findall(".//%s[@%s='%s']" % (tag, attrib, value))
return child_elements | cae87e6548190ad0a675019b397eeb88289533ee | 706,620 |
def recursive_dict_of_lists(d, helper=None, prev_key=None):
"""
Builds dictionary of lists by recursively traversing a JSON-like
structure.
Arguments:
d (dict): JSON-like dictionary.
prev_key (str): Prefix used to create dictionary keys like: prefix_key.
Passed by recursive step, not intended to be used.
helper (dict): In case d contains nested dictionaries, you can specify
a helper dictionary with 'key' and 'value' keys to specify where to
look for keys and values instead of recursive step. It helps with
cases like: {'action': {'type': 'step', 'amount': 1}}, by passing
{'key': 'type', 'value': 'amount'} as a helper you'd get
{'action_step': [1]} as a result.
"""
d_o_l = {}
if helper is not None and helper['key'] in d.keys() and helper['value'] in d.keys():
if prev_key is not None:
key = f"{prev_key}_{helper['key']}"
else:
key = helper['key']
if key not in d_o_l.keys():
d_o_l[key] = []
d_o_l[key].append(d[helper['value']])
return d_o_l
for k, v in d.items():
if isinstance(v, dict):
d_o_l.update(recursive_dict_of_lists(v, helper=helper, prev_key=k))
else:
if prev_key is not None:
key = f'{prev_key}_{k}'
else:
key = k
if key not in d_o_l.keys():
d_o_l[key] = []
if isinstance(v, list):
d_o_l[key].extend(v)
else:
d_o_l[key].append(v)
return d_o_l | c615582febbd043adae6788585d004aabf1ac7e3 | 706,623 |
def same_shape(shape1, shape2):
"""
Checks if two shapes are the same
Parameters
----------
shape1 : tuple
First shape
shape2 : tuple
Second shape
Returns
-------
flag : bool
True if both shapes are the same (same length and dimensions)
"""
if len(shape1) != len(shape2):
return False
for i in range(len(shape1)):
if shape1[i] != shape2[i]:
return False
return True | 9452f7973e510532cee587f2bf49a146fb8cc46e | 706,624 |
def is_extended_markdown(view):
"""True if the view contains 'Markdown Extended'
syntax'ed text.
"""
return view.settings().get("syntax").endswith(
"Markdown Extended.sublime-syntax") | 5c870fd277910f6fa48f2b8ae0dfd304fdbddff0 | 706,626 |
def is_point_in_rect(point, rect):
"""Checks whether is coordinate point inside the rectangle or not.
Rectangle is defined by bounding box.
:type point: list
:param point: testing coordinate point
:type rect: list
:param rect: bounding box
:rtype: boolean
:return: boolean check result
"""
x0, y0, x1, y1 = rect
x, y = point
if x0 <= x <= x1 and y0 <= y <= y1:
return True
return False | d0c7a64138899f4e50b42dc75ea6030616d4dfec | 706,628 |
def convert_timestamp(ts):
"""Converts the timestamp to a format suitable for Billing.
Examples of a good timestamp for startTime, endTime, and eventTime:
'2016-05-20T00:00:00Z'
Note the trailing 'Z'. Python does not add the 'Z' so we tack it on
ourselves.
"""
return ts.isoformat() + 'Z' | 6b8d19671cbeab69c398508fa942e36689802cdd | 706,630 |
def boolean(input):
"""Convert the given input to a boolean value.
Intelligently handles boolean and non-string values, returning
as-is and passing to the bool builtin respectively.
This process is case-insensitive.
Acceptable values:
True
* yes
* y
* on
* true
* t
* 1
False
* no
* n
* off
* false
* f
* 0
:param input: the value to convert to a boolean
:type input: any
:returns: converted boolean value
:rtype: bool
"""
try:
input = input.strip().lower()
except AttributeError:
return bool(input)
if input in ('yes', 'y', 'on', 'true', 't', '1'):
return True
if input in ('no', 'n', 'off', 'false', 'f', '0'):
return False
raise ValueError("Unable to convert {0!r} to a boolean value.".format(input)) | 09c09206d5487bf02e3271403e2ba67358e1d148 | 706,634 |
def create_provisioned_product_name(account_name: str) -> str:
"""
Replaces all space characters in an Account Name with hyphens,
also removes all trailing and leading whitespace
"""
return account_name.strip().replace(" ", "-") | 743e7438f421d5d42c071d27d1b0fa2a816a9b4d | 706,635 |
def release_branch_name(config):
"""
build expected release branch name from current config
"""
branch_name = "{0}{1}".format(
config.gitflow_release_prefix(),
config.package_version()
)
return branch_name | 0d97c515aca8412882c8b260405a63d20b4b0f63 | 706,636 |
def torch2numpy(data):
""" Transfer data from the torch tensor (on CPU) to the numpy array (on CPU). """
return data.numpy() | c7ca4123743c4f054d809f0e307a4de079b0af10 | 706,637 |
def edges_to_adj_list(edges):
"""
Transforms a set of edges in an adjacency list (represented as a dictiornary)
For UNDIRECTED graphs, i.e. if v2 in adj_list[v1], then v1 in adj_list[v2]
INPUT:
- edges : a set or list of edges
OUTPUT:
- adj_list: a dictionary with the vertices as keys, each with
a set of adjacent vertices.
"""
adj_list = {} # store in dictionary
for v1, v2 in edges:
if v1 in adj_list: # edge already in it
adj_list[v1].add(v2)
else:
adj_list[v1] = set([v2])
if v2 in adj_list: # edge already in it
adj_list[v2].add(v1)
else:
adj_list[v2] = set([v1])
return adj_list | 683f10e9a0a9b8a29d63b276b2e550ebe8287a05 | 706,638 |
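For example (illustrative undirected edges):

adj = edges_to_adj_list({(1, 2), (2, 3)})
assert adj == {1: {2}, 2: {1, 3}, 3: {2}}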
def _hexsplit(string):
""" Split a hex string into 8-bit/2-hex-character groupings separated by spaces"""
return ' '.join([string[i:i+2] for i in range(0, len(string), 2)]) | 672e475edeaafaa08254845e620b0a771b294fa8 | 706,643 |
from typing import List
def hello_world(cities: List[str] = ["Berlin", "Paris"]) -> bool:
"""
Hello world function.
Arguments:
- cities: List of cities in which 'hello world' is posted.
Return:
- success: Whether or not function completed successfully.
"""
try:
[print("Hello {}!".format(c)) for c in cities] # for loop one-liner
return True
except KeyboardInterrupt:
return False
finally:
pass | a24f0f47c9b44c97f46524d354fff0ed9a735fe3 | 706,644 |
def team_to_repos(api, no_repos, organization):
"""Create a team_to_repos mapping for use in _add_repos_to_teams, anc create
each team and repo. Return the team_to_repos mapping.
"""
num_teams = 10
# arrange
team_names = ["team-{}".format(i) for i in range(num_teams)]
repo_names = ["some-repo-{}".format(i) for i in range(num_teams)]
for name in team_names:
organization.create_team(name, permission="pull")
for name in repo_names:
organization.create_repo(name)
team_to_repos = {
team_name: [repo_name]
for team_name, repo_name in zip(team_names, repo_names)
}
return team_to_repos | 390da146c3f96c554f9194f8551a066eec535533 | 706,646 |
import struct
def padandsplit(message):
"""
    returns a two-dimensional array X[i][j] of 32-bit integers, where j ranges
    from 0 to 15.
    First pads the message so that its length in bytes is congruent to 56
    (mod 64), by first adding a byte 0x80 and then padding with 0x00 bytes
    until the length condition is met. Then adds the little-endian 64-bit
    representation of the original length. Finally, splits the result up into
    64-byte blocks, which are further parsed as 32-bit integers.
"""
origlen = len(message)
padlength = 64 - ((origlen - 56) % 64) # minimum padding is 1!
message += b"\x80"
message += b"\x00" * (padlength - 1)
message += struct.pack("<Q", origlen * 8)
assert (len(message) % 64 == 0)
return [
[
struct.unpack("<L", message[i + j:i + j + 4])[0]
for j in range(0, 64, 4)
]
for i in range(0, len(message), 64)
] | ea06a3fc91e19ed0dbea6ddcc2ee6d554fb5a40f | 706,647 |
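Worked example of the padding arithmetic: an empty message gets the 0x80 byte, 55 zero bytes, and the 8-byte length, i.e. exactly one 64-byte block of sixteen 32-bit words.

blocks = padandsplit(b"")
assert len(blocks) == 1 and len(blocks[0]) == 16
assert blocks[0][0] == 0x80  # the lone 0x80 padding byte, read little-endian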
def extract_coords(filename):
"""Extract J2000 coordinates from filename or filepath
Parameters
----------
filename : str
name or path of file
Returns
-------
str
J2000 coordinates
"""
# in case path is entered as argument
filename = filename.split("/")[-1] if "/" in filename else filename
# to check whether declination is positive or negative
plus_minus = "+" if "+" in filename else "-"
    # extracting right ascension (ra) and declination (dec) from filename
filename = filename.split("_")[0].strip("J").split(plus_minus)
ra_extracted = [
"".join(filename[0][0:2]),
"".join(filename[0][2:4]),
"".join(filename[0][4:]),
]
dec_extracted = [
"".join(filename[1][0:2]),
"".join(filename[1][2:4]),
"".join(filename[1][4:]),
]
coordinates = " ".join(ra_extracted) + " " + plus_minus + " ".join(dec_extracted)
# return coordinates as a string in HH MM SS.SSS format
return coordinates | 57f0ca79223116caa770a1dbea2eda84df146855 | 706,648 |
def _parse_multi_header(headers):
"""
Parse out and return the data necessary for generating ZipkinAttrs.
Returns a dict with the following keys:
'trace_id': str or None
'span_id': str or None
'parent_span_id': str or None
'sampled_str': '0', '1', 'd', or None (defer)
"""
parsed = {
"trace_id": headers.get("X-B3-TraceId", None),
"span_id": headers.get("X-B3-SpanId", None),
"parent_span_id": headers.get("X-B3-ParentSpanId", None),
"sampled_str": headers.get("X-B3-Sampled", None),
}
# Normalize X-B3-Flags and X-B3-Sampled to None, '0', '1', or 'd'
if headers.get("X-B3-Flags") == "1":
parsed["sampled_str"] = "d"
if parsed["sampled_str"] == "true":
parsed["sampled_str"] = "1"
elif parsed["sampled_str"] == "false":
parsed["sampled_str"] = "0"
if parsed["sampled_str"] not in (None, "1", "0", "d"):
raise ValueError("Got invalid X-B3-Sampled: %s" % parsed["sampled_str"])
for k in ("trace_id", "span_id", "parent_span_id"):
if parsed[k] == "":
raise ValueError("Got empty-string %r" % k)
if parsed["trace_id"] and not parsed["span_id"]:
raise ValueError("Got X-B3-TraceId but not X-B3-SpanId")
elif parsed["span_id"] and not parsed["trace_id"]:
raise ValueError("Got X-B3-SpanId but not X-B3-TraceId")
# Handle the common case of no headers at all
if not parsed["trace_id"] and not parsed["sampled_str"]:
raise ValueError() # won't trigger a log message
return parsed | 2ac3d0cbee196385e970bcc85827c1a467b5bb3b | 706,649 |
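Example with hypothetical B3 header values:

headers = {
    "X-B3-TraceId": "6e9dba31e1c2c919",
    "X-B3-SpanId": "4e2f5b25a1b6cd21",
    "X-B3-Sampled": "true",
}
parsed = _parse_multi_header(headers)
assert parsed["sampled_str"] == "1"  # "true" is normalized to "1"
assert parsed["parent_span_id"] is None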
def get_ogheader(blob, url=None):
"""extract Open Graph markup into a dict
The OG header section is delimited by a line of only `---`.
Note that the page title is not provided as Open Graph metadata if
the image metadata is not specified.
"""
found = False
ogheader = dict()
for line in blob.split('\n'):
if line == '---':
found = True
break
if line.startswith('image: '):
toks = line.split()
assert len(toks) == 2
ogheader['image'] = toks[1]
if not found:
ogheader = dict() # Ignore any matches as false positives
return ogheader
if url is not None:
assert 'url' not in ogheader
ogheader['url'] = url
for line in blob.split('\n'):
if line.startswith('# '):
ogheader['title'] = line[2:]
return ogheader | 4edd7c5545ddef241ee2bfd5e316e47a336aaa3f | 706,652 |
def rgb(r=0, g=0, b=0, mode='RGB'):
"""
Convert **r**, **g**, **b** values to a `string`.
:param r: red part
:param g: green part
:param b: blue part
:param string mode: ``'RGB | %'``
:rtype: string
========= =============================================================
mode Description
========= =============================================================
``'RGB'`` returns a rgb-string format: ``'rgb(r, g, b)'``
``'%'`` returns percent-values as rgb-string format: ``'rgb(r%, g%, b%)'``
========= =============================================================
"""
def percent(value):
value = int(value)
if value < 0:
value = 0
if value > 100:
value = 100
return value
if mode.upper() == 'RGB':
return "rgb(%d,%d,%d)" % (int(r) & 255, int(g) & 255, int(b) & 255)
elif mode == "%":
# see http://www.w3.org/TR/SVG11/types.html#DataTypeColor
# percentage is an 'integer' value
return "rgb(%d%%,%d%%,%d%%)" % (percent(r), percent(g), percent(b))
else:
raise ValueError("Invalid mode '%s'" % mode) | 563b8fe8273ce4534567687df01cebe79b9f58dc | 706,655 |
def load_csv_translations(fname, pfx=''):
"""
Load translations from a tab-delimited file. Add prefix
to the keys. Return a dictionary.
"""
translations = {}
with open(fname, 'r', encoding='utf-8-sig') as fIn:
for line in fIn:
line = line.strip('\r\n ')
if len(line) <= 2 or line.count('\t') != 1:
continue
key, value = line.split('\t')
key = pfx + key
translations[key] = value
return translations | e8b4707fe5eeb0f0f4f4859bd9a5f2272387a022 | 706,656 |
def refines_constraints(storage, constraints):
"""
Determines whether with the storage as basis for the substitution map there is a substitution that can be performed
on the constraints, therefore refining them.
:param storage: The storage basis for the substitution map
:param constraints: The constraint list containing the expressions to be substituted.
:return: True if the substitution would change the constraint list.
"""
storage_names = ["storage[" + str(key) + "]" for key, _ in storage.items()]
for name in storage_names:
for constraint in constraints:
if name in constraint.slot_names:
return True
return False | de82087c41d95240ee9d15bd51810b7c5594ef0f | 706,657 |
def normalize(data, train_split):
""" Get the standard score of the data.
:param data: data set
:param train_split: number of training samples
:return: normalized data, mean, std
"""
mean = data[:train_split].mean(axis=0)
std = data[:train_split].std(axis=0)
return (data - mean) / std, mean, std | cfc45ac5bd6ae7a30169253a1ae3ed64c1bd1118 | 706,659 |
def perimRect(length,width):
"""
Compute perimiter of rectangle
>>> perimRect(2,3)
10
>>> perimRect(4, 2.5)
13.0
>>> perimRect(3, 3)
12
>>>
"""
return 2*(length+width) | 50fdd92430352f443d313d0931bab50ad5617622 | 706,662 |
def simple_list(li):
"""
takes in a list li
returns a sorted list without doubles
"""
return sorted(set(li)) | 1e36f15cea4be4b403f0a9795a2924c08b2cb262 | 706,666 |
import copy
def _clean_root(tool_xml):
"""XSD assumes macros have been expanded, so remove them."""
clean_tool_xml = copy.deepcopy(tool_xml)
to_remove = []
for macros_el in clean_tool_xml.getroot().findall("macros"):
to_remove.append(macros_el)
for macros_el in to_remove:
clean_tool_xml.getroot().remove(macros_el)
return clean_tool_xml | 9df0980265b26a2de1c88d2999f10cd5d1421e0b | 706,668 |
import json
import zlib
import base64
def convert_gz_json_type(value):
"""Provide an ArgumentParser type function to unmarshal a b64 gz JSON string.
"""
return json.loads(zlib.decompress(base64.b64decode(value))) | 1cf0300f40c8367b9129f230a7fef0c9b89ba012 | 706,669 |
import string
import random
def get_random_string(length: int) -> str:
"""
Returns a random string starting with a lower-case letter.
Later parts can contain numbers, lower- and uppercase letters.
Note: Random Seed should be set somewhere in the program!
:param length: How long the required string must be. length > 0 required.
:return: a randomly created string
:raises: ValueError for zero and negative length
"""
if length < 1:
raise ValueError("Random Strings must have length 1 minimum.")
# choose from all lowercase letter
letters = string.ascii_letters + string.digits
first_letter = random.choice(string.ascii_lowercase)
    result_str = ''.join(random.choice(letters) for _ in range(length - 1))
return first_letter + result_str | 6cf20ce7d158ac158ffa49cac427c396cfd840db | 706,671 |
def factorial(n):
"""
Return n! - the factorial of n.
>>> factorial(1)
1
>>> factorial(0)
1
>>> factorial(3)
6
"""
if n<=0:
return 0
elif n==1:
return 1
else:
return n*factorial(n-1) | da5bc6f68375c7db03b7b2bdac1fec2b476ba563 | 706,672 |
def clean_cells(nb_node):
"""Delete any outputs and resets cell count."""
for cell in nb_node['cells']:
if 'code' == cell['cell_type']:
if 'outputs' in cell:
cell['outputs'] = []
if 'execution_count' in cell:
cell['execution_count'] = None
return nb_node | 67dce7ecc3590143730f943d3eb07ae7df9d8145 | 706,675 |
def username(request):
""" Returns ESA FTP username """
return request.config.getoption("--username") | 2393884c2c9f65055cd7a14c1b732fccf70a6e28 | 706,679 |
import struct
def parse_monitor_message(msg):
"""decode zmq_monitor event messages.
Parameters
----------
msg : list(bytes)
zmq multipart message that has arrived on a monitor PAIR socket.
First frame is::
16 bit event id
32 bit event value
no padding
Second frame is the endpoint as a bytestring
Returns
-------
event : dict
event description as dict with the keys `event`, `value`, and `endpoint`.
"""
    if len(msg) != 2 or len(msg[0]) != 6:
        raise RuntimeError("Invalid event message format: %s" % msg)
    event_id, value = struct.unpack("=hi", msg[0])  # unpack the frame once
    event = {
        'event': event_id,
        'value': value,
        'endpoint': msg[1],
    }
return event | df71541d34bc04b1ac25c6435b1b298394e27362 | 706,681 |
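A self-contained check using a hand-packed frame (no live socket needed; event id 1 is illustrative, e.g. zmq.EVENT_CONNECTED in practice):

frame0 = struct.pack("=hi", 1, 42)  # 16-bit event id, 32-bit value
frame1 = b"tcp://127.0.0.1:5555"
event = parse_monitor_message([frame0, frame1])
assert event == {"event": 1, "value": 42, "endpoint": frame1}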
import math
def atan2(y, x):
"""Returns angle of a 2D coordinate in the XY plane"""
return math.atan2(y, x) | ede5a647c175bebf2800c22d92e396deff6077e2 | 706,685 |
def get_column(data, column_index):
"""
Gets a column of data from the given data.
:param data: The data from the CSV file.
:param column_index: The column to copy.
:return: The column of data (as a list).
"""
return [row[column_index] for row in data] | 3fd5c8c76ccfed145aba0e685aa57ad01b3695a5 | 706,686 |
from typing import List
import logging
def get_vocab(iob2_files:List[str]) -> List[str]:
"""Retrieve the vocabulary of the iob2 annotated files
Arguments:
iob2_files {List[str]} -- List of paths to the iob2 annotated files
Returns:
List[str] -- Returns the unique list of vocabulary found in the files
"""
vocab = set()
    for iob2_file in iob2_files:
        logging.info("Loading file %s for creating corpus embeddings", iob2_file)
        with open(iob2_file) as infile:  # close the handle instead of leaking it
            for line in infile:
                token = line.split("\t")[0]
                vocab.add(token)
return list(vocab) | 0dc2a1f969ed6f92b36b1b31875c855d5efda2d9 | 706,687 |
def update_schema(schema_old, schema_new):
"""
Given an old BigQuery schema, update it with a new one.
Where a field name is the same, the new will replace the old. Any
new fields not present in the old schema will be added.
Arguments:
schema_old: the old schema to update
schema_new: the new schema which will overwrite/extend the old
"""
old_fields = schema_old["fields"]
new_fields = schema_new["fields"]
output_fields = list(old_fields)
field_indices = {field["name"]: i for i, field in enumerate(output_fields)}
for field in new_fields:
name = field["name"]
if name in field_indices:
# replace old field with new field of same name
output_fields[field_indices[name]] = field
else:
# add new field
output_fields.append(field)
return {"fields": output_fields} | e97827ac0d8ee943b88fc54506af3f6fc8285d71 | 706,690 |
def parse_flarelabels(label_file):
"""
Parses a flare-label file and generates a dictionary mapping residue identifiers (e.g. A:ARG:123) to a
user-specified label, trees that can be parsed by flareplots, and a color indicator for vertices.
Parameters
----------
label_file : file
A flare-label file where each line contains 2-3 columns formatted as
- CHAIN:RESN:RESI (e.g. A:ARG:123)
- [[TOPLEVEL.]MIDLEVEL.]LABEL (e.g. Receptor.Helix2.2x44)
- COLOR (e.g. #FF0000 or white)
Returns
-------
dict of str : (dict of str : str)
Keys are all residue identifiers and values are dicts that hold both the LABEL by itself (key "label", the full
tree-path (key "treepath") and a CSS-compatible color string (key "color").
Raises
------
AssertionError
if a residue identifier (CHAIN:RESN:RESI) is specified twice in the file, or if a LABEL appears twice.
"""
if label_file is None:
return None
ret = {}
flarelabels = set() # Only used to check for duplicates
for line in label_file:
line = line.strip()
if not line:
continue # Ignore empty lines
columns = line.split("\t")
residentifier = columns[0]
flaretreepath = columns[1] if len(columns) > 1 else columns[0]
flarelabel = flaretreepath.split(".")[-1]
flarecolor = columns[2] if len(columns) > 2 else "white"
if residentifier in ret:
raise AssertionError("Residue identifier '"+residentifier+"' appears twice in "+label_file.name)
if flarelabel in flarelabels:
raise AssertionError("Flare label '"+flarelabel+"' used twice in "+label_file.name)
ret[residentifier] = {"label": flarelabel, "treepath": flaretreepath, "color": flarecolor}
flarelabels.add(flarelabel)
return ret | 23df49af14af720311b320f65894e995983365bf | 706,691 |
def get_hmm_datatype(query_file):
"""Takes an HMM file (HMMer3 software package) and determines what data
type it has (i.e., generated from an amino acid or nucleic acid alignment).
Returns either "prot" or "nucl".
"""
datatype = None
with open(query_file) as infh:
for i in infh:
if i.startswith('ALPH'):
dname = i.strip().split(' ')[1]
if dname == 'amino':
datatype = 'prot'
elif dname == 'DNA':
datatype = 'nucl'
break
# Check that it worked.
assert datatype is not None, """Error: Data type could not be
determined for input file: %s""" % query_file
# Return the data type.
return datatype | 27653784b8a9fbae92226f8ea7d7b6e2b647765e | 706,692 |
def none(**_):
""" Input: anything
Return: 0.0 (float)
Descr.: Dummy method to handle no temperature correction"""
return 0.0 | e06b22f91d5a73450ddb4ca53fbb2569d567dcf1 | 706,698 |
def get_custom_headers(manifest_resource):
"""Generates the X-TAXII-Date-Added headers based on a manifest resource"""
headers = {}
times = sorted(map(lambda x: x["date_added"], manifest_resource.get("objects", [])))
if len(times) > 0:
headers["X-TAXII-Date-Added-First"] = times[0]
headers["X-TAXII-Date-Added-Last"] = times[-1]
return headers | 6c3acf2ea330b347387bfec574b4f8edfffa69ab | 706,699 |
import csv
def read_pinout_csv(csv_file, keyname="number"):
"""
read a csv file and return a dict with the given keyname as the keys
"""
    with open(csv_file) as f:  # close the file handle instead of leaking it
        reader = csv.DictReader(f)
        return {row[keyname]: row for row in reader} | 07a30b1191d311fee315c87773e3b3c1111d7624 | 706,701 |
import torch
def pick_action(action_distribution):
"""action selection by sampling from a multinomial.
Parameters
----------
action_distribution : 1d torch.tensor
action distribution, pi(a|s)
Returns
-------
    torch.tensor(int)
        sampled action
"""
m = torch.distributions.Categorical(action_distribution)
a_t = m.sample()
return a_t | ac7ceb0df860876ec209563eaa6bdd3f8bd09189 | 706,707 |
from typing import List
def word_tokenizer(text: str) -> List[str]:
"""Tokenize input text splitting into words
Args:
text : Input text
Returns:
Tokenized text
"""
return text.split() | dc6e4736d7a1f564bcfc6fed081a1869db38eea5 | 706,708 |
def to_bits_string(value: int) -> str:
"""Converts unsigned value to a bit string with _ separators every nibble."""
if value < 0:
raise ValueError(f'Value is not unsigned: {value!r}')
bits = bin(value)[2:]
rev = bits[::-1]
pieces = []
i = 0
while i < len(rev):
pieces.append(rev[i:i + 4])
i += 4
return '0b' + '_'.join(pieces)[::-1] | 07dea253378686a1c65c97fad3d0b706e02335c4 | 706,712 |
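For example:

assert to_bits_string(0b1_0101_0101) == '0b1_0101_0101'
assert to_bits_string(5) == '0b101'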