content | sha1 | id
---|---|---|
def valid_pairs(pairs, chain):
"""
Return True if every consecutive pair in the chain is a known trading pair
(in either direction); return False if the chain contains an invalid pair (e.g. ETH_XMR)
"""
for primary, secondary in zip(chain[:-1], chain[1:]):
if (primary, secondary) not in pairs and \
(secondary, primary) not in pairs:
return False
return True | c9e36d0490893e1b1a6cd8c3fb0b14b382d69515 | 4,214 |
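A minimal usage sketch with hypothetical trading pairs, assuming the valid_pairs definition above is in scope:
pairs = {("BTC", "ETH"), ("ETH", "XMR")}  # hypothetical pair set
assert valid_pairs(pairs, ["BTC", "ETH", "XMR"])  # every hop is a listed pair
assert not valid_pairs(pairs, ["BTC", "XMR"])  # BTC_XMR is not listed in either direction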
from typing import Any
def fqname_for(obj: Any) -> str:
"""
Returns the fully qualified name of ``obj``.
Parameters
----------
obj
The class we are interested in.
Returns
-------
str
The fully qualified name of ``obj``.
"""
if "<locals>" in obj.__qualname__:
raise RuntimeError(
"Can't get fully qualified name of locally defined object. "
f"{obj.__qualname__}"
)
return f"{obj.__module__}.{obj.__qualname__}" | 6d4e5db255715c999d1bb40533f3dbe03b948b07 | 4,215 |
def symbol_size(values):
""" Rescale given values to reasonable symbol sizes in the plot. """
max_size = 50.0
min_size = 5.0
# Linear rescale: values.max() -> max_size, values.min() -> min_size.
slope = (max_size - min_size)/(values.max() - values.min())
return slope*(values - values.max()) + max_size | a33f77ee8eeff8d0e63035c5c408a0788b661886 | 4,216 |
def import_by_name(name):
"""
Dynamically import an object from its dotted path (module.attribute).
"""
tmp = name.split(".")
module_name = ".".join(tmp[0:-1])
obj_name = tmp[-1]
module = __import__(module_name, globals(), locals(), [obj_name])
return getattr(module, obj_name) | 714ca90704d99a8eafc8db08a5f3df8e17bc6da4 | 4,217 |
def hexColorToInt(rgb):
"""Convert rgb color string to STK integer color code."""
r = int(rgb[0:2],16)
g = int(rgb[2:4],16)
b = int(rgb[4:6],16)
color = format(b, '02X') + format(g, '02X') + format(r, '02X')
return int(color,16) | 59b8815d647b9ca3e90092bb6ee7a0ca19dd46c2 | 4,218 |
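A quick sanity check with made-up colour values, assuming the hexColorToInt definition above: STK packs the channels in BGR order, so pure red becomes 0x0000FF.
assert hexColorToInt("FF0000") == 0x0000FF  # red ends up in the low byte
assert hexColorToInt("00FF00") == 0x00FF00  # green sits in the middle byte either way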
def insert_at_index(rootllist, newllist, index):
""" Insert newllist in the llist following rootllist such that newllist is at the provided index in the resulting llist"""
# At start
if index == 0:
newllist.child = rootllist
return newllist
# Walk through the list
curllist = rootllist
for i in range(index-1):
curllist = curllist.child
# Insert
newllist.last().child=curllist.child
curllist.child=newllist
return rootllist | 767cde29fbc711373c37dd3674655fb1bdf3fedf | 4,219 |
def priority(n=0):
"""
Sets the priority of the plugin.
Higher values indicate a higher priority.
This should be used as a decorator.
Returns a decorator function.
:param n: priority (higher values = higher priority)
:type n: int
:rtype: function
"""
def wrapper(cls):
cls._plugin_priority = n
return cls
return wrapper | 58ab19fd88e9e293676943857a0fa04bf16f0e93 | 4,221 |
def sanitize_option(option):
"""
Format the given string by stripping the trailing parentheses
eg. Auckland City (123) -> Auckland City
:param option: String to be formatted
:return: Substring without the trailing parentheses
"""
return ' '.join(option.split(' ')[:-1]).strip() | ece0a78599e428ae8826b82d7d00ffc39495d27f | 4,222 |
def node_values_for_tests():
"""Creates a list of possible node values for parameters
Returns:
List[Any]: possible node values
"""
return [1, 3, 5, 7, "hello"] | b919efc5e59a5827b3b27e4f0a4cd070ceb9a5a4 | 4,223 |
import six
def get_from_module(identifier, module_params, module_name,
instantiate=False, kwargs=None):
"""The function is stolen from keras.utils.generic_utils.
"""
if isinstance(identifier, six.string_types):
res = module_params.get(identifier)
if not res:
raise Exception('Invalid ' + str(module_name) + ': ' +
str(identifier))
if instantiate and not kwargs:
return res()
elif instantiate and kwargs:
return res(**kwargs)
else:
return res
elif type(identifier) is dict:
name = identifier.pop('name')
res = module_params.get(name)
if res:
return res(**identifier)
else:
raise Exception('Invalid ' + str(module_name) + ': ' +
str(identifier))
return identifier | 406a1da5843feb8556bbd1802426b57e7a33b20d | 4,225 |
def get_equations(points):
""" Calculate affine equations of inputted points
Input : 1
points : list of list
ex : [[[x1, y1], [x2, y2], [x3, y3], [x4, y4]], ...] with four points per
identified element
Contains, for each element, the start and end coordinates of its two
separation lines i.e. [[[start x, y], [end x, y], [start x, y], [end x, y]], ...]
Output : 2
columns_a : list of list
Contains all the a coefficients of an affine equation (y = ax + b)
of all the calculated lines, in the same order as the input
columns_b : list of list
Contains all the b coefficients of an affine equation (y = ax + b)
of all the calculated lines, in the same order as the input"""
columns_a, columns_b = [], []
# iterate through points
for k in points:
# calculate the a coefficients of start and end separation lines of this element
a1 = (k[0][1] - k[1][1])/(k[0][0] - k[1][0])
a2 = (k[2][1] - k[3][1])/(k[2][0] - k[3][0])
columns_a.append([a1, a2])
# then calculate the b coefficients of start and end separation lines
# using the a coeff calculated before
b1 = k[0][1] - a1*k[0][0]
b2 = k[2][1] - a2*k[2][0]
columns_b.append([b1, b2])
return (columns_a, columns_b) | 4eea43aee8b5f9c63793daae0b28e3c8b4ce0929 | 4,226 |
def Temple_Loc(player, num):
"""temple location function"""
player.coins -= num
player.score += num
player.donation += num
# player = temple_bonus_check(player) for achievements
return (player) | dced7b9f23f63c0c51787291ab12701bd7021152 | 4,227 |
import pickle
def from_pickle(input_path):
"""Read from pickle file."""
with open(input_path, 'rb') as f:
unpickler = pickle.Unpickler(f)
return unpickler.load() | 4e537fcde38e612e22004007122130c545246afb | 4,229 |
def get_only_metrics(results):
"""Turn dictionary of results into a list of metrics"""
metrics_names = ["test/f1", "test/precision", "test/recall", "test/loss"]
metrics = [results[name] for name in metrics_names]
return metrics | 1b0e5bb8771fdc44dcd22ff9cdb174f77205eadd | 4,230 |
def pkcs7_unpad(data):
"""
Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
"""
if isinstance(data, str):
return data[0:-ord(data[-1])]
else:
return data[0:-data[-1]] | 4b43b80220e195aa51c129b6cbe1f216a94360cd | 4,233 |
def minus (s):
""" Replace the last minus with an equals sign. """
q = s.rsplit ('-', 1)
return q[0] + '=' + q[1] | 8d4ae538d866a930603b71ccdba0b18145af9988 | 4,234 |
def _chk_y_path(tile):
"""
Check to make sure tile is among left most possible tiles
"""
if tile[0] == 0:
return True
return False | cf733c778b647654652ae5c651c7586c8c3567b8 | 4,235 |
import re
def alphanum_key(string):
"""Return a comparable sort key (a list of text and number segments) for natural sorting.
Adapted from: http://stackoverflow.com/a/2669120/176978
"""
convert = lambda text: int(text) if text.isdigit() else text
return [convert(segment) for segment in re.split('([0-9]+)', string)] | 0e5e3f1d6aa43d393e1fb970f64e5910e7dc53fc | 4,236 |
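A small usage sketch, assuming the alphanum_key definition above: passing it as a sort key gives a natural (human) ordering of numbered names.
names = ["file10", "file2", "file1"]  # hypothetical file names
assert sorted(names, key=alphanum_key) == ["file1", "file2", "file10"]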
import copy
def update_cfg(base_cfg, update_cfg):
"""used for mmcv.Config or other dict-like configs."""
res_cfg = copy.deepcopy(base_cfg)
res_cfg.update(update_cfg)
return res_cfg | c03dcfa7ac6d2f5c6745f69028f7cdb2ebe35eec | 4,237 |
import subprocess
def get_sub_bibliography(year, by_year, bibfile):
"""Get HTML bibliography for the given year"""
entries = ','.join(['@' + x for x in by_year[year]])
input = '---\n' \
f'bibliography: {bibfile}\n' \
f'nocite: "{entries}"\n...\n' \
f'# {year}'
out = subprocess.run(['pandoc', '--filter=pandoc-citeproc',
'-f', 'markdown'],
input=input, capture_output=True,
encoding='utf-8')
if out.returncode != 0:
raise AssertionError(out.stderr)
return out.stdout | 7c990c0a1e463f1db0fdc9a522f7c224f5969f7f | 4,238 |
def GetDepthFromIndicesMapping(list_indices):
"""
GetDepthFromIndicesMapping
==========================
Gives the depth of the nested list from the index mapping
@param list_indices: a nested list representing the indexes of the nested lists by depth
@return: depth
"""
return max([len(x[0]) for x in list_indices])+1 | c2318b3c6a398289c2cbf012af4c562d3d8bc2da | 4,240 |
from typing import Dict
def generate_person(results: Dict):
"""
Create a dictionary from sql that queried a person
:param results:
:return:
"""
person = None
if len(results) > 0:
person = {
"id": results[0],
"name": results[1].decode("utf-8"),
"img_url": results[2].decode("utf-8"),
"location": results[3].decode("utf-8"),
"colors": (results[4].decode("utf-8")).split(",")
}
return person | 21c2f2c8fa43c43eabf06785203556ccae708d45 | 4,241 |
def paliindrome_sentence(sentence: str) -> bool:
"""
Return True if ``sentence`` is a palindrome, ignoring case and non-alphanumeric characters.
"""
string = ''
for char in sentence:
if char.isalnum():
string += char
return string[::-1].casefold() == string.casefold() | 4559f9f823f748f137bbe1eb96070dba8e7d867d | 4,242 |
def get_default_pool_set():
"""Return the names of supported pooling operators
Returns:
a tuple of pooling operator names
"""
output = ['sum', 'correlation1', 'correlation2', 'maximum']
return output | 32d28fdb80ecdacab8494251edd87b566128fd79 | 4,243 |
import os
def excel_file2():
"""Test data for custom data column required fields."""
return os.path.join('test', 'data', 'NADataErrors_2018-05-19_v1.0.xlsx') | 84a6ae00e88b8035f92b9c4c8702a63a0631ec0f | 4,244 |
import numpy as np
def rate_multipressure(qD, delta_p, B, mu, perm, h):
"""Calculate Rate as Sum of Constant Flowing Pressures"""
return ((.007082 * perm * h) / (B * mu)) * (np.sum(qD * delta_p)) | a8621613abb63bb6f15c71ab3ba02d65ab160e6b | 4,246 |
def is_leap_year(year):
"""
Is the current year a leap year?
Args:
year (int): The year you wish to check.
Returns:
bool: Whether the year is a leap year (True) or not (False).
"""
if year % 4 == 0 and (year % 100 > 0 or year % 400 == 0): return True
return False | 16e4c83adc9d42dae2396186f980755b33af9188 | 4,248 |
def pandoc_command(event, verbose=True):
#@+<< pandoc command docstring >>
#@+node:ekr.20191006153547.1: *4* << pandoc command docstring >>
"""
The pandoc command writes all @pandoc nodes in the selected tree to the
files given in each @pandoc node. If no @pandoc nodes are found, the
command looks up the tree.
Each @pandoc node should have the form: `@pandoc x.adoc`. Relative file names
are relative to the base directory. See below.
By default, the pandoc command creates AsciiDoctor headings from Leo
headlines. However, the following kinds of nodes are treated differently:
- @ignore-tree: Ignore the node and its descendants.
- @ignore-node: Ignore the node.
- @no-head: Ignore the headline. Do not generate a heading.
After running the pandoc command, use the pandoc tool to convert the x.adoc
files to x.html.
Settings
--------
@string pandoc-base-directory specifies the base for relative file names.
The default is c.frame.openDirectory
Scripting interface
-------------------
Scripts may invoke the pandoc command as follows::
event = g.Bunch(base_directory=my_directory, p=some_node)
c.markupCommands.pandoc_command(event=event)
This @button node runs the pandoc command and converts all results to .html::
import os
paths = c.markupCommands.pandoc_command(event=g.Bunch(p=p))
paths = [z.replace('/', os.path.sep) for z in paths]
input_paths = ' '.join(paths)
g.execute_shell_commands(['asciidoctor %s' % input_paths])
"""
#@-<< pandoc command docstring >>
c = event and event.get('c')
if not c:
return None
return c.markupCommands.pandoc_command(event, verbose=verbose) | 972cd1e683b70c175e6b1710b9efb875932d8359 | 4,249 |
def random_majority_link_clf():
"""
for link classification we do not select labels from a fixed distribution
but instead we set labels to the number of possible segments in a sample.
I.e. we only predict a random link out of all the possible link paths in a sample.
"""
def clf(labels, k:int):
##only to self
#return np.arange(k)
# only one forward
#return [min(i+1, k-1) for i in range(k)]
# only one back
return [max(0, i-1) for i in range(k)]
## link to the segment behind or the one ahead.
## If i == k-1, we take i or i-1. if i == 0, we take i or i+1
#return [random.choice([max(0, i-1), i, min(i+1, k-1)]) for i in range(k)]
return clf | f53f2fee85914e25d5e407808dcfbee623b0782a | 4,250 |
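A short sketch of what the returned classifier does for a sample with four segments, assuming the definition above; the labels argument is ignored, and the active branch is deterministic (each segment links to the segment directly behind it).
clf = random_majority_link_clf()
assert clf(labels=None, k=4) == [0, 0, 1, 2]  # segment i links to max(0, i - 1)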
def weight_point_in_circle(
point: tuple,
center: tuple,
radius: int,
corner_threshold: float = 1.5
):
"""
Function to decide whether a certain grid coordinate should be a full, half or empty tile.
Arguments:
point (tuple): x, y of the point to be tested
center (tuple): x, y of the origin (center) point
radius (int): radius of certainly empty tiles, does not include half tiles
corner_threshold (float): threshold that decides if the tile should be a half tile instead of empty
Returns:
int: the type of the tested tile
0 if empty tile
1 if full tile
2 if half tile
"""
diff_x, diff_y = map(lambda x, y: abs(x - y), center, point) # subtract point from center then abs for both x and y
if (diff_y > radius) or (diff_x > radius):
return 0 # eliminate any obviously out of bounds tiles
# precalculate pythagoras distance squared
dist_squared = (diff_x * diff_x) + (diff_y * diff_y)
# precalculate radius squared
radius_squared = radius * radius
# precalculate rounded distance
rounded_distance = round(dist_squared)
if rounded_distance < radius_squared: # distance within radius
return 1 # full tile
elif rounded_distance < radius_squared * corner_threshold and diff_x < radius: # distance on edge
return 2 # half tile
# outside of any thresholds
return 0 | db0da5e101184975385fb07e7b22c5e8a6d4fd47 | 4,251 |
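A few illustrative calls with assumed coordinates, using the weight_point_in_circle definition above and a circle of radius 3 centred at the origin:
assert weight_point_in_circle((2, 2), (0, 0), 3) == 1  # well inside: full tile
assert weight_point_in_circle((2, 3), (0, 0), 3) == 2  # near the edge: half tile
assert weight_point_in_circle((4, 0), (0, 0), 3) == 0  # outside: empty tile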
import os
def disk_usage(path):
"""returns disk usage for a path"""
total = os.path.getsize(path)
if os.path.isdir(path):
for filename in os.listdir(path):
child_path = os.path.join(path, filename)
total += disk_usage(child_path)
print(f"{total:<10} {path}")
return total | 0c4901f94d562d7def81afcef4ffa27fe48c106c | 4,252 |
def get_arrival_times(inter_times):
"""Convert interevent times to arrival times."""
return inter_times.cumsum() | 7197fc6315d3eaca118ca419f23aed7c0d7cd064 | 4,254 |
def contains_vendored_imports(python_path):
"""
Returns True if ``python_path`` seems to contain vendored imports from botocore.
"""
# We're using a very rough heuristic here: if the source code contains
# strings that look like a vendored import, we'll flag.
#
# Because Python is dynamic, there are lots of ways you could be
# importing the vendored modules that wouldn't be caught this way, but:
#
# 1. Doing it in a complete foolproof way is incredibly complicated, and
# I don't care that much.
# 2. If you're writing your Lambda code in a deliberately obfuscated way,
# you have bigger problems than vendor deprecations.
#
# In practice, Python imports are usually near the top of the file, so we
# read it line-by-line. This means if we find an import, we can skip
# reading the rest of the file.
#
with open(python_path, "rb") as python_src:
for line in python_src:
if (
b"import botocore.vendored" in line
or b"from botocore.vendored import " in line
):
return True
return False | 90ed6939d7f43cac29eb66c3e27e911b9cc62532 | 4,255 |
def filter_uniq(item):
"""Web app, feed template, creates unique item id"""
detail = item['item']
args = (item['code'], item['path'], str(detail['from']), str(detail['to']))
return ':'.join(args) | 914fa4e3fcdf6bc7e6a30b46c8f33eecd08adcf1 | 4,256 |
def atoi(s, base=None): # real signature unknown; restored from __doc__
"""
atoi(s [,base]) -> int
Return the integer represented by the string s in the given
base, which defaults to 10. The string s must consist of one
or more digits, possibly preceded by a sign. If base is 0, it
is chosen from the leading characters of s, 0 for octal, 0x or
0X for hexadecimal. If base is 16, a preceding 0x or 0X is
accepted.
"""
return 0 | 420c9a68c1fe829a665eaba830df757114a81b47 | 4,257 |
def precip_units(units):
"""
Return a standardized name for precip units.
"""
kgm2s = ['kg/m2/s', '(kg/m^2)/s', 'kg/m^2/s', 'kg m^-2 s^-1',
'kg/(m^2 s)', 'kg m-2 s-1']
mmday = ['mm/day', 'mm day^-1']
if units.lower() in kgm2s:
return 'kg m^-2 s^-1'
elif units.lower() in mmday:
return 'mm day^-1'
else:
raise ValueError('Unknown units ' + units) | e5f94c3dd41b68d2e7b6b7aa1905fd5508a12fab | 4,258 |
def calcul_acc(labels, preds):
"""
a private function for calculating accuracy
Args:
labels (Object): actual labels
preds (Object): predict labels
Returns:
float: the fraction of predictions that match the labels
"""
return sum(1 for x, y in zip(labels, preds) if x == y) / len(labels) | 3dc22c8707c181dda50e2a37f2cd822b2a31590d | 4,259 |
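A trivial sanity check with made-up labels, assuming the calcul_acc definition above (the return value is the fraction of matches, not None):
assert calcul_acc([1, 0, 1, 1], [1, 1, 1, 0]) == 0.5  # 2 of 4 predictions match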
def get_preprocessor(examples, tokenize_fn, pad_ids):
"""
Input:
examples: [List[str]] input texts
tokenize_fn: [function] encodes text into IDs
Output:
tf input features
"""
def generator():
for example in examples:
tokens = tokenize_fn(example)
yield pad_ids + tokens
return generator | 0b2fb2217e04183fee027faedd163a8f8a048e9a | 4,260 |
def read_user(msg):
"""Read user input.
:param msg: A message to prompt
:type msg: ``str``
:return: ``True`` if user gives 'y', otherwise False.
:rtype: ``bool``
"""
user_input = input("{msg} y/n?: ".format(msg=msg))
return user_input == 'y' | 662e95002130a6511e6e9a5d6ea85805f6b8f0f5 | 4,261 |
import requests
def get_tenants(zuul_url):
""" Fetch list of tenant names """
is_witelabel = requests.get(
"%s/info" % zuul_url).json().get('tenant', None) is not None
if is_witelabel:
raise RuntimeError("Need multitenant api")
return [
tenant["name"]
for tenant in requests.get("%s/tenants" % zuul_url).json()
] | 97944d2de2a8dfc2dd50dbea46a135a184e7aa37 | 4,262 |
def get_accuracy(pred, target):
"""gets accuracy either by single prediction
against target or comparing their codes """
if len(pred.size()) > 1:
pred = pred.max(1)[1]
#pred, target = pred.flatten(), target.flatten()
accuracy = round(float((pred == target).sum())/float(pred.numel()) * 100, 3)
return accuracy | f30e57602e4a06b0a0e3cd131bf992cf8f9b514e | 4,263 |
import numpy
def ifourier_transform(F,dt,n):
"""
See Also
-------
fourier_transform
"""
irfft = numpy.fft.irfft
shift = numpy.fft.fftshift
return (1.0/dt)*shift(irfft(F,n=n)) | d068cdbbe95f58d4210d2e799dfaee878fb9bf98 | 4,264 |
import argparse
def parse_arguments():
"""
Function to parse command line arguments
from the user
Returns
-------
opts : dict
command line arguements from the user
"""
info = 'Divides pdb info files for parallelization'
parser = argparse.ArgumentParser(description=info)
# program arguments
parser.add_argument('-f', '--in-file',
type=str,
required=True,
help='PDB info file to divide')
parser.add_argument('-n', '--num-splits',
default=1000,
type=int,
help='Number of splits to perform (Default: 1000)')
parser.add_argument('-m', '--mut-file',
type=str,
required=True,
help='File containing mutation information')
parser.add_argument('--split-dir',
default = "../data/split_pdbs/",
type=str,
help='Output directory for split PDB info files')
args = parser.parse_args()
opts = vars(args)
return opts | 8bdc260c1dcb779c7b30927651e26c05a9c0d5f5 | 4,265 |
def stringify_parsed_email(parsed):
"""
Convert a parsed email tuple into a single email string
"""
if len(parsed) == 2:
return f"{parsed[0]} <{parsed[1]}>"
return parsed[0] | 6552987fe6a06fdbb6bd49e5d17d5aadaae3c832 | 4,267 |
def base_to_str( base ):
"""Converts 0,1,2,3 to A,C,G,T"""
if 0 == base: return 'A'
if 1 == base: return 'C'
if 2 == base: return 'G'
if 3 == base: return 'T'
raise RuntimeError( 'Bad base: %d' % base ) | f1c98b7c24fae91c1f809abe47929d724c886168 | 4,268 |
import argparse
def parse_args(args):
"""
function parse_args takes arguments from the CLI and returns them parsed for later use.
:param list args : pass arguments from sys cmd line or directly
:return dict: parsed arguments
"""
parser = argparse.ArgumentParser()
required = parser.add_argument_group()
required.add_argument("-d", "--date", help="Date of Flight", required=True)
# name origin used because of conflicting name "from" in tests etc. cannot use parser.from
required.add_argument("-fr", "--from", help="IATA code of Departure",
dest="origin", required=True)
required.add_argument("-t", "--to", help="IATA code of Destination",
required=True)
days_in_destination = parser.add_mutually_exclusive_group()
days_in_destination.add_argument("-o", "--one-way", action="store_const",
help="Oneway ticket", dest="days_in_destination",
const="oneway")
days_in_destination.add_argument("-r", "--return", action="store",
help="Round ticket followed by number of days in destination",
dest="days_in_destination")
days_in_destination.set_defaults(days_in_destination='oneway')
sort = parser.add_mutually_exclusive_group()
sort.add_argument("-c", "--cheapest", action="store_const",
help="Book cheapest flight", dest="sort", const="price")
sort.add_argument("-fa", "--fastest", action="store_const",
help="Book fastest flight", dest="sort",
const="duration")
sort.set_defaults(sort='price')
parser.add_argument("-b", "--bags", help="Number of checked-in baggages", default='0')
parser.add_argument("-v", "--verbose", help="sets verbose output", action='store_true')
return parser.parse_args(args) | e3783dc173696b5758d8fc90bc810841e043be0a | 4,269 |
def dictmask(data, mask, missing_keep=False):
"""dictmask masks dictionary data based on mask"""
if not isinstance(data, dict):
raise ValueError("First argument with data should be dictionary")
if not isinstance(mask, dict):
raise ValueError("Second argument with mask should be dictionary")
if not isinstance(missing_keep, bool):
raise ValueError("Argument missing_keep should be bool type")
res = {}
for k, v in data.items():
if k not in mask:
if missing_keep is True:
res[k] = v
continue
if mask[k] is None or mask[k] is False:
continue
if mask[k] is True or data[k] is None:
res[k] = v
continue
if isinstance(data[k], dict) and isinstance(mask[k], dict):
res[k] = dictmask(data[k], mask[k])
continue
if isinstance(data[k], list) and isinstance(mask[k], list):
if len(mask[k]) != 1:
raise ValueError("Mask inside list should have only one item")
res2 = []
for i in range(len(data[k])):
res2.append(dictmask(data[k][i], mask[k][0], missing_keep))
res[k] = res2
else:
raise ValueError(
f"Cannot proceed key {k} with values of different types:"
f"{type(data[k])}, {type(mask[k])}"
)
return res | d18f6effb4367628ba85095024189d0f6694dd52 | 4,270 |
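A usage sketch with made-up data, assuming the dictmask definition above: keys masked with True (or a nested mask) are kept, everything else is dropped unless missing_keep is set.
data = {"a": 1, "b": {"c": 2, "d": 3}, "e": 4}
mask = {"a": True, "b": {"c": True}}
assert dictmask(data, mask) == {"a": 1, "b": {"c": 2}}  # "e" and "b.d" are dropped
assert dictmask(data, mask, missing_keep=True)["e"] == 4  # unmasked top-level keys survive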
def _format_warning(message, category, filename, lineno, line=None):
"""
Replacement for warnings.formatwarning that disables the echoing of
the 'line' parameter.
"""
return "{}:{}: {}: {}\n".format(filename, lineno, category.__name__, message) | 8267150c5890759d2f2190ccf4b7436ea8f55204 | 4,272 |
def wordify_open(p, word_chars):
"""Prepend the word start markers."""
return r"(?<![{0}]){1}".format(word_chars, p) | 8b267aaca897d6435a84f22064f644727ca6e83c | 4,274 |
def user_info(context, **kwargs):
"""
Displays information about the currently authenticated user, or links to log in and register.
Usage example::
{% user_info %}
:param context: template context
:param kwargs: HTML attributes of the wrapping tag
:return:
"""
request = context['request']
return {'user': request.user, 'data': kwargs} | 20321056fd5fdf8f51e79fb66d335272e85ada0d | 4,275 |
def is_array_of(obj, classinfo):
"""
Check if obj is a list of classinfo or a tuple of classinfo or a set of classinfo
:param obj: an object
:param classinfo: type of class (or subclass). See isinstance() build in function for more info
:return: flag: True or False
"""
flag = False
if isinstance(obj, classinfo):
pass
elif all(isinstance(item, classinfo) for item in obj):
flag = True
return flag | 5fecce974b5424cff7d5e6a4a9f9bd1482e10e85 | 4,276 |
from textwrap import dedent
def make_check_stderr_message(stderr, line, reason):
"""
Create an exception message to use inside check_stderr().
"""
return dedent("""\
{reason}:
Caused by line: {line!r}
Complete stderr: {stderr}
""").format(stderr=stderr, line=line, reason=reason) | a6510e8036ab27e6386e6bc8e6c33727849282c0 | 4,277 |
def stripped_spaces_around(converter):
"""Make converter that strips leading and trailing spaces.
``converter`` is called to further convert non-``None`` values.
"""
def stripped_text_converter(value):
if value is None:
return None
return converter(value.strip())
return stripped_text_converter | b92f38d3eb8d191f615488bbd11503bae56ef6de | 4,278 |
def list_in_list(a, l):
"""Checks if a list is in a list and returns its index if it is (otherwise
returns -1).
Parameters
----------
a : list()
List to search for.
l : list()
List to search through.
"""
return next((i for i, elem in enumerate(l) if elem == a), -1) | 494d9a880bcd2084a0f50e292102dc8845cbbb16 | 4,280 |
def sent_to_idx(sent, word2idx, sequence_len):
"""
convert sentence to index array
"""
unknown_id = word2idx.get("UNKNOWN", 0)
sent2idx = [word2idx.get(word, unknown_id) for word in sent.split("_")[:sequence_len]]
return sent2idx | ffaa65741d8c24e02d5dfbec4ce84c03058ebeb8 | 4,281 |
def list_data(args, data):
"""List all servers and files associated with this project."""
if len(data["remotes"]) > 0:
print("Servers:")
for server in data["remotes"]:
if server["name"] == server["location"]:
print(server["user"] + "@" + server["location"])
else:
print(
server["user"] + "@" + server["name"] + " ("
+ server["location"] + ")")
else:
print("No servers added")
print("Included files and directories:")
print(data["file"] + ".py")
if len(data["files"]) > 0:
print("\n".join(data["files"]))
return data | 6a005b6e605d81985fca85ca54fd9b29b28128f5 | 4,282 |
def anchor_inside_flags(flat_anchors, valid_flags, img_shape,
allowed_border=0, device='cuda'):
"""Anchor inside flags.
:param flat_anchors: flat anchors
:param valid_flags: valid flags
:param img_shape: image meta info
:param allowed_border: if allow border
:return: inside flags
"""
img_h, img_w = img_shape[:2]
if device == 'cuda':
img_h = img_h.cuda()
img_w = img_w.cuda()
img_h = img_h.float()
img_w = img_w.float()
valid_flags = valid_flags.bool()
if allowed_border >= 0:
inside_flags = (valid_flags & (flat_anchors[:, 0] >= -allowed_border) & (
flat_anchors[:, 1] >= -allowed_border) & (
flat_anchors[:, 2] < img_w + allowed_border) & (
flat_anchors[:, 3] < img_h + allowed_border))
else:
inside_flags = valid_flags
return inside_flags | 500fe39f51cbf52bd3417b14e7ab7dcb4ec2f9cc | 4,283 |
import sys
def parseExonBounds(start, end, n, sizes, offsets):
"""
Parse the last 2 columns of a BED12 file and return a list of tuples with
(exon start, exon end) entries.
If the line is malformed, issue a warning and return (start, end)
"""
offsets = offsets.strip(",").split(",")
sizes = sizes.strip(",").split(",")
offsets = offsets[0:n]
sizes = sizes[0:n]
try:
starts = [start + int(x) for x in offsets]
ends = [start + int(x) + int(y) for x, y in zip(offsets, sizes)]
except:
sys.stderr.write("Warning: Received an invalid exon offset ({0}) or size ({1}), using the entry bounds instead ({2}-{3})\n".format(offsets, sizes, start, end))
return [(start, end)]
if len(offsets) < n or len(sizes) < n:
sys.stderr.write("Warning: There were too few exon start/end offsets ({0}) or sizes ({1}), using the entry bounds instead ({2}-{3})\n".format(offsets, sizes, start, end))
return [(start, end)]
return [(x, y) for x, y in zip(starts, ends)] | 8252fea73d80dc0a78cd28fb05e63eb687fb1f27 | 4,284 |
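An illustrative call with made-up BED12 fields, assuming the parseExonBounds definition above: two exons at offsets 0 and 50 with sizes 10 and 20.
assert parseExonBounds(100, 200, 2, "10,20,", "0,50,") == [(100, 110), (150, 170)]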
def _GenerateGstorageLink(c, p, b):
"""Generate Google storage link given channel, platform, and build."""
return 'gs://chromeos-releases/%s-channel/%s/%s/' % (c, p, b) | e5e4a0eb9e27b0f2d74b28289c8f02dc0454f438 | 4,285 |
def _has_desired_permit(permits, acategory, astatus):
"""
return True if permits has one whose
category_code and status_code match with the given ones
"""
if permits is None:
return False
for permit in permits:
if permit.category_code == acategory and\
permit.status_code == astatus:
return True
return False | 4cac23303e2b80e855e800a7d55b7826fabd9992 | 4,287 |
def computeHashCheck(ringInputString, ringSize):
"""Calculate the knot hash check.
Args:
ringInputString (str): The list of ints to be hashed as a comma-separated list.
ringSize (int): The size of the ring to be \"knotted\".
Returns:
int: Value of the hash check.
"""
ringInputList = [int(i) for i in ringInputString.split(',')]
ringContents = [i for i in range(ringSize)]
cursorPosition = 0
skipSize = 0
# Hashing algorithm as defined in AoC Day 10 instructions...
for length in ringInputList:
#
# Duplicate the ring contents to allow for exceeding the length of the original list
#
doubleContents = ringContents + ringContents
# Reverse the order of that length of elements in the list, starting with the element
# at the current position
sublist = doubleContents[cursorPosition:cursorPosition+length]
sublist.reverse()
doubleContents[cursorPosition:cursorPosition+length] = sublist
if cursorPosition + length > ringSize:
ringContents = doubleContents[ringSize:cursorPosition+ringSize] + doubleContents[cursorPosition:ringSize]
else:
ringContents = doubleContents[:ringSize]
# Move the current position forward by that length plus the skip size
cursorPosition = cursorPosition + length + skipSize
# Deal with going around the ring
if cursorPosition > ringSize:
cursorPosition -= ringSize
# Increase the skip size by one
skipSize += 1
# The hash is then the product of the first two elements in the transformed list
check = ringContents[0] * ringContents[1]
#print(ringContents)
return check | 75dce4aacdd4ae03fa34532471a21a43a81fbd13 | 4,289 |
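A worked check using the small example the AoC Day 10 comment refers to (lengths 3,4,1,5 on a ring of size 5 leave [3, 4, 2, 1, 0], whose first two elements multiply to 12), assuming the computeHashCheck definition above:
assert computeHashCheck("3,4,1,5", 5) == 12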
def compute_dl_target(location):
"""
When the location is empty, set the location path to
/usr/sys/inst.images
return:
return code : 0 - OK
1 - if error
dl_target value or msg in case of error
"""
if not location or not location.strip():
loc = "/usr/sys/inst.images"
else:
loc = location.rstrip('/')
dl_target = loc
return 0, dl_target | 419b9fcad59ca12b54ad981a9f3b265620a22ab1 | 4,292 |
import re
def find_classes(text):
"""
find line that contains a top-level open brace
then look for class { in that line
"""
nest_level = 0
brace_re = re.compile(r"[\{\}]")
classname_re = r"[\w\<\>\:]+"
class_re = re.compile(
r"(?:class|struct)\s*(\w+)\s*(?:\:\s*public\s*"
+ classname_re + r"(?:,\s*public\s*" + classname_re + r")*)?\s*\{")
classes = []
lines = text.split("\n")
for (i,line) in enumerate(lines):
if True:#nest_level == 0 and (i==0 or "template" not in lines[i-1]):
classes.extend(class_re.findall(line))
braces = brace_re.findall(line)
for brace in braces:
if brace == "{": nest_level += 1
elif brace == "}": nest_level -= 1
return classes | 126bc091a809e152c3d447ffdd103c764bc6c9ac | 4,293 |
def get_input(request) -> str:
"""Get the input song from the request form."""
return request.form.get('input') | de237dc0ad3ce2fa6312dc6ba0ea9fe1c2bdbeb3 | 4,294 |
from typing import Hashable
import math
def _unit_circle_positions(item_counts: dict[Hashable, tuple[int, int]], radius=0.45, center_x=0.5,
center_y=0.5) -> dict[Hashable, tuple[float, float]]:
"""
computes equally spaced points on a circle based on the radius and center positions
:param item_counts: item dict LinkedNetwork.get_item_link_count_dict()
:param radius: radius of the circle
:param center_x: x center position
:param center_y: y center position
:return: dict of items and their corresponding positions
"""
r = radius
cx, cy = center_x, center_y
a = math.radians(360) / len(item_counts)
points = {}
i = 0
for key, _ in item_counts.items():
points[key] = (math.cos(a * i) * r + cx, math.sin(a * i) * r + cy)
i += 1
return points | 66f60f5b90f7825f2abfdd2484375c9558786250 | 4,295 |
def replace_na(str_value: str, ch: str = "0") -> str:
"""replaces "0" with "na"; designed specifically for category lists and may not work for other inputs
Args:
str_value (str): category list
ch (str, optional): Replacement char. Defaults to "0".
Returns:
str: cleaned category name
"""
if str_value is not None:
len_str = len(str_value)
if len_str > 0:
if str_value == "0":
return "na"
all_indices = [i for i, ltr in enumerate(str_value) if ltr == ch]
if all_indices:
for i in all_indices:
if i == 0 and str_value[1].isalpha():
str_value = "na"+str_value[1:]
elif i == (len_str - 1) and (str_value[len_str-2].isalpha() or str_value[len_str-2] != "."):
str_value = str_value[:len_str] + "na"
elif str_value[len_str-2] != ".":
str_value = str_value[:i] + "na" + str_value[(i+1):]
return str_value | d8e6dfe6806c7a008163ba92c62e7b2b18633538 | 4,296 |
def d1_to_q1(A, b, mapper, cnt, M):
"""
Constraints for d1 to q1
"""
for key in mapper['ck'].keys():
for i in range(M):
for j in range(i, M):
# hermetian constraints
if i != j:
A[cnt, mapper['ck'][key](i, j)] += 0.5
A[cnt, mapper['ck'][key](j, i)] += 0.5
A[cnt, mapper['kc'][key](j, i)] += 0.5
A[cnt, mapper['kc'][key](i, j)] += 0.5
b[cnt, 0] = 0.0
else:
A[cnt, mapper['ck'][key](i, j)] += 1.0
A[cnt, mapper['kc'][key](j, i)] += 1.0
b[cnt, 0] = 1.0
cnt += 1
return A, b, cnt | 1ee9ec17f4464ef280aa22780d6034309941954e | 4,297 |
import os
def relative_of(base_path: str, relative_path: str) -> str:
"""Given a base file and path relative to it, get full path of it"""
return os.path.normpath(os.path.join(os.path.dirname(base_path), relative_path)) | b35e580ff2afc4cf196f6e53eedd5f4383579c6e | 4,299 |
def get_gs_distortion(dict_energies: dict):
"""Calculates energy difference between Unperturbed structure and most favourable distortion.
Returns energy drop of the ground-state relative to Unperturbed (in eV) and the BDM distortion that lead to ground-state.
Args:
dict_energies (dict):
Dictionary matching distortion to final energy, as produced by organize_data()
Returns:
(energy_difference, BDM_ground_state_distortion)
"""
if len(dict_energies['distortions']) == 1:
energy_diff = dict_energies['distortions']['rattled'] - dict_energies['Unperturbed']
if energy_diff < 0 :
gs_distortion = 'rattled' #just rattle (no BDM)
else:
gs_distortion = "Unperturbed"
else:
lowest_E_RBDM = min(dict_energies['distortions'].values()) #lowest E obtained with RBDM
energy_diff = lowest_E_RBDM - dict_energies['Unperturbed']
if lowest_E_RBDM < dict_energies['Unperturbed'] : #if energy lower that with Unperturbed
gs_distortion = list(dict_energies['distortions'].keys())[list(dict_energies['distortions'].values()).index( lowest_E_RBDM )] #BDM distortion that lead to ground-state
else:
gs_distortion = "Unperturbed"
return energy_diff, gs_distortion | 2f23103ccac8e801cb6c2c4aff1fb4fc08341e78 | 4,300 |
import sys
def get_pcap_bytes(pcap_file):
"""Get the raw bytes of a pcap file or stdin."""
if pcap_file == "-":
pcap_bytes = sys.stdin.buffer.read()
else:
with open(pcap_file, "rb") as f:
pcap_bytes = f.read()
return pcap_bytes | 51abbefeb918016edef6f8f40c7c40cb973e2fc0 | 4,305 |
import subprocess
def run(s, output_cmd=True, stdout=False):
"""Runs a subprocess."""
if output_cmd:
print(f"Running: {s}")
p_out = subprocess.run(
s, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, check=False
)
if stdout:
return p_out.stdout.decode("utf-8").strip()
else:
return p_out | efcfe30a536789a69662642d2b2da1afc04ebe57 | 4,306 |
from types import ModuleType
import sys
def _create_module(module_name):
"""ex. mod = _create_module('tenjin.util')"""
mod = ModuleType(module_name.split('.')[-1])
sys.modules[module_name] = mod
return mod | bfc1092bce61f7716a42ceeccc0604ced3696cdd | 4,307 |
def sortKSUID(ksuidList):
"""
sorts a list of ksuids by their timestamp (oldest first)
"""
return sorted(ksuidList, key=lambda x: x.getTimestamp(), reverse=False) | 0476bc0ef19f8730488041ac33598ba7471f96e7 | 4,308 |
from typing import Counter
def get_vocabulary(list_):
"""
Computes the vocabulary for the provided list of sentences
:param list_: a list of tokenized sentences (each an iterable of words)
:return: a dictionary with key, val = word, count and a sorted list, by count, of all the words
"""
all_the_words = []
for text in list_:
for word in text:
all_the_words.append(word)
vocabulary_counter = Counter(all_the_words)
vocabulary_sorted = list(map(lambda x: x[0], sorted(vocabulary_counter.items(), key=lambda x: -x[1])))
return vocabulary_sorted, vocabulary_counter | d6c357a5768c2c784c7dfe97743d34795b2695c0 | 4,310 |
import math
def split(value, precision=1):
"""
Split `value` into value and "exponent-of-10", where "exponent-of-10" is a
multiple of 3. This corresponds to SI prefixes.
Returns tuple, where the second value is the "exponent-of-10" and the first
value is `value` divided by the "exponent-of-10".
Args
----
value : int, float
Input value.
precision : int
Number of digits after decimal place to include.
Returns
-------
tuple
The second value is the "exponent-of-10" and the first value is `value`
divided by the "exponent-of-10".
Examples
--------
.. code-block:: python
si_prefix.split(0.04781) -> (47.8, -3)
si_prefix.split(4781.123) -> (4.8, 3)
See :func:`si_format` for more examples.
"""
negative = False
digits = precision + 1
if value < 0.:
value = -value
negative = True
elif value == 0.:
return 0., 0
expof10 = int(math.log10(value))
if expof10 > 0:
expof10 = (expof10 // 3) * 3
else:
expof10 = (-expof10 + 3) // 3 * (-3)
value *= 10 ** (-expof10)
if value >= 1000.:
value /= 1000.0
expof10 += 3
elif value >= 100.0:
digits -= 2
elif value >= 10.0:
digits -= 1
if negative:
value *= -1
return value, int(expof10) | 776ded073807773b755dcd7ab20c47d1f33ca1e1 | 4,312 |
def transitive_closure(graph):
"""
Compute the transitive closure of the graph
:param graph: a graph (list of directed pairs)
:return: the transitive closure of the graph
"""
closure = set(graph)
while True:
new_relations = set((x, w) for x, y in closure for q, w in closure if q == y)
closure_until_now = closure | new_relations
if closure_until_now == closure:
break
closure = closure_until_now
closure_no_doubles = [(x, y) for (x, y) in closure if not x == y]
return closure_no_doubles | 3bb6567033cf920ccced7565e75f8f789c55c37d | 4,314 |
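A small usage sketch, assuming the transitive_closure definition above: the pair (1, 3) is implied by (1, 2) and (2, 3); the result order is unspecified, so it is sorted before comparing.
graph = [(1, 2), (2, 3)]
assert sorted(transitive_closure(graph)) == [(1, 2), (1, 3), (2, 3)]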
def get_logo_color():
"""Return color of logo used in application main menu.
RGB format (0-255, 0-255, 0-255). Orange applied.
"""
return (255, 128, 0) | a6eee63d816a44af31893830ac641d6c0b1b9ba1 | 4,315 |
from numpy import sqrt
def vel_gradient(**kwargs):
"""
Calculates velocity gradient across surface object in supersonic
flow (from stagnation point) based upon either of two input variable
sets.
First method:
vel_gradient(R_n = Object radius (or equivalent radius, for
shapes that are not axisymmetric),
p_0 = flow stagnation pressure,
p_inf = flow freestream static pressure
rho = flow density)
Second method:
vel_gradient(R_n = Object radius (or equivalent radius, for
shapes that are not axisymmetric),
delta = Shock stand-off distance (from object
stagnation point),
U_s = Flow velocity immediately behind shock)
"""
if ('R_n' in kwargs) and ('p_0' in kwargs) and ('p_inf' in kwargs) and \
('rho' in kwargs):
vel_gradient = (1 / kwargs['R_n']) * sqrt((2 * (kwargs['p_0'] - \
kwargs['p_inf'])) / kwargs['rho'])
elif ('R_n' in kwargs) and ('U_s' in kwargs) and ('delta' in kwargs):
b = kwargs['delta'] + kwargs['R_n']
vel_gradient = (kwargs['U_s'] / kwargs['R_n']) * (1 + ((2 + ((b**3) / \
(kwargs['R_n']**3))) / (2 * (((b**3) / (kwargs['R_n']**3)) - 1))))
else:
raise KeyError('Incorrect variable assignment')
return vel_gradient | 8ee3ef490c113551e9200743e52378a8206a3666 | 4,316 |
def compute_agg_tiv(tiv_df, agg_key, bi_tiv_col, loc_num):
""" compute the agg tiv depending on the agg_key"""
agg_tiv_df = (tiv_df.drop_duplicates(agg_key + [loc_num], keep='first')[list(set(agg_key + ['tiv', 'tiv_sum', bi_tiv_col]))]
.groupby(agg_key, observed=True).sum().reset_index())
if 'is_bi_coverage' in agg_key:
# we need to separate bi coverage from the other tiv
agg_tiv_df.loc[agg_tiv_df['is_bi_coverage']==False, 'agg_tiv'] = agg_tiv_df['tiv_sum'] - agg_tiv_df[bi_tiv_col]
agg_tiv_df.loc[agg_tiv_df['is_bi_coverage']==True, 'agg_tiv'] = agg_tiv_df[bi_tiv_col]
else:
agg_tiv_df['agg_tiv'] = agg_tiv_df['tiv_sum']
return agg_tiv_df[agg_key + ['agg_tiv']] | 246ea2d61230f3e3bfe365fdf8fdbedbda98f25b | 4,317 |
def replacelast(string, old, new, count = 1):
"""Replace the last occurances of a string"""
return new.join(string.rsplit(old,count)) | 6af2cd56cc43e92b0d398e8aad4e25f0c6c34ddd | 4,320 |
def load_secret(name, default=None):
"""Check for and load a secret value mounted by Docker in /run/secrets."""
try:
with open(f"/run/secrets/{name}") as f:
return f.read().strip()
except Exception:
return default | 1aac980ad6bc039964ef9290827eb5c6d1b1455f | 4,321 |
def len_adecuada(palabra, desde, hasta):
"""
(str, int, int) -> str
Validates whether the word's length is within the desired range
>>> len_adecuada('hola', 0, 100)
'La longitud de hola, está entre 0 y 100'
>>> len_adecuada('hola', 1, 2)
'La longitud de hola, no está entre 1 y 2'
:param palabra:
:param desde:
:param hasta:
:return:
"""
return 'La longitud de {0}, {1}está entre {2} y {3}'\
.format(palabra, "" if desde <= len(palabra) <= hasta else "no ", desde, hasta) | df217a0159cd04c76f5eb12ca42e651ee62fcd99 | 4,322 |
import re
def convert_as_number(symbol: str) -> float:
"""
handle cases:
' ' or '' -> 0
'10.95%' -> 10.95
'$404,691,250' -> 404691250
'$8105.52' -> 8105.52
:param symbol: string
:return: float
"""
result = symbol.strip()
if len(result) == 0:
return 0
result = re.sub('[%$, *]', '', result)
return float(result) | cea1d6e894fa380ecf6968d5cb0ef1ce21b73fac | 4,323 |
def smiles_dict():
"""Store SMILES for compounds used in test cases here."""
smiles = {
"ATP": "Nc1ncnc2c1ncn2[C@@H]1O[C@H](COP(=O)(O)OP(=O)(O)OP(=O)(O)O)[C"
+ "@@H](O)[C@H]1O",
"ADP": "Nc1ncnc2c1ncn2[C@@H]1O[C@H](COP(=O)(O)OP(=O)(O)O)[C@@H](O)[C" + "@H]1O",
"meh": "CCC(=O)C(=O)O",
"l_ala": "C[C@H](N)C(=O)O",
"d_ala": "C[C@@H](N)C(=O)O",
"FADH": "Cc1cc2c(cc1C)N(CC(O)C(O)C(O)COP(=O)(O)OP(=O)(O)OCC1OC(n3cnc"
+ "4c(N)ncnc43)C(O)C1O)c1nc(O)nc(O)c1N2",
"S-Adenosylmethionine": "C[S+](CC[C@H](N)C(=O)O)C[C@H]1O[C@@H](n2cnc"
+ "3c(N)ncnc32)[C@H](O)[C@@H]1O",
}
return smiles | 080373bdfb250f57e20e0e2b89702ac07c430f69 | 4,324 |
import operator
def most_recent_assembly(assembly_list):
"""Based on assembly summaries, find the one submitted most recently."""
if assembly_list:
return sorted(assembly_list, key=operator.itemgetter('submissiondate'))[-1] | 1d7ecf3a1fa862e421295dda0ba3d89863f33b0f | 4,327 |
def _get_individual_id(individual) -> str:
"""
Returns a unique identifier as string for the given individual.
:param individual: The individual to get the ID for.
:return: A string representing the ID.
"""
if hasattr(individual, "identifier") and (isinstance(individual.identifier, list) and
len(individual.identifier) > 0 and
type(individual.identifier[0]) in [int, str]) or (
type(individual.identifier) in [int, str]):
return str(individual.identifier[0])
else:
return str(individual) | e606d5eef7bfbcd0d76113c20f450be3c1e6b2ab | 4,329 |
def ConvertToMeaningfulConstant(pset):
""" Gets the flux constant, and quotes it above some energy minimum Emin """
# Units: IF TOBS were in yr, it would be smaller, and raw const greater.
# also converts per Mpcs into per Gpc3
units=1e9*365.25
const = (10**pset[7])*units # to cubic Gpc and days to year
Eref=1e40 #erg per Hz
Emin=10**pset[0]
Emax=10**pset[1]
gamma=pset[3]
factor=(Eref/Emin)**gamma - (Emax/Emin)**gamma
const *= factor
return const | e393f66e72c3a43e91e9975f270ac7dcf577ad3e | 4,330 |
def hw_uint(value):
"""Return the Hamming weight (number of set bits) of a 16-bit unsigned integer."""
bitcount = bin(value).count("1")
return bitcount | 9a9c6017d3d6da34c4e9132a0c89b267aa263ace | 4,331 |
def evenly_divides(x, y):
"""Returns if [x] evenly divides [y]."""
return int(y / x) == y / x | dbf8236454e88805e71aabf58d9b7ebd2b2a6393 | 4,333 |
import collections
def order_items(records):
"""Orders records by ASC SHA256"""
return collections.OrderedDict(sorted(records.items(), key=lambda t: t[0])) | a9117282974fcea8d0d99821ea6293df82889b30 | 4,334 |
def convert_group_by(response, field):
"""
Convert the stats under ``field`` into a list of {key, doc_count} dicts, sorted by doc_count (descending)
"""
if not response.hits.hits:
return []
r = response.hits.hits[0]._source.to_dict()
stats = r.get(field)
result = [{"key": key, "doc_count": count} for key, count in stats.items()]
result_sorted = sorted(
result, key=lambda i: i["doc_count"], reverse=True
) # sort by count
return result_sorted | 888321f300d88bd6f150a4bfda9420e920bab510 | 4,335 |
def compOverValueTwoSets(setA={1, 2, 3, 4}, setB={3, 4, 5, 6}):
"""
task 0.5.9
comprehension whose value is the intersection of setA and setB
without using the '&' operator
"""
return {x for x in (setA | setB) if x in setA and x in setB} | 2b222d6c171e0170ace64995dd64c352f03aa99b | 4,336 |
def longestCommonPrefix(strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs) > 0:
common = strs[0]
for str in strs[1:]:
while not str.startswith(common):
common = common[:-1]
return common
else:
return '' | a860d46df8dbaeaab90bb3bc69abb68484216b5b | 4,338 |
def insert(shape, axis=-1):
"""Shape -> shape with one axis inserted"""
return shape[:axis] + (1,) + shape[axis:] | 8c786df81b76cfa5dae78b51d16b2ee302263c53 | 4,339 |
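Two quick examples, assuming the insert definition above: with the default axis=-1 the new axis is placed in front of the last one.
assert insert((2, 3)) == (2, 1, 3)
assert insert((2, 3, 4), axis=1) == (2, 1, 3, 4)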
def is_numpy_convertable(v):
"""
Return whether a value is meaningfully convertable to a numpy array
via 'numpy.array'
"""
return hasattr(v, "__array__") or hasattr(v, "__array_interface__") | 163da2cf50e2172e1fc39ae8afd7c4417b02a852 | 4,341 |
from datetime import datetime
def get_fake_datetime(now: datetime):
"""Generate monkey patch class for `datetime.datetime`, whose now() and utcnow() always returns given value."""
class FakeDatetime:
"""Fake datetime.datetime class."""
@classmethod
def now(cls):
"""Return given value."""
return now
@classmethod
def utcnow(cls):
"""Return given value."""
return now
return FakeDatetime | f268640c6459f4eb88fd9fbe72acf8c9d806d3bc | 4,342 |
import os
def get_processing_info(data_path, actual_names, labels):
"""
Iterates over the downloaded data and checks which one is in our database
Returns:
files_to_process: List of file paths to videos
labs_to_process: list of same length with corresponding labels
"""
files_to_process = []
labs_to_process = []
for img_type in os.listdir(data_path):
if img_type[0] == ".":
continue
# img_type is B-lines, cardiac etc
for vid in os.listdir(os.path.join(data_path, img_type)):
# print(vid)
if vid in actual_names:
full_path = os.path.join(data_path, img_type, vid)
files_to_process.append(full_path)
ind = actual_names.index(vid)
labs_to_process.append(labels[ind])
return files_to_process, labs_to_process | 0e2ed514159bd230d9315d1b668ce8a59d36b545 | 4,343 |
def compress_pub_key(pub_key: bytes) -> bytes:
"""Convert uncompressed to compressed public key."""
if pub_key[-1] & 1:
return b"\x03" + pub_key[1:33]
return b"\x02" + pub_key[1:33] | 05824112c6e28c36171c956910810fc1d133c865 | 4,346 |
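A sketch with a synthetic 65-byte uncompressed key (fabricated bytes, not a real curve point), assuming the compress_pub_key definition above: the prefix depends on the parity of the last byte of the y coordinate.
pub = b"\x04" + bytes(range(1, 33)) + bytes(32)  # x = 1..32, y ends in an even byte
assert compress_pub_key(pub) == b"\x02" + bytes(range(1, 33))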
def _(text):
"""Normalize white space."""
return ' '.join(text.strip().split()) | f99f02a2fe84d3b214164e881d7891d4bfa0571d | 4,347 |
import os
def get_tmp_directory_path():
"""Get the path to the tmp dir.
Creates the tmp dir if it doesn't already exists in this file's dir.
:return: str -- abs path to the tmp dir
"""
tmp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'tmp')
if not os.path.exists(tmp_directory):
os.mkdir(tmp_directory)
return tmp_directory | b480578e6ae7a1840e8bf4acce36a63253a33d80 | 4,348 |