content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
---|---|---|
def trace_down(all_bags, bag):
"""
    For the input bag "bag", recursively trace down through all of the bags
    it can hold and count them, including bags nested at deeper levels.
"""
n = 0 # number of children seen at this level
if len(bag.can_hold) == 0:
return n
for bag_type, n_can_hold in bag.can_hold.items():
child_bag = all_bags[bag_type]
for i in range(n_can_hold):
n += 1 # count once for the child at this level
n += trace_down(
all_bags, child_bag
) # counts for all children at lower levels
return n | 0eec32f12536d88da6232eb925b95d9bc8fb1a8a | 26,437 |
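A minimal usage sketch for trace_down (the Bag class below is a hypothetical stand-in; only a can_hold dict mapping bag type to count is assumed):

class Bag:
    def __init__(self, can_hold):
        self.can_hold = can_hold  # {bag_type: count}

all_bags = {
    "shiny gold": Bag({"dark red": 2}),
    "dark red": Bag({"dark blue": 2}),
    "dark blue": Bag({}),
}
# 2 dark red + 2 * 2 dark blue = 6 bags in total
assert trace_down(all_bags, all_bags["shiny gold"]) == 6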
import requests
import hashlib
from datetime import datetime
def get_feed_hash(url):
""" Returns hash of a feed and timestamp """
fetched = requests.get(url)
    m = hashlib.sha256()
m.update(fetched.text.encode())
text_hash = m.hexdigest()
ts = int(datetime.utcnow().timestamp())
return text_hash, ts | b813ffaaed3bb80d82b87633555b18f823b90ab5 | 26,439 |
from pathlib import Path
def security_result(name):
"""Load a security result file from the sample directory, and return the content"""
filename = Path(__file__).parent / 'security' / '{name}.out'.format(name=name)
with filename.open() as f:
return f.read() | 1c804fd0a711376135013a21031e26578317d728 | 26,445 |
def get_string_commas_num( num ):
"""
This is the secret-sauce of formatting integers as strings with commas for every 3 digits. For example, ``1234`` becomes "1,234". I copied code from `this location`_.
:param int num: input number.
:returns: the nicely formatted output :py:class:`string <str>` representing an input number.
    :rtype: str
.. _`this location`: https://intellipaat.com/community/2447/how-to-print-number-with-commas-as-thousands-separators
"""
return "%s" % f"{num:,d}" | 4f49f4bb755ff012b3ca9bbe039919348a52285d | 26,446 |
def modify_range(val):
"""
Modify value from range 0,1 -> -1,1 and preserve ratio
:param val:
    :return: value in range -1,1
"""
return (val * 2) - 1 | d3604c6ed682483e1e7276b31aece4eda325ed19 | 26,456 |
def sanitize_price(price_str):
"""
Normalise a price string and convert to float value.
:param price_str: Price string to normalise.
:return: Float price value.
"""
if price_str is None:
return None
price_str = price_str.strip('$ \t\n\r')
price_str = price_str.replace(',', '')
return float(price_str) | 84af7c25c19bcbef690cc19554f01e6e284446f2 | 26,457 |
def _get_number_of_column_label(label):
"""
This function returns a number which corresponds to the label.
Example : 'A' -> 1 , 'Z' -> 26 , 'AA' -> 27 , 'BA' -> 53
Args :
label : Type-str
Denotes the label given to the column by sheets
Returns :
num : Type-int
Denotes the numbering of columns(1-indexed)
"""
num = 0
power_of_26 = 1
for i in range(len(label)-1,-1,-1):
value = ord(label[i]) - ord('A') + 1
num += power_of_26*value
power_of_26 = 26*power_of_26
return num | 578dd51f9ac397b079a8b8490837aeedf1000285 | 26,460 |
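A quick sanity check of the bijective base-26 conversion, using the examples from the docstring:

assert _get_number_of_column_label('A') == 1
assert _get_number_of_column_label('Z') == 26
assert _get_number_of_column_label('AA') == 27
assert _get_number_of_column_label('BA') == 53  # 2 * 26 + 1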
def get_recommendations_user_filtred_reduce(data):
"""
    Reduce step for the get_recommendations_user_filtred function.
"""
item, scores = data
ssim = 0
ssim_x_score = 0
for sim, sim_x_score in scores:
ssim += sim
ssim_x_score += sim_x_score
return (item, ssim, ssim_x_score) | c3abf750b2e73389cd1d146396a791eaa9e666d2 | 26,463 |
def remote_branch(stdin_first_line):
"""
Reads the name of the remote git branch from runtime parameters.
In the pre-push.py hook the name of the remote branch is passed as the $1 parameter.
:param stdin_first_line the first line of the standard input
>>> remote_branch("refs/heads/master a9d45baccd631601087a75a6605909c16bbfdbca refs/heads/master 67b6dc7a5e256ae590d305a766e627258b164899")
'master'
>>> remote_branch("refs/heads/master a9d45baccd631601087a75a6605909c16bbfdbca refs/heads/hot-fix 67b6dc7a5e256ae590d305a766e627258b164899")
'hot-fix'
"""
stdin_parts = stdin_first_line.split(" ")
remote = stdin_parts[2]
remote_parts = remote.split("/")
return remote_parts[-1] | c227bea23b19c0c3a9196003e9caf5e13d031730 | 26,467 |
def parse_prefixes(root):
"""Returns a dictionary of unit of measure prefixes."""
prefixes = {}
for node in root.find("{*}prefixSet"):
name = node.find('{*}name').text
prefixes[name] = dict(
symbol = node.find('{*}symbol').text,
multiplier = node.find('{*}multiplier').text,
)
common_name = node.find('{*}commonName')
if common_name is not None:
prefixes[name]['common_name'] = common_name.text
return prefixes | ee62160bf43f7096c6ee37f20bea78afad7522c7 | 26,468 |
import requests
import json
def get_role_description_from_galaxy_response(response: requests.Response) -> str:
"""
Extract description for a role from galaxy response
Args:
response (requests.Response): response from ansible galaxy (json body)
Returns:
str: description
"""
# Parse json response
try:
data = json.loads(response.text)
    except ValueError:  # malformed JSON body
data = {}
# description is in .data.repository.description
return data.get("data", {}).get("repository", {}).get("description", None) | a092da43d6f003b88f33e705da7f3e88341f08b7 | 26,471 |
def camera_name_from_calibrator(calibrator):
"""
Returns the name of the camera associated with the given camera calibrator.
"""
return calibrator.split('/')[2] | 8f35c4987f02f5eb62101ac2c9af2edcb8a4746a | 26,472 |
def urldecode(url):
"""Decode %7B/%7D to {}."""
return url.replace('%7B', '{').replace('%7D', '}') | 91e2f969e59bc68004e1696434b5b0329012342f | 26,473 |
def gearinches(chainring, cassette_range, wheel_diameter_inch=26.5):
"""Convert gear ratios into gearinches"""
return chainring / cassette_range * wheel_diameter_inch | 2f5e63713e17632a7f5f90607b1d2087e433b657 | 26,476 |
def parse_gtf_info_field(info_str):
""" Parse gtf info string into a dictionary
Args:
info_str (str): info field from gtf
Return:
{key, value for field in info}
"""
d = {}
for pair in info_str.split('; '):
key, value = pair.split(' ')
d[key] = value.strip('"')
return d | bfb94655afabea3674105884d4d9b4da7eb861f3 | 26,478 |
def average_position(pos1=(0.0, 0.0, 0.0), pos2=(0.0, 0.0, 0.0), weight=0.5):
"""
    Returns the weighted average of the two given positions. A weight of 0 returns the first input and 1 returns the second input.
:param pos1: tuple, first input position
:param pos2: tuple, second input position
:param weight: float, amount to weight between the two input positions
:return: tuple
"""
return (
pos1[0] + ((pos2[0] - pos1[0]) * weight),
pos1[1] + ((pos2[1] - pos1[1]) * weight),
pos1[2] + ((pos2[2] - pos1[2]) * weight)
) | 9d2fe664267fd89ad89013fa98c6118760fab0d3 | 26,483 |
def encode_complex(data, complex_names):
""" Encodes complex data to having arbitrary complex field names.
Encodes complex `data` to have the real and imaginary field names
    given in `complex_names`. This is needed because the field names
have to be set so that it can be written to an HDF5 file with the
right field names (HDF5 doesn't have a native complex type, so
H5T_COMPOUND have to be used).
Parameters
----------
data : arraylike
The data to encode as a complex type with the desired real and
imaginary part field names.
complex_names : tuple of 2 str
``tuple`` of the names to use (in order) for the real and
imaginary fields.
Returns
-------
d : encoded data
`data` encoded into having the specified field names for the
real and imaginary parts.
See Also
--------
decode_complex
"""
# Grab the dtype name, and convert it to the right non-complex type
# if it isn't already one.
dtype_name = data.dtype.name
if dtype_name[0:7] == 'complex':
dtype_name = 'float' + str(int(float(dtype_name[7:])/2))
# Create the new version of the data with the right field names for
# the real and complex parts. This is easy to do with putting the
# right dtype in the view function.
return data.view([(complex_names[0], dtype_name),
(complex_names[1], dtype_name)]) | ed32626096d799fb0b5c678b01df4dc7e0645239 | 26,484 |
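A small demonstration of the dtype-view trick on a NumPy complex array (the field names 'r' and 'i' are arbitrary choices for this sketch):

import numpy as np

arr = np.array([1 + 2j, 3 - 4j])       # complex128
enc = encode_complex(arr, ('r', 'i'))  # viewed as two float64 fields
print(enc['r'])  # [1. 3.]
print(enc['i'])  # [ 2. -4.]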
def extract_simplices(simplex_tree):
"""Extract simplices from a gudhi simplex tree.
Parameters
----------
simplex_tree: gudhi simplex tree
Returns
-------
simplices: List of dictionaries, one per dimension d. The size of the dictionary
is the number of d-simplices. The dictionary's keys are sets (of size d
+ 1) of the 0-simplices that constitute the d-simplices. The
dictionary's values are the indexes of the simplices in the boundary
and Laplacian matrices.
"""
simplices = [dict() for _ in range(simplex_tree.dimension()+1)]
for simplex, _ in simplex_tree.get_skeleton(simplex_tree.dimension()):
k = len(simplex)
simplices[k-1][frozenset(simplex)] = len(simplices[k-1])
return simplices | 70c2dc9bf660c217449ecaeee24f15b5bfadacf7 | 26,485 |
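A usage sketch, assuming the standard gudhi SimplexTree API (insert/get_skeleton):

import gudhi

st = gudhi.SimplexTree()
st.insert([0, 1, 2])  # a filled triangle: three 0-simplices, three 1-simplices, one 2-simplex
simplices = extract_simplices(st)
print([len(d) for d in simplices])  # [3, 3, 1]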
def make_year_labels(years, yearly_data, format='%d (%s)'):
"""
Combine years with corresponding yearly data and return list of labels.
>>> make_year_labels([2005, 2006], [18, 29])
['2005 (18)', '2006 (29)']
>>> make_year_labels([2006, 2007], ['good', 'bad'], '%d was %s')
['2006 was good', '2007 was bad']
"""
return [format % (y, c) for (y, c) in zip(years, yearly_data)] | f543250b066765e211a5057ff21ed12d082b2f62 | 26,486 |
def find_rank_type(ranks):
"""Find and return the rank type of the 3 ranks given
Rank type results:
1: no particularly interesting rank order, i.e. High Card
2: pair rank
4: straight
5: three of a kind
"""
ranks.sort()
if ranks[0] == ranks[1] == ranks[2]:
return 5
elif ranks[1] == ranks[2] or ranks[0] == ranks[1] or ranks[0] == ranks[2]:
return 2
elif (ranks[0] + 1) == ranks[1] and (ranks[1] + 1) == ranks[2]:
return 4
elif 14 in ranks and 2 in ranks and 3 in ranks:
return 4
else:
return 1 | 656c93cfeebdebeba8341d87530c87f1155933bb | 26,492 |
def get_host_finding_details_hr(host_finding_detail):
"""
Prepare human readable json for "risksense-get-host-finding-detail" command.
Including basic details of host finding.
:param host_finding_detail: host finding details from response
:return: List of dict
"""
return [{
'Title': host_finding_detail.get('title', ''),
'Host Name': host_finding_detail.get('host', {}).get('hostName', ''),
'Ip Address': host_finding_detail.get('host', {}).get('ipAddress', ''),
'Source': host_finding_detail.get('source', ''),
'Network': host_finding_detail.get('network', {}).get('name', ''),
'Risk Rating': host_finding_detail.get('riskRating', '')
}, {}] | 29c70762ff844d11e140fb9a9489a6706ad8c99b | 26,495 |
def clean_layer_name(input_name: str,
strip_right_of_last_backslash: bool=True,
strip_numerics_after_underscores: bool=True):
"""
There exist cases when layer names need to be concatenated in order to create new, unique
layer names. However, the indices added to layer names designating the ith output of calling
the layer cannot occur within a layer name apart from at the end, so this utility function
removes these.
Parameters
----------
input_name: str, required
A Keras layer name.
strip_right_of_last_backslash: bool, optional, (default = True)
        Should we strip anything past the last forward slash ('/') in the name?
        This can be useful for controlling scopes.
strip_numerics_after_underscores: bool, optional, (default = True)
If there are numerical values after an underscore at the end of the layer name,
this flag specifies whether or not to remove them.
"""
# Always strip anything after :, as these will be numerical
# counts of the number of times the layer has been called,
# which cannot be included in a layer name.
if ':' in input_name:
input_name = input_name.split(':')[0]
if '/' in input_name and strip_right_of_last_backslash:
input_name = input_name.rsplit('/', 1)[0]
if input_name.split('_')[-1].isdigit() and strip_numerics_after_underscores:
input_name = '_'.join(input_name.split('_')[:-1])
return input_name | 69c3bb315c770b58ecd6f3afd0dae95a53b2a59f | 26,498 |
def parse(argv):
"""Parse optional list of keyword arguments into a dict.
Parses a list of keyword arguments defined by a leading ``--`` and separated by ``=`` (for example, --key=value).
Args:
argv (listof str):
Keyword dict to use as an update.
Examples::
# Import the kwconfig module
import kwconfig
# Create a sample list of keyword arguments
argv = ['--key1=value1', '--key2=value2']
# Parse into a keyword dict
kwdict = kwconfig.parse(argv)
# View the values of key1 and key2
print('key1: ' + kwdict['key1'])
print('key2: ' + kwdict['key2'])
"""
kwdict = {}
for kv in argv:
k, v = kv.split('=', 1)
kwdict[k.strip('-')] = v
    return kwdict | a82162666245b4d95f6cc439437d13a596f381ff | 26,499 |
def cut_neighbor_sequences(seq_s, flanking_i):
"""
cut the flanking sequences
:param seq_s: string, seq
:param flanking_i: size of flanking seq
:return: strings, cut (start), cut (the rest), cut (last)
"""
assert type(seq_s) is str
return seq_s[0:flanking_i], seq_s[flanking_i:-flanking_i], seq_s[-flanking_i:] | 65f9b1fb45c46e0d968533ff0e81099e0526f571 | 26,510 |
import math
def distance_l2(loc_1, loc_2):
"""
:param loc_1: a tuple (x_1, y_1)
:param loc_2: a tuple (x_2, y_2)
:return: L2 distance between loc_1 and loc_2
"""
x_difference = loc_1[0] - loc_2[0]
y_difference = loc_1[1] - loc_2[1]
return math.sqrt(x_difference*x_difference + y_difference*y_difference) | c4600f3862d491dd718fc69d0d7901f9c091b990 | 26,511 |
import hashlib
def calc_md5(content: bytes) -> str:
"""
Calc the md5 checksum for the specified bytes.
"""
return hashlib.md5(content).hexdigest() | 7cfa752840502ab07ac1d321ac504fa23584e6fd | 26,512 |
def replace(line, a, b):
"""
if line starts with string a, then
replace string a with string b in line
"""
mline = line
if line.startswith(a):
mline = line.replace(a,b)
return(mline) | 2228404e10b21b9095257e347bdd1b289d1707c2 | 26,514 |
def _has_symbol(symbol, name):
"""
Check if has provided symbol in name.
Recognizes either the _SYMBOL pattern at end of string, or _SYMBOL_ in
middle.
"""
return name.endswith('_' + symbol) or ('_' + symbol + '_') in name | cfd2fb8ba0751f7abc939ac6c84fbe8b1aa3925f | 26,521 |
import requests
def joke(context) -> str:
"""Tell a random joke"""
resp = requests.get(
"https://icanhazdadjoke.com/", headers={"Accept": "application/json"}
)
return resp.json()["joke"] | 83ec9089a7853ef95832de0408888bf458f36852 | 26,523 |
def has_duplicates(l):
"""
Returns whether a given list contains duplicate elements.
"""
seen = set()
for x in l:
if x in seen:
return True
seen.add(x)
return False | 572b64dd885cb3726176a708b656409b0f484a5e | 26,533 |
def index_structure(structure, path):
"""Follows :obj:`path` in a nested structure of objects, lists, and dicts."""
for key in path.split("/"):
if isinstance(structure, list):
try:
index = int(key)
structure = structure[index] if index < len(structure) else None
except ValueError:
raise ValueError("Expected a list index, got %s instead" % key)
elif isinstance(structure, dict):
structure = structure.get(key)
else:
structure = getattr(structure, key, None)
if structure is None:
raise ValueError("Invalid path in structure: %s" % path)
return structure | 33ef0551b0c0a142b930c1593fac0d5870289a4d | 26,539 |
from typing import Dict
def generate_options_string(options: Dict[str, str]) -> str:
"""Build the options string from the options dict."""
options_list = [
'{key} {option}'.format(key=key, option=options[key])
for key in options
]
return ' '.join(options_list) | 577105dea1dc2ec4e0012fe5dbdf546d6eefc550 | 26,541 |
import socket
def get_ip(fqdn: str):
"""Get IP Address of fqdn."""
return socket.gethostbyname(fqdn) | f6a682112071915f098c8fdd682b6400fb3c74f7 | 26,543 |
def progress_heuristic(losses):
"""
The progress heuristic: how to determine that it's time to stop CG?
There are many possible ways to address this question, and the
progerss heuristic is a pretty decent way; we look at the sequence of
losses as defined by the quadratic model. That is, losses[i] =
ip(g,x_i) -.5*ip(x_i,Ax_i). If the progress heuristic notices that
losses stops getting better relative to the size of the reduction,
it'll return True, which means that CG should stop. Otherwise it
should return false. It is copied verbatim from the original HF paper.
"""
eps = 0.0005
i = len(losses)
k = int(max(10, 0.1*i))
if len(losses) < k+1:
return False
phi_x_i = losses[-1]
phi_x_imk = losses[-1-k]
if i>k and phi_x_i<0 and (phi_x_i-phi_x_imk)/phi_x_i < k*eps:
return True
else:
return False | bba245dd7e3229786d225e58a5bbd664181d1e4a | 26,545 |
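Illustrative behaviour on synthetic loss sequences (values made up for this sketch):

steadily_improving = [-(i + 1) for i in range(30)]  # keeps making progress
assert progress_heuristic(steadily_improving) is False

plateaued = [-1.0] * 29 + [-1.0001]  # negligible progress over the last k steps
assert progress_heuristic(plateaued) is True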
def 进制_二到十(二进制文本):
"""
    Convert a binary string to a decimal integer (returns an int).
"""
return int(二进制文本, base=2) | 3e2e6e55e05626599c62cab3788c00a6d3d0ae30 | 26,549 |
def read_file(filename):
"""
Fully reads a file into a UTF-8 string.
"""
    with open(filename, 'r', encoding='utf-8') as f:
return f.read() | 8c83f748682bb2c1857f927e7749f37175463c46 | 26,550 |
def format_fasta_filename(*args):
"""
Format a FASTA filename of the form "otu.isolate.sequence_id.fa".
:param args: the filename parts
:return: a compound FASTA filename
:rtype: str
"""
if len(args) > 3:
raise ValueError("Unexpected number of filename parts")
if len(args) == 0:
raise ValueError("At least one filename part required")
filename = ".".join(args).replace(" ", "_") + ".fa"
return filename.lower() | e0a61fa1bed49b3a1ea2e721443261903d2f5755 | 26,556 |
def is_file_wanted(f, extensions):
"""
extensions is an array of wanted file extensions
"""
is_any = any([f.lower().endswith(e) for e in extensions])
return is_any | c84250126c9700966248b969ded3121ae2c96764 | 26,558 |
def _parse_names(last_name_dict):
"""Helper function to unpack the data when grouped by last name letter
"""
big_list = []
    for people_with_last in last_name_dict.values():
for person in people_with_last:
big_list.append(person)
return big_list | 7157476a2128bd183a8fac2540c3e2d9f1812760 | 26,559 |
def hexdump(data):
"""Return a hexdump of the given data. Similar to what `hexdump -C` produces."""
def is_hexdump_printable(b):
return b in b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\|\'";:/?.,<>'
lines = []
chunks = (data[i*16:i*16+16] for i in range((len(data) + 15) // 16))
for i, chunk in enumerate(chunks):
hexblock = ['{:02x}'.format(b) for b in chunk]
left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])
asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' for b in chunk)
lines.append('{:08x} {:23} {:23} |{}|'.format(i*16, left, right, asciiblock))
return '\n'.join(lines) | cf72c8b5855d8f99364891fbc416f63dc406942a | 26,560 |
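Example output:

print(hexdump(bytes(range(32))))
# 00000000 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f |................|
# 00000010 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f |................|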
from datetime import datetime
def datetime_string(string, custom_format=None):
    """
    Takes a string and parses it into a datetime object with the dateutil module if present.
    If not it will fall back to a more rudimentary method of using strptime with a list of
    predefined formats with the option of passing in a custom format to try before the others.
    The first one to parse correctly gets returned.
    """
    try:
        # Import inside the function so a missing dateutil actually
        # triggers the ImportError fallback below.
        # noinspection PyUnresolvedReferences
        from dateutil.parser import parser
        return parser().parse(string)
    except ImportError:
        string = string.replace('/', '-')
        formats = [
            '%Y',
            '%Y-%m',
            '%Y-%m-%d',
            '%Y-%m-%d %H',
            '%Y-%m-%d %I %p',
            '%Y-%m-%d %H:%M',
            '%Y-%m-%d %I:%M %p',
            '%Y-%m-%d %H:%M:%S',
            '%Y-%m-%d %I:%M:%S %p',
        ]
        if custom_format:
            formats.insert(0, custom_format)
        for f in formats:
            try:
                return datetime.strptime(string, f)
            except ValueError:
                continue
        raise ValueError('The string did not match any configured format') | d0ac85bb76cef4ff8585fd67de61863438afacd5 | 26,561 |
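A quick usage check; the dateutil path and the strptime fallback agree here:

dt = datetime_string('2021/03/04 15:30')
print(dt)  # 2021-03-04 15:30:00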
from typing import Tuple
def get_namespace_and_name_from_role(role: str) -> Tuple[str, str]:
"""
Extract namespace and name for a role.
Args:
role (str): role in the form {{role_namespace}}.{{role_name}}
Returns:
Tuple[str, str]: namespace, name
"""
# role comes in the form {{role_namespace}}.{{role_name}}, so split by .
role_data = role.split(".")
role_namespace, role_name = role_data[0], role_data[1]
return role_namespace, role_name | c4682d8457b49c12fc7bf01279f8cd5583eea13c | 26,564 |
def initialize_graph(graph, graph_info):
"""Initializes a graph according to given graph_info"""
graph.uri = graph_info.uri
graph._name = graph_info.name
return graph | 204942f44c55f5066c3c18412b4b1b71c99e2186 | 26,566 |
def _process_axes_functions(axes, axes_functions):
"""Process axes functions of the form `axes.functions(*args, **kwargs)."""
if axes_functions is None:
return None
output = None
for (func, attr) in axes_functions.items():
axes_function = getattr(axes, func)
# Simple functions (argument directly given)
if not isinstance(attr, dict):
try:
out = axes_function(*attr)
except TypeError:
out = axes_function(attr)
# More complicated functions (args and kwargs given)
else:
args = attr.get('args', [])
kwargs = attr.get('kwargs', {})
# Process 'transform' kwargs
if 'transform' in kwargs:
kwargs['transform'] = getattr(axes, kwargs['transform'])
out = axes_function(*args, **kwargs)
# Return legend if possible
if func == 'legend':
output = out
return output | 7c8d5bfcdd5756a9c0e02fe7010748c961b7ecb5 | 26,568 |
import base64
def get_headers(username, password):
"""Formats the header to contain username and password variables """
login_string = "{}:{}".format(username, password)
# Python 2.7 and 3.6 support
base64_login = base64.b64encode(str.encode(login_string))
str_base64_login = base64_login.decode("utf-8")
return {
"Authorization": "Basic {}".format(str_base64_login),
"Content-Type": "application/x-www-form-urlencoded",
"Cache-Control": "no-cache"
} | 983c8f57c393b51bceae4d941cb620d6713b5650 | 26,571 |
def parse_request(request, listOfSelectedParameter):
"""
    -> Parse result of a request and return only the parameters
    present in listOfSelectedParameter.
    -> Request is a dict generated by a search() operation from
    the tinyDB package.
    -> listOfSelectedParameter is a list of selected parameters.
    -> return a list of dict; each dict is a vector with the parameters
    present in listOfSelectedParameter.
"""
structureToReturn = []
for patient in request:
vectorToReturn = {}
for parameter in listOfSelectedParameter:
vectorToReturn[parameter] = patient[parameter]
structureToReturn.append(vectorToReturn)
return structureToReturn | fd095c7ea7a964a70786e4ee8b827b5bc5764602 | 26,572 |
def get_rpi_hostname(self):
"""
Returns hostname from the Pi
:returns: String containing hostname of the Pi
"""
hostname = self.run_command_get_output('hostname').replace("\n", "")
return hostname | 5ea2bc35952974b65a5e74fb4f7bb4015c50f500 | 26,574 |
def get_valid_and_invalid_lists(in_list, checker):
"""
Take a list of strings and return two lists valid_pwds, invalid_pwds
based on checker.
signature of checker(password, left, right, in_char)
"""
valid_pwds = []
invalid_pwds = []
for line in in_list:
rule, password = line.split(":")
config, in_char = rule.split(" ")
left, right = config.split("-")
left = int(left)
right = int(right)
password = password.strip()
in_char = in_char.strip()
is_valid = checker(password, left, right, in_char)
if is_valid:
valid_pwds.append(password)
else:
invalid_pwds.append(password)
return valid_pwds, invalid_pwds | 6e500e7357957028d8c8161be1234c2f4b7fa02e | 26,578 |
import math
def count_digits(n):
"""
Count digits in integer
"""
if n > 0:
digits = int(math.log10(n)) + 1
elif n == 0:
digits = 1
else:
digits = int(math.log10(-n)) + 1
return digits | 03e5f041e096f2137153418f99349f3e8e844d41 | 26,589 |
def reorder_column_list(column_list_to_reorder, reference_column_list):
"""Keep the target list in same order as the training dataset, for consistency of forecasted columns order"""
reordered_list = []
for column_name in reference_column_list:
if column_name in column_list_to_reorder:
reordered_list.append(column_name)
return reordered_list | 018f05099d662d399e8a8f7bd8308fa4ff355c94 | 26,590 |
import pickle
def message_encode(msg):
"""
Encodes the message object.
This method compresses the message payload and then serializes the whole
message object into bytes, using pickle.
Args:
msg: the message to encode.
"""
msg.payload.encode()
return pickle.dumps(msg) | 5064c58c5681ca5da93f9caa87ff1a936002f5da | 26,592 |
async def override_record(record, existing_record, overrides):
"""
Removes fields from record if user has overriden them on airtable.
Args:
record (``dictionary``): Record from which fields will be removed if overwritten.
existing_record (``dictionary``): Record to check for overrides.
overrides (``list``): List of dictionaries
Each dictionary is composed of two items: 1. The override checkbox field name, 2. The override field name
{"ref_field": "field name", "override_field": "field name"}
Return:
record.
""" # noqa
for override in overrides:
ref_field = override.get("ref_field")
override_field = override.get("override_field")
if existing_record["fields"].get(ref_field):
record["fields"][override_field] = existing_record["fields"][
override_field
]
return record | 6a8a1d68985ed3adb044e3a1d5370d7fddf6c2e6 | 26,593 |
def differentiate_polynomial(coefficients):
"""
Calculates the derivative of a polynomial and returns
the corresponding coefficients.
"""
new_coeffs = []
for deg, prev_coef in enumerate(coefficients[1:]):
new_coeffs.append((deg + 1) * prev_coef)
return new_coeffs | dcc422e9acae53a8162f45ff78ceb7084dedf6f0 | 26,602 |
async def hello_world():
"""Hello world endpoint for testing if FastAPI works properly"""
return {"message": "Hello World, E!"} | 2ed1e49952064209a0b5b5cf7184d82bbe44b1b2 | 26,609 |
def get_extent(geotransform, cols, rows):
"""
Return list of corner coordinates from a geotransform
From Metageta and http://gis.stackexchange.com/a/57837/2910
@type geotransform: C{tuple/list}
@param geotransform: geotransform
@type cols: C{int}
@param cols: number of columns in the dataset
@type rows: C{int}
@param rows: number of rows in the dataset
@rtype: C{[float,...,float]}
@return: coordinates of each corner
"""
ext = []
xarr = [0, cols]
yarr = [0, rows]
for px in xarr:
for py in yarr:
x = geotransform[0] + (px * geotransform[1]) + (py * geotransform[2])
y = geotransform[3] + (px * geotransform[4]) + (py * geotransform[5])
ext.append([x, y])
yarr.reverse()
return ext | 56bd7ae9e78f8892e37918c03ce6c7f3976f612d | 26,610 |
def GetQueryFields(referenced_fields, prefix):
"""Returns the comma separated list of field names referenced by the command.
Args:
referenced_fields: A list of field names referenced by the format and filter
expressions.
prefix: The referenced field name resource prefix.
Returns:
The comma separated list of field names referenced by the command.
"""
if not referenced_fields:
return None
return ','.join(['nextPageToken'] +
['.'.join([prefix, field]) for field in referenced_fields]) | 40d928cb6b07fcc66fe257306e4d6b50753f7d7b | 26,612 |
def qfactor_dielectric(tand):
"""Calculate Q-factor due to dielectric filling.
Args:
tand: loss tangent
Returns:
Q-factor
"""
return 1 / tand | c4ef6818deb8b6617657eec38b020d4b73a7d905 | 26,613 |
def flatten(texts):
"""
Flattens list of lists
params:
texts: list of lists
return: flattened list
"""
flattened_list = [item for items in texts for item in items]
return flattened_list | a4427d4389e44d600d1b81fcba3609ee8ea4b14b | 26,615 |
def _make_retry_timeout_kwargs(retry, timeout):
"""Helper for methods taking optional retry / timout args."""
kwargs = {}
if retry is not None:
kwargs["retry"] = retry
if timeout is not None:
kwargs["timeout"] = timeout
return kwargs | 15e106b7da47b4b23d7406cffd573b75805880b9 | 26,618 |
def convert_units(distance, input_units, output_units):
"""Convert units of distance (miles, meters, and kilometers)."""
conversion_factors = {
'miles': {
'miles': 1.0,
'meters': 1609.34,
'kilometers': 1.60934
},
'kilometers': {
'kilometers': 1.0,
'meters': 1000.0,
'miles': 0.621371,
},
'meters': {
'meters': 1.0,
'miles': 0.000621371,
'kilometers': 0.001
}
}
allowed_units = conversion_factors.keys()
if not all(x in allowed_units for x in (input_units, output_units)):
raise ValueError('Invalid units provided. Should use "miles", '
'"kilometers", or "meters".')
return distance*conversion_factors[input_units][output_units] | ff46176552bb1ba06694bd9280e0e45b9137d2ed | 26,624 |
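Example conversions:

print(convert_units(26.2, 'miles', 'kilometers'))  # ~42.16
print(convert_units(5000, 'meters', 'miles'))      # ~3.11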
def safe_get_from_list(data,attr,default_value):
"""
Returns data['attr'] if attr is in data, else returns default_value
"""
    return data.get(attr, default_value) | 509714de5ced48df33539d5c31db633965beff6e | 26,634 |
def compute_helpful_vars(job_dict, dirs):
"""
Helper function, that adds what (I see as) helpful variables to
your job dictionary.
:param job_dict: a dictionary representing a row. for example, if
you had a csv file with rows [sub,ses,task,run,order_id], and also
defined globals [conda_env_path, matlab_path], you would get a dict
{
sub: NIDA2322 ses: 1, task: "rest", run: 2, order_id:5,
conda_env_path:"/project2/conda/myenv", matlab_path:"/usr/bin/matlab"
}
:param dirs: output of ..utils.io:calculate_directories()
:return: augmented job_dict
"""
job_dict["job_id"] = "%05d" % job_dict["order_id"]
# for bids
if "run" in job_dict.keys():
job_dict["run_id"] = "%02d" % job_dict["run"]
return job_dict | 523f77b8882dc19ce184a1674ad83ab5836bc556 | 26,636 |
def to_tuple(tensor):
"""
Convert tensor to tuple.
Args:
tensor (torch.Tensor): any tensor
Returns:
tup (tuple): tuple form
"""
tup = tuple(tensor.cpu().tolist())
return tup | 07dc7e0e11f86331f31fbca8b9cb43b2a36a3846 | 26,639 |
def well_type_from_position(df):
"""Assign the WellType from the position on the plate.
Controls are in column 11 and 12"""
result = df.copy()
result["WellType"] = "Compound"
result["WellType"][(result["plateColumn"] == 11) | (result["plateColumn"] == 12)] = "Control"
return result | e89bc4f3e05947ef977c46daf3d0f1ae6bb402a7 | 26,642 |
def t_name(key):
"""
Rename the feature keys so that they don't clash with the raw keys when
running the Evaluator component.
Args:
key: The original feature key
Returns:
key with '_xf' appended
"""
return key + '_xf' | 04348282ee3e3139cb6ce9f2d66cd821afbfa9fa | 26,645 |
from typing import List
from typing import Dict
def get_index_of_the_arline_sorted_list(airport_sorted_by_nb_of_flights: List[str]) -> Dict:
"""
    Create a dictionary of the airports arranged in alphabetical order with their index as value, to keep the right
    order of the airports sorted by number of flights.\n
    :param airport_sorted_by_nb_of_flights: list of airports sorted by number of flights\n
    :return: a dictionary of the airports with their index as value to keep the right order
"""
sorterIndex = dict(zip(airport_sorted_by_nb_of_flights, range(len(airport_sorted_by_nb_of_flights))))
return sorterIndex | 004355204a04d053b165569294c513261074f704 | 26,651 |
import math
def hsv_to_rgb(h,s,v):
""" Convert H,S,V values to RGB values.
Python implementation of hsvToRgb in src/neuroglancer/util/colorspace.ts """
h*=6
hue_index = math.floor(h)
remainder = h - hue_index
val1 = v*(1-s)
val2 = v*(1-(s*remainder))
val3 = v*(1-(s*(1-remainder)))
hue_remainder = hue_index % 6
if hue_remainder == 0:
return (v,val3,val1)
elif hue_remainder == 1:
return (val2,v,val1)
elif hue_remainder == 2:
return (val1,v,val3)
elif hue_remainder == 3:
return (val1,val2,v)
elif hue_remainder == 4:
return (val3,val1,v)
elif hue_remainder == 5:
return (v,val1,val2) | 3795f6bde05f181489be7f7750fdbd1a4886dffd | 26,652 |
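Sanity checks at exact hue values (these avoid floating-point edge cases at the sextant boundaries):

print(hsv_to_rgb(0.0, 1.0, 1.0))  # (1.0, 0.0, 0.0) -> pure red
print(hsv_to_rgb(0.5, 1.0, 1.0))  # (0.0, 1.0, 1.0) -> cyan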
def kron_delta(a:float, b:float) -> int:
"""Cannonical Kronecker Delta function.
Returns 1 if inputs are equal returns 0 otherwise.
Args:
(:obj:`float`): First input argument
(:obj:`float`): Second input argument
Returns:
(:obj:`int`) Kronecker delta result {0, 1}
"""
if a == b:
return 1
else:
return 0 | 0e7bad79e230740cf35a614dbb21cac90e1cae0c | 26,653 |
def rectangle_area(a,b):
"""Calculate rectangle area"""
return a*b | 60bfd42f38b489ba04c48badbff8fec717ecc53b | 26,655 |
import torch
def cross_entropy(y_hat, y):
"""
Compute the cross entropy as the loss of some nets
Parameters
----------
y_hat : [tensor]
the prediction value
y : [tensor]
the real labels
Returns
-------
[tensor]
        the vector of every cross entropy loss. They are negative to be optimized
"""
# here we use gather() to get the according prediction values
# then we compute the log of the tensor to satisfy the cross entropy's need
return -torch.log(y_hat.gather(1, y.view(-1, 1))) | 622fb9a525c3f99eba0bc3e03a2f4a290f6a36df | 26,656 |
def unique_fname(full_path: str) -> str:
"""Get unique file name for given full path to MELD data file. The return format is '[dialog]_[utterance]'.
:param full_path: full path to MELD .mp4 data file
:return: unique id of data file (only unique within dataset directory)
"""
fname = full_path.split('/')[-1].split('.')[0]
return fname.replace('dia', '').replace('utt', '').replace('final_videos_test', '') | 29bffc8a2028ac126709fe17d9e2e1d2914bf769 | 26,658 |
import struct
def read_lc_int(buf):
"""
Takes a buffer and reads an length code string from the start.
Returns a tuple with buffer less the integer and the integer read.
"""
if not buf:
raise ValueError("Empty buffer.")
lcbyte = buf[0]
if lcbyte == 251:
return (buf[1:], None)
elif lcbyte < 251:
return (buf[1:], int(lcbyte))
elif lcbyte == 252:
return (buf[3:], struct.unpack('<xH', buf[0:3])[0])
elif lcbyte == 253:
return (buf[4:], struct.unpack('<I', buf[1:4] + b'\x00')[0])
elif lcbyte == 254:
return (buf[9:], struct.unpack('<xQ', buf[0:9])[0])
else:
raise ValueError("Failed reading length encoded integer") | fcc1b0fe4cfd8186537700ee124e849473b1fd07 | 26,668 |
def prefixM(prefix, s):
"""为字符串添加前缀,对于多行字符串,每行都添加前缀"""
if '\n' not in s:
return prefix+s
ret = ''
lines = s.split('\n')
lineCount = len(lines)
for index in range(lineCount):
ret += prefix
ret += lines[index]
        if index != lineCount-1:  # don't append a newline after the last line
ret += '\n'
return ret | 135043bb3d3f26afe6393a8b6b005c1e4508b1a4 | 26,670 |
def saddle_points(matrix):
"""
Find saddle points in a matrix
:param matrix list - A list of rows containing values.
:return list - A list containing dictionary(ies) indicating
where the saddle point(s) in the matrix are.
It's called a "saddle point" because it is greater than or
equal to every element in its row and less than or equal
to every element in its column.
A matrix may have zero or more saddle points.
The matrix can have a different number of rows and columns (Non square).
"""
points = []
rows_num = len(matrix)
if rows_num > 0:
colums_num = len(matrix[0])
        # traverse each row and find its highest value
for current_row_index, row in enumerate(matrix):
if len(row) != colums_num:
raise ValueError(f"Irregular matrix, row {current_row_index + 1} "
f"has {len(row)} columns instead of expected {colums_num}.")
max_value = max(row)
            # for cases where the highest value occurs in multiple columns, iterate
max_value_count = row.count(max_value)
next_index = 0
while max_value_count > 0:
# Given the column index for candidate (highest value in row)
# Find out if it's the lowest in the column
col_index = row.index(max_value, next_index)
next_index = col_index + 1
max_value_count -= 1
is_lowest_in_col = True
for row_index in range(0, rows_num):
# skip 'current' row
if row_index == current_row_index:
continue
# check to make sure col exists in row
if len(matrix[row_index]) - 1 < col_index:
raise ValueError(f"Irregular matrix, row {row_index} is missing column {col_index}")
#continue
value = matrix[row_index][col_index]
if value < max_value:
# we found a value in the col that's less than the candidate
# so it's not a saddle in this column
is_lowest_in_col = False
break
if is_lowest_in_col:
# current_row_index and col_index start at 0, so up 1
points.append({"row": current_row_index + 1, "column": col_index + 1})
return points | d1cd547f2026f3529bea27003b4172ce13e73c8f | 26,672 |
def Cross(a,b):
"""Cross returns the cross product of 2 vectors of length 3, or a zero vector if the vectors are not both length 3."""
assert len(a) == len(b) == 3, "Cross was given a vector whose length is not 3. a: %s b: %s" % (a, b)
c = [a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0]]
return c | 1e4af47c6e6a1d9ab9572d0618b5c6b9e27829be | 26,675 |
from typing import Iterable
from typing import Dict
def get_substr_frequency(string: str, substrings: Iterable[str]) -> Dict[str, int]:
"""Get dictionary with frequencies (vals) of substrings (keys) in given string."""
return {s: string.count(s) for s in substrings} | 370e0d7fcaef7efa3ca21a80519b9f36934e23a7 | 26,680 |
import torch
def poolfeat(input, prob, avg = True):
""" A function to aggregate superpixel features from pixel features
Args:
input (tensor): input feature tensor.
prob (tensor): one-hot superpixel segmentation.
avg (bool, optional): average or sum the pixel features to get superpixel features
Returns:
cluster_feat (tensor): the superpixel features
Shape:
input: (B, C, H, W)
prob: (B, N, H, W)
cluster_feat: (B, N, C)
"""
B, C, H, W = input.shape
B, N, H, W = prob.shape
prob_flat = prob.view(B, N, -1)
input_flat = input.view(B, C, -1)
cluster_feat = torch.matmul(prob_flat, input_flat.permute(0, 2, 1))
if avg:
cluster_sum = torch.sum(prob_flat, dim = -1).view(B, N , 1)
cluster_feat = cluster_feat / (cluster_sum + 1e-8)
return cluster_feat | 0bcc1dd7b449997491e1e25378ae09eb8f3d0592 | 26,683 |
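A shape-level usage sketch with random tensors (a hard one-hot assignment stands in for a real superpixel segmentation):

import torch

feat = torch.randn(2, 3, 8, 8)            # (B, C, H, W) pixel features
labels = torch.randint(0, 5, (2, 8, 8))   # fake superpixel ids
prob = torch.nn.functional.one_hot(labels, num_classes=5).permute(0, 3, 1, 2).float()  # (B, N, H, W)
sp_feat = poolfeat(feat, prob)
print(sp_feat.shape)  # torch.Size([2, 5, 3])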
import hashlib
def calculate_identicon(user_id: str) -> str:
"""
Calculate an identicon hash string based on a user name.
:param user_id: the user name
:return: an identicon string
"""
return hashlib.sha256(user_id.encode()).hexdigest() | e22e817da8a38ab289e4c623f8cbcba370317223 | 26,685 |
def fold_fortran_code(content, width=79):
"""Simplistic fold to n columns, breaking at whitespace.
Fortran line continuation (&) with a six-space following indent
are used where necessary.
"""
lines = content.split(sep="\n")
result = ""
for input_line in lines:
words = input_line.split()
output_line = ""
l = 0 # current length of output line
for w in words:
# 3 = space before w, space after w, &
if l + len(w) < width - 3:
if len(output_line):
addme = " %s" % (w)
else:
addme = w
output_line += addme
l += len(addme)
else:
if len(output_line):
output_line += " &\n" # Fortran line continuation...
result += output_line
output_line = 6*" " + w # ...and indent
else:
output_line = w
l = len(output_line)
result += (output_line + "\n")
return result | 17c7288e412fc9567a9bec1b4c1e740145cf27b7 | 26,689 |
import shutil
def command_exists(command: str) -> bool:
"""
Checks whether some external utility is installed and accessible to this script.
Args:
command: The name of the binary/command to look for. It must be a single name; arguments and shell command lines
are not accepted.
Returns:
True if the command exists and is accessible (as per `which`).
"""
return shutil.which(command) is not None | f9160163289f75af6a602641fc357addf0fc18bc | 26,690 |
from typing import Sequence
from typing import Iterator
def chunks(elements: Sequence, size, lazy=False) -> Iterator[Sequence] | list[Sequence]:
"""Create successive n-sized chunks from elements."""
generator_ = (elements[i:i + size] for i in range(0, len(elements), size))
if lazy:
return generator_
else:
return list(generator_) | 1112ebd4868fe5efb6f444a527ced436e4d59f1b | 26,697 |
def compute_sliced_len(slc, sequence_len):
"""
Compute length of sliced object.
Parameters
----------
slc : slice
Slice object.
sequence_len : int
Length of sequence, to which slice will be applied.
Returns
-------
int
Length of object after applying slice object on it.
"""
# This will translate slice to a range, from which we can retrieve length
return len(range(*slc.indices(sequence_len))) | 57b330de7bb7a54d2d6331a72f0a88e005c83d10 | 26,700 |
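Examples:

assert compute_sliced_len(slice(1, 10, 2), 6) == 3  # picks indices 1, 3, 5
assert compute_sliced_len(slice(None, None, -1), 4) == 4  # a full reverse keeps every element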
def _get_json_schema_node_id(fully_qualified_name: str) -> str:
"""Returns the reference id (i.e. HTML fragment id) for a schema."""
return 'json-%s' % (fully_qualified_name,) | eafdacc1e7c4f2feabcd5b486fb264d33332266d | 26,701 |
def create_reverse_complement(input_sequence):
"""
Given an input sequence, returns its reverse complement.
"""
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
bases = list(input_sequence)
bases = reversed([complement.get(base, base) for base in bases])
bases = ''.join(bases)
return bases | 6d33d449f17bcfec59f762c7d1f1e3b86ea8a975 | 26,704 |
def truncate(string, max_bytes):
"""
Truncates a string to no longer than the specified number of bytes.
>>> truncate('foobar', 8)
'foobar'
>>> truncate('hello', 5)
'hello'
Lob off "partial" words, where practical:
>>> truncate('lorem ipsum dolor sit amet', 21)
'lorem ipsum […]'
>>> truncate('lorem ipsum dolor sit amet', 22)
'lorem ipsum […]'
>>> truncate('lorem ipsum dolor sit amet', 23)
'lorem ipsum dolor […]'
Otherwise, break apart the word:
>>> truncate('howdeedoodeethere', 11)
'howdee[…]'
Note that ``max_bytes`` must be ≥ what's required to return the worst-case truncation:
>>> truncate('hello world', 5)
'[…]'
>>> truncate('hello world', 4)
Traceback (most recent call last):
...
AssertionError: max_bytes ≱ 5
"""
# These should really be constants, but meh…
ellipsis = '[…]'
space = ' '
ellipsis_bytes = len(ellipsis.encode())
max_bytes_available_when_truncated = max_bytes - ellipsis_bytes
assert max_bytes_available_when_truncated >= 0, 'max_bytes ≱ {0:d}'.format(ellipsis_bytes)
# If we're within budget, brill…
if len(string.encode()) <= max_bytes:
return string
# Cut things down to size. If we snip across a multibyte character, we've asked the decoder to turn a blind eye…
string = string.encode()[:max_bytes_available_when_truncated].decode(errors='ignore')
# If the string (is non-empty and) ends with a "partial" word, then lob that off…
if string and (not string[-1].isspace()):
split = string.rsplit(maxsplit=1)
if len(split) == 2:
string = split[0] + space
# Finally, tack on the ellipsis, and call it a day…
truncated_string = string + ellipsis
assert len(truncated_string.encode()) <= max_bytes
return truncated_string | 67c6fd0c0b4ec709ce0d38ae010aed888ce9d11c | 26,705 |
def _merge_str(l):
"""Concatenate consecutive strings in a list of nodes."""
out = []
for node in l:
if (out and isinstance(out[-1], str) and
isinstance(node, str)):
out[-1] += node
else:
out.append(node)
return out | d83de96151ad10576d65866b2f38f38d839ba99f | 26,715 |
def has_gendered_pronouns(doc):
"""
Doc-level spaCy attribute getter, which returns True if
there are any pronouns (tag_ "PRP" or "PRP$") in the Doc
with a "m" or "f" gender.
"""
pronoun_genders = [token._.gender for token in doc if token.tag_ in ["PRP", "PRP$"]]
has_gendered_pronoun = any([g in ["m", "f"] for g in pronoun_genders])
return has_gendered_pronoun | 2b58db4fb972766502ca94e17140946ddd51467e | 26,717 |
def int_to_char(int_):
"""Return an ascii character in byte string form for a given int"""
return bytes([int_]) | 58ea0118590caa730746540761dc6c4bed42b630 | 26,719 |
from typing import List
from typing import Callable
from typing import Any
def decorate(decorators: List[Callable[..., Any]]) -> Callable[..., Any]:
"""Use this decorator function to apply a list of decorators to a function.
Useful when sharing a common group of decorators among functions.
The original use case is with click decorators (see: https://github.com/pallets/click/issues/108)
"""
def func_with_shared_decorators(func: Callable[..., Any]) -> Callable[..., Any]:
for option in reversed(decorators):
func = option(func)
return func
return func_with_shared_decorators | 2f31acdd75067a8943509c98243c855399092109 | 26,722 |
from typing import Deque
def json_path(absolute_path: Deque[str]):
"""Flatten a data path to a dot delimited string.
:param absolute_path: The path
:returns: The dot delimited string
"""
path = "$"
for elem in absolute_path:
if isinstance(elem, int):
path += "[" + str(elem) + "]"
else:
path += "." + elem
return path | 7c61f784fa269925e42ac5f1cc3de9e2c55b9718 | 26,725 |
def getbytes(obj):
"""Converts an object to bytes.
- If the object is None an empty byte string is returned.
- If the object is a byte string, it is returned.
- If the object is a str, the value of `str.encode('utf-8')` is returned.
- If the object is a memoryview, the value of `memoryview.tobytes()` is returned.
- If the object is a bytearray, the value of `bytes(bytearray)` is returned.
- If the object is an int, the value of `bytes([int])` is returned.
Raises:
TypeError: The object could not be converted to a byte.
ValueError: The object is an integer that can not be represented with a single byte.
"""
if obj is None:
return b''
elif isinstance(obj, bytes):
return obj
elif isinstance(obj, str):
return obj.encode('utf-8')
elif isinstance(obj, memoryview):
return obj.tobytes()
elif isinstance(obj, bytearray):
return bytes(obj)
elif isinstance(obj, int):
if 0 <= obj <= 255:
return bytes((obj,))
raise ValueError(f'{obj} can not be represented with a single byte')
raise TypeError(f'Expected a str, int or bytes-like object, got {type(obj).__name__}') | 6f44bcbb31fa9c1b0af812932ada08acf2f3cbfe | 26,726 |
def composite(vol, cmr):
""" Ranks securities in a composite fashion.
Parameters:
- `vol` : :class:`dict` volatility portfolio.
- `cmr` : :class:`dict` momentum portfolio.
.. note::
at this point, the same tickers are present in both portfolios. Their
ranking only is different.
The function builds a :class:`dict` with the tickers and set their score
to zero; sample {'ticker': 0}. Then it adds to the ticker score their index
in volatility and momentum portfolio.
The tickers are then sorted ascendingly, after having been transformed into
a :class:`tuple`.
Returns a :class:`dict` containing tickers and their score.
"""
vector = {} # used to store tickers indexes
v_sort = [] # to store ranked tickers
composite = {} # to store the return of the function
# populates a dict with all the tickers and attributes them a score of 0
for item in vol.keys():
vector[item] = 0
for i, j in enumerate(vol.keys()): vector[j] += i
for i, j in enumerate(cmr.keys()): vector[j] += i
# translates to tuple to sort
for item in vector.keys():
v_sort.append((item, vector[item]))
v_sort.sort(key = lambda x: x[1])
# back to dict
for item in v_sort:
composite[item[0]] = item[1]
return composite | e6e74c1f53477b8200b777e749cf556dc981c51e | 26,729 |
def get_input_mode(mode_description_ls, input_tips=None):
"""
    Read input and return the selected mode according to mode_description_ls.
    :param mode_description_ls: [[mode, description], ...]; the first item is the default mode,
        and each description is the input string that selects its mode
    :param input_tips: generated automatically from mode_description_ls when not set
"""
description_mode_dict = {str(description): str(mode) for mode, description in mode_description_ls}
if not input_tips:
input_tips = "pls choose %s(%s|default)" % (mode_description_ls[0][0], mode_description_ls[0][1])
for mode, description in mode_description_ls[1:]:
input_tips += "/%s(%s)" % (mode, description)
input_tips += " mode:\n"
while True:
input_str = input(input_tips)
try:
            if not input_str:  # empty input ("") selects the default
mode = str(mode_description_ls[0][0])
else:
mode = description_mode_dict.get(input_str, None)
if mode is None:
print("ERR:input value exceeds range!")
continue
print("Mode %s is selected" % mode)
break
except Exception as e:
print(Exception, ":", e)
return mode | e78209bc3119308af9040e854597f1f7c56b601a | 26,731 |
def find_namespace_vars_usages(analysis, namespace_usage):
"""
Returns usages of Vars from namespace_usage.
It's useful when you want to see Vars (from namespace_usage) being used in your namespace.
"""
usages = []
for var_qualified_name, var_usages in analysis.get("vindex_usages", {}).items():
namespace, _ = var_qualified_name
if namespace == namespace_usage.get("to"):
usages.extend(var_usages)
return usages | 8d70b838c18e71e9483d7ccc18c7e70e2ba2a1f6 | 26,737 |
def f_Wint(a, b, c, dh_IS, m_max):
"""
y-intercept of Willans Line
"""
return (c/a)*(m_max*dh_IS - b) | 488c133b973f312359602f41edcb70a487d733d1 | 26,740 |
import torch
def linreg(X, w, b):
"""
Return the matrix multiply result of X*w+b
Parameters
----------
X : [tensor]
        the variables of the question
w : [tensor]
the weight of this linear reg
b : [tensor]
the bias of this linear reg
Returns
-------
[tensor]
the matrix multiply result of X*w+b
"""
# mm means matrix multiply
return torch.mm(X, w)+b | 311cd5636977c608986d536c585e9c50a8a32557 | 26,743 |
import json
def load_dexpreopt_configs(configs):
"""Load dexpreopt.config files and map module names to library names."""
module_to_libname = {}
if configs is None:
configs = []
for config in configs:
with open(config, 'r') as f:
contents = json.load(f)
module_to_libname[contents['Name']] = contents['ProvidesUsesLibrary']
return module_to_libname | b3a8763ee182fa7e9da968404369933e494663b5 | 26,746 |
from typing import Any
from typing import Callable
def cast_field(field_value: Any, column_type: Callable) -> Any:
"""
Returns the casted field value according to the DATATYPE_MAPPING above
:param field_value: value of the field (Any)
:param column_type: class constructor / function that casts the datatype to the correct type (Callable)
:return: (Any)
"""
if field_value is None:
return field_value
return column_type(field_value) | d2aa57bd593f9bc992e3f1d51af62d1077ccda44 | 26,748 |
def hit_location_cmp(hit1, hit2):
"""
Is the location of hit1 before the location of hit2? Used to sort hits.
"""
diff = hit1.location.start() - hit2.location.start()
    return diff if diff != 0 else hit1.location.end() - hit2.location.end() | 938c9c5e8d8e00afd24b32eaeaa847300268e61d | 26,749 |
def match(freq1, freq2):
"""
Due to noise considerations, consider frequencies with difference
less than 20Hz as equal.
"""
return abs(freq1 - freq2) < 20 | ef5d023f8ca9c69e1ee2a2f17387ed825183d94c | 26,752 |