content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
from typing import Union
def format_numbers_consistently(number: Union[int, float]) -> Union[int, float]:
    """
    Format a numeric value consistently.

    Avoids representation mismatches such as '12.0' vs '12' by returning an
    int whenever the value has no fractional part.

    :param number: numeric value to format
    :return: int when the value is whole, otherwise float
    """
    as_float = float(number)
    if as_float.is_integer():
        return int(as_float)
    return as_float
import re
def extract_date_from_date_time(date_time: str) -> str:
    """
    Given a date in format YYYY-MM-DDTHH:MM:SS, extract the date
    part (i.e. YYYY-MM-DD).

    :param date_time: str in DATETIME_FORMAT
    :return: str, the date in DATE_FORMAT
    :raises TypeError: if date_time is not a string
    :raises ValueError: if the input does not contain exactly one date
    """
    # isinstance + raise instead of assert: asserts are stripped under -O.
    if not isinstance(date_time, str):
        raise TypeError("date_time must be of type str. Got %s" % str(type(date_time)))
    # Raw string avoids the invalid-escape-sequence warning of "\d".
    match = re.findall(r"\d{4}-\d{2}-\d{2}", date_time)
    if len(match) == 1:
        return match[0]
    raise ValueError("Invalid date_time input given. Got (%s)" % date_time)
from datetime import datetime
def dt_to_dec(dt):
    """Convert a datetime to a decimal year (e.g. mid-2020 -> ~2020.5)."""
    start = datetime(dt.year, 1, 1)
    end = start.replace(year=dt.year + 1)
    elapsed = (dt - start).total_seconds()
    year_length = float((end - start).total_seconds())
    return dt.year + elapsed / year_length
def enhanceEntries(entriesList, feedId, feedName):
    """
    Add the source feed's id and name to each entry, so each item carries
    all the information we need on its own.

    Parameters
    ----------
    entriesList : list
        A list of RSS entries (FeedParserDicts)
    feedId : string
        The URL of the source feed
    feedName : string
        The clear text name of the source

    Returns
    -------
    entriesList : list
        The same list, with each entry enhanced in place
    """
    for item in entriesList:
        item["source"] = feedId
        item["feed_name"] = feedName
    return entriesList
import random
import string
def get_unique_key(str_len: int) -> str:
    """Return a random lowercase ASCII string of length str_len.

    Note: randomness does not guarantee uniqueness; collisions are possible.

    Args:
        str_len (int): Length of string.
    Returns:
        str: Random string
    """
    return "".join(random.choice(string.ascii_lowercase) for _ in range(str_len))
from collections import OrderedDict
def sort_by_key(value, reverse=False):
    """
    Sort a dictionary by its keys.

    :param dict value: dict to be sorted.
    :param bool reverse: sort in descending order; defaults to False.
    :rtype: OrderedDict
    """
    # Import from collections, not typing: typing.OrderedDict is a
    # deprecated generic alias, not meant to be instantiated.
    ordered_pairs = sorted(value.items(), key=lambda item: item[0], reverse=reverse)
    return OrderedDict(ordered_pairs)
import redis
def connect_redis(dsn):
    """
    Return a Redis connection built from a DSN url.

    :param dsn: The dsn url (e.g. ``redis://host:6379/0``)
    :return: a :class:`redis.StrictRedis` client bound to that url
    """
    return redis.StrictRedis.from_url(url=dsn)
def _tag_depth(path, depth=None):
"""Add depth tag to path."""
# All paths must start at the root
if not path or path[0] != '/':
raise ValueError("Path must start with /!")
if depth is None:
depth = path.count('/')
return "{}{}".format(depth, path).encode('utf-8') | 6746ce97ac2569e2775cbdc13510263df0860deb | 700,376 |
def calc_sl_price(contract_price, sl_percent):
    """Return the price that is sl_percent below contract_price, rounded to cents."""
    multiplier = 1 - sl_percent
    return round(contract_price * multiplier, 2)
import hashlib
import hmac
def verifyhash_password_hashlib(password: str, hash: str) -> bool:
    """Check if a password matches its stored MD5 hex digest.

    NOTE: MD5 is not a safe password hash (fast, unsalted); prefer a KDF
    such as hashlib.pbkdf2_hmac. Kept here for compatibility with stored
    hashes.

    Args:
        password (str): user password
        hash (str): user password hashed (MD5 hex digest)
    Returns:
        bool: True if correct password else False
    """
    digest = hashlib.md5(password.encode('utf-8')).hexdigest()
    # compare_digest avoids leaking match length via timing.
    return hmac.compare_digest(digest, hash)
def get_bonds(input_group):
    """Return the bonds of input_group as a list of (atom_a, atom_b) index pairs."""
    atoms = input_group.bond_atom_list
    return [(atoms[2 * i], atoms[2 * i + 1])
            for i in range(len(input_group.bond_order_list))]
def read_data(file, delimiter="\n"):
    """Read a UTF-8 text file and return its contents split on delimiter."""
    with open(file, 'rt', encoding="utf-8") as handle:
        contents = handle.read()
    return contents.split(delimiter)
def get_sources(dataframe):
    """
    Extract all source links from the 'incident_sources' dict column.

    :param pandas.core.frame.DataFrame dataframe:
    :rtype: set
    :return: set of archive.org links
    """
    return {source
            for _, row in dataframe.iterrows()
            for source in row['incident_sources'].keys()}
from datetime import datetime
def set_mediafile_attrs(mediafile, ufile, data, user):
    """
    Copy metadata from an uploaded file into the mediafile model instance.

    Mutates and returns `mediafile`.
    """
    mediafile.name = ufile.name
    mediafile.original_filename = ufile.name
    mediafile.filesize = ufile.size
    mediafile.original_path = data['pathinfo0']
    # jupload sends the modification date as "dd/MM/yyyy HH:mm:ss".
    mediafile.original_filedate = datetime.strptime(
        data['filemodificationdate0'], "%d/%m/%Y %H:%M:%S")
    mediafile.md5sum = data['md5sum0']
    mediafile.mime_type = data['mimetype0']
    mediafile.uploaded_by = user
    return mediafile
import ipaddress
def is_valid_ipv6_address(ip):
    """Return True if ip parses as a valid IPv6 address."""
    try:
        ipaddress.IPv6Address(ip)
    except ipaddress.AddressValueError:
        return False
    return True
def get_list_view_name(model):
    """Return the '<objectname>-list' view name for a model."""
    object_name = model._meta.object_name.lower()
    return f'{object_name}-list'
import copy
def render_plugins(plugins, context, placeholder, processors=None):
    """
    Render a collection of plugins with the given context, using the
    appropriate processors for the placeholder, and return one "rendered
    content" string per plugin.

    This is the main plugin rendering utility; prefer it over calling
    Plugin.render_plugin() directly.
    """
    total = len(plugins)
    rendered = []
    for position, plugin in enumerate(plugins):
        # Each plugin gets its position within the whole batch.
        plugin._render_meta.total = total
        plugin._render_meta.index = position
        rendered.append(plugin.render_plugin(copy.copy(context), placeholder,
                                             processors=processors))
    return rendered
def getkey(value, key):
    """Return the item of `value` addressed by `key` (dict key or sequence index)."""
    return value[key]
def spring1s(ep, ed):
    """
    Compute the element force in a spring element (spring1e).

    :param float ep: spring stiffness or analog quantity
    :param list ed: element displacements [d0, d1]
    :return float: element force [N]
    """
    stiffness = ep
    elongation = ed[1] - ed[0]
    return stiffness * elongation
import re
def remove_comments(codelines):
    """Return codelines with every '#'-to-end-of-line comment stripped."""
    return [re.sub("#.*", "", line) for line in codelines]
def getBoardStr(board):
    """Return a text-representation of the board."""
    cells = [board[str(n)] for n in range(1, 10)]
    return '''
{}|{}|{} 1 2 3
-+-+-
{}|{}|{} 4 5 6
-+-+-
{}|{}|{} 7 8 9'''.format(*cells)
def seed_from_str(s: str) -> int:
    """
    Obtain a deterministic integer seed in [0, 2**32) from a string.

    Uses SHA-256 instead of the builtin hash(), whose output for strings is
    randomized per process (PYTHONHASHSEED) and therefore not reproducible
    across runs — defeating the purpose of a seed.
    """
    import hashlib
    digest = hashlib.sha256(s.encode('utf-8')).digest()
    return int.from_bytes(digest[:4], 'big') % (2 ** 32)
def destos_to_binfmt(key):
    """
    Return the binary format for an unversioned platform name, defaulting
    to ``elf`` when nothing matches.

    :param key: platform name
    :type key: string
    :return: string representing the binary format
    """
    # NOTE(review): 'mac-o' looks like a typo for 'mach-o', but callers may
    # compare against this exact value, so it is preserved as-is.
    if key == 'darwin':
        return 'mac-o'
    if key in ('win32', 'cygwin', 'uwin', 'msys'):
        return 'pe'
    return 'elf'
def to_int(s, default=None):
    """Attempt to convert the provided value to an integer.

    :param s: The text (or value) to convert
    :param default: Default value to return if it cannot be converted
    :returns: The integer if converted, otherwise the default value
    """
    try:
        return int(s)
    except (TypeError, ValueError):
        # TypeError covers non-convertible inputs such as None, which the
        # original ValueError-only handler let propagate.
        return default
def get_first_key(obj, key):
    """Return the value of the first matching key, searching obj recursively.

    obj should contain at least one dict and may be any nesting of dicts
    and lists. Returns None when the key is not found anywhere.
    """
    if type(obj) is dict:
        if key in obj:
            return obj[key]
        children = obj.values()
    elif type(obj) is list:
        children = obj
    else:
        return None
    for child in children:
        found = get_first_key(child, key)
        if found is not None:
            return found
    return None
def get_partitions(num_items, buckets, prefix):
    """
    Given a number of items and a number of buckets, return all possible combination of divider locations.
    Result is a list of lists, where each sub-list is set of divider locations. Each divider is placed after
    the 1-based index provided (or, alternately, *before* the zero-based index provided).

    :param num_items: total number of items being partitioned
    :param buckets: number of buckets to split the items into
    :param prefix: divider locations fixed so far (pass [] for the top-level call)
    :return: list of divider-location lists, each of length buckets - 1
    """
    partitions = []
    # Number of dividers already placed by parent recursive calls.
    num_dividers = len(prefix)
    if num_dividers:
        last_divider_loc = prefix[-1]
    else:
        last_divider_loc = 0
    # Dividers still to place. NOTE(review): assumes buckets >= 2 — with
    # buckets == 1 the remaining_dividers == 1 base case is never reached.
    remaining_dividers = (buckets - 1) - num_dividers
    # Start after the previous divider; leave room for the dividers that remain.
    for next_divider_loc in range(last_divider_loc + 1, num_items - (remaining_dividers - 1)):
        new_prefix = prefix.copy() + [next_divider_loc]
        if remaining_dividers == 1:
            partitions.append(new_prefix)
        else:
            partitions.extend(get_partitions(num_items, buckets, new_prefix))
    return partitions
def normalize_rate(rate):
    """
    Replace any 'n/a' string values in rate-limit info with None, in place.

    :param rate: dictionary
    :return: the same dictionary, mutated
    """
    for key, value in rate.items():
        if value == "n/a":
            rate[key] = None
    return rate
import re
def parse_tf(constants : dict) -> set:
    """Read user-configured variables from the terraform variables.tf files.

    Args:
        constants: config read from config.yml (needs key 'magma_root')
    Returns:
        set of variable names declared in variables.tf, minus those whose
        values are provided by module outputs
    """
    tf_root = f"{constants['magma_root']}/orc8r/cloud/deploy/terraform"
    variable_re = re.compile(r'variable\s\"(?P<variable_name>\w+?)\"\s\{')
    output_re = re.compile(r'output\s\"(?P<variable_name>\w+?)\"\s\{')

    def scan(filenames, pattern):
        # Collect every name captured by `pattern` across the given files.
        found = set()
        for filename in filenames:
            with open(filename) as fh:
                for line in fh:
                    match = pattern.search(line)
                    if match and match.group('variable_name'):
                        found.add(match.group('variable_name'))
        return found

    declared = scan([f'{tf_root}/orc8r-aws/variables.tf',
                     f'{tf_root}/orc8r-helm-aws/variables.tf'], variable_re)
    provided = scan([f'{tf_root}/orc8r-aws/outputs.tf'], output_re)
    # Variables set through module outputs are not user configured.
    return declared - provided
def filterdictvals(D, V):
    """
    Return dict D with entries whose value equals V removed.

    filterdictvals(dict(a=1, b=2, c=1), 1) => {'b': 2}
    """
    result = {}
    for key, val in D.items():
        if val != V:
            result[key] = val
    return result
def parse_regions(text):
    """Parse "start..end" pairs separated by commas.

    Returns a list of [start, end] pairs; start and end remain strings.
    """
    regions = []
    for region_pair in text.strip().split(","):
        parts = region_pair.split("..")
        regions.append([parts[0], parts[1]])
    return regions
import re
def _get_numbers_from_string(string):
"""
Return a list of numbers (strings) that appear in parameter string.
Match integers, decimals and numbers such as +1, 2e9, +2E+09, -2.0e-9.
"""
numbers = re.findall(r"[-+]?(?:(?:\d+\.\d+)|(?:\d+))(?:[Ee][+-]?\d+)?", string)
return numbers | 970d57aaec18bdc6ab4a9c55f6445fbbc74998d7 | 700,426 |
def GetVersion(vm):
    """Return the version string of the memcached server installed on vm."""
    stdout, _ = vm.RemoteCommand(
        'memcached -help |grep -m 1 "memcached"| tr -d "\n"')
    return stdout
def get_sentiment_compound(value):
    """
    Classify a sentiment score.

    :param value: floating point number
    :return: "Positive", "Negative" or "Neutral"
    """
    if value > 0:
        return "Positive"
    if value < 0:
        return "Negative"
    return "Neutral"
from functools import reduce
def run_map(generator, mapper) -> list:
    """
    Call the map function for each line of each file.

    :param generator: yields (file_key, lines) pairs
    :param mapper: called as mapper(file_key, line); returns an iterable of
        key/value pairs
    :return: list of all key/value pairs produced by the mapper
    """
    # Explicit loops replace the nested reduce(...) pipeline, which raised
    # TypeError on an empty generator or an empty file (reduce with no
    # initializer) and was hard to read.
    results = []
    for file_key, lines in generator:
        for line in lines:
            results.extend(mapper(file_key, line))
    return results
def put_pm_to_pandas_data(data: dict) -> dict:
    r"""
    Change the +- to \pm for latex display.

    Note: to have the pandas frame display the table string correctly use
    escape=False, as in:
    latex_table: str = df.to_latex(index=False, escape=False, caption='...', label='...')

    ref:
    - https://stackoverflow.com/questions/70008992/how-to-print-a-literal-backslash-to-get-pm-in-a-pandas-data-frame-to-generate-a
    """
    # Raw strings: '\p' is an invalid escape sequence and raises a
    # SyntaxWarning (a future SyntaxError) in a regular literal.
    for column_name, data_values in data.items():
        data[column_name] = [value.replace('+-', r' $\pm$ ') for value in data_values]
    return data
import pathlib
def abst_path(path):
    """Return a PurePath for a string representation, validating that the
    path is absolute (starts from the root).

    :raises ValueError: if path is not absolute
    """
    p = pathlib.PurePath(path)
    # Raise instead of assert: asserts are stripped under `python -O`.
    if not p.is_absolute():
        raise ValueError(f"path must be absolute, got {path!r}")
    return p
def nvl(value, default):
    """Return value if it is truthy, otherwise default.

    Note that falsy values such as 0, '' and [] also yield the default.

    Parameters:
        value: the value to evaluate
        default: the default value
    Returns:
        value or default
    """
    return value if value else default
def isnetid(s):
    """
    Return True if s is a valid Cornell netid.

    Cornell network ids consist of 2 or 3 lower-case initials followed by a
    sequence of digits.

    Examples:
        isnetid('wmw2') returns True
        isnetid('2wmw') returns False
        isnetid('ww2345') returns True
        isnetid('w2345') returns False
        isnetid('WW345') returns False

    Parameter s: the string to check
    Precondition: s is a string
    """
    assert type(s) == str
    if '-' in s:
        return False
    for prefix_len in (2, 3):
        letters = s[:prefix_len]
        digits = s[prefix_len:]
        if letters.isalpha() and letters.islower() and digits.isnumeric():
            return True
    return False
from pathlib import Path
import glob
def filter_paths(paths, excluded_paths):
    """Filter out paths matching one of the excluded_paths globs.

    Args:
        paths: paths to filter.
        excluded_paths: list of glob patterns of modules to exclude.
    Returns: a set of the remaining Python module paths (*.py files).
    """
    excluded = set()
    for pattern in excluded_paths:
        excluded.update(Path(match) for match in glob.glob(pattern, recursive=True))
    return set(paths) - excluded
def merge_runs_by_tag(runs, tags):
    """
    Collect the (step, value) lists for each tag across all runs, e.g.::

        <tagA>
          + step:  [<run-1-steps>, <run-2-steps>, ...]
          + value: [<run-1-values>, <run-2-values>, ...]

    Arguments:
        runs (dict):
            Collection of data from all runs, usually the output of
            `visualization.helpers.load_run`.
        tags (list):
            List of the tags to merge.

    Returns:
        data (dict):
            Merged scalar data of all runs, keyed by `tags`.
    """
    return {
        tag: {
            'step': [runs[run][tag]['step'] for run in runs],
            'value': [runs[run][tag]['value'] for run in runs],
        }
        for tag in tags
    }
def authenticated_user(client, account):
    """Create an authenticated user for a test.

    Sets a fixed test email/password on `account`, saves it, logs the
    client in with those credentials, and returns the account.
    """
    account.email = '[email protected]'
    account.set_password('my_password123')
    account.save()
    client.login(email='[email protected]', password='my_password123')
    return account
import torch
def prepare_values(y_true, y_pred):
    """Convert the inputs to numpy.ndarray.

    Parameters
    ----------
    y_true : torch.tensor
        Either a CPU or GPU tensor.
    y_pred : torch.tensor
        Either a CPU or GPU tensor.

    Returns
    -------
    y_true, y_pred : numpy.ndarray
        Numpy arrays of the input tensor data (non-tensors pass through).
    """
    def to_numpy(tensor):
        # Move GPU tensors to the CPU first; .numpy() requires CPU storage.
        if isinstance(tensor, torch.Tensor):
            if tensor.is_cuda:
                tensor = tensor.to(torch.device("cpu"))
            return tensor.numpy()
        return tensor
    return to_numpy(y_true), to_numpy(y_pred)
import html
def make_email_lists(items):
    """Build an HTML list and a plain-text list of items, for use in emails.

    Returns an (html, text) tuple; both are empty strings for empty input.
    """
    if not items:
        return "", ""
    # NOTE(review): the HTML list shows 'blank' for falsy items while the
    # text list renders them verbatim (e.g. None) — preserved as-is.
    bullet_items = ["<li>{}</li>".format(html.escape(i if i else "\'blank\'"))
                    for i in items]
    htm = "<ul>{}</ul>".format("".join(bullet_items))
    text = "\r\n".join(f" - {i}" for i in items)
    return htm, text
import re
def is_valid_hostname(hostname):
    """
    Check that the hostname passed in is valid.

    Largely follows
    https://stackoverflow.com/questions/2532053/validate-a-hostname-string
    """
    if len(hostname) > 255:
        return False
    if hostname[-1] == ".":
        # Strip exactly one trailing dot (FQDN form), if present.
        hostname = hostname[:-1]
    label_re = re.compile("(?!-)[A-Z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(label_re.match(label) for label in hostname.split("."))
def rreplace(s, old, new, occurrence = 1):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
import re
def num_groups(aregex):
    """Count the capture groups in a regexp pattern."""
    compiled = re.compile(aregex)
    return compiled.groups
def attr_visitor_name(attr_name: str) -> str:
    """
    Return the visitor-method name for `attr_name`, e.g.::

        >>> attr_visitor_name('class')
        'attr_class'
    """
    return f'attr_{attr_name}'
from typing import List
def mk_string(strings: List[str], separator: str) -> str:
    """
    Create a string from a list of strings, with a separator in between
    elements.

    :param strings: the list of strings.
    :param separator: the separator.
    :return: the string of elements separated by the separator.
    """
    # str.join is the idiomatic, O(n) replacement for the original
    # quadratic concatenation loop.
    return separator.join(strings)
def Normalize(df):
    """
    Min-max normalize a pandas dataframe, column-wise, to [0, 1].

    Arguments
    ----------
    - df: pandas dataframe

    Return
    ----------
    - df: the normalized dataframe
    """
    minimum = df.min()
    maximum = df.max()
    return (df - minimum) / (maximum - minimum)
def _filter_featured_downloads(lst):
"""Filter out the list keeping only Featured files."""
ret = []
for item in lst:
if 'Featured' in item['labels']:
ret.append(item)
return ret | d722fdd01966f1650575912715f8a6f07d793dda | 700,475 |
def handle_no_range(node):
    """Return stl_node with range set to [0, 0] if either bound is unset."""
    # `is None` instead of `== None`: identity check (PEP 8).
    if node.range_start is None or node.range_end is None:
        node.range_start = 0
        node.range_end = 0
    return node
def merge_kwargs(kwargs, defaults):
    """Merge ``kwargs`` into ``defaults``.

    Args:
        kwargs: Keyword arguments.
        defaults: Default keyword arguments (or None).

    Returns:
        Merged keyword arguments (``kwargs`` overrides ``defaults``);
        ``kwargs`` itself when defaults is None.
    """
    if defaults is None:
        return kwargs
    merged = defaults.copy()
    merged.update(kwargs)
    return merged
def format_decimal(value):
    """Round value to 2 decimal places and return it as a float."""
    return float(f"{value:.2f}")
import re
def rename_pretrained(name: str):
    """Map a variable name from the pre-trained MobileNet checkpoints to the
    corresponding variable name in this network.

    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md

    Parameters
    ----------
    name: the original name of the variable.

    Returns
    -------
    name: the new name of the variable.
    """
    # Applied in order; later rules see the result of earlier ones.
    replacements = (
        (r"BatchNorm", r"batch_normalization"),
        (r"Conv2d_(\d+)_(\w+)/", r"Conv2d_\1/\2/"),
        (r"Conv2d_1c_1x1/biases", r"conv2d_final/bias"),
        (r"Conv2d_1c_1x1/weights", r"conv2d_final/kernel"),
        (r"depthwise_weights", r"weights"),
    )
    for pattern, substitution in replacements:
        name = re.sub(pattern, substitution, name)
    return name
def build_table_def(table_name, keys, additional_attributes=None, global_secondary_indices=None):
    """
    Create a minimal dynamodb table definition suitable for use with localstack.

    Args:
        table_name: The full name of the test table
        keys: Key definitions — a list of 4-tuples
            (<name>, <key_type>, <data_type>, <index_name>),
            e.g. ('cz_res_id', 'HASH', 'S', 'MyGSI')
        additional_attributes: attributes, beyond the keys, that need to be
            defined, as (<name>, <data_type>) pairs
        global_secondary_indices: keys for which global secondary indices
            will be generated; must be a subset of `keys`.
    Returns:
        A dict containing the table def — suitable for use with boto3.
    """
    throughput = {
        'ReadCapacityUnits': 1,
        'WriteCapacityUnits': 1
    }
    attribute_defs = [{'AttributeName': k[0], 'AttributeType': k[2]} for k in keys]
    attribute_defs += [{'AttributeName': a[0], 'AttributeType': a[1]}
                       for a in (additional_attributes or [])]
    table_def = {
        'TableName': table_name,
        'KeySchema': [{'AttributeName': k[0], 'KeyType': k[1]} for k in keys],
        'AttributeDefinitions': attribute_defs,
        'ProvisionedThroughput': throughput,
    }
    if global_secondary_indices:
        table_def['AttributeDefinitions'] += [
            {'AttributeName': k[0], 'AttributeType': k[2]}
            for k in global_secondary_indices
        ]
        table_def['GlobalSecondaryIndexes'] = [
            {
                'IndexName': f'{k[3]}',
                'ProvisionedThroughput': throughput,
                'KeySchema': [{'AttributeName': k[0], 'KeyType': k[1]}],
                'Projection': {'ProjectionType': 'ALL'},
            }
            for k in global_secondary_indices
        ]
    return table_def
def execution_has_failures(playbook_results):
    """Return True when any result is 2 (ansible-playbook's failure code).

    Accepts either a single result or a list of results.
    """
    # isinstance instead of type(...) != list (idiomatic type check).
    if not isinstance(playbook_results, list):
        playbook_results = [playbook_results]
    return 2 in playbook_results
def parse_load_balancer_name(load_balancer_arn):
    """
    Parse the name out of a load balancer ARN.

    Example:
        ARN: 'arn:aws:elasticloadbalancing:us-east-1:881508045124:loadbalancer/app/alb-1/72074d479748b405'
        Name: 'alb-1'

    :return: load balancer name (second-to-last '/'-separated segment)
    """
    segments = load_balancer_arn.split('/')
    return segments[-2]
from pathlib import Path
def get_paths(dir_name, glob):
    """Return a generator of the paths under ./dir_name matching glob, recursively."""
    base = Path(f'./{dir_name}')
    return base.rglob(glob)
def inverse_relation(dst, rel):
    """
    Similar to :meth:``forwards_relation`` but selects
    the source nodes instead, given a destination node.

    :param dst: The destination node.
    :param rel: The relation (used verbatim as the table name).
    :return: (sql, params) tuple for a parameterized query.
    """
    query = 'SELECT src FROM %s WHERE dst = ?' % rel
    return query, (dst,)
import pickle
def load_model(model_path):
    """Load a pickled model from disk.

    WARNING: pickle can execute arbitrary code; only load trusted files.

    Parameters:
        model_path - path to the model file
    Returns: loaded model
    """
    with open(model_path, 'rb') as handle:
        model = pickle.load(handle)
    return model
def get_package_attribute(name):
    """Retrieve a package attribute (e.g. ``__version__``) from darwin/__init__.py.

    Returns None when no line starts with `name`.
    """
    import ast
    with open("darwin/__init__.py") as init_file:
        for line in init_file:
            if line.startswith(name):
                # literal_eval instead of eval: the right-hand side is
                # expected to be a literal, and eval would execute
                # arbitrary code from the file.
                return ast.literal_eval(line.split("=")[-1].strip())
import re
def isBaidu(url):
    """Return True if this url matches the pattern for Baidu searches.

    Example: http://www.baidu.com/s?wd=mao+is+cool&rsv_bp=0&ch=&tn=baidu
    """
    # Raw string avoids invalid-escape warnings; `is not None` replaces
    # comparison with None.
    pattern = r'http://www.baidu.com/s\?wd=[\S+]+'
    return re.match(pattern, url) is not None
from typing import List
from typing import Tuple
from typing import Any
def min_w_ind(lst: List) -> Tuple[int, Any]:
    """
    Return the index of the minimum value, followed by the value itself.

    (The original docstring said "max value" and reversed the order; the
    actual behavior — (index, min value) — is kept for compatibility.)

    :param lst: The list to search
    :return: (index, min value)
    """
    val = min(lst)
    return lst.index(val), val
def count(iterable):
    """Return the number of items in `iterable` (consumes it)."""
    total = 0
    for _ in iterable:
        total += 1
    return total
import torch
def xy_to_cxcy(xy):
    """
    Convert bounding boxes from boundary coordinates (x_min, y_min, x_max, y_max)
    to center-size coordinates (c_x, c_y, w, h).

    :param xy: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)
    :return: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)
    """
    centers = (xy[:, 2:] + xy[:, :2]) / 2
    sizes = xy[:, 2:] - xy[:, :2]
    return torch.cat([centers, sizes], 1)
def selection(triple, variables):
    """Apply selection on an RDF triple: bind each non-None variable to the
    corresponding triple component and return the set of bindings."""
    return {(var, component)
            for var, component in zip(variables, triple)
            if var is not None}
from bs4 import BeautifulSoup
import codecs
def get_saml_response(html):
    """
    Parse the SAMLResponse value out of a Shibboleth page.

    >>> get_saml_response('<input name="a"/>')
    >>> get_saml_response('<body xmlns="bla"><form><input name="SAMLResponse" value="eG1s"/></form></body>')
    'xml'
    """
    soup = BeautifulSoup(html, "html.parser")
    field = soup.find('input', attrs={'name': 'SAMLResponse'})
    if field is not None:
        saml_base64 = field.get('value')
        return codecs.decode(saml_base64.encode('ascii'), 'base64').decode('utf-8')
def _exception_to_dict(exception):
"""Convert exception to an error object to be sent as response to APIs."""
return {"error": {"type": type(exception).__name__,
"message": exception}} | 4abb81516b604bdfd2f6360a1272556b0b6f6932 | 700,508 |
def _any_none(*args):
"""Returns a boolean indicating if any argument is None"""
for arg in args:
if arg is None:
return True
return False | fadb9330fc1f4ffca2cf0105d8513d0be0d5fae1 | 700,510 |
def precision(judged_data):
    """
    Compute precision over a graded 0-3 relevance scale, where judgments
    of 1-3 count as relevant and anything below 1 is irrelevant.

    Args:
        judged_data (pandas.DataFrame): DataFrame with at minimum a
            "judgment" column (plus query/domain/result columns).
    Returns:
        Precision of the result set as a float; 1.0 for an empty set.
    """
    total = len(judged_data)
    if not total:
        return 1.0
    relevant = len(judged_data[judged_data["judgment"] >= 1])
    return relevant / float(total)
def _GetAndroidVersionFromMetadata(metadata):
"""Return the Android version from metadata; None if is does not exist.
In Android PFQ, Android version is set to metadata in master
(MasterSlaveLKGMSyncStage).
"""
version_dict = metadata.GetDict().get('version', {})
return version_dict.get('android') | e1d1ed9d0bbf2f65d646c11007739f6b5a9b78ec | 700,514 |
from functools import reduce
def get_in(d, t, default=None):
    """
    Look up a tuple of keys in a nested dictionary, each tuple item one
    layer deeper.

    example: get_in({1: {2: 3}}, (1, 2)) -> 3
    example: get_in({1: {2: 3}}, (2, 3)) -> None (the default)

    Note: a looked-up value of False is returned as-is, but any other
    falsy value (0, '', {}, ...) yields the default.
    """
    node = d
    for part in t:
        node = node.get(part, {})
    if node is False:
        return node
    if not node:
        return default
    return node
def divisible_by_five(n: int) -> bool:
    """Return True if an integer is divisible by 5, False otherwise."""
    return n % 5 == 0
def _should_package_clang_runtime(ctx):
"""Returns whether the Clang runtime should be bundled."""
# List of crosstool sanitizer features that require packaging some clang
# runtime libraries.
features_requiring_clang_runtime = {
"asan": True,
"tsan": True,
"ubsan": True,
}
for feature in ctx.features:
if feature in features_requiring_clang_runtime:
return True
return False | 7f54b903f4acc288abca294d857e774ce3daa8b3 | 700,517 |
import math
def round_nearest_towards_infinity(x, infinity=1e+20):
    """Round the argument to the nearest integer, ties toward +infinity.

    Values at or beyond +/- infinity are clamped to +/- infinity.

    Args:
        x: the value to round
        infinity: the model's infinity value
    Returns:
        an integer value (or the clamped infinity float)

    Example:
        round_nearest(0) = 0
        round_nearest(1.1) = 1
        round_nearest(1.5) = 2
        round_nearest(1.49) = 1
    """
    if x == 0:
        return 0
    if x >= infinity:
        return infinity
    if x <= -infinity:
        return -infinity
    # floor(x + 0.5) rounds halves upward ("towards infinity").
    return int(math.floor(x + 0.5))
def create_html_popup(friends: list) -> str:
    """Create an HTML popup body for a map marker.

    Args:
        friends (list): list of names
    Returns:
        str: html in string format
    """
    parts = ["Friends:"]
    for friend in friends:
        parts.append(f"""<br>
<p>{friend}</p><br>
""")
    return "".join(parts)
def euler_step(theta, dtheta, ddtheta, dt):
    """
    Take one explicit Euler integration step.

    Parameters
    ----------
    theta (tf.Tensor): joint angles, (N, nq)
    dtheta (tf.Tensor): joint velocities, (N, nq)
    ddtheta (tf.Tensor): joint accelerations, (N, nq)
    dt (float): delta t

    Returns
    -------
    (thetaNext, dthetaNext): next joint angles and velocities,
        each (N, nq)
    """
    theta_next = theta + dt * dtheta
    dtheta_next = dtheta + dt * ddtheta
    return theta_next, dtheta_next
def beta1() -> float:
    """Return the mocked beta1 value (0.9)."""
    return 0.9
import base64
def b64s_to_s(b64s: str) -> str:
    """Decode a base-64 string into a plain (UTF-8) string.

    :param b64s: input base 64 string
    :return: decoded string
    """
    raw = base64.b64decode(b64s.encode('utf8'))
    return raw.decode('utf8')
def first_or_default(iterable, predicate=None, default=None):
    """Return the first item matching *predicate*, or *default* if none does.

    :param iterable: The items over which to iterate.
    :param predicate: A predicate applied to each item; a falsy predicate
        matches every item.
    :param default: The value to return if no item matches.
    :return: The first matching value, otherwise the default value.
    """
    if not predicate:
        return next(iter(iterable), default)
    return next((item for item in iterable if predicate(item)), default)
def parse_simple_expression_list(l):
    """
    Parse a comma-separated list of simple_expressions from lexer *l*,
    returning a list of strings.

    At least one simple_expression must be present; a trailing comma
    with no following expression simply ends the list.
    """
    expressions = [l.require(l.simple_expression)]
    while l.match(','):
        expression = l.simple_expression()
        if not expression:
            break
        expressions.append(expression)
    return expressions
from typing import Iterable
from typing import Any
from typing import List
import itertools
def flatten(val: Iterable[Iterable[Any]]) -> List[Any]:
    """
    Flattens an iterable of iterables into a single list.

    Uses ``itertools.chain.from_iterable`` so the outer iterable is
    consumed lazily rather than being unpacked into call arguments
    (``chain(*val)`` materializes every inner iterable reference first).

    >>> flatten( [['abc','def'],[12,34,46],[3.14, 2.22]])
    ['abc', 'def', 12, 34, 46, 3.14, 2.22]
    """
    return list(itertools.chain.from_iterable(val))
import io
import csv
def to_csv(members):
    """Convert a list of dicts to a CSV string.

    The header row comes from the first member's keys; each member
    contributes one data row of its values. An empty list yields ''.
    """
    buffer = io.StringIO()
    writer = csv.writer(buffer)
    if members:
        writer.writerow(members[0].keys())
        writer.writerows(member.values() for member in members)
    return buffer.getvalue()
def split_sentences(inp_text, nlp_model):
    """
    Split an input string into sentences as determined by a spacy model.

    :param inp_text: string with input text
    :param nlp_model: sciSpacy model (callable returning a doc with .sents)
    :return: list of sentences in string format
    """
    return [sentence.text for sentence in nlp_model(inp_text).sents]
def _check_selection(selection):
"""Handle default and validation of selection"""
available = ["counts", "exposure", "background"]
if selection is None:
selection = available
if not isinstance(selection, list):
raise TypeError("Selection must be a list of str")
for name in selection:
if name not in available:
raise ValueError("Selection not available: {!r}".format(name))
return selection | 454b36833a5117b2d1bab377094e8e3ec0c06969 | 700,542 |
from typing import Dict
from typing import Any
def _make_version_config(version,
scaling: str,
instance_tag: str,
instances: int = 10) -> Dict[str, Any]:
"""Creates one version config as part of an API response."""
return {scaling: {instance_tag: instances}, 'id': version} | 1d4f613f35ace2f540629e6c79b3f571b3c2a255 | 700,546 |
def sec_from_hms(start, *times):
    """Return a list of times built by adding each offset tuple to *start*.

    *start* is in seconds. Each offset tuple may be (hours,),
    (hours, minutes) or (hours, minutes, seconds); elements beyond the
    third are ignored.
    """
    unit_seconds = (3600, 60, 1)
    return [
        start + sum(units * amount for units, amount in zip(unit_seconds, t))
        for t in times
    ]
def remove_missing_targets(this_data, target_var):
    """
    Drop rows whose target value or Date is missing.

    Parameters
    ----------
    this_data : dataframe
        The raw data which has been compiled from Yahoo!Finance
    target_var : string
        Column name of target variable

    Returns
    -------
    dataframe
        A dataframe without missing targets or missing dates
    """
    keep = this_data[target_var].notnull() & this_data["Date"].notnull()
    return this_data[keep]
def remove_child_items(item_list):
    """
    For a list of filesystem items, drop duplicates and any item that is
    a child of another item in the list.

    Eg, remove_child_items(['/path/to/some/item/child',
    '/path/to/another/item', '/path/to/some/item']) returns
    ['/path/to/another/item', '/path/to/some/item'].
    If one of the items is root, then it wins. All items should be full
    paths starting at root (/); any that aren't are removed.
    """
    if '/' in item_list:
        return ['/']
    # Sorted + deduplicated absolute paths; lexicographic order puts each
    # parent before any of its children.
    candidates = sorted({p for p in item_list if p.startswith('/')})
    kept = []
    for path in candidates:
        if not any(path.startswith(parent + '/') for parent in kept):
            kept.append(path)
    return kept
def isen_nozzle_mass_flow(A_t, p_t, T_t, gamma_var, R, M):
    """
    Mass flow through a nozzle isentropically expanding a given flow.

    Input variables:
        A_t       : nozzle throat area
        p_t       : pressure at throat
        T_t       : temperature at throat
        gamma_var : ratio of specific heats
        R         : perfect gas constant
        M         : Mach number at throat
    """
    exponent = -((gamma_var + 1) / (2 * (gamma_var - 1)))
    stagnation_term = (1 + (((gamma_var - 1) / 2) * (M**2))) ** exponent
    return (A_t * p_t * (T_t**0.5)) * ((gamma_var / R)**0.5) * M * stagnation_term
def calc_overturning_stf(ds,grid,doFlip=True):
    """
    Only for simple domains, compute meridional overturning streamfunction

    Parameters
    ----------
    ds : xarray Dataset from MITgcm output, via
        e.g. xmitgcm.open_mdsdataset
        must contain 'V' or 'VVELMASS' fields
    grid : xgcm grid object defined via xgcm.Grid(ds)
    doFlip : if true, compute by accumulating from bottom to top

    Output
    ------
    ov_stf : xarray DataArray containing 2D field with
        overturning streamfunction in Sv
    """
    # Grab the right velocity field from dataset ('V' preferred over 'VVELMASS')
    if 'V' in ds.keys():
        vstr = 'V'
    elif 'VVELMASS' in ds.keys():
        vstr = 'VVELMASS'
    else:
        raise TypeError('Could not find recognizable velocity field in input dataset')
    # Compute volumetric transport per cell: v * dxG * drF
    v_trsp = ds[vstr] * ds['dxG'] * ds['drF']
    if vstr != 'VVELMASS':
        # NOTE(review): 'VVELMASS' presumably already carries the hFacS
        # partial-cell fraction, so it is only applied for plain 'V' —
        # confirm against MITgcm output conventions.
        print(f' *** Multiplying {vstr} by hFacS***')
        v_trsp = v_trsp * ds['hFacS']
    # Zonal sum: collapse the x dimension
    v_trsp = v_trsp.sum(dim=['XC'])
    # flip dim, accumulate in vertical, flip back (bottom-to-top integration)
    if doFlip:
        v_trsp = v_trsp.isel(Z=slice(None,None,-1))
    ov_stf = grid.cumsum(v_trsp,'Z',boundary='fill')
    if doFlip:
        ov_stf = -ov_stf.isel(Zl=slice(None,None,-1))
    # Convert m^3/s to Sv (1 Sv = 10^6 m^3/s)
    ov_stf = ov_stf * 10**-6
    return ov_stf
def value_at_diviner_channels(xarr):
    """Return value of xarr interpolated at each diviner channel."""
    # Single-wavelength channels plus band midpoints, all in microns.
    narrow = [3, 7.8, 8.25, 8.55]
    broad_bands = [(13, 23), (25, 41), (50, 100), (100, 400)]
    centers = narrow + [(lo + hi) / 2 for lo, hi in broad_bands]
    return xarr.interp({"wavelength": centers})
from typing import List
def txt2list(path: str) -> List:
    """
    Read a text file and return its non-empty lines as a list.
    """
    with open(path, "r") as handle:
        content = handle.read()
    return [line for line in content.split("\n") if line]
def obtain_value(entry):
    """Extract a numeric value from an entry string.

    The entries could be like: '81.6', ': ', '79.9 e', ': e'.

    Returns the leading token as a float, or None for empty entries and
    for the ':' missing-value marker.
    """
    # maxsplit=1 stops after the first token; the original maxsplit=-1 was
    # just the "no limit" default and split the whole string needlessly.
    value = entry.split(' ', 1)[0]  # Discard notes.
    if not value or value == ':':
        return None
    return float(value)
import re
def RemoveTime(output):
    """Removes all time information from a Google Test program's output."""
    time_pattern = re.compile(r'\(\d+ ms')
    return time_pattern.sub('(? ms', output)
def IsInclude(line):
    """Return True if line is an include of another file."""
    include_prefix = "@include "
    return line.startswith(include_prefix)
def err_ratio(predict, dataset, examples=None, verbose=0):
    """Return the proportion of the examples that are NOT correctly predicted.
    verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
    examples = examples or dataset.examples
    if not examples:
        return 0.0
    correct = 0
    for example in examples:
        desired = example[dataset.target]
        output = predict(dataset.sanitize(example))
        if output == desired:
            correct += 1
            if verbose >= 2:
                print(' OK: got {} for {}'.format(desired, example))
        elif verbose:
            print('WRONG: got {}, expected {} for {}'.format(
                output, desired, example))
    return 1 - (correct / len(examples))
def _get_label_from_dv(dv, i):
"""Return label along with the unit of the dependent variable
Args:
dv: DependentVariable object.
i: integer counter.
"""
name, unit = dv.name, dv.unit
name = name if name != "" else str(i)
label = f"{name} / ({unit})" if unit != "" else name
return label | fd18e3d4b5f61bec6febc27b1d7d86b378205a0a | 700,572 |
def get_seconds(value, scale):
    """Convert a (value, scale) pair into seconds.

    scale must be one of 'seconds', 'minutes', 'hours', 'days', 'weeks',
    'months' (30 days) or 'years' (365 days).
    """
    multipliers = {
        'seconds': 1,
        'minutes': 60,
        'hours': 60 * 60,
        'days': 60 * 60 * 24,
        'weeks': 60 * 60 * 24 * 7,
        'months': 60 * 60 * 24 * 30,
        'years': 60 * 60 * 24 * 365,
    }
    return value * multipliers[scale]
def points_2_inches(points) -> float:
    """
    Convert a length in typographic points to inches (72 points per inch).
    """
    POINTS_PER_INCH = 72
    return points / POINTS_PER_INCH
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.