content
stringlengths 39
14.9k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
from typing import Iterable
def filter_array(func, arr: Iterable) -> list:
    """Return a list of the elements of ``arr`` for which ``func`` is truthy.

    ``func`` is called once per element and decides whether the element
    belongs in the result.
    """
    return [item for item in arr if func(item)]
def slices(series: str, length: int) -> list:
    """slices - a.k.a Grouped Slices -

    :param series: str: source string
    :param length: int: size of each slice; must satisfy 1 <= length <= len(series)
    :returns: A list of every contiguous substring of ``series`` of exactly
        ``length`` characters
    :raises ValueError: if ``length`` is out of range. Length 0 is rejected
        too: the previous range check accepted it and silently produced
        len(series)+1 meaningless empty strings.
    """
    if not 1 <= length <= len(series):
        raise ValueError(f'Length {length} not in range for this series')
    return [series[start:start + length]
            for start in range(len(series) - length + 1)]
def s2human(time):
    """Convert a duration in seconds into a short human-readable string."""
    units = ((86400, 'd'), (3600, 'h'), (60, 'm'))
    for seconds_per_unit, suffix in units:
        if time >= seconds_per_unit:
            return '{}{}'.format(int(time / seconds_per_unit), suffix)
    return '{}s'.format(int(time))
def i_priority_node(g, i):
    """
    Returns all nodes of priority i in game graph g.

    :param g: the game graph; ``g.nodes`` maps node key -> (player, priority).
    :param i: the requested priority.
    :return: a list of node keys of priority i in g.
    """
    nodes = g.nodes  # Nodes from g
    # dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() is the equivalent.
    # get all node indexes in node tuple (index, (node_player, node_priority)) when node_priority is i
    return [k for k, v in nodes.items() if v[1] == i]
def sum_path(G, path):
    """
    Return the total 'weight' over the consecutive edges of ``path`` in ``G``.
    """
    total = 0
    for src, dst in zip(path, path[1:]):
        total += G[src][dst]['weight']
    return total
def is_ccw(signed_area):
    """Tell whether a ring with the given signed area is counterclockwise.

    Positive signed area means counterclockwise, negative means clockwise;
    zero is degenerate and carries no orientation.

    Raises:
        ValueError: when ``signed_area`` is zero.
    """
    if signed_area == 0:
        raise ValueError("Degeneracy: No orientation based on area")
    return signed_area > 0
def age_window_hit(by_predicted, by_truth):
    """
    Check whether a predicted birth year lies inside the tolerance window
    centred on the true birth year; the half-width m shrinks linearly with
    the true year (m = -0.1 * by_truth + 202.8).

    :param by_predicted: the predicted birth year
    :param by_truth: the true birth year
    :return: True if by_predicted is within the m-window of by_truth
    """
    half_width = -0.1 * by_truth + 202.8
    lower = int(by_truth - half_width)
    upper = int(by_truth + half_width)
    return lower <= by_predicted <= upper
import sqlite3
def get_con_cur(db_filename):
    """Open the sqlite database at ``db_filename``.

    Args:
        db_filename: (str) the filename of the db to which to connect

    Returns: a tuple of:
        -an open connection to the sqlite database
        -an open cursor associated with the connection
    """
    connection = sqlite3.connect(db_filename)
    return (connection, connection.cursor())
def small_straight(dice):
    """Score the given roll in the 'Small Straight' category.

    Exactly the faces 1-5 (in any order) score the sum of the dice (15);
    any other roll scores 0.
    """
    return sum(dice) if sorted(dice) == [1, 2, 3, 4, 5] else 0
def format_internal_tas(row):
    """Concatenate TAS components into a single field for internal use."""
    # This formatting should match formatting in dataactcore.models.stagingModels concatTas
    ata = row['allocation_transfer_agency'] or '000'
    aid = row['agency_identifier'] or '000'
    bpoa = row['beginning_period_of_availa'] if row['beginning_period_of_availa'].strip() else '0000'
    epoa = row['ending_period_of_availabil'] if row['ending_period_of_availabil'].strip() else '0000'
    atc = row['availability_type_code'].strip() or ' '
    main = row['main_account_code'] or '0000'
    sub = row['sub_account_code'] or '000'
    return ata + aid + bpoa + epoa + atc + main + sub
def form(self, lab="", **kwargs):
    """Specifies the format of the file dump.

    APDL Command: FORM

    Parameters
    ----------
    lab
        Format:

        RECO - Basic record description only (minimum output) (default).

        TEN - Same as RECO plus the first ten words of each record.

        LONG - Same as RECO plus all words of each record.

    Notes
    -----
    Specifies the format of the file dump (from the DUMP command).
    """
    return self.run(f"FORM,{lab}", **kwargs)
def percent(values, p=0.5):
    """Return the value a fraction ``p`` of the way between the min and max
    values in ``values`` (linear interpolation)."""
    lo, hi = min(values), max(values)
    return lo + p * (hi - lo)
def fns2dict(*functions) -> dict:
    """
    Build a mapping of function name -> function from the functions given
    as positional arguments.

    Return:
        Dict[str, Callable]
    """
    mapping = {}
    for fn in functions:
        mapping[fn.__name__] = fn
    return mapping
def maxabs(vals):
    """Convenience function: the maximum of the absolute values in ``vals``."""
    return max(abs(value) for value in vals)
def get_row_sql(row):
    """Build the SQL column definition for one row of PROC CONTENTS."""
    # Timestamps are stored as plain text on the Postgres side.
    column_type = 'text' if row['postgres_type'] == 'timestamp' else row['postgres_type']
    return '{} {}'.format(row['name'].lower(), column_type)
def resolve_relative_path(filename):
"""
Returns the full path to the filename provided, taken relative to the current file
e.g.
if this file was file.py at /path/to/file.py
and the provided relative filename was tests/unit.py
then the resulting path would be /path/to/tests/unit.py
"""
r = __file__.rsplit("/", 1) # poor man's os.path.dirname(__file__)
head = r[0]
if len(r) == 1 or not head:
return filename
return "%s/%s" % (head, filename) | 447df7fb94dbb3a0796c5207a99062b04dfbbf50 | 700,111 |
import torch
def normalize(x: torch.Tensor) -> torch.Tensor:
    """Normalize each row of ``x`` by its L2-norm.

    Args:
        x: The batch of vectors to be normalized, shape (N, D).

    Returns:
        The normalized tensor of the same shape.
    """
    row_norms = x.pow(2).sum(1, keepdim=True).pow(0.5)
    return x / row_norms
import uuid
def label(project):
    """Label fixture for project label API resource tests."""
    unique = uuid.uuid4().hex
    payload = {
        "name": "prjlabel%s" % unique,
        "description": "prjlabel1 %s description" % unique,
        "color": "#112233",
    }
    return project.labels.create(payload)
def vfid_set(session, vfid):
    """Assign a new VFID to a session.

    :param session: dictionary of session returned by :func:`login`
    :param vfid: new VFID to be assigned to the session
    :rtype: str
    :returns: the literal string "Success"
    """
    session["vfid"] = vfid
    return "Success"
def bool_list_item_spec(bool_item_spec):
    """A specification for a required list of boolean items."""
    spec = {'required': True, 'items': bool_item_spec}
    return {'my_bools': spec}
def class_fullname(obj):
    """Return the full class name of ``obj`` as 'module.ClassName'."""
    return f"{obj.__module__}.{obj.__class__.__name__}"
def get_model_ref(data, name_weights):
    """
    Return the model reference matching a (model_name, model_weights_path)
    pair, or None when no entry matches.

    data - list of tuples (model_name, model_weights_path, model_ref)
    """
    matches = (entry[2] for entry in data if name_weights == entry[:2])
    return next(matches, None)
def parse_punishment(argument):
    """Convert a punishment name (case-insensitive) to its numeric code.

    Raises KeyError for unknown names.
    """
    codes = {
        "none": 0,
        "note": 1,
        "warn": 1,
        "mute": 2,
        "kick": 3,
        "ban": 4,
    }
    return codes[argument.lower()]
import configparser
def get_api_config(filename):
    """
    Attempt to pull in the twitter app API key and secret from a config
    file. If the key and secret don't exist there, prompt the user for them
    interactively and persist them back to the file.

    Arguments:
    filename -- name of the config file to try and parse

    Returns:
    config_api_store -- dict with keys CONSUMER_KEY and CONSUMER_SECRET
    """
    config_api_store = {}
    config_twiter_api = configparser.ConfigParser()
    config_twiter_api.read(filename)
    # Try and find the API key and secret in the config file
    try:
        config_api_store["CONSUMER_KEY"] = config_twiter_api['DEFAULT']['CONSUMER_KEY']
        config_api_store["CONSUMER_SECRET"] = config_twiter_api['DEFAULT']['CONSUMER_SECRET']
    # If we can't find them, prompt for them and write them in to the configuration file
    except KeyError:
        print("Visit https://apps.twitter.com/ to create an application and aquire these values (API key and API secret)")
        config_api_store["CONSUMER_KEY"] = input("Please enter a valid twitter app API key: ")
        config_api_store["CONSUMER_SECRET"] = input("Please enter a valid twitter app API secret: ")
        # Persist the entered credentials so the next run can skip the prompt.
        api_config_file = configparser.ConfigParser()
        api_config_file['DEFAULT'] = {'CONSUMER_KEY': config_api_store["CONSUMER_KEY"], 'CONSUMER_SECRET': config_api_store["CONSUMER_SECRET"]}
        with open(filename, 'w') as configfile:
            api_config_file.write(configfile)
    return config_api_store
def const(a, b):
    """``const :: a -> b -> a``

    Constant function: ignore ``b`` and return ``a`` unchanged.
    """
    return a
import torch
def sharpness(predictions: list, total=True):
    """
    Mean width of the prediction intervals, a.k.a. sharpness (lower is better).

    Parameters
    ----------
    predictions : list
        - predictions[0] = y_pred_upper, predicted upper limit of the target variable (torch.Tensor)
        - predictions[1] = y_pred_lower, predicted lower limit of the target variable (torch.Tensor)
    total : bool, default = True
        - True  -> return the overall sharpness (scalar)
        - False -> return the sharpness along the horizon (1d tensor)

    Returns
    -------
    torch.Tensor
        A scalar when ``total`` is True, otherwise a 1d tensor over the
        horizon, which is generally expected to increase along the horizon.
    """
    assert len(predictions) == 2
    upper, lower = predictions
    interval_width = upper - lower
    if total:
        return torch.mean(interval_width)
    return torch.mean(interval_width, dim=0)
import asyncio
async def _createServer(host, port):
    """
    Create an async server listening on host:port that reads one line from
    each client and stores it in a future, so tests can later inspect what
    the client sent.

    :param host: interface to bind the server to
    :param port: port to listen on
    :return: tuple (server, indicator) - the asyncio server and a Future
        that resolves to the first client request line (utf-8 decoded)
    """
    indicator = asyncio.Future()
    # Per-connection handler: read a single line and fulfil the future.
    async def _handle(reader, writer):
        raw = await reader.readline()
        request = raw.decode("utf-8")
        indicator.set_result(request)
    server = await asyncio.start_server(_handle, host, port)
    return server, indicator
def _exclude_swift_incompatible_define(define):
    """A `map_each` helper that excludes a define if it is not Swift-compatible.

    Only defines of the form `FOO` or `FOO=1` survive; in C-family languages
    the option `-DFOO` is equivalent to `-DFOO=1`, so both spellings must be
    preserved.

    Args:
        define: A string of the form `FOO` or `FOO=BAR` that represents an
            Objective-C define.

    Returns:
        The token portion of the define when it is Swift-compatible, `None`
        otherwise.
    """
    token, sep, value = define.partition("=")
    is_bare = not sep and not value
    is_explicit_one = sep == "=" and value == "1"
    if is_bare or is_explicit_one:
        return token
    return None
import re
def hex_color_code(value: str):
    """
    Hex color validator: find all hex color codes in ``value``.

    Example Result:
        ['#00ff00', '#fff']
    """
    pattern = re.compile(r'#(?:[A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})')
    return pattern.findall(value)
import re
import logging
def parse(fqdn):
    """Parses an M-Lab FQDN into its constituent parts.

    Args:
      fqdn: str, an M-Lab FQDN e.g., ndt-iupui-mlab1-den05.mlab-oti.measurement-lab.org

    Returns:
      dict representing the constituent parts (empty dict on parse failure).
    """
    # This regex *should* match all valid M-Lab domain names, for both nodes
    # and experiments, for both v1 and v2 names. It makes use of non-capturing
    # groups denoted by '(?:)'. Capturing groups may appear inside
    # non-capturing groups. NOTE: a raw string is required here - the old
    # plain string relied on the invalid escape sequence '\.', which emits a
    # SyntaxWarning on Python 3.12+.
    regex = r'(?:([a-z]+)(?:[.-]([a-z]+))?[.-])?(mlab[1-4])[.-]([a-z]{3}[0-9ct]{2})(?:\.(mlab-[a-z]+))?\.(.*)$'
    matches = re.match(regex, fqdn)
    if not matches or len(matches.groups()) != 6:
        logging.error('Failed to parse FQDN: %s', fqdn)
        return {}
    experiment, org, machine, site, project, domain = matches.groups()
    return {
        'experiment': experiment,
        'org': org,
        'machine': machine,
        'site': site,
        'project': project,
        'domain': domain,
    }
def buffer_type(request):
    """
    Fixture yielding types that support the buffer protocol.
    """
    param = request.param
    return param
def minargmin(sequence):
    """Return ``(min_value, first_index_of_min)`` for the input sequence.

    Returns ``(None, None)`` when the sequence is empty.
    """
    best_value, best_index = None, None
    for index, value in enumerate(sequence):
        if best_value is None or value < best_value:
            best_value, best_index = value, index
    return (best_value, best_index)
def _is_eqsine(opts):
    """
    Report whether the 'eqsine' option is set in ``opts``.

    Parameters
    ----------
    opts : dict
        Dictionary of :func:`pyyeti.srs.srs` options; can be empty.

    Returns
    -------
    flag : bool
        The stored 'eqsine' value, or False when the key is absent.
    """
    return opts.get("eqsine", False)
from typing import Callable
from typing import List
def generate_definition(cls: Callable) -> List[str]:
    """Generate one function-signature line per annotated field of a
    pydantic class; Optional fields get a ``= None`` default appended."""
    lines = []
    for field_name, annotation in cls.__annotations__.items():
        if "Optional" in annotation:
            lines.append(f"{field_name}: {annotation} = None,")
        else:
            lines.append(f"{field_name}: {annotation},")
    return lines
import shutil
def get_archive_name_and_format_for_shutil(path):
    """Split ``path`` into (archive base name, shutil format name) for
    shutil.make_archive().

    e.g. returns ('/path/to/boot-img', 'gztar') if |path| is
    '/path/to/boot-img.tar.gz'.

    Raises:
        ValueError: when no registered unpack format matches the extension.
    """
    for format_name, extensions, _ in shutil.get_unpack_formats():
        matching = [ext for ext in extensions if path.endswith(ext)]
        if matching:
            return path[:-len(matching[0])], format_name
    raise ValueError(f"Unsupported archive format: '{path}'")
def counting_sort(values, max_value):
    """Sort integers using the Counting Sort algorithm.

    Args:
      values: iterable, contains the integers to sort,
        each between 0 and max_value inclusive
      max_value: maximum value the numbers can take

    Returns:
      a sorted list of the numbers
    """
    counts = [0] * (max_value + 1)
    for value in values:
        counts[value] += 1
    result = []
    for value, count in enumerate(counts):
        result.extend([value] * count)
    return result
def find_missing_integer(lst):
    """Return the first missing integer in an ordered list.

    If no integer is missing, return the next integer after the maximum.

    Raises:
        ValueError: if ``lst`` is empty.
    """
    # The original used a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit; compute the gap set explicitly instead.
    missing = sorted(set(range(lst[0], lst[-1])) - set(lst)) if lst else []
    if missing:
        return missing[0]
    # No gap inside the range: fall back to the next integer.
    return max(lst) + 1
import logging
def parse_csv_data(csv_filename: str) -> list:
    """Read a csv file and return the first whitespace-separated token of
    each line.

    :param csv_filename: The name of a csv filename, '.csv' appendix is optional
    :type csv_filename: str
    :return: A list of strings, one per row of the csv file
    :rtype: list
    """
    logging.info("starting parse_csv_data function")
    filename = str(csv_filename)  # tolerate non-string input
    if not filename.endswith(".csv"):
        filename += ".csv"  # append the '.csv' appendix when missing
    with open(filename, "r", encoding='utf-8') as data:
        logging.info("parse_csv_data function finished")
        rows = data.read().splitlines()
    # return the first token of each row
    return [row.split()[0] for row in rows]
def sort_return_tuples(response, **options):
    """
    If ``groups`` is specified, regroup the flat ``response`` into a list of
    n-element tuples with n = options['groups']; otherwise (or when the
    response is empty) return it untouched.
    """
    n = options.get("groups")
    if not response or not n:
        return response
    columns = [response[i::n] for i in range(n)]
    return list(zip(*columns))
def _common_prefix(string_list):
    """
    Given a list of pathnames, return the longest common leading component.
    """
    if not string_list:
        return ""
    # The lexicographically smallest and largest strings bound all others,
    # so their shared prefix is shared by the whole list.
    lo, hi = min(string_list), max(string_list)
    for index, (a, b) in enumerate(zip(lo, hi)):
        if a != b:
            return lo[:index]
    return lo
def qualify(func: object) -> str:
"""Qualify a function."""
return ".".join((func.__module__, func.__qualname__)) | bfda7050ff94f407a2a0d4b00b87ecb0370e9110 | 700,179 |
def dimension(dim: float, tol: int = 0, step: float = 0.4) -> float:
    """
    Round ``dim`` down to the previous multiple of ``step``, then add
    ``tol`` additional steps as a tolerance to accommodate for shrinking.
    """
    # Nudge upward slightly so an exact multiple is not pushed down by
    # floating-point error in the division.
    nudged = dim + 1e-10
    whole_steps = nudged // step
    return whole_steps * step + tol * step
def shortid(obsid):
    """
    Compact format for the observation id, like QPT

    Parameters
    ----------
    obsid : string
        Program id string

    Returns
    -------
    shortid : string
        Compact format
    """
    parts = obsid.split('-')
    return f"{parts[0][-1]}{parts[1][2:]}-{parts[2]}-{parts[3]}[{parts[4]}]"
from typing import OrderedDict
def job_prep_release_status_list_table_format(result):
    """Format job prep-release-status list as a table."""
    rows = []
    for item in result:
        prep = item['jobPreparationTaskExecutionInfo']
        release = item['jobReleaseTaskExecutionInfo']
        row = OrderedDict()
        row['Pool Id'] = item['poolId']
        row['Node Id'] = item['nodeId']
        row['Job Prep State'] = prep['state'] if prep else ""
        row['Job Release State'] = release['state'] if release else ""
        rows.append(row)
    return rows
def rinko_p_prime(N, t, A, B, C, D, E, F, G, H):
    """
    Temperature correction for Rinko dissolved-oxygen output.

    Per RinkoIII manual: 'The film sensing the water is affect by environment
    temperature and pressure at the depth where it is deployed. Based on
    experiments, an empirical algorithm as following is used to correct data
    dissolved oxygen.'

    Parameters
    ----------
    N : array-like
        Raw instrument output
    t : array-like
        Temperature [degC]
    A-H : float
        Calibration parameters (E, G and H are not used by this formula but
        are kept for a uniform calibration interface)
    """
    temp_factor = 1 + D * (t - 25)
    return A / temp_factor + B / ((N - F) * temp_factor + C + F)
def _index(i, size, Cartesian=True):
    """Map an axis index for Cartesian ordering: indices 0 and 1 swap.

    Index 1 always maps to 0; index 0 maps to 1 only when there are at
    least two axes. Everything else (and all indices when Cartesian is
    False) passes through unchanged.
    """
    if not Cartesian:
        return i
    if i == 1:
        return 0
    if i == 0 and size >= 2:
        return 1
    return i
def nmap(value, fr=(0, 1), to=(0, 1)):
    """
    Map a value from one two-value interval into another.

    Both intervals are `(0, 1)` by default. Values outside the `fr` interval
    are still mapped proportionately (extrapolated).
    """
    fr_lo, fr_hi = fr
    to_lo, to_hi = to
    normalized = (value - fr_lo) / (fr_hi - fr_lo)
    return to_lo + normalized * (to_hi - to_lo)
def get_best_outputs(problem_dir, problem, user):
    """
    Gets outputs of best submission.

    :param problem_dir: main directory of submissions
    :param problem: id of problem
    :param user: user who wants to see submission for problem
    :return: -1 if any output file is missing, otherwise array of best
        outputs (each file's lines stripped and concatenated)
    """
    outputs = []
    try:
        for i in range(10):
            outputs.append('')
            # 'with' guarantees the handle is closed even when a read fails;
            # the original leaked the handle on any error between open/close.
            with open('{0}/best_out/out_{1}_{2}_{3}'.format(problem_dir, problem, user, i)) as f:
                for line in f:
                    outputs[i] += line.strip()
    except IOError:
        return -1
    return outputs
def computeFraction(feature_1, feature_2):
    """
    Ratio of feature_1 to feature_2.

    Parameters:
        Two numeric feature values (or the string "NaN" for missing data)
    Output:
        Return feature_1 / feature_2 as a float, or 0.0 when either
        feature is "NaN"
    """
    if feature_1 == "NaN" or feature_2 == "NaN":
        return 0.0
    return int(feature_1) / float(feature_2)
import base64
import json
def encode_transaction(value):
    """Encode a transaction (dict) to the Base64 string of its JSON form."""
    serialized = json.dumps(value).encode('utf8')
    return base64.b64encode(serialized).decode('utf8')
import random
def shuffle_sequence(sequence):
    """Return a random permutation of ``sequence``.

    Parameters
    ----------
    sequence : str
        Sequence to shuffle.

    Returns
    -------
    str
        Shuffled sequence.
    """
    characters = list(sequence)
    random.shuffle(characters)
    return "".join(characters)
def enable_cloud_admin_access(session, confirm, return_type=None, **kwargs):
    """
    Enables the ability of a storage cloud administrator to access the VPSA
    GUI of this VPSA to assist in troubleshooting. This does not grant access
    to any volume data. Enabled by default.

    :type session: zadarapy.session.Session
    :param session: A valid zadarapy.session.Session object. Required.

    :type confirm: bool
    :param confirm: If True, cloud admin access will be enabled. This is a
        safeguard for this function since it requires no other arguments.

    :type return_type: str
    :param return_type: If this is set to the string 'json', this function
        will return a JSON string. Otherwise, it will return a Python
        dictionary. Optional (will return a Python dictionary by default).

    :rtype: dict, str
    :returns: A dictionary or JSON data set as a string depending on
        return_type parameter.

    :raises ValueError: when ``confirm`` is not truthy.
    """
    if not confirm:
        raise ValueError('The confirm parameter is not set to True - '
                         'cloud admin access will not be enabled.')
    return session.post_api(path='/api/users/admin_access/enable.json',
                            return_type=return_type, **kwargs)
import torch
def align(src_tokens, tgt_tokens):
    """
    Mark the positions where two token sequences share a token.

    Returns:
        mask: ByteTensor of shape src_len x tgt_len, 1 where
        src_tokens[i] == tgt_tokens[j] and 0 elsewhere.
    """
    mask = torch.ByteTensor(len(src_tokens), len(tgt_tokens)).fill_(0)
    for i, src in enumerate(src_tokens):
        for j, tgt in enumerate(tgt_tokens):
            if src == tgt:
                mask[i][j] = 1
    return mask
from pathlib import Path
from typing import Dict
import yaml
def _load_yaml_doc(path: Path) -> Dict:
    """Load a single yaml document from ``path``."""
    with open(path, "r") as src:
        return yaml.load(src, Loader=yaml.FullLoader)
def getFloat (Float):
    """
    Float input verification: keep prompting until the user enters a valid
    float, then return it.

    usage: x = getFloat ('mensage to display ')
    """
    while True:
        try:
            user_input = float(input(Float))
            return user_input
        except ValueError:
            # Non-numeric input: tell the user and prompt again.
            print('Use only numbers and separete decimals with point')
import warnings
def weight_list(spam, weights, warn=True):
    """ Returns weighted list

    Args:
        spam(list): list to multiply with weights
        weights (list): of weights to multiply the respective distance with
        warn (bool): if warn, it will warn instead of raising error

    Returns:
        (list): weighted list; trailing items without a matching weight are
        left unweighted
    """
    if warn:
        if len(weights) > len(spam):
            warnings.warn("The list of weights is longer than the list, last weights are not used!!", RuntimeWarning)
        # BUG FIX: this branch previously re-tested '>' by copy-paste, so the
        # "shorter" warning could never fire.
        if len(weights) < len(spam):
            warnings.warn("The list of weights is shorter than the list, last items are not weighted!!", RuntimeWarning)
    try:
        for index, item in enumerate(spam):
            spam[index] = float(spam[index]) * float(weights[index])
    except IndexError:
        # Ran out of weights: remaining items stay unweighted.
        pass
    return spam
def __extract_digits__(string):
    """
    Extracts digits from beginning of string up until first non-digit
    character (or the end of the string).

    Parameters
    -----------------
    string : string
        Measurement string containing some digits and units

    Returns
    -----------------
    digits : int
        Digits at start of string
    """
    count = 0
    # Bound the scan by the string length: the original raised IndexError
    # whenever the whole string was digits.
    while count < len(string) and string[count].isdigit():
        count += 1
    return int(string[:count])
def status_in_range(value: int, lower: int = 100,
                    upper: int = 600) -> bool:
    """
    Validate that an HTTP status code lies within the given boundary,
    inclusive.
    """
    valid_codes = range(lower, upper + 1)
    return value in valid_codes
def _float_to_str(x):
    """
    Convert a float to str. For most numbers this results in a decimal
    representation (for xs:decimal) while for very large or very small
    numbers this results in an exponential representation suitable for
    xs:float and xs:double.
    """
    return f"{x}"
def unique_cluster_indices(cluster_indx):
    """
    Return the unique atom indices referenced anywhere in ``cluster_indx``,
    in first-seen order.

    :param cluster_indx: Cluster index list of ClusterExpansionSetting
        (nested as symmetry group -> size group -> cluster -> subcluster).
    """
    def _iter_indices():
        for symmgroup in cluster_indx:
            for sizegroup in symmgroup:
                for cluster in sizegroup:
                    if cluster is None:
                        continue
                    for subcluster in cluster:
                        yield from subcluster

    unique = []
    for indx in _iter_indices():
        if indx not in unique:
            unique.append(indx)
    return unique
def _calculate_atr(atr_length, highs, lows, closes):
    """Calculate the average true range over the last ``atr_length`` periods.

    atr_length : time period to calculate over (>= 1 and < len(closes))
    highs : list of highs
    lows : list of lows
    closes : list of closes
    """
    if atr_length < 1:
        raise ValueError("Specified atr_length may not be less than 1")
    if atr_length >= len(closes):
        raise ValueError("Specified atr_length is larger than the length of the dataset: " + str(len(closes)))
    total = 0
    for i in range(len(highs) - atr_length, len(highs)):
        prev_close = closes[i - 1]
        # True range: largest of high-low, high-prev_close, low-prev_close.
        true_range = max(abs(highs[i] - lows[i]),
                         abs(highs[i] - prev_close),
                         abs(lows[i] - prev_close))
        total += true_range
    return total / atr_length
import torch
from typing import Tuple
def get_tot_objf_and_finite_mask(tot_scores: torch.Tensor, reduction: str) -> Tuple[torch.Tensor, torch.Tensor]:
    """Total score (log-prob) over all successful supervision segments,
    i.e. those whose total score is neither NaN nor -infinity.

    Args:
        tot_scores: a Torch tensor of shape (num_segments,) containing total
            scores from forward-backward
        reduction: a reduction type ('mean', 'sum' or 'none'; 'none' leaves
            the scores untouched)
    Returns:
        A tuple (tot_score, finite_mask) where finite_mask marks the
        successful segments.

    Based on get_tot_objf_and_num_frames
    from https://github.com/k2-fsa/snowfall/blob/master/snowfall/objectives/common.py
    """
    finite_mask = ~torch.isnan(tot_scores) & torch.ne(tot_scores, -float("inf"))
    successful = tot_scores[finite_mask]
    if reduction == "mean":
        return successful.mean(), finite_mask
    if reduction == "sum":
        return successful.sum(), finite_mask
    return tot_scores, finite_mask
def traverse_file_structure(current, function, **inner_function_args):
    """Recursively traverse the given folder and apply ``function`` to every
    file that is found, replacing each file value in place.

    :param current: Source folder
    :type current: stibnite.file_operations.FolderType
    :param function: The function applied to the files of the current folder
    :type function: function
    :param inner_function_args: Arguments forwarded to the inner function
    :type inner_function_args: dictionary of sting to object
    :return: The same source folder
    :rtype: stibnite.file_operations.FolderType
    """
    for folder_name in current.folders:
        traverse_file_structure(current.get_element(folder_name), function, **inner_function_args)
    for file_name in current.files:
        current.files[file_name] = function(current.files[file_name], **inner_function_args)
    return current
def rdist(x, y):
    """Reduced Euclidean distance (squared distance, no square root).

    Parameters
    ----------
    x: array of shape (embedding_dim,)
    y: array of shape (embedding_dim,)

    Returns
    -------
    The squared euclidean distance between x and y
    """
    total = 0.0
    for i in range(x.shape[0]):
        diff = x[i] - y[i]
        total += diff * diff
    return total
import glob
def find_file(path):
    """
    Search for files matching a glob pattern.

    Parameters
    ----------
    path : str
        Path and pattern to find files.

    Returns
    -------
    str or list of str
        Sorted list of matching files, with forward-slash separators.
    """
    matches = glob.glob(path)
    if not matches:
        raise ValueError("!!! No files found in: {}".format(path))
    normalized = [match.replace("\\", "/") for match in matches]
    return sorted(normalized)
import hashlib
import json
def _hasher(obj):
    """Non-cryptographic, stable hash: md5 hexdigest of the JSON form."""
    payload = json.dumps(obj).encode()
    return hashlib.md5(payload).hexdigest()
def length_str(msec: float) -> str:
    """
    Convert a number of milliseconds into a human-readable representation of
    the length of a track (m:ss, or h:mm:ss for an hour and longer). A falsy
    ``msec`` (None or 0) renders as '0:00'.
    """
    total_seconds = (msec or 0) / 1000
    seconds = total_seconds % 60
    minutes = (total_seconds - seconds) / 60
    if minutes < 60:
        return '%i:%02d' % (minutes, seconds)
    rem_minutes = minutes % 60
    hours = (minutes - rem_minutes) / 60
    return '%i:%02d:%02d' % (hours, rem_minutes, seconds)
def row_sum(lst):
    """Sum of the non-missing (> -1) items in `lst`."""
    total = 0
    for item in lst:
        if item > -1:
            total += int(item)
    return total
def compress_dataframe_time_interval(processed_df, interval):
    """
    Resample a dataframe to a coarser time interval, averaging the rows that
    fall into each bucket. If data is originally in 1 minute intervals, the
    row count can be reduced by resampling to e.g. 15 minutes; the mean is
    taken to maintain data quality.

    Args:
        processed_df: Pandas dataframe containing a "Time" column with date ranges
        interval: Integer representing the new date range interval for the compressed dataframe

    Returns:
        Pandas dataframe with compressed time interval
    """
    rule = '{}min'.format(interval)
    return processed_df.resample(rule, on='Time').mean()
def iseast(bb1, bb2, north_vector=[0,1,0]):
    """Return True when bb1 is east of bb2.

    Assuming a north vector of [0, 1, 0], bb1 is east of bb2 when the
    minimum X of bb1 is greater than the maximum X of bb2.
    """
    # Only the traditional north vector (North along positive Y) is
    # supported; perspective-based directions may be added later.
    if north_vector != [0, 1, 0]:
        raise NotImplementedError
    (min_x1, _, _), _ = bb1
    _, (max_x2, _, _) = bb2
    return min_x1 > max_x2
import itertools
def concat_list(in_list: list) -> list:
    """Concatenate a list of lists into a single flat list."""
    return list(itertools.chain.from_iterable(in_list))
def is_sale(line):
    """A line describes a sale of cattle exactly when it has 5 fields."""
    return len(line) == 5
def _is_recipe_fitted(recipe):
"""Check if a recipe is ready to be used.
Fitting a recipe consists in wrapping every values of `fov`, `r`, `c` and
`z` in a list (an empty one if necessary). Values for `ext` and `opt` are
also initialized.
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Can only contain the keys
`pattern`, `fov`, `r`, `c`, `z`, `ext` or `opt`.
Returns
-------
_ : bool
Indicates if the recipe is fitted or not
"""
# all keys should be initialized in the new recipe, with a list or a string
for key in ['fov', 'r', 'c', 'z']:
if key not in recipe or not isinstance(recipe[key], list):
return False
for key in ['ext', 'opt']:
if key not in recipe or not isinstance(recipe[key], str):
return False
if 'pattern' not in recipe or not isinstance(recipe['pattern'], str):
return False
return True | 77e438dd00ac5606c52c88518c6932a09dff75df | 700,281 |
def view_event(user, event):
    """
    Check whether a user may view a specified event.

    A user may view the event if it is visible, or if the user holds the
    "booking.view_hidden_events" permission.

    :param User user:
    :param Event event:
    :return: bool, or None when no event is given
    """
    if event is None:
        return None
    # Permission is checked first so permission side effects (if any)
    # match the original evaluation order.
    can_see_hidden = user.has_perm("booking.view_hidden_events")
    return can_see_hidden or event.visible is True
def parse_filename(fname, return_ext=True, verbose=False):
    """
    Parse `fname` (in BIDS-inspired format) into a key-value dictionary.

    Parameters
    ----------
    fname : str or os.PathLike
        Filename to parse, e.g. ``'sub-01_ses-02.nii.gz'``
    return_ext : bool, optional
        Whether to return the extension of `fname` in addition to the
        key-value dict. Default: True
    verbose : bool, optional
        Whether to print status messages. Default: False

    Returns
    -------
    info : dict
        Key-value pairs extracted from `fname`
    ext : str
        Extension of `fname`, only returned if `return_ext=True`
    """
    base, *ext_parts = fname.split('.')
    try:
        # Each underscore-separated chunk is a 'key-value' pair; the bare
        # 'feature' marker carries no value and is skipped.
        pairs = (chunk.split('-') for chunk in base.split('_')
                 if chunk != 'feature')
        fname_dict = dict(pairs)
    except ValueError:
        print('Wrong filename format!')
        return
    if verbose:
        print(fname_dict)
    if return_ext:
        return fname_dict, '.'.join(ext_parts)
    return fname_dict
def get_ls(omega_list):
    """Return the list of Solar longitudes of each OMEGA/MEx observation
    in omega_list.

    Parameters
    ==========
    omega_list : array of OMEGAdata
        The input array of OMEGA observations.

    Returns
    =======
    ls : list
        The Ls value of each observation, in input order.
    """
    return [omega.ls for omega in omega_list]
import random
def miller_rabin_primality_testing(n):
    """Calculates whether n is composite (which is always correct) or prime
    (which theoretically is incorrect with error probability 4**-k), by
    applying Miller-Rabin primality testing.

    The number of rounds k is chosen automatically from the bit size of n.

    For reference and implementation example, see:
    https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test

    :param n: Integer to be tested for primality.
    :type n: int
    :return: False if the number is composite, True if it's probably prime.
    :rtype: bool
    """
    # Anything below 2 is not prime (also guards the witness range below).
    if n < 2:
        return False
    # Larger numbers need fewer rounds for the same error bound.
    bits = n.bit_length()
    if bits >= 1536:
        rounds = 3
    elif bits >= 1024:
        rounds = 4
    elif bits >= 512:
        rounds = 7
    else:
        # For smaller bitsizes, use an arbitrary (generous) round count.
        rounds = 10
    # Write n - 1 as (2 ** r) * d with d odd.
    d, r = n - 1, 0
    while d % 2 == 0:
        d //= 2
        r += 1
    for _ in range(rounds):
        # Pick a random witness a with 2 <= a <= n - 2.
        witness = random.randint(2, n - 2)
        x = pow(witness, d, n)
        if x in (1, n - 1):
            continue
        survived = False
        for _ in range(r - 1):
            x = pow(x, 2, n)
            if x == 1:
                # n is composite.
                return False
            if x == n - 1:
                # This witness is inconclusive; try the next one.
                survived = True
                break
        if not survived:
            # No square root of -1 found: n is composite.
            return False
    return True
import functools
from datetime import datetime
def busy_try(delay_secs: int, ExceptionType=Exception):
    """
    A decorator that repeatedly attempts the function until the timeout
    specified has been reached. This is different from timeout-related
    functions, where the decorated function is called only *once*.

    Because the decorated function is called repeatedly (busy-waiting),
    the delay period should not be long. Use `timeout` if you hope to
    avoid occupying system resources.

    Parameters
    ----------
    delay_secs :
        time delayed, in seconds.
    ExceptionType : optional
        exception caught when the function attempted raises an error.
        Default to `Exception`.

    Returns
    -------
    The return value of the decorated function, as soon as any call
    succeeds.

    Raises
    ------
    TimeoutError :
        when the function has been retried for `delay_secs` seconds and no
        function call succeeds.
    """
    def _busy_try(func):
        @functools.wraps(func)
        def busy_try_wrapper(self, *args, **kwargs):
            # BUG FIX: the previous version checked elapsed time in a
            # ``finally`` block, which raised TimeoutError even after a
            # *successful* call that took longer than delay_secs, and it
            # used ``timedelta.seconds`` (the seconds *component*) instead
            # of ``total_seconds()``.
            start = datetime.now()
            while True:
                try:
                    return func(self, *args, **kwargs)
                except ExceptionType:
                    elapsed = (datetime.now() - start).total_seconds()
                    if elapsed > delay_secs:
                        raise TimeoutError
        return busy_try_wrapper
    return _busy_try
def MakeDeclarationString(params):
    """Given a list of (name, type, vectorSize) parameters, make a C-style
    parameter declaration string.
    Ex return: 'GLuint index, GLfloat x, GLfloat y, GLfloat z'.
    """
    if not params:
        return 'void'
    # vectorSize is unused here; only the type and name are declared.
    return ', '.join(ptype + ' ' + pname for (pname, ptype, _) in params)
def make_text_list(postings_dict, first_n_postings=100):
    """
    Extract the texts from postings_dict into a list of strings.

    Parameters:
        postings_dict: dict mapping stringified indices ('0', '1', ...)
            to dicts that contain a 'posting' text entry
        first_n_postings: highest index (inclusive) to extract

    Returns:
        text_list: list of job posting texts, in index order
    """
    text_list = []
    for i in range(first_n_postings + 1):
        # Some indices may be missing due to errors in scraping, so skip
        # gaps instead of failing.  Only the expected lookup errors are
        # caught — the original bare ``except`` also hid genuine bugs.
        try:
            text_list.append(postings_dict[str(i)]['posting'])
        except (KeyError, TypeError):
            continue
    return text_list
def one_or_more(amount, single_str, multiple_str):
    """
    Return a string which uses either the single or the multiple form.

    @param amount the amount to be displayed
    @param single_str the string for a single element
    @param multiple_str the string for multiple elements
    @return the chosen template formatted with `amount`
    """
    template = single_str if amount == 1 else multiple_str
    return template.format(amount)
import torch
def accuracy(output, target, topk=(1,), exact=False):
    """
    Computes the top-k accuracy for the specified values of k

    Args:
        output (ch.tensor) : model output (N, classes) or (N, attributes)
            for sigmoid/multitask binary classification
        target (ch.tensor) : correct labels (N,) [multiclass] or (N,
            attributes) [multitask binary]
        topk (tuple) : for each item "k" in this tuple, this method
            will return the top-k accuracy
        exact (bool) : whether to return aggregate statistics (if
            False) or per-example correctness (if True)

    Returns:
        A list of top-k accuracies.
    """
    with torch.no_grad():
        # Binary Classification: threshold sigmoid outputs at 0.5 and
        # compare element-wise.  Note this path ignores `topk`/`exact`.
        if len(target.shape) > 1:
            assert output.shape == target.shape, \
                "Detected binary classification but output shape != target shape"
            return [torch.round(torch.sigmoid(output)).eq(torch.round(target)).float().mean()], [-1.0]

        maxk = max(topk)
        batch_size = target.size(0)

        # pred: (maxk, N) indices of the top-maxk classes per example.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        res_exact = []
        for k in topk:
            # BUG FIX: use reshape(-1) rather than view(-1) — slices of
            # `correct` are not guaranteed contiguous, and .view() raises
            # RuntimeError on non-contiguous tensors in modern PyTorch.
            correct_k = correct[:k].reshape(-1).float()
            ck_sum = correct_k.sum(0, keepdim=True)
            res.append(ck_sum.mul_(100.0 / batch_size))
            res_exact.append(correct_k)

        if not exact:
            return res
        else:
            return res_exact
import itertools
def enumerate_hyperparameter_combinations(parameter_to_options):
    """
    Returns a list of dictionaries covering all hyperparameter options.

    :param parameter_to_options: a dictionary that maps parameter name to a list of possible values
    :return: a list of dictionaries that map parameter names to set values
    """
    names = list(parameter_to_options)
    value_lists = [parameter_to_options[name] for name in names]
    return [dict(zip(names, combo))
            for combo in itertools.product(*value_lists)]
def get_new_pvars(opvs, epvs):
    """Returns a list of new projection variables from a list of old
    PVar's opvs, based on a list of existing PVar's epvs.

    The new variables are consecutive integers starting just above the
    largest existing PVar; only the *count* of opvs matters, not their
    values. If there are no existing PVar's, opvs is returned unchanged.

    Args:
        opvs: Old projection variables.
        epvs: Existing projection variables.

    Returns:
        A list of projection variables.
    """
    if not epvs:
        return opvs
    offset = max(epvs) + 1
    return list(range(offset, offset + len(opvs)))
def suffix(pattern, k):
    """we define SUFFIX(Pattern) as the last (k-1)-mer in a k-mer Pattern"""
    # Note: for k == 1 the slice index is 0, so the whole pattern is
    # returned (same quirk as the original negative-index form).
    tail_len = k - 1
    return pattern[-tail_len:]
def runSingleThreaded(runFunction, arguments):
    """
    Small overhead-function to iteratively run a function with a pre-determined input arguments

    :param runFunction: The (``partial``) function to run, accepting ``arguments``
    :param arguments: The arguments to passed to ``runFunction``, one run at a time
    :return: List of any results produced by ``runFunction``
    """
    # Each element of `arguments` is an argument tuple for one call.
    return [runFunction(*argument_tuple) for argument_tuple in arguments]
def clip(num, num_min=None, num_max=None):
    """Clip to max and/or min values. To not use a limit, give argument None

    Args:
        num (float): input number
        num_min (float): minimum value; if less than this, return this.
            Use None to designate no minimum value.
        num_max (float): maximum value; if more than this, return this.
            Use None to designate no maximum value.
    Returns
        float: clipped version of the input number
    """
    result = num
    if num_min is not None and result < num_min:
        result = num_min
    if num_max is not None and result > num_max:
        result = num_max
    return result
def grid_coordinates(roi, x_divisions, y_divisions, position):
    """
    Return the grid-cell coordinates of a given position.

    The region of interest is divided into x_divisions * y_divisions
    cells, and `position` is mapped to the cell it falls inside.  The
    position is assumed to always lie inside the grid.

    :param roi: region of interest to be gridezied, as ((x_min, x_max), (y_min, y_max))
    :param x_divisions: number of divisions in the x axis
    :param y_divisions: number of divisions in the y axis
    :param position: (x, y) position to transform into grid coordinates
    """
    (x_min, x_max), (y_min, y_max) = roi
    cell_width = float(x_max - x_min) / x_divisions
    cell_height = float(y_max - y_min) / y_divisions
    col = int((position[0] - x_min) / cell_width)
    row = int((position[1] - y_min) / cell_height)
    return (col, row)
def splitmessage(message):
    """Returns a tuple containing the command and arguments from a message.

    Returns None if there is no firstword found
    """
    assert isinstance(message, str)
    parts = message.split()
    if not parts:
        return None
    return (parts[0], parts[1:])
def dec_to_str(total):
    """Converts decimals to strings for more natural speech."""
    # Common fractions get spoken names.
    special_fractions = {
        0.125: "an eighth",
        0.25: "a quarter",
        0.5: "a half",
        0.75: "three quarters",
    }
    if total in special_fractions:
        return special_fractions[total]
    if total % 1 == 0:
        return str(int(total))
    if total % 0.5 == 0:
        return "{0:.1f}".format(total)
    return "{0:.2f}".format(total)
def loadWorld(worldName, store):
    """
    Load an imaginary world from a file.

    The specified file should be a Python file defining a global callable
    named C{world}, taking an axiom L{Store} object and returning an
    L{ImaginaryWorld}. This world (and its attendant L{Store}) should
    contain only a single L{Actor} instance, which will be used for the
    player character.

    @param worldName: The path name to a Python file containing a world.
    @type worldName: L{str}

    @param store: The axiom data store to read the world into.
    @type store: L{Store}
    """
    with open(worldName, "rb") as world_file:
        source = world_file.read()
    code = compile(source, worldName, "exec")
    scope = {}
    # Execute the world module in an isolated namespace, then hand the
    # store to its `world` factory.
    exec(code, scope)
    return scope['world'](store)
import math
def tan(x):
    """Return the tangent of x (x in radians); thin wrapper over math.tan."""
    return math.tan(x)
from typing import Any
import json
def is_jsonable(x: Any):
    """
    Check if an object is json serializable.

    Source: https://stackoverflow.com/a/53112659
    """
    try:
        json.dumps(x)
    except (TypeError, OverflowError):
        return False
    return True
def _linear_transform(src, dst):
""" Parameters of a linear transform from range specifications """
(s0, s1), (d0,d1) = src, dst
w = (d1 - d0) / (s1 - s0)
b = d0 - w*s0
return w, b | 7f55a2617721fdefcc724bcb8ce9f880d7bcd846 | 700,332 |
def feature_within_s(annolayer, list_of_s):
    """Extracts all <annolayer> from all sentence-elements in list_of_s;
    returns a flat list of <annolayer>-elements;
    """
    matches = []
    for sentence in list_of_s:
        # findall('.//tag') collects all descendant elements with that tag.
        matches.extend(sentence.findall('.//' + annolayer))
    return matches
def processPostMessage(post_message, status_type):
    """
    Check if the message is >500 characters.
    If it is, shorten it to at most 500 characters (cut at the last word
    boundary) and append an ellipsis.

    Output: a tuple of strings: read_more (empty if not shortened), post text
    """
    LIMIT = 500
    if len(post_message) <= LIMIT:
        return "", post_message
    # Truncate at the last space within the limit so words stay whole.
    truncated = post_message[:LIMIT]
    truncated = truncated[:truncated.rfind(' ')] + "..."
    if status_type == 'added_video':
        return "\nЧитати повністю і дивитися відео:\n", truncated
    return "\nЧитати далі:\n", truncated
def data_for_keys(data_dict, data_keys):
    """
    Return a dict with data for requested keys, or empty strings if missing.
    """
    result = {}
    for key in data_keys:
        result[key] = data_dict.get(key, '')
    return result
def patch_set_approved(patch_set):
    """Return True if the patchset has been approved.

    :param dict patch_set: De-serialized dict of a gerrit change
    :return: True if one of the patchset reviews approved it.
    :rtype: bool
    """
    for review in patch_set.get('approvals', []):
        if review['type'] == 'Approved':
            return True
        # A positive Workflow vote also counts as approval.
        if review['type'] == 'Workflow' and int(review['value']) > 0:
            return True
    return False
def message_has_label(message, label):
    """Tests whether a message has a label

    Args: message: message to consider.
          label: label to check.
    Returns: True/False.
    """
    label_ids = message.get('labelIds', [])
    return label['id'] in label_ids
def transform_case(input_string):
    """
    Lowercase string fields
    """
    lowered = input_string.lower()
    return lowered
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.