content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k) |
---|---|---|
import pathlib
def get_path_name(name: pathlib.Path | str | None) -> pathlib.Path | str:
"""Get the full name from a directory name or path and a file name or path.
Args:
name (pathlib.Path | str | None): Directory name or file name.
Returns:
str: Full file name.
"""
if name is None:
return ""
return pathlib.Path(name) | 0eb4abd9cf82f022d0a0c3d380fab3e750c183d3 | 25,184 |
def getFirstLineContaining(lines, keyLineStart):
"""
Given a split abfinfo text, return a stripped value for the given key.
"""
for line in lines:
if line.startswith(keyLineStart):
line = line.replace(keyLineStart, "")
line = line.strip()
return line
return None | ea9128d51d0d32075c212f3f11306b8cbb9f0fb6 | 25,189 |
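A quick illustration of how getFirstLineContaining is meant to be used; the abfinfo-style lines below are invented for the example.
lines = ["ABF version = 2.6", "sampling rate = 20000 Hz"]   # hypothetical abfinfo-style lines
getFirstLineContaining(lines, "sampling rate =")            # -> '20000 Hz'
getFirstLineContaining(lines, "protocol =")                 # -> None (key not present)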
def tokenize(name):
"""Turn a string into a list of character tokens."""
#name = name.lower() #ignore cases
characters = [c for c in name] #make a list of characters
characters = ['<START>'] + characters + ['<STOP>'] #add beginning and end token
return characters | 9ed489baf16b6e91f67ed74c1f77c3c40205147e | 25,190 |
def hex(n):
"""render the given number using upper case hex, like: 0x123ABC"""
return "0x%X" % n | 2759c53b50214b138da8c986820b3acc61ed0ab2 | 25,191 |
def es_index_successful(response):
"""
Test if an Elasticsearch client ``index`` method response indicates a
successful index operation. The response parameter should be a dictionary
with following keys:
.. sourcecode:: python
{
'_shards': {
'total': 2,
'failed': 0,
'successful': 1
},
'_index': u'billing-2015-12',
'_type': u'billing',
'_id': u'AVOmKFXgF_o3S6_4PkP1',
'_version': 1,
'created': True
}
According to `Elasticsearch Index API <https://www.elastic.co/guide/en/
elasticsearch/reference/current/docs-index_.html>`, an index operation is
successful in the case ``successful`` is at least 1.
:rtype: bool
"""
return response.get('_shards', {}).get('successful', 0) >= 1 | e76e84d8461078da03f820089297ea9ca28f0911 | 25,195 |
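A minimal check of the helper against a response shaped like the one in the docstring (values are illustrative, not taken from a live cluster):
ok_response = {'_shards': {'total': 2, 'failed': 0, 'successful': 1}, '_id': 'AVOmKFXgF_o3S6_4PkP1'}
es_index_successful(ok_response)                      # True
es_index_successful({'_shards': {'successful': 0}})   # False
es_index_successful({})                               # False (missing '_shards' defaults to 0)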
def verify_header(filename):
"""
verify the signature file header (pastadb)
"""
with open(filename, "rb") as f:
if f.read(7) == "\x70\x61\x73\x74\x61\x64\x62":
return True
return False | 5958179b0656ac7b4e8681e20d2746176592da3a | 25,196 |
import requests
def _request_limit_reached(exception):
""" Checks if exception was raised because of too many executed requests. (This is a temporal solution and
will be changed in later package versions.)
:param exception: Exception raised during download
:type exception: Exception
:return: `True` if exception is caused because too many requests were executed at once and `False` otherwise
:rtype: bool
"""
return isinstance(exception, requests.HTTPError) and \
exception.response.status_code == requests.status_codes.codes.TOO_MANY_REQUESTS | 67280ea48cce3238d0c574ec8ad1b13719df4990 | 25,197 |
import tempfile
def make_environment(case, method='HEAD', path=u'/', qs=None, host=None,
server_name='localhost', port=80, script_name='',
url_scheme='http', headers={},
content_type=None, content_length=None,
http_protocol='http/1.1'):
"""
Build a WSGI environment for use in testing
"""
if url_scheme == 'https' and port == 80:
port = 443
environment = {
'wsgi.version': (1, 0),
'wsgi.errors': tempfile.NamedTemporaryFile(),
'wsgi.input': tempfile.NamedTemporaryFile(),
'wsgi.multithreaded': False,
'wsgi.multiprocess': False,
'wsgi.run_once': True,
'wsgi.url_scheme': url_scheme,
'SERVER_PROTOCOL': http_protocol,
'REQUEST_METHOD': method,
'PATH_INFO': path,
'SERVER_NAME': server_name,
'SERVER_PORT': str(port),
'SCRIPT_NAME': script_name
}
case.assertIsNotNone(environment['wsgi.input'])
case.assertIsNotNone(environment['wsgi.errors'])
for k, v in headers.items():
environment['HTTP_' + k] = v
if host is not None:
environment['HTTP_HOST'] = host
if qs is not None:
environment['QUERY_STRING'] = qs
if content_type is not None:
environment['CONTENT_TYPE'] = content_type
    if content_length is not None:
        environment['CONTENT_LENGTH'] = str(content_length)
return environment | 4b2fdd959fa538478b9166f5ade0d157847eb7ca | 25,202 |
from typing import List
def parse_raw_value(raw_value: str, linesep: str) -> List[List[str]]:
"""Parse raw values from VEP"""
parsed = list()
for line in raw_value.strip().split(linesep):
# This is the regular case
if "\t" in line:
parsed.append(line.split("\t", 1))
        # Special cases where VEP was run on an empty input file
elif line == "Lines of input read":
parsed.append(["Lines of input read", "0"])
elif line == "Variants processed":
parsed.append(["Variants processed", "0"])
return parsed | 4ce5fff141628c553f59e87e9beb4174bd400a19 | 25,208 |
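For reference, a sketch of the expected input/output shape, using a made-up two-line VEP stats fragment separated by "\n":
raw = "Lines of input read\t100\nVariants processed\t95"
parse_raw_value(raw, "\n")
# -> [['Lines of input read', '100'], ['Variants processed', '95']]
parse_raw_value("Lines of input read", "\n")
# -> [['Lines of input read', '0']]   (the empty-input special case)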
def format_datetime(dt):
"""
Returns ISO 8601 string representation
"""
return dt.isoformat() | fbbdec6086618f94826a69230b88bfa9e79ca472 | 25,213 |
def get_labelled_groups(labelled_measures_table, labelname):
"""
Provides a simple way of splitting a labelled measures table into multiple tables each corresponding to a given label.
Args:
labelled_measures_table (Dataframe): A measures table with a column corresponding to a label.
labelname (String): The name of the label to be split on.
Returns:
        List of dataframes corresponding to membership of a given label.
"""
# get the labels IN ASCENDING ORDER (important)
labels = sorted(set(list(labelled_measures_table[labelname].values)))
player_groups = []
for label in labels:
this_group = labelled_measures_table[
labelled_measures_table[labelname] == label
]
player_groups.append(this_group)
return player_groups | 40bacfaf0044185034fde3a0eb31f7e1ab7a94ad | 25,217 |
def _antnums_to_blpair(antnums):
"""
Convert nested tuple of antenna numbers to baseline-pair integer.
A baseline-pair integer is an i12 integer that is the antenna numbers
+ 100 directly concatenated (i.e. string contatenation).
    Ex: ((1, 2), (3, 4)) --> 101 + 102 + 103 + 104 --> 101102103104.
Parameters
----------
antnums : tuple
nested tuple containing integer antenna numbers for a baseline-pair.
Ex. ((ant1, ant2), (ant3, ant4))
Returns
-------
blpair : <i12 integer
baseline-pair integer
"""
# get antennas
ant1 = antnums[0][0] + 100
ant2 = antnums[0][1] + 100
ant3 = antnums[1][0] + 100
ant4 = antnums[1][1] + 100
# form blpair
blpair = int(ant1*1e9 + ant2*1e6 + ant3*1e3 + ant4)
return blpair | 47cbdee6212ae4a2857df3578b5755c98d928d55 | 25,221 |
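Worked examples of the packing (pure arithmetic, so they can be checked by hand):
_antnums_to_blpair(((1, 2), (3, 4)))      # -> 101102103104
_antnums_to_blpair(((25, 37), (25, 37)))  # -> 125137125137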
def get_user_html(user):
"""Return standard HTML representation for a User object"""
return (
'<a title="{}" href="mailto:{}" data-toggle="tooltip" '
'data-placement="top">{}'
'</a>'.format(user.get_full_name(), user.email, user.username)
) | d988e872266767ea23f23836dcd588d74b0060a8 | 25,222 |
from typing import Dict
from typing import Any
def dict_without(dictionary: Dict[Any, Any], without: Any) -> Dict[Any, Any]:
"""
Remove a key from a dictionary and return the copy without.
"""
new = dictionary.copy()
new.pop(without)
return new | 4008e7e56690363e58da41f272e93ebf88ccd907 | 25,224 |
def map_rows_to_cols(rows, cols):
"""
Returns a list of dictionaries.
Each dictionary is the column name to its corresponding row value.
"""
mapped_rows = []
for row in rows:
mapped_rows.append({cols[i]: row[i] for i in range(len(cols))})
return mapped_rows | 9efb7d48f69d5bab8d910cbedd75f4c4d001ef7b | 25,227 |
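Example of zipping rows to column names (data invented for illustration):
cols = ["id", "name"]
rows = [(1, "alice"), (2, "bob")]
map_rows_to_cols(rows, cols)
# -> [{'id': 1, 'name': 'alice'}, {'id': 2, 'name': 'bob'}]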
def clean_side_panel(sidepanel):
"""
Cleans the SidePanel data and stores it in a dict.
Parameter:
sidepanel: html-sidepanel extracted using bs4
Returns:
data: dict of extracted data
"""
data = dict()
for x in sidepanel:
x = x.text.strip().replace('\n', '')
index = x.find(':')
if index == -1:
continue
y, x = x[:index], x[index+1:].strip()
data[y] = x
return data | c9fab309b64b788283e0848a0777fd3ef80014ad | 25,228 |
from collections import OrderedDict
def object_attributes_to_ordered_dict(obj, attributes):
    """Returns the specified attributes from the object in an OrderedDict."""
    ordered = OrderedDict()
    object_vars = vars(obj)
    for attribute in attributes:
        ordered[attribute] = object_vars[attribute]
    return ordered | 2aa1e75669bbe13f8d3fa238dc0c2bb681aa8b72 | 25,232 |
def _get_object_info_from_revision(revision, known_type):
""" returns type and id of the searched object, if we have one part of
the relationship known.
"""
object_type = revision.destination_type \
if revision.source_type == known_type \
else revision.source_type
object_id = revision.destination_id if \
revision.source_type == known_type \
else revision.source_id
return object_type, object_id | e8f73296f0d7080c6290148142dd3d2902646ed1 | 25,234 |
def get_transitions(sequence):
"""
Extracts a list of transitions from a sequence, returning a list of lists containing each transition.
Example
--------
>>> sequence = [1,2,2,1,2,3,2,3,1]
>>> ps.get_transitions(sequence)
[[1, 2], [2, 1], [1, 2], [2, 3], [3, 2], [2, 3], [3, 1]]
"""
transitions = []
for position in range(len(sequence) - 1):
if sequence[position] != sequence[position + 1]:
transitions.append([sequence[position], sequence[position + 1]])
return transitions | 25c2e7de0f4701517c1f41f466a3710a7f124c4d | 25,240 |
import traceback
def exception_to_string(e: Exception) -> str:
""" Convert exception to printable string """
stack = traceback.extract_stack()[:-3] + traceback.extract_tb(e.__traceback__)
    pretty_out = "".join(traceback.format_list(stack))  # join the formatted frames into one string
return f"{pretty_out}\n {e.__class__} {e}" | b5b0e873dd3ad2d923d0cc16de5ab4016e73565e | 25,242 |
def get_dependent_nodes(nodes):
"""Get all dependent nodes connected to the list of nodes.
Looking for connections outside of the nodes in incoming argument.
Arguments:
nodes (list): list of nuke.Node objects
Returns:
connections_in: dictionary of nodes and its dependencies
connections_out: dictionary of nodes and its dependency
"""
connections_in = dict()
connections_out = dict()
node_names = [n.name() for n in nodes]
for node in nodes:
inputs = node.dependencies()
outputs = node.dependent()
# collect all inputs outside
test_in = [(i, n) for i, n in enumerate(inputs)
if n.name() not in node_names]
if test_in:
connections_in.update({
node: test_in
})
# collect all outputs outside
test_out = [i for i in outputs if i.name() not in node_names]
if test_out:
# only one dependent node is allowed
connections_out.update({
node: test_out[-1]
})
return connections_in, connections_out | 3a436fa704226f7466761a2b87a71ba91ca419b3 | 25,245 |
import math
import torch
def gaussian_probability(sigma, mu, data):
"""Returns the probability of `data` given MoG parameters `sigma` and `mu`.
Arguments:
sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
size, G is the number of Gaussians, and O is the number of
dimensions per Gaussian.
mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
number of Gaussians, and O is the number of dimensions per Gaussian.
data (BxI): A batch of data. B is the batch size and I is the number of
input dimensions.
Returns:
probabilities (BxG): The probability of each point in the probability
of the distribution in the corresponding sigma/mu index.
"""
data = data.unsqueeze(1).expand_as(sigma)
ret = 1.0 / math.sqrt(2 * math.pi) * torch.exp(-0.5 *
((data - mu) / sigma) ** 2) / sigma
return torch.prod(ret, 2) | 5757e051af16b692fba9e483990df1d5c4fd3870 | 25,247 |
def length(head) -> int:
"""
The method length, which accepts a linked list
(head), and returns the length of the list.
:param head:
:return:
"""
i = 0
if head is None:
return 0
while head.next is not None:
head = head.next
i += 1
return i + 1 | f1ad7c64dc15620340f505671281e59267eb4b2b | 25,248 |
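The snippet assumes a singly linked node type with a `next` attribute; a minimal stand-in node class (not part of the original) makes it runnable:
class Node:
    # minimal hypothetical node type, only what length() needs
    def __init__(self, value, next_node=None):
        self.value = value
        self.next = next_node

head = Node(1, Node(2, Node(3)))
length(head)   # 3
length(None)   # 0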
def Mabs2L(Mabs,MUVsun=5.5):
"""
Converting absolute magnitude(s) to luminosity in erg/s
Using a default absolute magnitude of the sun (in UV) of 5.5 from http://www.ucolick.org/~cnaw/sun.html
"""
Lsun = 3.839e-11 # 1e44 erg/s
Lobj = 10**((MUVsun-Mabs)/2.5)*Lsun # Luminosity in erg/s
return Lobj | 85a6f7b1e58dbc086a7dd36659e76b37849a8b04 | 25,249 |
def list_prod(lst):
"""
Calculate the product of all numbers in a python list.
"""
prod = 1
for itm in lst:
prod *= itm
return prod | b0f5911e6eeb289aae7efe7f1fe99a2ca0f83cc4 | 25,251 |
def convert_member(member_str):
"""
Convert member data from database to member id list
:param member_str: Data from Class database
:type member_str: str
:return: a list of member id as integer
:rtype: list
>>> print(convert_member("1,2,50,69"))
[1, 2, 50, 69]
"""
if (member_str == "0") or (member_str == ""):
return []
else:
li = list(member_str.split(","))
li = [int(item) for item in li]
return li | fec4081104c3cb4574e255c8408164062a287963 | 25,256 |
def is_palindrome(s):
"""
What comes in:
-- a string s that (in this simple version of the palindrome
problem) contains only lower-case letters
(no spaces, no punctuation, no upper-case characters)
What goes out: Returns True if the given string s is a palindrome,
i.e., reads the same backwards as forwards.
Returns False if the given string s is not a palindrome.
Side effects: None.
Examples:
abba reads backwards as abba so it IS a palindrome
but
abbz reads backwards as zbba so it is NOT a palindrome
Here are two more examples: (Note: I have put spaces into the
strings for readability; the real problem is the string WITHOUT
the spaces.)
a b c d e x x e d c b a reads backwards as
a b c d e x x e d c b a
so it IS a palindrome
but
a b c d e x y e d c b a reads backwards as
a b c d e y x e d c b a
so it is NOT a palindrome
Type hints:
:type s: str
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# The testing code is already written for you (above).
#
###########################################################################
# IMPORTANT: As with ALL problems, work a concrete example BY HAND
# to figure out how to solve this problem. The last two examples
# above are particularly good examples to work by hand.
###########################################################################
# -------------------------------------------------------------------------
m = 1
for k in range(len(s)//2):
if s[k] != s[len(s) - m]:
return False
m = m + 1
return True | 41409a3681c6f5c343f19fc53823415852243d45 | 25,258 |
def number_of_pluses_before_an_equal(reaction):
"""
Args:
reaction (str) - reaction with correctly formatted spacing
Returns (int):
number_of - the number of pluses before the arrow `=>`
Example:
>>>number_of_pluses_before_an_equal("C6H12O6 + 6O2=> 6CO2 + 6H2O")
1
"""
    number_of = 0
    # start the loop at index 1 so we don't have to worry about (i - 1) < 0
    for i in range(1, len(reaction)):
        if reaction[i] == "=":
            return number_of
        # checking reaction[i+1] == " " is omitted because of formatting reasons
        if reaction[i] == "+" and reaction[i-1] == " ":
            number_of += 1
return number_of | f532ee41dee797d7c2ae1639fae9f1cb61c3f037 | 25,260 |
import random
def generate_chromosome(min_length_chromosome, max_length_chromosome, possible_genes, repeated_genes_allowed):
""" Function called to create a new individual (its chromosome). It randomly chooses its length (between min_length_chromosome and min_length_chromosome), and it randomly chooses genes among the list of possible_genes.
:param min_length_chromosome: (int) Minimum allowed length of the chromosome.
:param max_length_chromosome: (int) Maximum allowed length of the chromosome.
:param possible_genes: (list of ...) List with the all the possible values that the genes can take.
:param repeated_genes_allowed: (bool) It is a boolean that indicates whether the genes can be repeated in the chromosome (repeated_genes_allowed = 1) or they cannot be repeated (repeated_genes_allowed = 0).
:return:
* (list of genes) List that represents the chromosome.
"""
# Choose a random number of genes
number_of_genes = random.randrange(min_length_chromosome, max_length_chromosome + 1)
# Create new chromosome:
if repeated_genes_allowed:
chromosome = random.choices(possible_genes, weights=None, k=number_of_genes)
return chromosome
else:
possible_genes_aux = possible_genes.copy()
random.shuffle(possible_genes_aux)
return possible_genes_aux[:number_of_genes] | aae0356538958bfe180b3f0abade7afd5dc2e7f7 | 25,261 |
def binary(x: int, pre: str='0b', length: int=8):
"""
Return the binary representation of integer x
Input:
x: an integer of any size
pre: the prefix for the output string, default 0b
        length: minimum length of the output in binary; shorter representations are padded
                default is 8, i.e. 2**8=256 values, a byte
    Return:
        The binary representation of integer x with a minimum length of "length"
        padded with leading 0s
"""
return '{0}{{:{1}>{2}}}'.format(pre, 0, length).format(bin(x)[2:]) | 287e5bb87f31b71ad7ccd1cf65fab729794eeef4 | 25,262 |
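A couple of sample calls showing the zero-padding behaviour (the pad is on the left, and `length` never truncates):
binary(5)                        # '0b00000101'
binary(255, pre='', length=4)    # '11111111'  (already longer than 4, so no truncation)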
import inspect
def library(scope=None, version=None, converters=None, doc_format=None, listener=None,
auto_keywords=False):
"""Class decorator to control keyword discovery and other library settings.
By default disables automatic keyword detection by setting class attribute
``ROBOT_AUTO_KEYWORDS = False`` to the decorated library. In that mode
only methods decorated explicitly with the :func:`keyword` decorator become
keywords. If that is not desired, automatic keyword discovery can be
enabled by using ``auto_keywords=True``.
Arguments ``scope``, ``version``, ``converters``, ``doc_format`` and ``listener``
set library's scope, version, converters, documentation format and listener by
using class attributes ``ROBOT_LIBRARY_SCOPE``, ``ROBOT_LIBRARY_VERSION``,
``ROBOT_LIBRARY_CONVERTERS``, ``ROBOT_LIBRARY_DOC_FORMAT`` and
``ROBOT_LIBRARY_LISTENER``, respectively. These attributes are only set if
the related arguments are given and they override possible existing attributes
in the decorated class.
Examples::
@library
class KeywordDiscovery:
@keyword
def do_something(self):
# ...
def not_keyword(self):
# ...
@library(scope='GLOBAL', version='3.2')
class LibraryConfiguration:
# ...
The ``@library`` decorator is new in Robot Framework 3.2.
The ``converters`` argument is new in Robot Framework 5.0.
"""
if inspect.isclass(scope):
return library()(scope)
def decorator(cls):
if scope is not None:
cls.ROBOT_LIBRARY_SCOPE = scope
if version is not None:
cls.ROBOT_LIBRARY_VERSION = version
if converters is not None:
cls.ROBOT_LIBRARY_CONVERTERS = converters
if doc_format is not None:
cls.ROBOT_LIBRARY_DOC_FORMAT = doc_format
if listener is not None:
cls.ROBOT_LIBRARY_LISTENER = listener
cls.ROBOT_AUTO_KEYWORDS = auto_keywords
return cls
return decorator | 78b3a7c2423d0b594d5d8f4e564b929f2ef7148a | 25,265 |
def false(*args):
"""
>>> false(1)
False
>>> false(None)
False
"""
return False | cb960acdc5ddb7a2a54d1a69165fc684674b34fe | 25,266 |
def reflection_normal(n1, n2):
"""
Fresnel reflection losses for normal incidence.
For normal incidence no difference between s and p polarisation.
Inputs:
n1 : Refractive index of medium 1 (input)
n2 : Refractive index of medium 2 (output)
Returns:
        R : The Fresnel reflectance
Doctests:
>>> '%.2f' % reflection_normal(1.5,1)
'0.04'
"""
return((n1-n2)/(n1+n2))**2. | db3e1779628116ce2d82a91dada64aa2d8ff4463 | 25,269 |
def _get_oauth_url(url):
"""
Returns the complete url for the oauth2 endpoint.
Args:
url (str): base url of the LMS oauth endpoint, which can optionally include some or all of the path
``/oauth2/access_token``. Common example settings that would work for ``url`` would include:
LMS_BASE_URL = 'http://edx.devstack.lms:18000'
BACKEND_SERVICE_EDX_OAUTH2_PROVIDER_URL = 'http://edx.devstack.lms:18000/oauth2'
"""
stripped_url = url.rstrip('/')
if stripped_url.endswith('/access_token'):
return url
if stripped_url.endswith('/oauth2'):
return stripped_url + '/access_token'
return stripped_url + '/oauth2/access_token' | e2f81f8fa0aab74c41eb253e1a7e2291ff96e334 | 25,271 |
from pathlib import Path
from typing import List
def read_feastignore(repo_root: Path) -> List[str]:
"""Read .feastignore in the repo root directory (if exists) and return the list of user-defined ignore paths"""
feast_ignore = repo_root / ".feastignore"
if not feast_ignore.is_file():
return []
lines = feast_ignore.read_text().strip().split("\n")
ignore_paths = []
for line in lines:
        # Remove everything after the first occurrence of "#" symbol (comments)
if line.find("#") >= 0:
line = line[: line.find("#")]
# Strip leading or ending whitespaces
line = line.strip()
# Add this processed line to ignore_paths if it's not empty
if len(line) > 0:
ignore_paths.append(line)
return ignore_paths | 57fa48fa61edfe9856d98171d54855a854c33743 | 25,281 |
from pathlib import Path
import math
def process(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:return: value to submit
"""
heading = 90
east_west_pos = 0
north_south_pos = 0
instructions = [l.strip() for l in open(file)]
for i in instructions:
action = i[0]
value = int(i[1:])
if action == 'R':
heading = (heading + value) % 360
elif action == 'L':
heading = (heading - value) % 360
if action == 'E':
east_west_pos += value
if action == 'W':
east_west_pos -= value
if action == 'N':
north_south_pos += value
if action == 'S':
north_south_pos -= value
if action == 'F':
east_west_pos += value * math.sin(float(heading) / 360.0 * 2.0 * math.pi)
north_south_pos += value * math.cos(heading / 360 * 2 * math.pi)
manhattan_distance = int(abs(east_west_pos) + abs(north_south_pos))
return manhattan_distance | 3ba2c0a9fd4457ea2b49d6aca983123d8af45e04 | 25,282 |
def give_same(value):
"""Return what is given."""
return value | 92e0b8b3e6d40120fbe1d860ff74c220fbdfaec5 | 25,288 |
def process_mutect_vcf(job, mutect_vcf, work_dir, univ_options):
"""
Process the MuTect vcf for accepted calls.
:param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
:param str work_dir: Working directory
:param dict univ_options: Dict of universal options used by almost all tools
:return: Path to the processed vcf
:rtype: str
"""
mutect_vcf = job.fileStore.readGlobalFile(mutect_vcf)
with open(mutect_vcf, 'r') as infile, open(mutect_vcf + 'mutect_parsed.tmp', 'w') as outfile:
for line in infile:
line = line.strip()
if line.startswith('#'):
print(line, file=outfile)
continue
line = line.split('\t')
if line[6] != 'REJECT':
print('\t'.join(line), file=outfile)
return outfile.name | 083859b8ef25f8b0f1c89f0542286a692aef98c0 | 25,289 |
def retrieve_longest_smiles_from_optimal_model(task):
"""
From the optimal models that were trained on the full data set using `full_working_optimal.py`,
we retrieve the longest SMILES that was generated.
Parameters
----------
task : str
The task to consider.
Returns
-------
int :
The longest SMILES that was generated when training the best model strategy for `task` data.
"""
if task == "FreeSolv":
longest_smiles = 76
elif task == "ESOL":
longest_smiles = 109
elif task in ["lipo", "lipophilicity"]:
longest_smiles = 268
elif task in ["chembl28", "affinity"]:
longest_smiles = 246
else:
longest_smiles = None
return longest_smiles | 5692b6e5bf322b0a6df67f9ad5ac699429ba9711 | 25,297 |
import re
def all_collections(db):
"""
Yield all non-sytem collections in db.
"""
include_pattern = r'(?!system\.)'
return (
db[name]
for name in db.list_collection_names()
if re.match(include_pattern, name)
) | 1b8220ac493036995695fc9ccf9ac74882677b4d | 25,300 |
def single_line_paragraph(s):
"""Return True if s is a single-line paragraph."""
return s.startswith('@') or s.strip() in ('"""', "'''") | 1e1febf21479b65920423268d93e5571de72b4ef | 25,301 |
def num_words_tags(tags, data):
"""This functions takes the tags we want to count and the datafram
and return a dict where the key is the tag and the value is the frequency
of that tag"""
tags_count = {}
for tag in tags:
len_tag = len(data[data['Tag'] == tag])
tags_count[tag] = len_tag
return tags_count | 9c6fddfcbdd1958e1c43b9b9bf8750bacacc3a31 | 25,307 |
def generate_pyproject(tmp_path):
"""Return function which generates pyproject.toml with a given ignore_fail value."""
def generator(ignore_fail):
project_tmpl = """
[tool.poe.tasks]
task_1 = { shell = "echo 'task 1 error'; exit 1;" }
task_2 = { shell = "echo 'task 2 error'; exit 1;" }
task_3 = { shell = "echo 'task 3 success'; exit 0;" }
[tool.poe.tasks.all_tasks]
sequence = ["task_1", "task_2", "task_3"]
"""
if isinstance(ignore_fail, bool) and ignore_fail:
project_tmpl += "\nignore_fail = true"
elif not isinstance(ignore_fail, bool):
project_tmpl += f'\nignore_fail = "{ignore_fail}"'
with open(tmp_path / "pyproject.toml", "w") as fp:
fp.write(project_tmpl)
return tmp_path
return generator | a79d0f24a3edc9fa8689cd0c33cef9e8f8121252 | 25,309 |
def parseConfig(s):
"""Parses a simple config file.
The expected format encodes a simple key-value store: keys are strings,
one per line, and values are arrays. Keys may not have colons in them;
everything before the first colon on each line is taken to be the key,
and everything after is considered a space-separated list of value-array
entries. Leading and trailing whitespace are stripped on each key and
value entry.
No special handling of comments is implemented, but non-conforming lines
(those with no colon) will be silently ignored.
Arguments:
s: A string containing the full contents of a config file.
Returns a dictionary mapping strings to lists. The lists, which may be
singletons, contain ints, floats, and/or strings.
"""
def stringToNumberMaybe(s):
if s.lower() in ['true', 'yes']:
return True
if s.lower() in ['false', 'no']:
return False
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
lines = s.splitlines()
d = {}
for line in lines:
kv = [x.strip() for x in line.split(':',1)]
try:
val_list = [stringToNumberMaybe(x) for x in kv[1].split()]
if len(val_list) != 1:
d[kv[0]] = val_list
else:
d[kv[0]] = val_list[0]
except IndexError:
pass
return d | 259fdcef0eabeb410b2c2f79c50bb5985b5ce5f7 | 25,311 |
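A small round-trip with an invented config string, showing the scalar/list and bool/number coercion (lines without a colon are silently skipped):
cfg = "name: my experiment\nepochs: 20\nrates: 0.1 0.01 0.001\nverbose: yes\n# a line with no colon is ignored"
parseConfig(cfg)
# -> {'name': ['my', 'experiment'], 'epochs': 20,
#     'rates': [0.1, 0.01, 0.001], 'verbose': True}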
def parse_args(args):
"""
Parses the command line arguments. For now, the only arg is `-d`, which
allows the user to select which database file that they would like to use.
More options might be added in the future or this option might be changed.
"""
if args[0] == "-d":
return ' '.join(args[1:]).strip()
else:
return None | b251103d2d73f63ff795ddad82de8040d8e81ec4 | 25,312 |
def path_filter(path):
# type: (str) -> str
"""
Removes the trailing '/' of a path, if any
:param path: A parsed path
:return: The parsed path without its trailing /
"""
return path[:-1] if path and path[-1] == "/" else path | 4b449dfe2f840a25bec605464e6a8dbaeaf9afed | 25,319 |
import json
def to_javascript(obj):
"""For when you want to inject an object into a <script> tag.
"""
return json.dumps(obj).replace('</', '<\\/') | 2fba6a30eb19fd0b8fcc4295c3994ad6fa82b02f | 25,324 |
def parse_years(years):
"""Parse input string into list of years
Args:
years (str): years formatted as XXXX-YYYY
Returns:
list: list of ints depicting the years
"""
# Split user input into the years specified, convert to ints
years_split = list(map(int, years.split('-')))
# if only 1 year then that is list
if len(years_split) == 1:
year_range = years_split
else:
        # Make a complete list of years (as ints, matching the docstring) from first year specified to last.
        year_range = list(range(min(years_split), max(years_split) + 1))
return year_range | 5296bd2f9e49a4a1689c813dd0e8641ea9a5c16f | 25,326 |
def dict_from_string(s):
"""
Inverse of dict_to_string. Takes the string representation of a dictionary and returns
the original dictionary.
:param s: The string representation of the dictionary
:return: [dict] the dictionary
"""
l = s.replace("[", "").replace("]", "").split("_")
d = {x.split(":")[0]: float(x.split(":")[1]) for x in l}
return d | b16677d1d39cfe74b53a327e2bcd85b6f16ee8de | 25,329 |
def get(columns, table):
""" Format SQL to get columns from table
Args:
columns (tuple): column names
table (str): table name to fetch from
Returns:
str: sql string
"""
columns = tuple([columns]) if isinstance(columns, str) else columns
return "SELECT {c} FROM {t}".format(c=', '.join(columns), t=table) | 7c4bacad2121f66782e551d440d602e381d77b89 | 25,330 |
import inspect
import functools
def onetimemethod(method):
"""Decorator for methods which need to be executable only once."""
if not inspect.isfunction(method):
raise TypeError('Not a function.')
has_run = {}
@functools.wraps(method)
def wrapped(self, *args, **kwargs):
"""Wrapped method being run once and only once."""
nonlocal has_run
if has_run.setdefault(id(self), False):
raise RuntimeError(
"One-time method '%s' cannot be re-run for this instance."
% method.__name__
)
has_run[id(self)] = True
return method(self, *args, **kwargs)
return wrapped | 8133ee826ea57bbf05e01bac81757e22f0e4c072 | 25,333 |
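A sketch of the decorator in use on a hypothetical class; the second call on the same instance raises, while a fresh instance gets its own one-shot:
class Connection:
    @onetimemethod
    def connect(self):
        return "connected"

conn = Connection()
print(conn.connect())          # connected
try:
    conn.connect()             # second call on the same instance
except RuntimeError as err:
    print(err)                 # One-time method 'connect' cannot be re-run for this instance.
print(Connection().connect())  # a new instance can still run it once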
from typing import Tuple
import math
def rotation_y_to_alpha(
rotation_y: float, center: Tuple[float, float, float]
) -> float:
"""Convert rotation around y-axis to viewpoint angle (alpha)."""
alpha = rotation_y - math.atan2(center[0], center[2])
if alpha > math.pi:
alpha -= 2 * math.pi
if alpha <= -math.pi:
alpha += 2 * math.pi
return alpha | 785ee46456e373b28e5fcb4edd3a81a3e344abda | 25,337 |
def _normalize(vec):
"""Normalizes a list so that the total sum is 1."""
total = float(sum(vec))
return [val / total for val in vec] | 31ca018d688a5c28b89071e04049578737cd027d | 25,344 |
def to_float(string):
"""Converts a string to a float if possible otherwise returns None
:param string: a string to convert to a float
:type string: str
:return: the float or None if conversion failed and a success flag
:rtype: Union[Tuple[float, bool], Tuple[None, bool]]
"""
try:
return float(string), True
except ValueError:
return None, False | 00f3e16765aad9dc79e73cb687676893a743cc7f | 25,345 |
def month_add(date, months):
"""Add number of months to date"""
    # work with zero-based months so a December date does not roll over to month 0
    year, month = divmod(date.year * 12 + date.month - 1 + months, 12)
    return date.replace(year=year, month=month + 1) | 8252e82f8b7dae41a6d3dee8e6bc58b00c376aa7 | 25,349 |
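Quick sanity checks of the month arithmetic (note that `date.replace` will still raise if the target month has fewer days than the original day, e.g. Jan 31 plus one month):
from datetime import date
month_add(date(2021, 12, 15), 1)   # datetime.date(2022, 1, 15)
month_add(date(2021, 3, 1), -3)    # datetime.date(2020, 12, 1)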
def obtain_Pleth(tensec_data):
""" obtain Pulse Pleth values of ten second data
:param tensec_data: 10 seconds worth of heart rate data points
:return PlethData: Pulse Pleth unmultiplexed data
"""
PlethData = tensec_data[0::2]
return PlethData | 45159ae768aa8dfa0f67add0951fecb346a8557b | 25,351 |
def get_policy(crm_service, project_id, version=3):
"""Gets IAM policy for a project."""
policy = (
crm_service.projects()
.getIamPolicy(
resource=project_id,
body={"options": {"requestedPolicyVersion": version}},
)
.execute()
)
return policy | 984f40daa2a5e5334d7aa1adb3920c56f7a13a9b | 25,353 |
def get_log_data(lcm_log, lcm_channels, end_time, data_processing_callback, *args,
**kwargs):
"""
Parses an LCM log and returns data as specified by a callback function
:param lcm_log: an lcm.EventLog object
:param lcm_channels: dictionary with entries {channel : lcmtype} of channels
to be read from the log
:param data_processing_callback: function pointer which takes as arguments
(data, args, kwargs) where data is a dictionary with
entries {CHANNEL : [ msg for lcm msg in log with msg.channel == CHANNEL ] }
:param args: positional arguments for data_processing_callback
:param kwargs: keyword arguments for data_processing_callback
:return: return args of data_processing_callback
"""
data_to_process = {}
print('Processing LCM log (this may take a while)...')
t = lcm_log.read_next_event().timestamp
lcm_log.seek(0)
for event in lcm_log:
if event.channel in lcm_channels:
if event.channel in data_to_process:
data_to_process[event.channel].append(
lcm_channels[event.channel].decode(event.data))
else:
data_to_process[event.channel] = \
[lcm_channels[event.channel].decode(event.data)]
if event.eventnum % 50000 == 0:
print(f'processed {(event.timestamp - t)*1e-6:.1f}'
f' seconds of log data')
if 0 < end_time <= (event.timestamp - t)*1e-6:
break
    return data_processing_callback(data_to_process, *args, **kwargs) | 6e2a57f04af2e8a6dc98b756ff99fab50d161ffe | 25,354 |
from collections import OrderedDict
def stats(arr):
"""
Return the statistics for an input array of values
Args:
arr (np.ndarray)
Returns:
OrderedDict
"""
try:
        return OrderedDict([('min', arr.min()),
('max', arr.max()),
('mean', arr.mean()),
('std', arr.std())])
except ValueError: # Can happen if the input array is empty
return OrderedDict([('min', None),
('max', None),
('mean', None),
('std', None)]) | 2243b48e129096c76461ea6a877a6b2a511d21d0 | 25,356 |
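Example with a small NumPy array (assumes numpy is available); an empty array falls back to the except branch and yields all-None values:
import numpy as np
stats(np.array([1.0, 2.0, 3.0, 4.0]))
# -> OrderedDict([('min', 1.0), ('max', 4.0), ('mean', 2.5), ('std', 1.118...)])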
from typing import Callable
from typing import Iterable
from typing import Optional
from typing import Any
def find(predicate: Callable, sequence: Iterable) -> Optional[Any]:
"""
Find the first element in a sequence that matches the predicate.
??? Hint "Example Usage:"
```python
member = find(lambda m: m.name == "UserName", guild.members)
```
Args:
predicate: A callable that returns a boolean value
sequence: A sequence to be searched
Returns:
A match if found, otherwise None
"""
for el in sequence:
if predicate(el):
return el
return None | 32060a3bd3b578bb357e68dad626f71d0c8ea234 | 25,360 |
def get_final_values(iterable):
"""Returns every unique final value (non-list/tuple/dict/set) in an iterable.
For dicts, returns values, not keys."""
ret = list()
if type(iterable) == dict:
return(get_final_values(list(iterable.values())))
for entry in iterable:
if (type(entry) == tuple or type(entry) == list or type(entry) == set):
ret.extend(get_final_values(entry))
elif (type(entry) == dict):
ret.extend(get_final_values(entry.values()))
else:
            ret.append(entry)
return(set(ret)) | 466dd8542c87f8c03970ca424c608f83d326c2cb | 25,361 |
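Illustration with a small nested structure (invented for the example):
get_final_values([1, [2, 3], {"a": 4}, (5, 1)])
# -> {1, 2, 3, 4, 5}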
def yiq_to_rgb(yiq):
"""
Convert a YIQ color representation to an RGB color representation.
(y, i, q) :: y -> [0, 1]
i -> [-0.5957, 0.5957]
q -> [-0.5226, 0.5226]
:param yiq: A tuple of three numeric values corresponding to the
luma and chrominance.
:return: RGB representation of the input YIQ value.
:rtype: tuple
"""
y, i, q = yiq
r = y + (0.956 * i) + (0.621 * q)
g = y - (0.272 * i) - (0.647 * q)
b = y - (1.108 * i) + (1.705 * q)
r = 1 if r > 1 else max(0, r)
g = 1 if g > 1 else max(0, g)
b = 1 if b > 1 else max(0, b)
return round(r * 255, 3), round(g * 255, 3), round(b * 255, 3) | 0ede6cfacc368a3d225fe40b0c3fe505f066233b | 25,365 |
def read_y_n(inp):
""" Takes user's input as an argument and translates it to bool """
choice = input(inp)
if choice.lower() in ['y', 'yep', 'yeah', 'yes']:
return True
return False | cf1baee8d4b3e533ff0216c3d94e1bf6ed17a202 | 25,369 |
def delim() -> str:
"""80 char - delimiter."""
return '-' * 80 + '\n' | d74e847836632d3a7f7e2705d5b1dee0210d0161 | 25,371 |
def from_aws_tags(tags):
"""
Convert tags from AWS format [{'Key': key, 'Value': value}] to dictionary
:param tags
:return:
"""
return {tag['Key']: tag['Value'] for tag in tags} | a58931a29302154cc01656ece403d1468db1a6ab | 25,374 |
def index_nearest_shape(point, r_tree, shape_index_dict):
"""Returns the index of the nearest Shapely shape to a Shapely point.
Uses a Shapely STRtree (R-tree) to perform a faster lookup"""
result = None
if point.is_valid: # Point(nan, nan) is not valid (also not empty) in 1.8
geom = r_tree.nearest(point)
result = shape_index_dict[id(geom)]
return result | 08153395ec03d9f0496f3bc16a0a58c2e1d09d60 | 25,380 |
def example_globus(request):
"""Globus example data."""
return {
'identity_provider_display_name': 'Globus ID',
'sub': '1142af3a-fea4-4df9-afe2-865ccd68bfdb',
'preferred_username': '[email protected]',
'identity_provider': '41143743-f3c8-4d60-bbdb-eeecaba85bd9',
'organization': 'Globus',
'email': '[email protected]',
'name': 'Josiah Carberry'
}, {
'expires_in': 3599,
'resource_server': 'auth.globus.org',
'state': 'test_state',
'access_token': 'test_access_token',
'id_token': 'header.test-oidc-token.pub-key',
'other_tokens': [],
'scope': 'profile openid email',
'token_type': 'Bearer',
}, {
'identities': [
{
'username': '[email protected]',
'status': 'used',
'name': 'Josiah Carberry',
'email': '[email protected]',
'identity_provider':
'927d7238-f917-4eb2-9ace-c523fa9ba34e',
'organization': 'Globus',
'id': '3b843349-4d4d-4ef3-916d-2a465f9740a9'
}
]
} | 61c489bf3bdd66330c634326af895d0454a64406 | 25,383 |
def join_col(col):
"""Converts an array of arrays into an array of strings, using ';' as the sep."""
joined_col = []
for item in col:
joined_col.append(";".join(map(str, item)))
return joined_col | f6386d99e69e3a8c04da2d7f97aa7fb34ac9044c | 25,386 |
def read_words_from_file(f):
""" Reads a text file of words in the format '"word1","word2","word3"' """
txt = open(f).read()
return list(map(lambda s: s.strip('"'), txt.split(","))) | 669aeebd2cbfeb67cdc0cd65da2e58fdefa3bfe6 | 25,389 |
def to_ordinal(number):
"""Return the "ordinal" representation of a number"""
assert isinstance(number, int)
sr = str(number) # string representation
ld = sr[-1] # last digit
try:
# Second to last digit
stld = sr[-2]
except IndexError:
stld = None
if stld != '1':
if ld == '1':
return sr + 'st'
if ld == '2':
return sr + 'nd'
if ld == '3':
return sr + 'rd'
return sr + 'th' | 5974aa3abf05c9e200ec1d6fc05bdecc231d2b22 | 25,390 |
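A few sample values, including the 11/12/13 special cases handled by the second-to-last digit check:
[to_ordinal(n) for n in (1, 2, 3, 4, 11, 12, 13, 21, 102)]
# -> ['1st', '2nd', '3rd', '4th', '11th', '12th', '13th', '21st', '102nd']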
def mutate(codon, alt, index):
"""
Replace (mutate) a base in a codon with an
alternate base.
Parameters
----------
codon : str
three letter DNA sequence
alt : str
alternative base
index : int
index of the alt base in codon (0|1|2).
Returns
-------
str
codon with alternative base
Raises
------
AssertionError
error if index is not valid (0|1|2)
AssertionError
error if base is not valid (A|T|C|G)
"""
assert index in [0,1,2], "Not a valid index."
assert alt in ["A", "T", "C", "G"], "Not a valid base."
return "".join([alt if i == index else b for i,b in enumerate(codon)]) | 6db054a599846e104aa7bc8a4c565d37dad3b56e | 25,393 |
import calendar
def get_date_label(time):
"""Returns a nice label for timestamped months-years"""
split = [int(x) for x in time.split("-")]
return f"{calendar.month_abbr[split[1]]} {str(split[0])}" | 6495818e104e88f66119ab2cfdfcf6ac1756bc0d | 25,397 |
def _modify_payloads(checks, values, clear):
"""
Set or add payloads to checks.
:param checks: list of check instance
:param values: list of payloads with keys
:param clear: boolean flag if clearing the predefined payloads
:return: list of check instance
"""
for key, payloads in values.items():
for check in checks:
if check.key == key:
if clear:
check.set_payloads(payloads)
else:
check.add_payloads(payloads)
return checks | 799c93803131f32cfa6a7524d1ed20873c129f27 | 25,398 |
def split(arr, splits=2):
"""Split given array into `splits` smaller, similar sized arrays"""
if len(arr) < splits:
raise ValueError("Can't find more splits than array has elements")
new_size = int(len(arr) / splits)
return ([arr[n * new_size:(n + 1) * new_size] for n in range(splits - 1)]
+ [arr[(splits - 1) * new_size:]]) | 13d75bd5a15013e4d91fab9cca2d21e8bcc5e5f8 | 25,402 |
def float_to_digits_list(number):
"""Convert a float into a list of digits, without conserving exponant"""
# Get rid of scientific-format exponant
str_number = str(number)
str_number = str_number.split("e")[0]
res = [int(ele) for ele in str_number if ele.isdigit()]
    # Remove leading 0s from the front
while len(res) > 1 and res[0] == 0:
res.pop(0)
    # Remove trailing 0s at the end
while len(res) > 1 and res[-1] == 0:
res.pop(-1)
return res | 34a0f2e54899d7410bc6a3035c0fb3d599b9b3eb | 25,403 |
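Examples of the digit extraction, including a scientific-notation input whose exponent is dropped:
float_to_digits_list(0.00123)   # [1, 2, 3]
float_to_digits_list(1230.0)    # [1, 2, 3]
float_to_digits_list(1.05e-7)   # [1, 0, 5]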
def make_cmd_invocation(invocation, args, kwargs):
"""
>>> make_cmd_invocation('path/program', ['arg1', 'arg2'], {'darg': 4})
['./giotto-cmd', '/path/program/arg1/arg2/', '--darg=4']
"""
if not invocation.endswith('/'):
invocation += '/'
if not invocation.startswith('/'):
invocation = '/' + invocation
cmd = invocation
for arg in args:
cmd += str(arg) + "/"
rendered_kwargs = []
for k, v in kwargs.items():
rendered_kwargs.append("--%s=%s" % (k,v))
return ['./giotto-cmd', cmd] + rendered_kwargs | 19b969dc5a6536f56ba1f004b5b1bdc97ca0812f | 25,407 |
def f_call_1_1_1_kwds(a, /, b, *, c, **kwds):
"""
>>> f_call_1_1_1_kwds(1,2,c=3)
(1, 2, 3, {})
>>> f_call_1_1_1_kwds(1,2,c=3,d=4,e=5) == (1, 2, 3, {'d': 4, 'e': 5})
True
"""
return (a,b,c,kwds) | 8ce4616af2ca6985590c705d291f3408efcb8d34 | 25,409 |
def get_required_availability_type_modules(scenario_id, c):
"""
:param scenario_id: user-specified scenario ID
:param c: database cursor
:return: List of the required capacity type submodules
Get the required availability type submodules based on the database inputs
for the specified scenario_id. Required modules are the unique set of
generator availability types in the scenario's portfolio. Get the list
based on the project_availability_scenario_id of the scenario_id.
This list will be used to know for which availability type submodules we
should validate inputs, get inputs from database , or save results to
database.
Note: once we have determined the dynamic components, this information
will also be stored in the DynamicComponents class object.
"""
project_portfolio_scenario_id = c.execute(
"""SELECT project_portfolio_scenario_id
FROM scenarios
WHERE scenario_id = {}""".format(
scenario_id
)
).fetchone()[0]
project_availability_scenario_id = c.execute(
"""SELECT project_availability_scenario_id
FROM scenarios
WHERE scenario_id = {}""".format(
scenario_id
)
).fetchone()[0]
required_availability_type_modules = [
p[0]
for p in c.execute(
"""SELECT DISTINCT availability_type
FROM
(SELECT project FROM inputs_project_portfolios
WHERE project_portfolio_scenario_id = {}) as prj_tbl
INNER JOIN
(SELECT project, availability_type
FROM inputs_project_availability
WHERE project_availability_scenario_id = {}) as av_type_tbl
USING (project)""".format(
project_portfolio_scenario_id, project_availability_scenario_id
)
).fetchall()
]
return required_availability_type_modules | fbcdb1954c0364dd967a82d3d5eb968597c1db0a | 25,410 |
import torch
def ipca_transform(dataloader, components):
"""
Transform data using incremental PCA.
RH 2020
Args:
dataloader (torch.utils.data.DataLoader):
Data to be decomposed.
components (torch.Tensor or np.ndarray):
The components of the decomposition.
2-D array.
Each column is a component vector. Each row is a
feature weight.
"""
out = []
for iter_batch, batch in enumerate(dataloader):
out.append(batch[0] @ components.T)
return torch.cat(out, dim=0) | ed0cca90c0cfe2bd5cb2f4b0fed19aa320744410 | 25,411 |
def _position_is_valid(position):
"""
Checks if given position is a valid. To consider a position as valid, it
must be a two-elements tuple, containing values from 0 to 2.
Examples of valid positions: (0,0), (1,0)
Examples of invalid positions: (0,0,1), (9,8), False
:param position: Two-elements tuple representing a
position in the board. Example: (2, 2)
    Returns True if given position is valid, False otherwise.
"""
return (True if isinstance(position, tuple) and
#check length of tuple
len(position) == 2 and
#check height
position[0] in range(3) and
#check width
position[1] in range(3)
else False) | 8ac576f8bc856fdb587ade30d43ee86e7c7be1c1 | 25,413 |
def generate_numbers(partitions):
"""Return a list of numbers ranging from [1, partitions]."""
return list(range(1, partitions + 1)) | 8c1ad09496c4cbddb53c8b93d162d78e3e41b60e | 25,417 |
def yes_or_no(question):
"""Creates y/n question with handling invalid inputs within console
:param question: required question string
:type question: String
:return: True or False for the input
:rtype: bool
"""
while "the answer is invalid":
reply = str(input(question + " (y/n): ")).lower().strip()
if reply[0] == "y":
return True
if reply[0] == "n":
return False | e2ff75c0fdd40ef015ae703fc54d0e6d215fb0e5 | 25,424 |
from datetime import datetime
import pytz
def date_block_key_fn(block):
"""
If the block's date is None, return the maximum datetime in order
to force it to the end of the list of displayed blocks.
"""
return block.date or datetime.max.replace(tzinfo=pytz.UTC) | 33c4553704200e5355cd7d6807cd596192a2264b | 25,428 |
def extract_entities(input_data_tokens, entity_dict):
"""Extracts valid entities present in the input query.
Parses the tokenized input list to find valid entity values, based
on the given entity dataset.
Args:
input_data_tokens: A list of string tokens, without any punctuation,
based on the input string.
entity_dict: A dictionary of dictionary, of entity values for a
particular entity type.
Returns:
A list of valid entity values and their start, stop token index
locations in the tokenized input query.
[(['comedy', 'action'], 5, 7), (['suspense'], 9, 10)]
Always returns a list. If no valid entities are detected, returns
an empty list.
"""
detected_entities = []
length = len(input_data_tokens)
for i, word in enumerate(input_data_tokens):
if word in entity_dict:
start = i
stop = -1
            loc = i  # keeps track of the current cursor position
current_dict = entity_dict
# keeps track of the current dictionary data
while(loc <= length and current_dict):
if 1 in current_dict:
# tag index of a potential entity value if a
# longer entity is not present
stop = loc
if len(current_dict) == 1:
detected_entities.append(
(input_data_tokens[start:stop], start, stop)
)
stop = -1 # reset
# if end of query reached or mismatch in entity values,
# discard and move on to the next word
if loc == length or input_data_tokens[loc] not in current_dict:
# save a shorter entity, if it exists in the already \
# parsed query
if stop != -1:
detected_entities.append(
(input_data_tokens[start:stop], start, stop)
)
break
else:
# entity matches up until current word, continue
current_dict = current_dict[input_data_tokens[loc]]
loc += 1
return detected_entities | 516d0d9ae0df4a318808125b7e44bc327ecb8cff | 25,434 |
import math
def spiral(radius, step, resolution=.1, angle=0.0, start=0.0, direction=-1):
"""
Generate points on a spiral.
Original source:
https://gist.github.com/eliatlarge/d3d4cb8ba8f868bf640c3f6b1c6f30fd
Parameters
----------
radius : float
maximum radius of the spiral from the center.
Defines the distance of the tail end from the center.
step : float
amount the current radius increases between each point.
Larger = spiral expands faster
resolution : float
distance between 2 points on the curve.
Defines amount radius rotates between each point.
Larger = smoother curves, more points, longer time to calculate.
angle : float
starting angle the pointer starts at on the interior
start : float
starting distance the radius is from the center.
direction : {-1, 1}
direction of the rotation of the spiral
Returns
-------
coordinates : List[Tuple[float, float]]
"""
dist = start + 0.0
coords = []
while dist * math.hypot(math.cos(angle), math.sin(angle)) < radius:
cord = []
cord.append(dist * math.cos(angle) * direction)
cord.append(dist * math.sin(angle))
coords.append(cord)
dist += step
angle += resolution
return coords | cf7f6e22ef1f776bba9ed827b7f7243a45dde21b | 25,438 |
def hash_combine_zmw(zmw):
"""
Generate a unique hash for a ZMW, for use in downsampling filter.
"""
mask = 0xFFFF
upper = (zmw >> 16) & mask
lower = zmw & mask
result = 0
result ^= upper + 0x9e3779b9 + (result << 6) + (result >> 2)
result ^= lower + 0x9e3779b9 + (result << 6) + (result >> 2)
return result | d26ddb5c11a555eb3072fc2db23a3876d1751db4 | 25,443 |
def RecMult(num_1, num_2):
"""
Takes in two nonnegative numbers and return the multiplication result of the two numbers without using the multiplication operator *
Examples:
>>> RecMult(0,500)
0
>>> RecMult(500,0)
0
>>> RecMult(1,500)
500
>>> RecMult(500,1)
500
>>> RecMult(78,16)
1248
"""
if num_1 == 0 or num_2 == 0:
return 0
else:
return RecMult(num_1, num_2 - 1) + num_1 | 5012a5ca27a263d7f26da26842e62ba9d0e5c7ab | 25,445 |
def csv_list(csv_str):
"""
Parser function to turn a string of comma-separated values into a list.
"""
return [int(i) for i in csv_str.split(",")] | 674c75f980bc8d7b47c5ab28e9afd7a586d1c917 | 25,446 |
def read_config_file(fname):
"""
Reads the config file in and outputs a dictionary for the
program to run through.
"""
d = {}
with open(fname, 'r') as ptr:
for line in ptr:
splitline = line.split()
key = splitline[0]
value = ' '.join(splitline[1:])
temp = {key: value}
d.update(temp)
return d | 942175fae143b87ff57df08041b94465ac5c8eb1 | 25,447 |
def cp_max_calc(Ma):
"""
Calculates the maximum pressure coefficient for modified Newtonian flow
Inputs:
Ma: Free stream mach number
Outputs:
CpMax: Maximum pressure coefficient
"""
k = 1.4
PO2_pinf = (((k+1)**2 * Ma**2)/(4*k*Ma**2 - 2*(k-1)))**(k/(1-k)) * \
((1-k+2*k*Ma**2)/(k+1))
CpMax = (2/(k*Ma**2))*(PO2_pinf-1)
return CpMax | 61d91f52234347baaa3f04bd2b82f97ffa6a9fb2 | 25,451 |
def safe_decode(txt):
"""Return decoded text if it's not already bytes."""
try:
return txt.decode()
except AttributeError:
return txt | 2952daf31e29f45a25b6bb70aab89db08280e848 | 25,452 |
def fromHex( h ):
"""Convert a hex string into a int"""
return int(h,16) | ffae24cdade04d3ab4098f13643098dff2c69ef2 | 25,459 |
import json
import requests
def groupRemove(apikey,groupid):
"""
Removes the group and moves all containers to group 0 (ungrouped)
apikey: Your ApiKey from FileCrypt
groupid: the group ID(!) you want to delete
"""
data={"api_key":apikey,"fn":"group","sub":"remove","id":str(groupid)}
return json.loads(requests.post("https://filecrypt.cc/api.php",data=data).text) | ce5e444624d4b071261212f901b364c06169dcfb | 25,460 |
from typing import List
def inner(v: List[float], w: List[float]) -> float:
"""
Computes the inner product of two vectors.
Args:
v: The first vector.
w: The second vector.
Returns:
The inner product.
"""
output: float = sum([i * j for i, j in zip(v, w)])
return output | ed15537cee3c4f3daacdd395e7dd5c74b7b800bf | 25,463 |
def display_plan(plan):
"""Print out the payment plan name and details from stripe API plan object"""
return (f"{plan.metadata.display_name} - ${plan.amount / 100:.2f} "
f"USD per {plan.interval}") | 34831cc91f141a5254d76af793f0527a2cdee403 | 25,466 |
from typing import Any
from pathlib import Path
def sanitize_path(v: Any) -> Any:
"""Sanitize path.
Parameters:
v : Maybe a Path. If ``v`` is a ``Path`` object, it is converted to a string.
Returns:
The sanitized object.
"""
if isinstance(v, Path):
return str(v)
else:
return v | 581c235cdf3c9099103bf5820b578cf25b9392ca | 25,476 |
def remove_key(vglist,key):
"""
Accepts a list of dictionaries (vglist) and a list of keys.
Returns a list of dictionaries with each of the specified keys removed for all element of original list.
"""
new_list = []
for row in vglist:
for item in key:
row.pop(item,None)
new_list.append(row)
return new_list | 4bb1410b21829478851b68fce36158a9080f016f | 25,477 |
def keys_to_camel_case(value):
"""
Transform keys from snake to camel case (does nothing if no snakes are found)
:param value: value to transform
:return: transformed value
"""
def str_to_camel_case(snake_str):
components = snake_str.split("_")
return components[0] + "".join(x.title() for x in components[1:])
if value is None:
return None
if isinstance(value, (list, set)):
return list(map(keys_to_camel_case, value))
if isinstance(value, dict):
return dict(
(
keys_to_camel_case(k),
keys_to_camel_case(v) if isinstance(v, (list, dict)) else v,
)
for (k, v) in list(value.items())
)
return str_to_camel_case(value) | cefb6f4cb75e3d39ae8742b44239ee5a3f2b7b87 | 25,478 |
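Example transformation of a nested snake_case payload (keys invented for illustration):
keys_to_camel_case({"user_name": "ada", "login_count": 3, "past_sessions": [{"session_id": 1}]})
# -> {'userName': 'ada', 'loginCount': 3, 'pastSessions': [{'sessionId': 1}]}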
def prune_completions(prefix, all_test_names):
"""Filter returning only items that will complete the current prefix."""
completions = set()
for test_name in all_test_names:
if test_name.startswith(prefix):
next_break = test_name.find('.', len(prefix) + 1)
if next_break >= 0:
# Add only enough to complete this level; don't drown
# the user with all the possible completions from
# here.
completions.add(test_name[:next_break])
else:
# If there are no more levels, then add the full name
# of the leaf.
completions.add(test_name)
return completions | ad75ecc065dadddfb277329320ae888cf1e3535a | 25,481 |
import click
def validate_jaccard(ctx, param, value):
"""Ensure Jaccard threshold is between 0 and 1"""
if value is None:
return value
try:
jaccard = float(value)
assert jaccard <= 1
assert jaccard >= 0
return jaccard
except (ValueError, AssertionError):
raise click.BadParameter(
"--jaccard-threshold needs to be a number"
" between 0 and 1, but {} was provided".format(value)
) | 78dd5ca99f4fc2b5cdc50b3eaea6adb33a789c0a | 25,482 |