content (string, 39 to 14.9k chars) | sha1 (string, 40 chars) | id (int64, 0 to 710k)
---|---|---|
import sympy
def antal_h_coefficient(index, game_matrix):
"""
Returns the H_index coefficient, according to Antal et al. (2009), as given by equation 2.
H_k = \frac{1}{n^2} \sum_{i=1}^{n} \sum_{j=1}^{n} (a_{ki}-a_{ij})
Parameters
----------
index: int
game_matrix: sympy.Matrix
Returns
-------
out: sympy.Expr
Examples:
--------
>>> from sympy import Symbol, Matrix
>>> a = Symbol('a')
>>> antal_h_coefficient(0, Matrix([[a,2,3],[4,5,6],[7,8,9]]))
(2*a - 29)/9
>>> antal_h_coefficient(0, Matrix([[1,2,3],[4,5,6],[7,8,9]]))
-3
"""
size = game_matrix.shape[0]
suma = 0
for i in range(0, size):
for j in range(0, size):
suma = suma + (game_matrix[index, i] - game_matrix[i, j])
return sympy.together(sympy.simplify(suma / (size ** 2)), size) | 0e05a6a622ef24ff63b18b9c8b80348b860a16c3 | 702,877 |
from typing import List
from typing import Dict
def _cx_to_dict(list_of_dicts: List[Dict], key_tag: str = "k", value_tag: str = "v") -> Dict:
"""Convert a CX list of dictionaries to a flat dictionary."""
return {d[key_tag]: d[value_tag] for d in list_of_dicts} | ea80e9a50ea04536c2ed068d19220b56a9bdf3ed | 702,879 |
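A minimal usage sketch for `_cx_to_dict` above; the CX-style input data below is made up for illustration and is not part of the original record.
# Flatten CX-style attribute entries (each a {"k": ..., "v": ...} dict) into one dict.
attrs = [{"k": "name", "v": "TP53"}, {"k": "type", "v": "protein"}]
print(_cx_to_dict(attrs))  # {'name': 'TP53', 'type': 'protein'}
# Alternate key/value tags can be passed explicitly:
print(_cx_to_dict([{"po": 1, "at": 2}], key_tag="po", value_tag="at"))  # {1: 2}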
import typing
import pathlib
import itertools
def find_pyfiles() -> typing.Iterator[pathlib.Path]:
"""Return an iterator of the files to format."""
return itertools.chain(
pathlib.Path("../gdbmongo").rglob("*.py"),
pathlib.Path("../gdbmongo").rglob("*.pyi"),
pathlib.Path("../stubs").rglob("*.pyi"),
pathlib.Path("../tests").rglob("*.py")) | 74b0c11771799fba6090569595d24e70ec68899d | 702,880 |
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
hide = '__attribute__((visibility("hidden")))'
if config.check_gcc_function_attribute(hide, 'hideme'):
return hide
else:
return '' | b08e8515440c4bf1ebec51c4100e55fe9f14b17d | 702,882 |
def calculate_num_points_in_solution(solution):
"""Calculates the number of data points in the given solution."""
return sum(len(points) for points in solution.values()) | c75f7cb7d9c8c2731e4698040954c559b6b5d4ec | 702,883 |
import re
def get_polygon_speed(polygon_name):
"""Returns speed unit within a polygon."""
result = re.search(r"\(([0-9.]+)\)", polygon_name)
return float(result.group(1)) if result else None | 2d2cc99f30153c4fbc9ac358ad3debc15fc3227e | 702,884 |
import copy
def random_reset_mutation(random, candidate, args):
"""Return the mutants produced by randomly choosing new values.
This function performs random-reset mutation. It assumes that
candidate solutions are composed of discrete values. This function
makes use of the bounder function as specified in the EC's
``evolve`` method, and it assumes that the bounder contains
an attribute called *values* (which is true for instances of
``DiscreteBounder``).
The mutation moves through a candidate solution and, with rate
equal to the *mutation_rate*, randomly chooses a value from the
set of allowed values to be used in that location. Note that this
value may be the same as the original value.
.. Arguments:
random -- the random number generator object
candidate -- the candidate solution
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *mutation_rate* -- the rate at which mutation is performed (default 0.1)
The mutation rate is applied on an element by element basis.
"""
bounder = args['_ec'].bounder
try:
values = bounder.values
except AttributeError:
values = None
if values is not None:
rate = args.setdefault('mutation_rate', 0.1)
mutant = copy.copy(candidate)
for i, m in enumerate(mutant):
if random.random() < rate:
mutant[i] = random.choice(values)
return mutant
else:
return candidate | c357237e22e34b7496f8cc17f4ad0efa2bd4621d | 702,891 |
import re
def normalise_name(raw_name):
"""
Normalise the name to be used in python package allowable names.
conforms to PEP-423 package naming conventions
:param raw_name: raw string
:return: normalised string
"""
return re.sub(r"[-_. ]+", "_", raw_name).lower() | 2c9aea4a3e83fdb52f952d2308de29d8948f6917 | 702,893 |
def timestr_mod24(timestr):
"""
Given a GTFS HH:MM:SS time string, return a timestring in the same
format but with the hours taken modulo 24.
"""
try:
hours, mins, seconds = [int(x) for x in timestr.split(":")]
hours %= 24
result = "{:02d}:{:02d}:{:02d}".format(hours, mins, seconds)
except:
result = None
return result | ea049f4b31861de56b04dba6f4356ed46930af44 | 702,894 |
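A short usage sketch for `timestr_mod24` above; the GTFS-style time strings are chosen for illustration.
# GTFS allows times past midnight (e.g. "26:30:00" for 2:30 AM on the next service day).
print(timestr_mod24("26:30:00"))   # '02:30:00'
print(timestr_mod24("08:15:09"))   # '08:15:09'
print(timestr_mod24("bad input"))  # None (parsing fails and is caught)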
from functools import reduce
def constant_time_compare(x, y):
"""
Compares two byte strings in a way such that execution time is constant
regardless of how much alike the input values are, provided that they
are of the same length.
Comparisons between user input and secret data such as calculated
HMAC values need to be executed in constant time to avoid leaking
information to the caller via the timing side channel.
Params:
x: the first byte string to compare
y: the second byte string to compare
Return: True if x and y are equal, else False
"""
if len(x) != len(y):
return False
n = reduce(lambda a, b: a | b, (ord(x) ^ ord(y) for x, y in zip(x, y)))
return n == 0 | fe7fc348d367907eee2c9df3b7d0fbe232072714 | 702,900 |
def _str2bool(string):
"""Converts either 'true' or 'false' (not case-sensitively) into a boolean."""
if string is None:
return False
else:
string = string.lower()
if string == 'true':
return True
elif string == 'false':
return False
else:
raise ValueError(
'String should either be `true` or `false`: got {}'.format(string)) | 97755d1901a836bb1e3ce062afdbffc8b5b92de1 | 702,901 |
import math
def _get_alpha_bar_from_time(t):
"""
Noise scheduling method proposed by Nichol et al. to avoid overly noisy images, especially at smaller resolutions.
This strategy creates beta as follows:
alpha_bar(t) = f(t) / f(0)
f(t) = cos((t / T + s) / (1 + s) * PI / 2) ** 2
beta(t) = 1 - alpha_bar(t) / alpha_bar(t-1)
where s = 0.008 (~= 1 / 127.5), which ensures sqrt(beta_0) is slightly smaller than the pixel bin size.
"""
assert 0 <= t <= 1, "t must be normalized by max diffusion timestep (T)."
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 | ecb79239e2181d6e17db0b30885ec68d48b0a2d3 | 702,902 |
def ExtractModuleIdIfValidBreakpad(file_path):
"""Extracts breakpad file's module id if the file is valid.
A breakpad file is valid for extracting its module id if it
has a valid MODULE record, formatted like so:
MODULE operatingsystem architecture id name
For example:
MODULE mac x86_64 1240DF90E9AC39038EF400 Chrome Name
See this for more information:
https://chromium.googlesource.com/breakpad/breakpad/+/HEAD/docs/symbol_files.md#records-1
Args:
file_path: Path to breakpad file to extract module id from.
Returns:
Module id if file is a valid breakpad file; None, otherwise.
"""
module_id = None
with open(file_path, 'r') as file_handle:
# Reads a maximum of 200 bytes/characters. Malformed file or binary will
# not have '\n' character.
first_line = file_handle.readline(200)
fragments = first_line.rstrip().split()
if fragments and fragments[0] == 'MODULE' and len(fragments) >= 5:
# Symbolization script's input file format requires module id hexadecimal
# to be upper case.
module_id = fragments[3].upper()
return module_id | e846cf05976c2c1622160d1a2a639d605e072417 | 702,904 |
from datetime import datetime
import pytz
def _timestamp_to_iso_str(timestamp):
"""
Converts the timestamp value into an ISO 8601 string
Args:
timestamp(float): the timestamp to convert
Returns:
str: converted timestamp
"""
return datetime.fromtimestamp(timestamp, tz=pytz.utc).isoformat() | 89257c74a96c5335bc25ef617e41c0eeeb31021e | 702,905 |
def subtract_mean_vector(frame):
"""
Re-center the vectors in a DataFrame by subtracting the mean vector from
each row.
"""
return frame.sub(frame.mean(axis='rows'), axis='columns') | 4a6207889b958aebd608c349ad889e109ab3f4a9 | 702,906 |
def to_location(maiden: str, center: bool = False) -> tuple[float, float]:
"""
convert Maidenhead grid to latitude, longitude
Parameters
----------
maiden : str
Maidenhead grid locator of length 2 to 8
center : bool
If true, return the center of provided maidenhead grid square, instead of default south-west corner
Default value = False is needed to maintain full backward compatibility of this module.
Returns
-------
latLon : tuple of float
Geographic latitude, longitude
"""
maiden = maiden.strip().upper()
N = len(maiden)
if not (8 >= N >= 2 and N % 2 == 0):
raise ValueError("Maidenhead locator requires 2-8 characters, even number of characters")
Oa = ord("A")
lon = -180.0
lat = -90.0
# %% first pair
lon += (ord(maiden[0]) - Oa) * 20
lat += (ord(maiden[1]) - Oa) * 10
# %% second pair
if N >= 4:
lon += int(maiden[2]) * 2
lat += int(maiden[3]) * 1
# %%
if N >= 6:
lon += (ord(maiden[4]) - Oa) * 5.0 / 60
lat += (ord(maiden[5]) - Oa) * 2.5 / 60
# %%
if N >= 8:
lon += int(maiden[6]) * 5.0 / 600
lat += int(maiden[7]) * 2.5 / 600
# %% move lat lon to the center (if requested)
if center:
if N == 2:
lon += 20 / 2
lat += 10 / 2
elif N == 4:
lon += 2 / 2
lat += 1.0 / 2
elif N == 6:
lon += 5.0 / 60 / 2
lat += 2.5 / 60 / 2
elif N >= 8:
lon += 5.0 / 600 / 2
lat += 2.5 / 600 / 2
return lat, lon | fddcbdab4f3e0f812dd7fac3509e66bc63f8fb84 | 702,908 |
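A brief usage sketch for `to_location` above; the grid locator is an example chosen for illustration, not taken from the original record.
# 4-character grid square "IO91": south-west corner vs. centre of the square.
print(to_location("IO91"))               # (51.0, -2.0)
print(to_location("IO91", center=True))  # (51.5, -1.0)
# Odd-length or out-of-range locators raise ValueError.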
def flipDP(directionPointer: int) -> int:
"""
Cycles the direction pointer: 0 -> 1, 1 -> 2, 2 -> 3, 3 -> 0
:param directionPointer: unflipped directionPointer
:return: new DirectionPointer
"""
if directionPointer != 3:
return directionPointer + 1
return 0 | 928347a5c1934c822c77434ca9a91d913ef7f3b5 | 702,909 |
def process_link(link):
"""
Get text and link from an anchor
"""
return link.text_content(), link.get('href') | 34429289076c8518b076fdf0f228eb6948109c6c | 702,913 |
def merge_unique(list1, list2):
"""
Merge two lists and keep unique values
"""
for item in list2:
if item not in list1:
list1.append(item)
return list1 | ecfd32178541dcb5956d4c1c74dc9cea2ab1fa45 | 702,916 |
def increment(number: int) -> int:
"""Increment a number.
Args:
number (int): The number to increment.
Returns:
int: The incremented number.
"""
return number + 1 | 27f4becd9afb747b22de991ab4cf030b14d3dac5 | 702,917 |
import re
def normalize_name(name: str) -> str:
"""Replace hyphen (-) and slash (/) with underscore (_) to generate valid
C++ and Python symbols.
"""
name = name.replace('+', '_PLUS_')
return re.sub('[^a-zA-Z0-9_]', '_', name) | 46624c7180b1303e715d73aefe75cdd8e49b4a22 | 702,919 |
import torch
def project_to_2d(X, camera_params):
"""
Project 3D points to 2D using the Human3.6M camera projection function.
This is a differentiable and batched reimplementation of the original MATLAB script.
Arguments:
X -- 3D points in *camera space* to transform (N, *, 3)
camera_params -- intrinsic parameters (N, 2+2+3+2=9)
"""
assert X.shape[-1] == 3
assert len(camera_params.shape) == 2
assert camera_params.shape[-1] == 9
assert X.shape[0] == camera_params.shape[0]
while len(camera_params.shape) < len(X.shape):
camera_params = camera_params.unsqueeze(1)
f = camera_params[..., :2]
c = camera_params[..., 2:4]
k = camera_params[..., 4:7]
p = camera_params[..., 7:]
XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
r2 = torch.sum(XX[..., :2]**2, dim=len(XX.shape)-1, keepdim=True)
radial = 1 + torch.sum(k * torch.cat((r2, r2**2, r2**3), dim=len(r2.shape)-1), dim=len(r2.shape)-1, keepdim=True)
tan = torch.sum(p*XX, dim=len(XX.shape)-1, keepdim=True)
XXX = XX*(radial + tan) + p*r2
return f*XXX + c | 0727efaeecfa48540590461f7d9222c8e6071f6d | 702,924 |
import hashlib
def checksum_md5(filename, blocksize=8192):
"""Calculate md5sum.
Parameters
----------
filename : str or pathlib.Path
input filename.
blocksize : int
MD5 has 128-byte digest blocks (default: 8192 is 128x64).
Returns
-------
md5 : str
calculated md5sum.
"""
filename = str(filename)
hash_factory = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(blocksize), b''):
hash_factory.update(chunk)
return hash_factory.hexdigest() | 759c0c5cbc37ebe0cc85eb8156127308eff354bc | 702,927 |
def normalize_volume_and_number(volume, number):
"""
Padroniza os valores de `volume` e `number`
Args:
volume (None or str): volume se aplicável
number (None or str): número se aplicável
Notas:
- se `number` é igual a `ahead`, equivale a `None`
- se `00`, equivale a `None`
- se `01`, equivale a `1`
- se `""`, equivale a `None`
Returns:
tuple (str or None, str or None)
"""
if number == "ahead":
return None, None
if volume and volume.isdigit():
value = int(volume)
volume = str(value) if value > 0 else None
if number and number.isdigit():
value = int(number)
number = str(value) if value > 0 else None
volume = volume or None
number = number or None
return volume, number | 1ec42912b942f6c1b7e22d2e64c4d842af100877 | 702,928 |
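A small usage sketch for `normalize_volume_and_number` above, illustrating the cases listed in its docstring (inputs made up for illustration).
print(normalize_volume_and_number("00", "01"))    # (None, '1')
print(normalize_volume_and_number("5", "ahead"))  # (None, None)
print(normalize_volume_and_number("", "2"))       # (None, '2')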
from typing import List
from typing import Tuple
def style_close(style: List[Tuple[str, str]]) -> str:
"""
HTML tags to close a style.
>>> style = [
... ("font", 'color="red" size="32"'),
... ("b", ""),
... ("i", ""),
... ("u", ""),
... ]
>>> style_close(style)
'</u></i></b></font>'
Tags will always be in reverse (of open) order, so open - close will look like::
<b><i><u>text</u></i></b>
"""
return "".join("</{}>".format(x) for x, _ in reversed(style)) | 2043803e50230f139a6a60599b40c53461ed6ed8 | 702,929 |
def process_categories(cat_path):
""" Returns the mapping between the identifier of a category in
Places365 and its corresponding name
Args:
cat_path: Path containing the information about the Places365
categories
"""
result = {}
with open(cat_path) as f:
lines = f.readlines()
# Map each position to the corresponding category
for l in lines:
parts = l.split(' ')
raw_cat, num_cat = parts[0].rstrip(), int(parts[1].rstrip())
category = raw_cat[3:] # Erase prefix related to first letter
result[num_cat] = category
return result | f87741b990ee9ab9c8216112df73c7aa5bab8d49 | 702,940 |
import struct
def fread(f,byteLocation,structFormat=None,nBytes=1):
"""
Given an already-open (rb mode) file object, return a certain number of bytes at a specific location.
If a struct format is given, calculate the number of bytes required and return the object it represents.
"""
f.seek(byteLocation)
if structFormat:
val = struct.unpack(structFormat, f.read(struct.calcsize(structFormat)))
val = val[0] if len(val)==1 else list(val)
return val
else:
return f.read(nBytes) | ae86bb1f3bc839053ca34c8bf2b4beb0af04aaee | 702,942 |
def _get_year(obj):
"""
Get the year of the entry.
:param obj: year string object
:return: entry year or none if not valid value
"""
year = obj[0:4]
try:
year = int(year)
except ValueError:
year = None
return year | 6d3cbcc8759096ec3e798e90dc3a307b4a34b6ae | 702,945 |
import math
def lat_meters(lat: float) -> float:
"""
Transform latitude degree to meters.
Parameters
----------
lat : float
This represent latitude value.
Returns
-------
float
Represents the corresponding latitude value in meters.
Examples
--------
Latitude in Fortaleza: -3.71839
>>> from pymove.utils.conversions import lat_meters
>>> lat_meters(-3.71839)
110832.75545918777
"""
rlat = float(lat) * math.pi / 180
# meter per degree Latitude
meters_lat = (
111132.92 - 559.82 * math.cos(2 * rlat) + 1.175 * math.cos(4 * rlat)
)
# meter per degree Longitude
meters_lgn = 111412.84 * math.cos(rlat) - 93.5 * math.cos(3 * rlat)
meters = (meters_lat + meters_lgn) / 2
return meters | c31025552778b0039079d33e134e0b19077b83d4 | 702,949 |
def getFilteredLexicon(lexicondict, allwords):
""" Returns a lexicon with just the necessary words.
Assumes all wanted keys exist in the given lexicon """
return { word: lexicondict[word] for word in allwords } | db3c58b71df848ef5b1d49ebdd4670a552481c64 | 702,951 |
import unittest
def filter_suite(condition, suite):
"""Return tests for which ``condition`` is True in ``suite``.
:param condition: A callable receiving a test and returning True if the
test should be kept.
:param suite: A test suite that can be iterated. It contains either tests
or suite inheriting from ``unittest.TestSuite``.
``suite`` is a tree of tests and suites, the returned suite respect the
received suite layout, only removing empty suites.
"""
filtered_suite = suite.__class__()
for test in suite:
if issubclass(test.__class__, unittest.TestSuite):
# We received a suite, we'll filter a suite
filtered = filter_suite(condition, test)
if filtered.countTestCases():
# Keep only non-empty suites
filtered_suite.addTest(filtered)
elif condition(test):
# The test is kept
filtered_suite.addTest(test)
return filtered_suite | 9131d49d3f87a117d2b9625d521e932db2666860 | 702,952 |
import re
def read_cpr(path):
"""Read and parse a CPR file. Return masks, which are (id, RLE data).
Note: Python 3.6.4 docs say its XML module is not secure, so we use regexp.
"""
# Example: <Mask id="123">[12 34 56]</Mask>
mask_pattern = r'<Mask\s.*?id="(.*?)".*?>.*?\[(.*?)\].*?</Mask>'
text = path.read_text()
matches = re.finditer(mask_pattern, text, flags=re.DOTALL)
def parse_match(m):
number, mask = m.groups()
number = int(number)
mask = [int(x) for x in mask.split()]
return number, mask
masks = [parse_match(x) for x in matches]
return masks | b5781607c150d0521d816c9027660e2b5e4b0fe0 | 702,965 |
import math
def schaffer6(phenome):
"""The bare-bones Schaffer function 6."""
sum_of_squares = phenome[0] ** 2 + phenome[1] ** 2
result = math.sin(math.sqrt(sum_of_squares)) ** 2 - 0.5
result /= (1.0 + 0.001 * sum_of_squares) ** 2
result += 0.5
return result | 80f19b28ebf4511a0670dfa622ffd3c55e8ec670 | 702,966 |
import re
import socket
def get_host_name() -> str:
"""Get the host name.
:return: A string value.
"""
return re.sub(r"\.(?:local|lan)", "", socket.gethostname()) | 225889a0bf40943ef962903551bde85fd2729ac8 | 702,970 |
def fix_hyphen_commands(raw_cli_arguments):
"""Update options to match their module names with underscores."""
for i in ['gen-sample', 'run-python', 'run-stacker']:
raw_cli_arguments[i.replace('-', '_')] = raw_cli_arguments[i]
raw_cli_arguments.pop(i)
return raw_cli_arguments | cbc420547f1d3a04787d059aa09eb0199df82dba | 702,971 |
def read_line_offset(file, offset):
""" Seek to offset in file, read a line and return the line and new offset """
fp = open(file, "r")
fp.seek(offset, 0)
line = fp.readline()
offset = fp.tell()
fp.close()
return (line, offset) | 429d592cf44e1f287eea5a67302a78473eb2362f | 702,973 |
def calculate_bmi(weight, height):
"""
Calculates BMI given the weight and height as float
"""
bmi = (weight / (height * height)) * 703.0
return bmi | 8495b11598e50516dca80965d5063df92aa78f40 | 702,974 |
def argstr(arg): # pragma: no cover
"""
Return string representation of a function argument.
:param object arg: Argument object. Differs between Python versions.
:return: String name of argument.
"""
if isinstance(arg, str):
return arg
if 'id' in arg._fields:
return arg.id
return arg.arg | 565349fc4a1fb9b28e8333a44893925cea9e72d9 | 702,976 |
def getDistance(sensor):
"""Return the distance of an obstacle for a sensor."""
# Special case, when the sensor doesn't detect anything, we use an
# infinite distance.
if sensor.getValue() == 0:
return float("inf")
return 5.0 * (1.0 - sensor.getValue() / 1024.0) | 68ff9e0c8c4dd7687e328d3b9c4634677cfe25cd | 702,978 |
import math
def circles_intersection(x_a, y_a, r_a, x_b, y_b, r_b):
"""
Returns one of the intersection points of two circles
:param x_a: x coordinate of the center of first circle
:param y_a: y coordinate of the center of first circle
:param r_a: radius of first circle
:param x_b: x coordinate of the center of second circle
:param y_b: y coordinate of the center of second circle
:param r_b: radius of second circle
:return: Array of coordinates of one of the intersection points
"""
k = x_a*x_a - x_b*x_b + y_a*y_a - y_b*y_b - r_a*r_a + r_b*r_b
c = 0.5 * k / (y_a - y_b)
m = (x_a - x_b) / (y_b - y_a)
g = c - y_a
aa = 1 + m*m
bb = 2*m*g - 2*x_a
cc = x_a*x_a + g*g - r_a*r_a
xi = 0.5 * (-bb + math.sqrt(bb*bb - 4*aa*cc)) / aa
yi = m*xi + c
return [xi, yi] | dc504bbe970c83bf1caf7727613ecaa44c1c818e | 702,985 |
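A worked example for `circles_intersection` above, with circle centres I picked for illustration; note that the formula divides by (y_a - y_b), so the two centres must not share the same y coordinate.
# Circles of radius 2 centred at (0, 0) and (0, 2) intersect at (+/-sqrt(3), 1).
point = circles_intersection(0.0, 0.0, 2.0, 0.0, 2.0, 2.0)
print(point)  # [1.7320508075688772, 1.0]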
def bubble_sort(vals):
"""Sort the given array using bubble sort."""
for i in range(len(vals) - 1):
for j in range(len(vals) - i - 1):
if vals[j] > vals[j + 1]:
vals[j], vals[j + 1] = vals[j + 1], vals[j]
return vals | ee0286dd53da0fbfc508fa1c26dbf913ba8f92ce | 702,993 |
def public_byte_prefix(is_test):
"""Address prefix. Returns b'\0' for main network and b'\x6f' for testnet"""
return b'\x6f' if is_test else b'\0' | 3ecc4fe0cbbd8dee1a91f90e0112d9be3184fc73 | 702,994 |
import json
def _json_read(filename):
"""Read a json into a dict."""
with open(filename) as file:
return json.load(file) | 84187d2a2281d2725adb8dae903253bdcd41e2b9 | 702,996 |
def addLists(list1, list2):
"""Add lists together by value. i.e. addLists([1,1], [2,2]) == [3,3]."""
# Find big list and small list
blist, slist = list(list2), list(list1)
if len(list1) > len(list2):
blist, slist = slist, blist
# Overlay small list onto big list
for i, b in enumerate(slist):
blist[i] += b
return blist | f5469dab8fd2c62d2d3ffed253803c1a3d343281 | 702,997 |
import tarfile
def files_from_archive(tar_archive: tarfile.TarFile):
"""
Extracts only the actual files from the given tarfile
:param tar_archive: the tar archive from which to extract the files
:return: List of file object extracted from the tar archive
"""
file_members = []
# Find the actual files in the archive
for member in tar_archive.getmembers():
if member.isreg(): # extract the actual files from the archive
file_members.append(member)
files = []
file_names = []
for file_member in file_members:
files.append(tar_archive.extractfile(file_member))
# Extract the file names without the top level directory from the file members
file_names.append("/".join(file_member.name.split("/")[1:]))
return files, file_names | 09ead0b2b955afdc5bf96a8e0a8717989c155406 | 702,998 |
def split_nth(string, count):
"""
Splits string to equally-sized chunks
"""
return [string[i:i+count] for i in range(0, len(string), count)] | 38833ef711ce04f5563b343f477e7792035ec669 | 703,003 |
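A quick usage sketch for `split_nth` above (example string chosen for illustration).
print(split_nth("abcdefg", 3))  # ['abc', 'def', 'g'] -- the last chunk may be shorter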
import requests
def check_all_links(links):
""" Check that the provided links are valid.
Links are considered valid if a HEAD request to the server
returns a 200 status code.
"""
broken_links = []
for link in links:
head = requests.head(link)
if head.status_code != 200:
broken_links.append(link)
return broken_links | b6e784da72b4f81af3e393804ef3f776c2f3fc85 | 703,006 |
def compute_seen_words(inscription_list):
"""Computes the set of all words seen in phrases in the game"""
return {word
for inscription in inscription_list
for phrase in inscription['phrases']
for word in phrase.split()} | 496fc64ee37a6a6b0b3df0c3e9230d7b7ef46d0f | 703,008 |
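A short usage sketch for `compute_seen_words` above; the inscription data is invented for illustration.
inscriptions = [{"phrases": ["the quick fox", "the lazy dog"]},
                {"phrases": ["quick brown fox"]}]
print(compute_seen_words(inscriptions))
# {'the', 'quick', 'fox', 'lazy', 'dog', 'brown'} (a set, so order may vary)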
from typing import List
def _get_create_repo(request) -> List[str]:
"""
Retrieves the list of all GIT repositories to be created.
Args:
request: The pytest requests object from which to retrieve the marks.
Returns: The list of GIT repositories to be created.
"""
names = request.config.getoption("--create-repo", [])
# names.extend(request.node.get_closest_marker("create_repo", []))
# * Split ',' separated lists
# * Remove duplicates - see conftest.py::pytest_collection_modifyitems()
names = [name for i in names for name in i.split(",")]
return list(set(names)) | 4ac8cefefb75af3bb86fcc16f5c8b79953b136bf | 703,012 |
def _check_electrification_scenarios_for_download(es):
"""Checks the electrification scenarios input to :py:func:`download_demand_data`
and :py:func:`download_flexibility_data`.
:param set/list es: The input electrification scenarios that will be checked. Can
be any of: *'Reference'*, *'Medium'*, *'High'*, or *'All'*.
:return: (*set*) -- The formatted set of electrification scenarios.
:raises TypeError: if es is not input as a set or list, or if the components of es
are not input as str.
:raises ValueError: if the components of es are not valid.
"""
# Check that the input is of an appropriate type
if not isinstance(es, (set, list)):
raise TypeError("Electrification scenarios must be input as a set or list.")
# Check that the components of es are str
if not all(isinstance(x, str) for x in es):
raise TypeError("Individual electrification scenarios must be input as a str.")
# Reformat components of es
es = {x.capitalize() for x in es}
if "All" in es:
es = {"Reference", "Medium", "High"}
# Check that the components of es are valid
if not es.issubset({"Reference", "Medium", "High"}):
invalid_es = es - {"Reference", "Medium", "High"}
raise ValueError(f'Invalid electrification scenarios: {", ".join(invalid_es)}')
# Return the reformatted es
return es | ccd1ec8f0b1349267ba1334f7744056bc43e32ec | 703,014 |
def header_is_sorted_by_coordinate(header):
"""Return True if bam header indicates that this file is sorted by coordinate.
"""
return 'HD' in header and 'SO' in header['HD'] and header['HD']['SO'].lower() == 'coordinate' | b656770806818abe742be32bc14c31a8a8e3e535 | 703,016 |
import types
def is_variable(tup):
""" Takes (name, object) tuple, returns True if it is a variable.
"""
name, item = tup
# callable()
# checks whether an object is callable. If it returns True, calling the
# object may still fail; but if it returns False, calling the object will
# never succeed. It returns True for functions, methods, lambda functions,
# classes, and instances of classes that implement __call__.
if callable(item):
# function or class
return False
if isinstance(item, types.ModuleType):
# imported module
return False
if name.startswith("_"):
# private property
return False
return True | 81055d1ed252160c417b386c875e818b87780f14 | 703,020 |
def mps_to_kmph(mps):
"""
Transform a value from meters-per-second to kilometers-per-hour
"""
return mps * 3.6 | fee133def1727801e5e473d3ffb2df6c7e733a04 | 703,022 |
def create_list_id_title(sheets: list) -> list:
"""
Args:
sheets: a list of all the sheets of a spreadsheet;
each sheet is represented as a dict with the following fields
"sheets" : [
{
"properties": {
"sheetId": 0,
"title": "Sheet1",
"index": 0,
"sheetType": "GRID",
"gridProperties": {
"rowCount": 1000,
"columnCount": 26
}
}
},
...
]
Returns:
the output of the function will be a list of dicts in the format
[{'SheetId': 123, 'Sheet title': 'title'}, ...]
"""
result = []
for sheet in sheets:
sheetId = sheet.get('properties').get('sheetId')
sheet_title = sheet.get('properties').get('title')
result.append({'SheetId': sheetId, 'Sheet title': sheet_title})
return result | a32d2cbfce6f06d326f49e69983e05e67bfc1697 | 703,025 |
def parse_glyphs_groups(names, groups):
"""
Parse a ``gstring`` and a groups dict into a list of glyph names.
"""
glyph_names = []
for name in names:
# group names
if name[0] == '@':
group_name = name[1:]
if group_name in groups:
glyph_names += groups[group_name]
else:
print('project does not have a group called %s.\n' % group_name)
# glyph names
else:
glyph_names.append(name)
return glyph_names | 50e79acffdc6d26576e8524b52219afad3e40a4e | 703,027 |
def perc_range(n, min_val, max_val, rounding=2):
"""
Return percentage of `n` within `min_val` to `max_val` range. The
``rounding`` argument is used to specify the number of decimal places to
include after the floating point.
Example::
>>> perc_range(40, 20, 60)
50
"""
return round(
min([1, max([0, n - min_val]) / (max_val - min_val)]) * 100, rounding) | 379515f6c0483b4bfed93d0c1012bb2ca111e410 | 703,032 |
def sort_sequence_by_key(sequence, key_name, reverse=False):
"""
often when setting up initial serializations (especially during testing),
I pass a list of dictionaries representing a QS to some fn.
That list may or may not be sorted according to the underlying model's "order" attribute
This fn sorts the list according to the value of "key" in each list item;
typically, "key" would match the "order" attribute of the model
:param key_name: name of key to sort by
:param sequence: sequence to sort
:return:
"""
def _sorting_fn(item):
# using this fn ensures that 'sort_sequence_by_key' will work
# for a list of dictionaries or a list of objects
# (the latter is a special use-case; a QS can use the '.order_by' filter, but an actual list of models cannot)
try:
return item.get(key_name)
except AttributeError:
return getattr(item, key_name)
sorted_sequence = sorted(
sequence,
key=lambda item: _sorting_fn(item),
reverse=reverse,
)
return sorted_sequence | fbe46c942ac35d5399450c6bba430a096e6b7503 | 703,033 |
def width_from_bitdefs(bitdefs):
"""
Determine how wide a binary value needs to be based on bitdefs used
to define it.
Args:
bitdefs (list(BitDef)): List of bitdefs to find max width of
Returns:
(int): Maximum width
"""
max_index = max([bitdef.end for bitdef in bitdefs])
width = max_index + 1
return width | 59503f335d6d427579be730806c738108091e9ed | 703,034 |
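A quick sketch for `width_from_bitdefs` above; `BitDef` here is a hypothetical stand-in namedtuple with an `end` attribute, since the real class is not part of this record.
from collections import namedtuple

BitDef = namedtuple("BitDef", ["start", "end"])  # assumed shape, for illustration only
print(width_from_bitdefs([BitDef(0, 3), BitDef(4, 7)]))  # 8 (highest end index 7 -> width 8)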
from typing import Sequence
import difflib
def _validate_magics_with_black(before: Sequence[str], after: Sequence[str]) -> bool:
"""
Validate the state of the notebook before and after running nbqa with black.
Parameters
----------
before
Notebook contents before running nbqa with black
after
Notebook contents after running nbqa with black
Returns
-------
bool
True if validation succeeded else False
"""
diff = difflib.unified_diff(before, after)
result = "".join(i for i in diff if any([i.startswith("+ "), i.startswith("- ")]))
expected = (
'- "def compute(operand1,operand2, bin_op):\\n",\n'
'+ "def compute(operand1, operand2, bin_op):\\n",\n'
'- "compute(5,1, operator.add)"\n'
'+ "compute(5, 1, operator.add)"\n'
'- " ?str.splitlines"\n'
'+ "str.splitlines?"\n'
'- " %time randint(5,10)"\n'
'+ "%time randint(5,10)"\n'
'- "result = str.split??"\n'
'+ "str.split??"\n'
)
return result == expected | e6e655f2e6ea5e8d055f27e8da14cf0233c0c202 | 703,037 |
def get_var_names(var_name):
"""Defines replacement dictionary for the bare variable name and
the names derived from it - the optimization flag and the identifier name.
"""
repl = dict()
repl['opt_var_name'] = "Opt_%s"%var_name
repl['id_var_name'] = "ID_%s"%var_name
repl['var_name'] = var_name
return repl | 37999ffed0a0df1dbf736ada0cc355080dd9997f | 703,046 |
from typing import List
from typing import Dict
import re
def parse_header_links(value: str) -> List[Dict[str, str]]:
"""
Returns a list of parsed link headers, for more info see:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link
The generic syntax of those is:
::
Link: < uri-reference >; param1=value1; param2="value2"
So for instance:
Link; '<http:/.../front.jpeg>; type="image/jpeg",<http://.../back.jpeg>;'
would return
::
[
{"url": "http:/.../front.jpeg", "type": "image/jpeg"},
{"url": "http://.../back.jpeg"},
]
.. note::
Stolen code from httpx _utils.py (private method)
:param value: HTTP Link entity-header field
:return: list of parsed link headers
"""
links: List[Dict[str, str]] = []
replace_chars = " '\""
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ""
link = {"url": url.strip("<> '\"")}
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links | 536d3f2b477666c076ac29312f3acfe63f40e324 | 703,049 |
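A brief usage sketch for `parse_header_links` above, using a made-up Link header value.
header = '<https://example.com/page2>; rel="next", <https://example.com/page9>; rel="last"'
print(parse_header_links(header))
# [{'url': 'https://example.com/page2', 'rel': 'next'},
#  {'url': 'https://example.com/page9', 'rel': 'last'}]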
def parseFloat(value, ret=0.0):
"""
Parses a value as float.
This function works similarly to its JavaScript counterpart, and performs
checks to parse most of a string value as float.
:param value: The value that should be parsed as float.
:param ret: The default return value if no float could be parsed.
:return: Either the parsed value as float, or ret if parsing was not possible.
"""
if value is None:
return ret
if not isinstance(value, str):
value = str(value)
conv = ""
value = value.strip()
dot = False
for ch in value:
if ch not in "+-0123456789.":
break
if ch == ".":
if dot:
break
dot = True
conv += ch
try:
return float(conv)
except ValueError:
return ret | 6828cc19882dcbfcf7b83781b1e79a6014034cad | 703,050 |
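A short usage sketch for `parseFloat` above (inputs chosen for illustration).
print(parseFloat("42.5kg"))        # 42.5  -- trailing non-numeric text is ignored
print(parseFloat("  -3.14  "))     # -3.14
print(parseFloat("abc", ret=0.0))  # 0.0   -- nothing parsable, default returned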
def discard_inserted_documents(error_documents, original_documents):
"""Discard any documents that have already been inserted which are violating index constraints
such documents will have an error code of 11000 for a DuplicateKey error
from https://github.com/mongodb/mongo/blob/master/src/mongo/base/error_codes.yml#L467
Parameters:
error_documents (List[Dict]): list of documents that failed to insert in original transaction
original_documents (List[Dict]): list of documents from original transaction that failed
Note: only error entries with code 11000 (DuplicateKey) are used for filtering.
Returns:
List[Dict]: list of documents with matching error code entries removed
"""
# doc['op'] returns the actual document from the previous transaction
errors = list(doc['op'] for doc in error_documents if doc['code'] == 11000)
return list(doc for doc in original_documents if doc not in errors) | 9d7d47a0ade2300449a7f1a4a20c3a70f6dce583 | 703,051 |
def list_folder(drive, folder_id):
"""
Lists contents of a GoogleDriveFile that is a folder
:param drive: Drive object to use for getting folders
:param folder_id: The id of the GoogleDriveFile
:return: The GoogleDriveList of folders
"""
_q = {'q': "'{}' in parents and trashed=false".format(folder_id)}
return drive.ListFile(_q).GetList() | 1ea5837d6096f2f9c1f0485d5a02bd43a8b8c55e | 703,052 |
import struct
def htons(cpu_context, func_name, func_args):
"""
Convert the provided 16-bit number in host byte order (little-endian) to network byte order (big-endian)
"""
le_port = func_args[0]
port_data = struct.pack("<H", le_port)
return struct.unpack(">H", port_data)[0] | 095be37630fbe0dc86ea5e731180249c29ccac85 | 703,054 |
def ordinal_number(n: int):
"""
Returns a string representation of the ordinal number for `n`
e.g.,
>>> ordinal_number(1)
'1st'
>>> ordinal_number(4)
'4th'
>>> ordinal_number(21)
'21st'
"""
# from https://codegolf.stackexchange.com/questions/4707/outputting-ordinal-numbers-1st-2nd-3rd#answer-4712
return'%d%s' % (n, 'tsnrhtdd'[(n // 10 % 10 != 1) * (n % 10 < 4) * (n % 10)::4]) | 9cb2b333cfe7d4e7b115d21d9d3c1bbaec02cdd9 | 703,057 |
def mode_batch_size(mode, hparams):
"""Returns the batch size for a given mode (train or eval).
Args:
mode: Either 'train' or 'eval'.
hparams: Hyperparameters.
Returns:
Integer batch size.
Raises:
ValueError: If mode is not 'train' or 'eval'.
"""
if mode == 'train':
return hparams.batch_size
elif mode == 'eval':
return hparams.eval_batch_size
else:
raise ValueError('Invalid --mode=%r' % mode) | 42f45e54698a539b27ad764b0c6584fb3620990d | 703,059 |
from typing import Optional
import ast
def _parse_type_comment(
type_comment: Optional[str],
) -> Optional[ast.expr]:
"""
Attempt to parse a type comment. If it is None or if it fails to parse,
return None.
"""
if type_comment is None:
return None
try:
# pyre-ignore[16]: the ast module stubs do not have full details
return ast.parse(type_comment, "<type_comment>", "eval").body
except SyntaxError:
return None | 49aafd9df3d590ccebf1c4b20b0c2a04640a9797 | 703,061 |
def identity(obj):
"""
Identity function computing no operation
Parameters
----------
obj : object
any object
Returns
-------
obj
the input object itself
"""
return obj | 252a44ce3251b74ad25e28bf6bff462f6227f04b | 703,067 |
def extract(key, items):
"""Return the sorted values from dicts using the given key.
:param key: Dictionary key
:type key: str | unicode
:param items: Items to filter.
:type items: [dict]
:return: Set of values.
:rtype: [str | unicode]
"""
return sorted(item[key] for item in items) | d39b6d42a08ea8e2e7d7488fb00a28243c9a6718 | 703,070 |
def format_kvps(mapping, prefix=""):
"""Formats a mapping as key=value pairs.
Values may be strings, numbers, or nested mappings.
Nested mappings, e.g. host:{ip:'0.0.0.1',name:'the.dude.abides'},
will be handled by prefixing keys in the sub-mapping with the key,
e.g.: host.ip=0.0.0.1 host.name=the.dude.abides.
"""
kvp_list = []
for k, v in mapping.items():
if hasattr(v, "keys"): # nested mapping
new_prefix = prefix + "." + k if prefix else k
kvps = format_kvps(v, prefix=new_prefix) # format as string
kvp_list.append(kvps)
continue # already prefixed with key; go to next
if v is None:
v = "None"
elif isinstance(v, int) or isinstance(v, float):
v = "{}".format(v)
elif " " in v:
v = '"' + v.replace('"', '\\"') + '"'
if prefix:
k = prefix + "." + k
kvp_list.append("{}={}".format(k, v))
return " ".join(kvp_list) | 4780d5c705a8805331a1d981e87fa3d3dca263a8 | 703,072 |
import torch
def project(meta_weights, P, Q):
""" project meta_weights to sub_weights
Args:
meta_weights: a 4-D tensor [cout, cin, k, k], the meta weights for one-shot model;
P: a 2-D tensor [cout, cout_p], projection matrix along cout;
Q: a 2-D tensor [cin, cin_p], projection matrix along cin;
Return:
proj_weights: a 4-D tensor [cout_p, cin_p, k, k], the projected weights;
"""
if meta_weights.ndimension() != 4:
raise ValueError("shape error! meta_weights should be 4-D tensors")
elif meta_weights.shape[0] != P.shape[0] or meta_weights.shape[1] != Q.shape[0]:
raise ValueError("shape mismatch! The projection axises of meta weights, P and Q should be consistent.")
proj_weights = torch.einsum('ijhw,ix,jy->xyhw', meta_weights, P, Q)
return proj_weights | a693d51a4a74358bd831954c46bb8cb993dabf66 | 703,074 |
def _is_not_blank(line):
"""Return true if `line` is not blank."""
return len(line.split())>0 | 835d991d71dcb59075b6ae2b317c8fb8c51abee9 | 703,076 |
def overlaps(box1, box2):
"""
Checks whether two boxes have any overlap.
Args:
box1: (float, float, float, float)
Box coordinates as (x0, y0, x1, y1).
box2: (float, float, float, float)
Box coordinates as (x0, y0, x1, y1).
Returns:
bool
True if there is any overlap between given boxes.
"""
if not ((box1[0] <= box2[0] <= box1[2])
or (box1[0] <= box2[2] <= box1[2])
or (box2[0] <= box1[0] and box2[2] >= box1[2])):
return False
if not ((box1[1] <= box2[1] <= box1[3])
or (box1[1] <= box2[3] <= box1[3])
or (box2[1] <= box1[1] and box2[3] >= box1[3])):
return False
return True | b8bf96e87f45f24d337b503184670d0f56d209e0 | 703,077 |
def to_sendeable_block_original_bus(array_of_msgs):
"""Given an array of msgs return it in the format
panda.can_send_many expects.
Input looks like: [(512, bytearray(b'>\x80\x1c'), 0), ...]
Output looks like: [(512, '>\x80\x1c', 0), ...]
"""
new_arr = []
for msg in array_of_msgs:
new_arr.append((msg[0], msg[1], str(msg[2]), msg[3]))
return new_arr | 24fd1cdf8c1bfed095d1685a8eaa246c89425958 | 703,080 |
import yaml
def _GetErrorDetailsSummary(error_info):
"""Returns a string summarizing `error_info`.
Attempts to interpret error_info as an error JSON returned by the Apigee
management API. If successful, the returned string will be an error message
from that data structure - either its top-level error message, or a list of
precondition violations.
If `error_info` can't be parsed, or has no known error message, returns a YAML
formatted copy of `error_info` instead.
Args:
error_info: a dictionary containing the error data structure returned by the
Apigee Management API.
"""
try:
if "details" in error_info:
# Error response might have info on exactly what preconditions failed or
# what about the arguments was invalid.
violations = []
for item in error_info["details"]:
# Include only those details whose format is known.
detail_types = (
"type.googleapis.com/google.rpc.QuotaFailure",
"type.googleapis.com/google.rpc.PreconditionFailure",
"type.googleapis.com/edge.configstore.bundle.BadBundle",
)
if item["@type"] in detail_types and "violations" in item:
violations += item["violations"]
descriptions = [violation["description"] for violation in violations]
if descriptions:
return error_info["message"] + "\n" + yaml.dump(descriptions)
# Almost always seems to be included.
return error_info["message"]
except KeyError:
# Format of the error details is not as expected. As a fallback, just give
# the user the whole thing.
return "\n" + yaml.dump(error_info) | 276e8520bc5ca790fc61c71e0f43ebee0d78b597 | 703,084 |
def format_duration_hhmm(d):
"""
Utility function to format durations in the widget as hh:mm
"""
if d is None:
return ''
elif isinstance(d, str):
return d
hours = d.days * 24 + d.seconds // 3600
minutes = int(round((d.seconds % 3600) / 60))
return '{}:{:02}'.format(hours, minutes) | 941670b1a16c87816589cfb05ff80b651857d337 | 703,086 |
def try_int(obj):
"""return int(obj), or original obj if failed"""
try:
return int(obj)
except ValueError: # invalid literal for int()
return obj | 21613ce19e86f5c5545e9dc4b161f6ddc95fd0ce | 703,088 |
def _anonymous_model_data(ops_data):
"""Returns a dict representing an anonymous model.
ops_data must be a dict representing the model operations. It will
be used unmodified for the model `operations` attribute.
"""
return {"model": "", "operations": ops_data} | 6b64f9098b30cf3e079311b75ca136cfc2f7038f | 703,090 |
def encode_to_dict(encoded_str):
""" 将encode后的数据拆成dict
>>> encode_to_dict('name=foo')
{'name': foo'}
>>> encode_to_dict('name=foo&val=bar')
{'name': 'foo', 'val': 'var'}
"""
pair_list = encoded_str.split('&')
d = {}
for pair in pair_list:
if pair:
key = pair.split('=')[0]
val = pair.split('=')[1]
d[key] = val
return d | a3af4d93d13404f01511483621e166c50d9e489e | 703,091 |
def taxonomy_levels_below(taxa_level):
"""
E.g. 'Order' --> ['Family', 'Genus']
"""
p_levels = ['Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus']
position_of_taxa_level = p_levels.index(taxa_level)
return p_levels[position_of_taxa_level + 1:] | 57e41b0042daa14b8ed09469006c6c3bdac320ec | 703,092 |
def sort(nodes, total_order, dedup=False):
"""Sorts nodes according to order provided.
Args:
nodes: nodes to sort
total_order: list of nodes in correct order
dedup: if True, also discards duplicates in nodes
Returns:
Iterable of nodes in sorted order.
"""
total_order_idx = {}
for i, node in enumerate(total_order):
total_order_idx[node] = i
if dedup:
nodes = set(nodes)
return sorted(nodes, key=lambda n: total_order_idx[n]) | 4cfb53593b3c862ab918507ea3d4fe94ab4899df | 703,093 |
def _test_against_patterns(patterns, entity_id):
"""Test entity against list of patterns, true if any match."""
for pattern in patterns:
if pattern.match(entity_id):
return True
return False | 16881be06e86ae3fe1afc9dc8fb642573cf6fdcf | 703,097 |
def find_root_visual(conn):
"""Find the xcffib.xproto.VISUALTYPE corresponding to the root visual"""
default_screen = conn.setup.roots[conn.pref_screen]
for i in default_screen.allowed_depths:
for v in i.visuals:
if v.visual_id == default_screen.root_visual:
return v | 930bb700bdcb141aba9fc4370d244b588357f8f1 | 703,099 |
import re
def creole_slugify(value):
"""Convert the given string to a slug consistent with heading IDs used
by our creole parser.
>>> creole_slugify("Only 20%!")
"only-20"
"""
if not value:
return value
# Only keep alphanumeric and space characters.
value = re.sub(r"[^a-zA-Z0-9 ]+", "", value)
# replace runs of whitespace or hyphens with a single hyphen
value = re.sub(r'[-\s]+', '-', value)
return value.lower() | fe621981715372c4a9c03179d862b745551d87f2 | 703,100 |
def async_wraps(cls, wrapped_cls, attr_name):
"""Similar to wraps, but for async wrappers of non-async functions."""
def decorator(func):
func.__name__ = attr_name
func.__qualname__ = ".".join((cls.__qualname__, attr_name))
func.__doc__ = """Like :meth:`~{}.{}.{}`, but async.
""".format(
wrapped_cls.__module__, wrapped_cls.__qualname__, attr_name
)
return func
return decorator | c93fb2a52bfcb3edc9cbf0442138daa1ecf84dda | 703,103 |
def qgrams_to_char(s: list) -> str:
"""Converts a list of q-grams to a string.
Parameters
----------
s : list
List of q-grams.
Returns
-------
A string from q-grams.
"""
if len(s) == 1:
return s[0]
return "".join([s[0]] + [s[i][-1] for i in range(1, len(s))]) | cc7dc5eb4d5c9e3e5f751cf7c2190e68c3ba11bd | 703,109 |
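A quick usage sketch for `qgrams_to_char` above, assuming a list of overlapping q-grams as input.
# Overlapping bigrams of "abcd" reassembled into the original string.
print(qgrams_to_char(["ab", "bc", "cd"]))  # 'abcd'
print(qgrams_to_char(["hello"]))           # 'hello' (single q-gram returned as-is)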
import re
def arguments_from_docstring(doc):
"""Parse first line of docstring for argument name.
Docstring should be of the form ``min(iterable[, key=func])``.
It can also parse cython docstring of the form
``Minuit.migrad(self[, int ncall_me =10000, resume=True, int nsplit=1])``
"""
if doc is None:
raise RuntimeError('__doc__ is None')
doc = doc.lstrip()
# care only the firstline
# docstring can be long
line = doc.split('\n', 1)[0] # get the firstline
if line.startswith("('...',)"):
line = doc.split('\n', 2)[1] # get the second line
p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
# 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
sig = p.search(line)
if sig is None:
return []
# iterable[, key=func]' -> ['iterable[' ,' key=func]']
sig = sig.groups()[0].split(',')
ret = []
for s in sig:
# get the last one after all space after =
# ex: int x= True
tmp = s.split('=')[0].split()[-1]
# clean up non _+alphanum character
tmp = ''.join([x for x in tmp if x.isalnum() or x == '_'])
ret.append(tmp)
# re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
# ret += self.docstring_kwd_re.findall(s)
ret = list(filter(lambda x: x != '', ret))
if len(ret) == 0:
raise RuntimeError('Your doc is unparsable\n' + doc)
return ret | 4b08f36678247df6119e594ff9859f697f2e8d23 | 703,114 |
def _check_mod_11_2(numeric_string: str) -> bool:
"""
Validate numeric_string for its MOD-11-2 checksum.
Any "-" in the numeric_string are ignored.
The last digit of numeric_string is assumed to be the checksum, 0-9 or X.
See ISO/IEC 7064:2003 and
https://support.orcid.org/knowledgebase/articles/116780-structure-of-the-orcid-identifier
"""
# Strip -
nums = numeric_string.replace("-", "")
total = 0
# skip last (check)digit
for num in nums[:-1]:
digit = int(num)
total = (total + digit) * 2
remainder = total % 11
result = (12 - remainder) % 11
if result == 10:
checkdigit = "X"
else:
checkdigit = str(result)
# Compare against last digit or X
return nums[-1].upper() == checkdigit | 685a9e8085000248290c9e482a115c99942c51d1 | 703,115 |
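A brief usage sketch for `_check_mod_11_2` above, using the well-known sample ORCID 0000-0002-1825-0097.
print(_check_mod_11_2("0000-0002-1825-0097"))  # True  -- valid MOD-11-2 check digit
print(_check_mod_11_2("0000-0002-1825-0096"))  # False -- check digit altered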
def dictize(aniter, mode, initial=None):
"""iter must contain (key,value) pairs. mode is a string, one of: replace, keep,
tally, sum, append, or a custom function that takes two arguments.
replace: default dict behavior. New value overwrites old if key exists. This
is essentially a pass-thru.
keep: Noop if kv already exists in dict.
tally: Ignore value, count how many times each key occurs.
sum: Each key contains a sum of the (presumably summable) values that arrive
with that key.
append: each key contains a list of the items that arrived with that key.
add: each key contains a set of the items that arrived with that key.
Custom func: The first argument is the existing key value. This function
won't be called if the key doesn't exist. The second is the newly arrived value.
The return value will replace the existing value in the internal dict
initial optional argument: function that gets called the first time a key
occurs. Its parameter is the value. Its return value is placed in the dict. Use
to specify a default value."""
data = {}
modes = "replace keep tally sum append add".split(' ')
funcs = [lambda e, n: n,
lambda e, n: e,
lambda e, n=None: e+1,
lambda e, n: e+n,
lambda e, n: e+[n],
lambda e, n: e.union([n])]
inits = [lambda v: v,
lambda v: v,
lambda v: 1,
lambda v: v,
lambda v: [v],
lambda v: set([v])]
if mode in modes:
modei = modes.index(mode)
func = funcs[modei]
if not initial:
initial = inits[modei]
else:
assert hasattr(mode, '__call__'), '2nd argument must be a function or\
one of: %s' % ' '.join(modes)
func = mode
if not initial:
initial = lambda x: x
for (k, v) in aniter:
if k in data:
data[k] = func(data[k], v)
else:
data[k] = initial(v)
return data | c56a2ad83ec9a45e87caa7def33c6b51f63655cb | 703,125 |
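A short usage sketch for `dictize` above, showing a few of the built-in modes on the same (key, value) pairs (data made up for illustration).
pairs = [("a", 1), ("b", 2), ("a", 3)]
print(dictize(pairs, "append"))  # {'a': [1, 3], 'b': [2]}
print(dictize(pairs, "sum"))     # {'a': 4, 'b': 2}
print(dictize(pairs, "tally"))   # {'a': 2, 'b': 1}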
def read_txt_file(file_path, n_num=-1, code_type='utf-8'):
"""
read .txt files; get all text, or only the first n_num lines
:param file_path: string, the path of this file
:param n_num: int, denote the row number decided by \n, but -1 means all text
:param code_type: string, the code of this file
:return: string, text
"""
with open(file_path, 'r', encoding=code_type) as f:
if n_num <= 0:
text = f.read().strip()
else: # n_num > 0
text = '\n'.join([f.readline() for _ in range(n_num)])
return text | 9c55d370d8e8610965e0f8c4b1bed85e6adcdc5b | 703,128 |
import torch
def l1_loss(pred_traj, pred_traj_gt):
"""
Input:
:param pred_traj: Tensor of shape (batch, seq_len)/(batch, seq_len). Predicted trajectory along one dimension.
:param pred_traj_gt: Tensor of shape (batch, seq_len)/(batch, seq_len).
Ground truth predictions along one dimension.
:return: l1 loss |pred_traj - pred_traj_gt|
"""
return torch.sum(torch.abs(pred_traj - pred_traj_gt), dim=-1, keepdim=True) | 3fb4dd2b7fc85e8f32610065078aa3dc98d728d5 | 703,131 |
def _is_subexpansion_optional(query_metadata, parent_location, child_location):
"""Return True if child_location is the root of an optional subexpansion."""
child_optional_depth = query_metadata.get_location_info(child_location).optional_scopes_depth
parent_optional_depth = query_metadata.get_location_info(parent_location).optional_scopes_depth
return child_optional_depth > parent_optional_depth | 29391226258e75d434e07c291fe9590c1810d85b | 703,134 |
def _filter_labels(text, labels, allowed_labels):
"""Keep examples with approved labels.
:param text: list of text inputs.
:param labels: list of corresponding labels.
:param allowed_labels: list of approved label values.
:return: (final_text, final_labels). Filtered version of text and labels
"""
final_text, final_labels = [], []
for text, label in zip(text, labels):
if label in allowed_labels:
final_text.append(text)
final_labels.append(label)
return final_text, final_labels | e17ed7659acbdadc71a6b3f5b522af1e34d40370 | 703,135 |
def array_check(lst):
"""
Function to check whether 1,2,3 exists in given array
"""
for i in range(len(lst)-2):
if lst[i] == 1 and lst[i+1] == 2 and lst[i+2] == 3:
return True
return False | ef60b52a9d7300fa458b503f49996aec0f0831ad | 703,139 |
from typing import List
from typing import Any
def list_difference(list_1: List[Any], list_2: List[Any]) -> List[Any]:
""" This Function that takes two lists as parameters
and returns a new list with the values that are in l1, but NOT in l2"""
differ_list = [values for values in list_1 if values not in list_2]
return differ_list | 3831d799bb40080b828ee90e2929f77ff8aeb7ba | 703,140 |
def correlation(self, column_a, column_b):
"""
Calculate correlation for two columns of current frame.
Parameters
----------
:param column_a: (str) The name of the column from which to compute the correlation.
:param column_b: (str) The name of the column from which to compute the correlation.
:return: (float) Pearson correlation coefficient of the two columns.
Notes
-----
This method applies only to columns containing numerical data.
Examples
--------
Consider Frame *my_frame*, which contains the data
<hide>
>>> s = [("idnum", int), ("x1", float), ("x2", float), ("x3", float), ("x4", float)]
>>> rows = [ [0, 1.0, 4.0, 0.0, -1.0], [1, 2.0, 3.0, 0.0, -1.0], [2, 3.0, 2.0, 1.0, -1.0], [3, 4.0, 1.0, 2.0, -1.0], [4, 5.0, 0.0, 2.0, -1.0]]
>>> my_frame = tc.frame.create(rows, s)
-etc-
</hide>
>>> my_frame.inspect()
[#] idnum x1 x2 x3 x4
===============================
[0] 0 1.0 4.0 0.0 -1.0
[1] 1 2.0 3.0 0.0 -1.0
[2] 2 3.0 2.0 1.0 -1.0
[3] 3 4.0 1.0 2.0 -1.0
[4] 4 5.0 0.0 2.0 -1.0
my_frame.correlation computes the common correlation coefficient (Pearson's) on the pair
of columns provided.
In this example, the *idnum* and most of the columns have trivial correlations: -1, 0, or +1.
Column *x3* provides a contrasting coefficient of 3 / sqrt(3) = 0.948683298051 .
>>> my_frame.correlation("x1", "x2")
-0.9999999999999998
>>> my_frame.correlation("x1", "x4")
nan
>>> my_frame.correlation("x2", "x3")
-0.9486832980505138
"""
return self._scala.correlation(column_a, column_b) | b8f1600e0b2968ca4013418b2fbfda0b13f5911a | 703,143 |
def good_fft_number(goal):
"""pick a number >= goal that has only factors of 2,3,5. FFT will be much
faster if I use such a number"""
assert goal < 1e5
choices = [2**a * 3**b * 5**c for a in range(17) for b in range(11)
for c in range(8)]
return min(x for x in choices if x >= goal) | e469a37f28869dca520aea3520fa0d763e9bb8ae | 703,149 |
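A small usage sketch for `good_fft_number` above.
print(good_fft_number(1000))  # 1000 (already 2**3 * 5**3)
print(good_fft_number(1001))  # 1024 (next number with only factors 2, 3, 5)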
def connect_to_ecs(env):
"""
Return boto connection to the ecs in the specified environment's region.
"""
rh = env.resource_handler.cast()
wrapper = rh.get_api_wrapper()
client = wrapper.get_boto3_client(
'ecs',
rh.serviceaccount,
rh.servicepasswd,
env.aws_region
)
return client | e4c0b7ad80c18fd6d2a90df6670ca9bfa6f1cbe3 | 703,150 |
def rotated_array_search(input_list, number):
"""
Find the index by searching in a rotated sorted array
"""
high = len(input_list) - 1
low = 0
while low <= high:
mid = (low + high) // 2
if input_list[mid] == number:
return mid
elif input_list[mid] < number <= input_list[high]:
low = mid + 1
else:
if input_list[low] <= number:
high = mid - 1
else:
low = mid + 1
return -1 | c3deb50e608c58e5e11665d949a602cc661305ae | 703,152 |
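A brief usage sketch for `rotated_array_search` above, on a sorted array rotated at index 4 (example data chosen for illustration).
arr = [4, 5, 6, 7, 0, 1, 2]
print(rotated_array_search(arr, 6))   # 2
print(rotated_array_search(arr, 0))   # 4
print(rotated_array_search(arr, 10))  # -1 (not present)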