content | sha1 | id |
---|---|---|
def get_tl_num_size(val: int) -> int:
"""
Calculate the length of a TL variable.
:param val: an integer standing for Type or Length.
:return: The number of bytes needed to encode the value.
"""
if val <= 0xFC:
return 1
elif val <= 0xFFFF:
return 3
elif val <= 0xFFFFFFFF:
return 5
else:
return 9 | 4a11da075f57d98b2956e7932adf0cc9a14645a7 | 27,712 |
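A quick sanity check of the thresholds above (a sketch; the literal inputs are chosen to hit each branch, and the byte counts in the comments assume NDN-style TLV variable-size encoding):
assert get_tl_num_size(0xFC) == 1           # fits in one octet
assert get_tl_num_size(0xFD) == 3           # marker octet + 2-byte value
assert get_tl_num_size(0xFFFF) == 3
assert get_tl_num_size(0x1_0000) == 5       # marker octet + 4-byte value
assert get_tl_num_size(0x1_0000_0000) == 9  # marker octet + 8-byte value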
def dict2yaml(yaml_dict: dict) -> str:
"""
Convert the YAML dict into the YAML front matter string.
Parameters
----------
yaml_dict : dict
Dict made from the YAML front matter.
Returns
-------
str
YAML front matter into string.
"""
yaml_text = "---\n"
for i in yaml_dict:
line = f"{i}: {yaml_dict[i]}"
yaml_text += f"{line.strip()}\n"
yaml_text += "---\n"
return yaml_text | b2bfd40ad5f45f2725384a9de12fbfcc0d7fe9d1 | 27,715 |
def remove_rectangle(rlist, u_low, u):
"""
Function to remove non-optimal rectangle
from the list of rectangles
Parameters
----------
rlist : list
List of rectangles.
u_low : list
Lower bound of the rectangle to remove.
u : list
Upper bound of the rectangle to remove.
Returns
-------
list
Updated list of rectangles.
"""
return [r for r in rlist if r != [u_low, u]] | 4353fc9bb1187b565abcb5932465940ada6ad678 | 27,717 |
def lcp(s1, s2):
"""Return the length of the longest common prefix
between strings `s1` and `s2`."""
comp = 0
for i in range(min(len(s1), len(s2))):
if s1[i] != s2[i]:
break
comp += 1
return comp | dae5fbe70e9684f7e9a336adbd4e74d874dd52f5 | 27,718 |
import torch
def pr(x, y):
""" Metrics calculation from: https://en.wikipedia.org/wiki/Confusion_matrix
Returns precision, recall, specificity and f1 (in that order)
"""
tp = ((x == y) * (x == 1)).sum().to(torch.float32)
tn = ((x == y) * (x == 0)).sum().to(torch.float32)
fp = ((x != y) * (x == 1)).sum().to(torch.float32)
fn = ((x != y) * (x == 0)).sum().to(torch.float32)
pr = tp / (tp + fp)
rc = tp / (tp + fn)
sp = tn / (tn + fp)
f1 = (2 * tp) / (2 * tp + fp + fn)
return pr, rc, sp, f1 | b2095585e3283b8c301c992ea241158c612b4d3b | 27,719 |
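A minimal usage sketch, assuming `x` holds binary predictions and `y` the ground-truth labels (as the TP/FP definitions above imply); note that a zero denominator yields NaN:
import torch
preds = torch.tensor([1, 0, 1, 1, 0, 1])
labels = torch.tensor([1, 0, 0, 1, 1, 1])
precision, recall, specificity, f1 = pr(preds, labels)
# tp=3, fp=1, fn=1, tn=1 -> precision 0.75, recall 0.75, specificity 0.5, f1 0.75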
import re
def _strip_ken_suffix(name):
"""
>>> _strip_ken_suffix('Akita ken')
'Akita'
>>> _strip_ken_suffix('Akita-ken')
'Akita'
>>> _strip_ken_suffix('Akita Prefecture')
'Akita'
"""
return re.sub(r'[- ](ken|prefecture)', '', name, flags=re.IGNORECASE) | 736dae9f335d5a22e3faace4e947ec02798d3fa2 | 27,721 |
from collections import OrderedDict
import six
def BuildFullMapUpdate(clear, remove_keys, set_entries, initial_entries,
entry_cls, env_builder):
"""Builds the patch environment for an environment update.
To be used when BuildPartialUpdate cannot be used due to lack of support for
field masks containing map keys.
Follows the environments update semantic which applies operations
in an effective order of clear -> remove -> set.
Leading and trailing whitespace is stripped from elements in remove_keys
and the keys of set_entries.
Args:
clear: bool, If true, the patch removes existing keys.
remove_keys: iterable(string), Iterable of keys to remove.
set_entries: {string: string}, Dict containing entries to set.
initial_entries: [AdditionalProperty], list of AdditionalProperty class with
key and value fields, representing starting dict to update from.
entry_cls: AdditionalProperty, The AdditionalProperty class for the type
of entry being updated.
env_builder: [AdditionalProperty] -> Environment, A function which produces
a patch Environment with the given list of entry_cls properties.
Returns:
Environment, a patch environment produced by env_builder.
"""
# Transform initial entries list to dictionary for easy processing
entries_dict = OrderedDict(
(entry.key, entry.value) for entry in initial_entries)
# Remove values that are no longer desired
if clear:
entries_dict = OrderedDict()
remove_keys = set(k.strip() for k in remove_keys or [])
for key in remove_keys:
if key in entries_dict:
del entries_dict[key]
# Update dictionary with new values
# set_entries is sorted by key to make it easier for tests to set the
# expected patch object.
set_entries = OrderedDict(
(k.strip(), v) for k, v in sorted(six.iteritems(set_entries or {})))
entries_dict.update(set_entries)
# Transform dictionary back into list of entry_cls
return env_builder([
entry_cls(key=key, value=value)
for key, value in six.iteritems(entries_dict)
]) | dd43cfe6db69076c61a211d9534f0a5624f76246 | 27,722 |
def exception_match(x, y):
"""Check the relation between two given exception `x`, `y`:
- `x` equals to `y`
- `x` is a subclass/instance of `y`
Note that `BaseException` should be considered.
e.g. `GeneratorExit` is a subclass of `BaseException` but which is not a
subclass of `Exception`, and it is technically not an error.
"""
return (
(issubclass(x, Exception) or issubclass(x, BaseException))
and issubclass(x, y)
) | 6ea965c70c9980834a4b31baac802b5bad295e2a | 27,728 |
import math
def trunc(value, decimals=0):
"""Truncates values after a number of decimal points
:param value: number to truncate
:type value: float
:param decimals: number of decimals points to keep
:type decimals: int
:return: truncated float
:rtype: float
"""
step = 10 ** decimals
return math.trunc(value * step)/step | fda9289eae3274b7c8cb1bd172032fc3c0e7f8f0 | 27,730 |
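For example, trunc keeps digits without rounding:
print(trunc(3.14159, 2))  # 3.14
print(trunc(-2.718, 1))   # -2.7
print(trunc(1999.99))     # 1999.0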
def identifier_to_label(identifier):
"""Tries to convert an identifier to a more human readable text label.
Replaces underscores by spaces and may do other tweaks.
"""
txt = identifier.replace("_", " ")
txt = txt.replace(" id", "ID")
txt = dict(url="URL").get(txt, txt)
txt = txt[0].upper() + txt[1:]
return txt | 8dbbac38e4e0408354128bf8da0dabbf72d785ae | 27,734 |
def standings_to_str(standings):
"""Format standings list as string. Use enumerate()
index value both to display place. Ties receive the
same place value.
Format: "<place>. <team name> (<points>)\n"
Parameters:
standings (list): team standings
Returns:
str: formatted string representation of list
"""
string = '\n'
prev_points = 0
place_tie = 0
for i, team in enumerate(standings, 1):
points = team['points']
if points == prev_points:
place = place_tie
else:
place, place_tie = i, i # increment
string += f"{place}. {team['name']} ({points})\n"
prev_points = points # increment
return string | be19aff13a20c0c421bf4b58a8f309d36ab9d973 | 27,737 |
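A usage sketch with made-up team dicts (only the 'name' and 'points' keys are assumed); tied teams share a place and the next place is skipped:
standings = [
{'name': 'Arsenal', 'points': 70},
{'name': 'Chelsea', 'points': 70},
{'name': 'Spurs', 'points': 65},
]
print(standings_to_str(standings))
# 1. Arsenal (70)
# 1. Chelsea (70)
# 3. Spurs (65)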
def getGameFromCharacter(cname: str) -> str:
"""Return a query to get games with equal Ryu Number to a character.
The query will retrieve the title and Ryu Number of a game whose Ryu
Number is exactly equal to the Ryu Number of the character whose name is
passed. This is used primarily for path-finding towards Ryu.
The resulting query takes the following form for game as G:
`(G.title: str, G.ryu_number: int)`
"""
return (f"SELECT DISTINCT G.title, G.ryu_number "
f"FROM appears_in "
f"INNER JOIN game_character AS C ON cname=C.name "
f"INNER JOIN game AS G ON gtitle=G.title "
f"WHERE cname LIKE '{cname}' AND G.ryu_number=C.ryu_number;"
) | 22e6a5702f69d3d7ca391add6144c8e19734c5a9 | 27,738 |
def is_valid_cidr_netmask(cidr_netmask: str) -> bool:
"""
This function will check that the netmask given in
parameter is a correct mask for IPv4 IP address.
Using to verify a netmask in CIDR (/24) format.
:param cidr_netmask: Netmask to check
:return bool: True if the netmask is valid
"""
return str(cidr_netmask).isdigit() and \
int(cidr_netmask) >= 0 and \
int(cidr_netmask) <= 32 | 3e63d4cf2e9d748977230f7b9c26bf6e1440d313 | 27,739 |
from typing import List
def clean_authors(authors_list: List[str], sign: str) -> List[str]:
"""
Cleans a list of author names by splitting them on a given sign.
Args:
authors_list (:obj:`List[str]`):
A list of author names.
sign (:obj:`str`):
Sign that separates author names in the list.
Returns:
:obj:`List[str]`:
A list of split author names.
"""
if authors_list:
authors = list()
for author in authors_list:
if sign in author:
authors.extend([name.strip() for name in author.split(sign)])
else:
authors.append(author)
return authors
return authors_list | 65d7e625d0e7e98f95c3e2ae4877d5d744518686 | 27,741 |
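For example, splitting on a semicolon (the author strings here are made up):
authors = ["Doe, John; Smith, Jane", "Brown, Alice"]
print(clean_authors(authors, ";"))
# ['Doe, John', 'Smith, Jane', 'Brown, Alice']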
def _get_crash_key(crash_result):
"""Return a unique identifier for a crash."""
return f'{crash_result.crash_type}:{crash_result.crash_state}' | 0f2472bf984440bb27cd94e1e3fb132529b4fad1 | 27,742 |
def midpoint(A, B):
""" calculates the midpoint between 2 points"""
return (A[0]+B[0])/2, (A[1]+B[1])/2, (A[2]+B[2])/2; | d9895cfed02a86b3a0b9117ff6e697d69c173a96 | 27,743 |
from typing import List
def filter_stacktrace(stacktrace: List[str]) -> List[str]:
"""Removes those frames from a formatted stacktrace that are located
within the DHParser-code."""
n = 0
for n, frame in enumerate(stacktrace):
i = frame.find('"')
k = frame.find('"', i + 1)
if frame.find("DHParser", i, k) < 0:
break
return stacktrace[n:] | ac92251eadc4f53c3a2f14e7386c38ee2aa70e17 | 27,744 |
def avg(iter, key=None, percent=0.0):
"""
Custom average (optionally trimmed).
:param iter:
:param key:
:param percent: fraction to trim (drop this share of the largest and smallest values before averaging)
:return:
"""
nlen = len(iter)
if nlen == 0:
return 0
# number of values to drop from each end
del_num = int(nlen * percent / 2)
if del_num >= nlen:
return 0
new_iter = iter
if percent > 0:
# sort first
new_iter = sorted(iter, key=key)
if del_num > 0:
new_iter = new_iter[del_num:-del_num]
# compute the mean of the trimmed list
nlen = len(new_iter)
total_v = 0
for i in new_iter:
if key is None:
total_v += i
else:
total_v += key(i)
return total_v / nlen | 6305c908d4726412843b5fa936daf04e313096de | 27,748 |
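A usage sketch showing how the trimming parameter suppresses outliers:
values = [1, 2, 3, 4, 100]
print(avg(values))               # plain mean: 22.0
print(avg(values, percent=0.4))  # drops 1 value from each end: (2 + 3 + 4) / 3 = 3.0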
from typing import List
def is_course_section_row(row_cols: List, time_index: int) -> bool:
"""Determines if a row in a course table contains data for a course section.
:param row_cols: A row in a course table.
:param time_index: The column index where the time data is possibly stored.
:return: True if the row contains course section data, and False otherwise.
"""
if len(row_cols) <= time_index:
return False
time = row_cols[time_index].getText().strip()
return len(time) > 2 and time[2] == ':' | 640c92bf289d0b9734912140d6624ce639c05e39 | 27,752 |
def readline(string):
"""
read a line from string
"""
x = ""
i = 0
while i < len(string):
if string[i] != "\n":
x += string[i]
i += 1
else:
return x + "\n"
return x | 67a081e2cba9e791ebcf3d60e42222a89d7c5429 | 27,753 |
import hashlib
def _hashdigest(message, salt):
""" Compute the hexadecimal digest of a message using the SHA256 algorithm."""
processor = hashlib.sha256()
processor.update(salt.encode("utf8"))
processor.update(message.encode("utf8"))
return processor.hexdigest() | 2c9c5886d72700826da11a62f4cc9c82c2078090 | 27,757 |
def inv_permutation(permutation):
"""Get the inverse of a permutation. Used to invert a transposition for example.
Args:
permutation (list or tuple): permutation to invert.
Returns:
list
"""
inverse = [0] * len(permutation)
for i, p in enumerate(permutation):
inverse[p] = i
return inverse | ab75f150d9df12d6bbec64fbe4744d962b9de1c6 | 27,760 |
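A usage sketch: applying a permutation and then its inverse restores the original order:
perm = [2, 0, 1]
inv = inv_permutation(perm)            # [1, 2, 0]
data = ['a', 'b', 'c']
shuffled = [data[p] for p in perm]     # ['c', 'a', 'b']
restored = [shuffled[p] for p in inv]  # ['a', 'b', 'c']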
def parent_user_password(db, parent_user):
"""Creates a parent website user with a password."""
user = parent_user
user.set_password('password')
user.save()
return user | 56718190793179034d5cce86e5bc6060c8d5d5a2 | 27,761 |
def _get_val_list(obj, path_list, reverse=False):
"""Extract values from nested objects by attribute names.
Objects contain attributes which are named references to objects. This will descend
down a tree of nested objects, starting at the given object, following the given
path.
Args:
obj: object
Any type of object
path_list: list
Attribute names
reverse: bool
Reverse the list of values before concatenation.
Returns:
list of objects
"""
try:
y = getattr(obj, path_list[0])
except AttributeError:
return []
if len(path_list) == 1:
return [y]
else:
val_list = [x for a in y for x in _get_val_list(a, path_list[1:], reverse)]
if reverse:
val_list.reverse()
return val_list | b66c7242db6c02340a2b8b2d92d842894990891b | 27,764 |
def remove_all_None(a_list):
"""Remove all None values from a list."""
# type: (list) -> list
return [item for item in a_list if item is not None] | 15eaeb7ef0208f3cd5519534bf155c62da88d3d5 | 27,765 |
def _get_ids(records, key):
"""Utility method to extract list of Ids from Bulk API insert/query result.
Args:
records (:obj:`list`): List of records from a Bulk API insert or SOQL query.
key (:obj:`str`): Key to extract - 'Id' for queries or 'id' for inserted data.
Returns:
(:obj:`list`) of inserted record Ids in form [{'Id':'001000000000001'},...]
"""
return [{'Id': record[key]}
for record in records] | 2fe90c06a7458af49db87d2ee01350e065920113 | 27,766 |
def get_params_for_component(params, component):
"""
Returns a dictionary of all params for one component defined
in params in the form component__param: value
e.g.
>> params = {"vec__min_df": 1, "clf__probability": True}
>> get_params_for_component(params, "vec")
{"min_df": 1}
"""
component_params = {}
for k, v in params.items():
if k.startswith(component):
_, component_arg = k.split(f"{component}__")
component_params[component_arg] = v
return component_params | aefee29c848fecab432efc74acd3d1bcaf80e539 | 27,773 |
import re
def try_include(line):
"""
Checks to see if the given line is an include. If so return the
included filename, otherwise None.
"""
# Non-greedy capture so a closing '>' or '"' is not included in the filename.
match = re.match(r'^#include\s*[<"]?(.*?)[>"]?$', line)
return match.group(1) if match else None | f30c885ffa783f78f5a71dc1906ac6e158226361 | 27,775 |
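For example (a sketch; with the non-greedy capture the closing bracket or quote is excluded):
print(try_include('#include <stdio.h>'))   # 'stdio.h'
print(try_include('#include "config.h"'))  # 'config.h'
print(try_include('int x = 0;'))           # None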
from typing import Dict
from pathlib import Path
import json
def get_config() -> Dict:
"""Load config file from disk as python dict and return it. File is
expected to exist on the same path as this source file.
"""
with open(
Path(Path(__file__).parent, "config.json").resolve(),
"r",
) as json_file:
return json.load(json_file) | 68a0a11ddfea137b1ede61686df630e1d9735c21 | 27,777 |
def get_filename_without_extension(filename):
"""
Returns the name of the 'filename', removing any extension. Here, an extension is indicated by a *dot*,
e.g. 'file.txt' where 'file' denotes the name and 'txt' is the extension.
If multiple extensions exist, only the last one is removed. In case no extension is present, the entire
'filename' is returned.
In case the 'filename' is empty, the string 'file' is returned.
:param filename: the filename to get the name from
:return: the name of the given 'filename'
"""
if not filename:
return 'file'
try:
name, extension = filename.rsplit('.', 1)
return name
except ValueError:
return filename | 7359be82706b1aa041c3559293db8e8cfe49f157 | 27,779 |
def make_variable_batch_size(num_inputs, onnx_model):
"""
Changes the input batch dimension to a string, which makes it variable.
TensorFlow interprets this as the "?" shape.
`num_inputs` must be specified because `onnx_model.graph.input` is a list
of inputs of all layers and not just model inputs.
:param num_inputs: int, Number of model inputs (e.g. 2 for Text and Image)
:param onnx_model: ONNX model instance
:return: ONNX model instance with variable input batch size
"""
for i in range(num_inputs):
onnx_model.graph.input[i].type.tensor_type. \
shape.dim[0].dim_param = 'batch_size'
return onnx_model | e503ea83cac31c33fff0cee6909e7f6640acf4b5 | 27,781 |
def power(x, n):
"""
Recursive algorithm for computing a power: x^n = x * x^(n-1)
Time complexity: O(n)
:param x:
:param n:
:return:
"""
if n == 0:
return 1
else:
return x*power(x, n-1) | 5772fc4ccd1e392f7e8cff4a6068b4cf21989312 | 27,789 |
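For example (note that negative exponents are not handled and would recurse without terminating):
print(power(2, 10))  # 1024
print(power(5, 0))   # 1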
def filter_list(values, excludes):
"""
Filter a list of values excluding all elements from excludes parameters and return the new list.
Arguments:
values : list
excludes : list
Returns:
list
"""
return list(x for x in values if x not in excludes) | 68f25fe3afd4faebeefde7639a2b3d5885255e6a | 27,796 |
def _process_scopes(scopes):
"""Parse a scopes list into a set of all scopes and a set of sufficient scope sets.
scopes: A list of strings, each of which is a space-separated list of scopes.
Examples: ['scope1']
['scope1', 'scope2']
['scope1', 'scope2 scope3']
Returns:
all_scopes: a set of strings, each of which is one scope to check for
sufficient_scopes: a set of sets of strings; each inner set is
a set of scopes which are sufficient for access.
Example: {{'scope1'}, {'scope2', 'scope3'}}
"""
all_scopes = set()
sufficient_scopes = set()
for scope_set in scopes:
scope_set_scopes = frozenset(scope_set.split())
all_scopes.update(scope_set_scopes)
sufficient_scopes.add(scope_set_scopes)
return all_scopes, sufficient_scopes | 85fa5d8f761358225343f75e1c1dfa531e661eb3 | 27,797 |
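A usage sketch showing how space-separated scope strings become sufficient sets:
all_scopes, sufficient = _process_scopes(['scope1', 'scope2 scope3'])
# all_scopes == {'scope1', 'scope2', 'scope3'}
# sufficient == {frozenset({'scope1'}), frozenset({'scope2', 'scope3'})}
# i.e. either 'scope1' alone, or 'scope2' and 'scope3' together, grants access.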
import hashlib
def tagged_hash_init(tag: str, data: bytes = b""):
"""Prepares a tagged hash function to digest extra data"""
hashtag = hashlib.sha256(tag.encode()).digest()
h = hashlib.sha256(hashtag + hashtag + data)
return h | 955cc9fe6082d56663b9cd3531b0bb75aa2af472 | 27,798 |
import math
def GsoAzimuth(fss_lat, fss_lon, sat_lon):
"""Computes the azimuth angle from earth station toward GSO satellite.
Based on Appendix D of FCC 05-56.
Inputs:
fss_lat: Latitude of earth station (degrees)
fss_lon: Longitude of earth station (degrees)
sat_lon: Longitude of satellite (degrees)
Returns:
the azimuth angle of the pointing arc from earth station to GSO satellite.
"""
fss_lat = math.radians(fss_lat)
fss_lon = math.radians(fss_lon)
sat_lon = math.radians(sat_lon)
if fss_lat > 0:
azimuth = math.pi - math.atan2(math.tan(sat_lon-fss_lon),
math.sin(fss_lat))
else:
azimuth = math.atan2(math.tan(sat_lon-fss_lon),
math.sin(-fss_lat))
if azimuth < 0: azimuth += 2*math.pi
return math.degrees(azimuth) | f8761e3529f75a02d90369b5f8aa353f22bbc599 | 27,808 |
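A quick check with made-up coordinates: a northern-hemisphere station whose longitude equals the satellite's should point due south:
az = GsoAzimuth(fss_lat=40.0, fss_lon=-100.0, sat_lon=-100.0)
print(round(az, 1))  # 180.0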
def read_file(path):
"""Read file."""
with open(path) as _file:
return _file.read() | bed1e255478c6d43d84240e1c1969aa3c1bc21f3 | 27,813 |
def constrain(val, min_val, max_val):
"""
Method to constrain values to between the min_val and max_val.
Keyword arguments:
val -- The unconstrained value
min_val -- The lowest allowed value
max_val -- The highest allowed value
"""
return min(max_val, max(min_val, val)) | 655cc16ad425b6ca308d3edbd2881a3923ef195e | 27,814 |
from collections import Counter
def small_class(raw_data, labels, threshold=20):
"""Removes samples and classes for classes that have less than
`threshold` number of samples."""
counts = Counter(labels)
data, n_labels = [], []
for i, l in enumerate(labels):
if counts[l] >= threshold:
data.append(raw_data[i])
n_labels.append(l)
return data, n_labels | cf80bd67ccc3d69baf0b71f226c8b56ef5b80e7c | 27,818 |
def rescale_size(size, scale, return_scale=False):
"""
Compute the new size to be rescaled to.
Args:
size (tuple[int]): The original size in the form of
``(width, height)``.
scale (int | tuple[int]): The scaling factor or the maximum size. If
it is a number, the image will be rescaled by this factor. When it
is a tuple containing 2 numbers, the image will be rescaled as
large as possible within the scale. In this case, ``-1`` means
infinity.
return_scale (bool, optional): Whether to return the scaling factor.
Default: ``False``.
Returns:
:obj:`np.ndarray` | tuple: The new size (and scaling factor).
"""
w, h = size
if isinstance(scale, (float, int)):
scale_factor = scale
elif isinstance(scale, tuple):
if -1 in scale:
max_s_edge = max(scale)
scale_factor = max_s_edge / min(h, w)
else:
max_l_edge = max(scale)
max_s_edge = min(scale)
scale_factor = min(max_l_edge / max(h, w), max_s_edge / min(h, w))
else:
raise TypeError(
"'scale must be a number or tuple of int, but got '{}'".format(
type(scale)))
new_size = int(w * scale_factor + 0.5), int(h * scale_factor + 0.5)
if return_scale:
return new_size, scale_factor
else:
return new_size | 4baa26011ab191c4adca963c5ad7b6e63941b740 | 27,820 |
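A usage sketch covering the three accepted scale forms (expected outputs worked out from the formula above):
print(rescale_size((1280, 720), 0.5))          # (640, 360)
print(rescale_size((1280, 720), (1333, 800)))  # (1333, 750): fits inside 1333x800, aspect kept
print(rescale_size((1280, 720), (-1, 640), return_scale=True))
# ((1138, 640), 0.888...): short edge scaled to 640, -1 leaves the long edge unbounded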
def compose(f, g):
"""Function composition.
``compose(f, g) -> f . g``
>>> add_2 = lambda a: a + 2
>>> mul_5 = lambda a: a * 5
>>> mul_5_add_2 = compose(add_2, mul_5)
>>> mul_5_add_2(1)
7
>>> add_2_mul_5 = compose(mul_5, add_2)
>>> add_2_mul_5(1)
15
"""
# pylint: disable = invalid-name, star-args
return lambda *args, **kwargs: f(g(*args, **kwargs)) | 053c1c6db1517a10ef0580268abb709441a71333 | 27,822 |
def keynat(string):
"""
A natural sort helper function for sort() and sorted()
without using regular expressions or exceptions.
>>> items = ('Z', 'a', '10th', '1st', '9')
>>> sorted(items)
['10th', '1st', '9', 'Z', 'a']
>>> sorted(items, key=keynat)
['1st', '9', '10th', 'a', 'Z']
:type string: string
:param string: String to compare
:rtype: int
:return: Position
"""
it = type(1)
r = []
for c in string:
if c.isdigit():
d = int(c)
if r and type( r[-1] ) == it:
r[-1] = r[-1] * 10 + d
else:
r.append(d)
else:
r.append(c.lower())
return r | fa8a1e52ae97ff78cecab0afe1050142fd12d18a | 27,826 |
def format_address(address):
"""Remove non alphanumeric/whitespace characers from restaurant address
but allows for commas
"""
return ''.join(chr for chr in address if chr.isalnum()
or chr.isspace() or chr == ",") | 6cb191b6672744dfedb570fa1e85f85876fa2895 | 27,832 |
import pickle
def load_pickled_data(path):
"""Load in a pickled data file
Args
----
path (str) : path to the file to read
Returns
-------
the data object
"""
with open(path, "rb") as f:
data = pickle.load(f)
return data | 18a4c352d14762c4b52dc205336a49a2c88cfbc1 | 27,833 |
def get_outputs(lst, uses, seen):
"""Return the list of nodes whose values are required beyond this segment.
Arguments:
lst: list of nodes (the segment)
uses: dict mapping each node to its uses (globally)
seen: set of nodes that are part of the segment
"""
outputs = []
for n in lst:
if n.is_apply() and any(u[0] not in seen for u in uses[n]):
outputs.append(n)
return outputs | 03d6c859bb70aa5ce868b9c71ff7ce6092d52604 | 27,837 |
def get_matches_metadata(infile):
"""
Reads match IDs and metadata from a filename.
Args:
infile: Filename where match IDs and metadata are stored (string).
Returns:
List of dicts with IDs and metadata for each match.
"""
out = []
with open(infile, "r") as file:
lines = file.read().split("\n")
# First two lines are column names and types.
header = lines[0].split(",")
types = lines[1].split(",")
for line in filter(lambda x: len(x) > 0, lines[2:]):
data = line.split(",")
row = {}
for i in range(len(data)):
if types[i] == "int":
row[header[i]] = int(data[i])
elif types[i] == "float":
row[header[i]] = float(data[i])
elif types[i] == "str":
row[header[i]] = data[i]
else:
err = "Unsupported column type: {}".format(types[i])
raise ValueError(err)
out.append(row)
return out | c9b70b40ea0c1ada0af0b6b9c6fbb7e3d95d83e5 | 27,844 |
def lmParamToPoint(a, c):
""" Return the coordinates of a landmark from its line parameters.
Wall landmarks are characterized by the point corresponding to the
intersection of the wall line and its perpendicular passing through the
origin (0, 0). The wall line is characterized by a vector (a, c) such as
its equation is given by y = ax + c.
"""
xp = float(-c*a / (1+a**2))
yp = float(c / (1+a**2))
return [xp, yp] | 6b98613216f1287ed9b25f1345ea0a18aa0fc90b | 27,847 |
from typing import Any
from typing import Callable
def pipe(in_: Any, *args: Callable[[Any], Any]) -> Any:
"""Basic pipe functionality
Example usage:
>>> pipe(
... [True, False, 1, 3],
... all,
... lambda x: "It's true" if x else "They lie"
... )
'They lie'
"""
for function in args:
in_ = function(in_)
return in_ | 8eff195886ec9daf8391532cb21dc61182462c34 | 27,848 |
def label_data(frame, model):
"""Predict cluster label for each tract"""
# `X` is assumed to be the module-level feature matrix the model was fitted on.
frame["cluster"] = model.predict(X)
ix = ["geoid", "state_abbr", "logrecno", "geo_label", "cluster"]
return frame.reset_index().set_index(ix) | 7a27a0722394b90aba237a821be3d2a5730403c0 | 27,856 |
def rel_2_pil(rel_coords, w, h):
"""Scales up the relative coordinates to x1, y1, x2, y2"""
x1, x2, y1, y2 = rel_coords
return [int(x) for x in [x1 * w, y1 * h, x2 * w, y2 * h]] | f619f4a0920db503401abdd0cfd86b61116c4992 | 27,858 |
import time
def datetime_format(epoch):
"""
Convert a unix epoch into a formatted date/time string
"""
datetime_fmt = '%Y-%m-%dT%H:%M:%SZ'
return time.strftime(datetime_fmt, time.gmtime(epoch)) | e45f7874bebdbe99a1e17e5eb41c5c92e15a96b3 | 27,859 |
def count_genes_in_pathway(pathways_gene_sets, genes):
"""Calculate how many of the genes are associated to each pathway gene set.
:param dict pathways_gene_sets: pathways and their gene sets
:param set genes: genes queried
:rtype: dict
"""
return {
pathway: len(gene_set.intersection(genes))
for pathway, gene_set in pathways_gene_sets.items()
} | bb3859c9a6b8c17448a6cbcc3a85fc315abbab31 | 27,861 |
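A usage sketch with made-up gene symbols:
pathway_gene_sets = {'apoptosis': {'TP53', 'CASP3', 'BAX'}, 'cell cycle': {'TP53', 'CDK1'}}
queried = {'TP53', 'BAX', 'MYC'}
print(count_genes_in_pathway(pathway_gene_sets, queried))
# {'apoptosis': 2, 'cell cycle': 1}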
import unittest
import inspect
def AbstractTestCase(name, cls):
"""Support tests for abstract base classes.
To be used as base class when defining test cases for abstract
class implementations. cls will be bound to the attribute `name`
in the returned base class. This allows tests in the subclass to
access the abstract base class or its concretization.
"""
class BaseTestCase(unittest.TestCase):
"""TestCase that is skipped if the tested class is abstract."""
def run(self, *args, **opts):
"""Run the test case only for non-abstract test classes."""
if inspect.isabstract(getattr(self, name)):
return
else:
return super(BaseTestCase, self).run(*args, **opts)
setattr(BaseTestCase, name, cls)
return BaseTestCase | 66641e3c9d805946880ac8dfc41827f51986f6aa | 27,863 |
import itertools
def flat_map(visitor, collection):
"""Flat map operation where returned iterables are flatted.
Args:
visitor: Function to apply.
collection: The collection over which to apply the function.
Returns:
Flattened results of applying visitor to the collection.
"""
return itertools.chain.from_iterable(
map(visitor, collection)
) | 5501e4adc18ca8b45081df4158bcd47491743f29 | 27,868 |
def hex_sans_prefix(number):
"""Generates a hexadecimal string from a base-10 number without the standard '0x' prefix."""
return hex(number)[2:] | 6faaec36b2b3d419e48b39f36c1593297710a0a4 | 27,869 |
def curly_bracket_to_img_link(cb):
"""
Takes the curly-bracket notation for some mana type
and creates the appropriate image html tag.
"""
file_safe_name = cb[1:-1].replace('/', '_').replace(' ', '_')
ext = 'png' if 'Phyrexian' in file_safe_name or file_safe_name in ('C', 'E') else 'gif'
return f"<img src=\"/images/mana/{file_safe_name}.{ext}\">" | 99a1a7ebf6318d2fbc9c2c24035e5115829b6feb | 27,872 |
def name_options(options, base_name):
"""Construct a dictionary that has a name entry if options has name_postfix"""
postfix = options.get("name_postfix")
if postfix is not None:
return { "name": base_name + str(postfix) }
return {} | c5db1619fa951298743e28c78b8f62165b5d09de | 27,876 |
def first_index(keys, key_part):
"""Find first item in iterable containing part of the string
Parameters
----------
keys : Iterable[str]
Iterable with strings to search through
key_part : str
String to look for
Returns
-------
int
Returns index of first element in keys containing key_part, 0 if not found.
"""
for i, key in enumerate(keys):
if key_part in key:
return i
return 0 | 45b41954e795ee5f110a30096aa74ea91f8e6399 | 27,879 |
def _calc_shape(original_shape, stride, kernel_size):
"""
Helper function that calculate image height and width after convolution.
"""
shape = [(original_shape[0] - kernel_size) // stride + 1,
(original_shape[1] - kernel_size) // stride + 1]
return shape | 46a40efec8c7163ead92425f9a884981e6a4a8bc | 27,880 |
def needs_column_encoding(mode):
"""
Returns True, if an encoding mode needs a column word embedding vector, otherwise False
"""
return mode in ["one-hot-column-centroid",
"unary-column-centroid",
"unary-column-partial",
"unary-random-dim"] | d5642d03628357508be87e227c5a9edf8e65da2d | 27,881 |
import json
def decode_frame(frame, tags=None):
""" Extract tag values from frame
:param frame: bytes or str object
:param tags: specific tags to extract from frame
:return: dictionary of values
"""
# extract string and convert to JSON dict
framebytes = frame if isinstance(frame, bytes) else frame.bytes
if framebytes[-1] == 0:
framestring = framebytes[:-1].decode('utf-8')
else:
framestring = framebytes.decode('utf-8')
framedict = json.loads(framestring)
# extract tags
if tags:
if isinstance(tags, str):
tags = [tags]
tagdict = {k:framedict[k] for k in tags if k in framedict}
else:
tagdict = framedict
return tagdict | 1e239c380c7050ff536aa7bfc1cd0b0a01959f39 | 27,884 |
import textwrap
def construct_using_clause(metarels, join_hint, index_hint):
"""
Create a Cypher query clause that gives the planner hints to speed up the query
Parameters
----------
metarels : a metarels or MetaPath object
the metapath to create the clause for
join_hint : 'midpoint', bool, or int
whether to add a join hint to tell neo4j to traverse form both ends of
the path and join at a specific index. `'midpoint'` or `True` specifies
joining at the middle node in the path (rounded down if an even number
of nodes). `False` specifies not to add a join hint. An int specifies
the node to join on.
index_hint : bool
whether to add index hints which specifies the properties of the source
and target nodes to use for lookup. Enabling both `index_hint` and
`join_hint` can cause the query to fail.
"""
using_query = ""
# Specify index hint for node lookup
if index_hint:
using_query = (
"\n"
+ textwrap.dedent(
"""\
USING INDEX n0:{source_label}({property})
USING INDEX n{length}:{target_label}({property})
"""
)
.rstrip()
.format(
property=property,
source_label=metarels[0][0],
target_label=metarels[-1][1],
length=len(metarels),
)
)
# Specify join hint with node to join on
if join_hint is not False:
if join_hint is True or join_hint == "midpoint":
join_hint = len(metarels) // 2
join_hint = int(join_hint)
assert join_hint >= 0
assert join_hint <= len(metarels)
using_query += f"\nUSING JOIN ON n{join_hint}"
return using_query | 61c4dc58782aeb1bc31affb7ec2c74361eac8089 | 27,889 |
from typing import List
import torch
def import_smallsemi_format(lines: List[str]) -> torch.Tensor:
"""
imports lines in a format used by ``smallsemi`` `GAP package`.
Format description:
* filename is of a form ``data[n].gl``, :math:`1<=n<=7`
* lines are separated by a pair of symbols ``\\r\\n``
* there are exactly :math:`n^2` lines in a file
* the first line is a header starting with '#' symbol
* each line is a string of :math:`N` digits from :math:`0` to :math:`n-1`
* :math:`N` is the number of semigroups in the database
* each column represents a serialised Cayley table
* the database contains only cells starting from the second
* the first cell of each Cayley table is assumed to be filled with ``0``
:param lines: lines read from a file of `smallsemi` format
:returns: a list of Cayley tables
.. _GAP package: https://www.gap-system.org/Manuals/pkg/smallsemi-0.6.12/doc/chap0.html
"""
raw_tables = torch.tensor(
[list(map(int, list(line[:-1]))) for line in lines[1:]]
).transpose(0, 1)
tables = torch.cat(
[torch.zeros([raw_tables.shape[0], 1], dtype=torch.long), raw_tables],
dim=-1,
)
cardinality = int(tables.max()) + 1
return tables.reshape(tables.shape[0], cardinality, cardinality) | 2ca9708944379633162f6ef9b4df3357bca77e80 | 27,891 |
import re
def load_tolerances(fname):
""" Load a dictionary with custom RMS limits.
Dict keys are file (base)names, values are RMS limits to compare.
"""
regexp = r'(?P<name>\w+\.png)\s+(?P<tol>[0-9\.]+)'
dct = {}
with open(fname, 'r') as f:
for line in f:
match = re.match(regexp, line)
if match is None:
continue
dct[match.group('name')] = float(match.group('tol'))
return dct | 60af52ec49cadfdb5d0f23b6fa5618e7cc64b4c2 | 27,897 |
def is_list(input_check):
"""
helper function to check if the given
parameter is a list
"""
return isinstance(input_check, list) | 9ff5767c862a110d58587cccb641a04532c1a1a5 | 27,899 |
import re
def is_valid_hostname(hostname):
"""
Check if the parameter is a valid hostname.
:type hostname: str or bytearray
:param hostname: string to check
:rtype: boolean
"""
try:
if not isinstance(hostname, str):
hostname = hostname.decode('ascii', 'strict')
except UnicodeDecodeError:
return False
if hostname[-1] == ".":
# strip exactly one dot from the right, if present
hostname = hostname[:-1]
# the maximum length of the domain name is 255 bytes, but because they
# are encoded as labels (which is a length byte and an up to 63 character
# ascii string), you change the dots to the length bytes, but the
# host element of the FQDN doesn't start with a dot and the name doesn't
# end with a dot (specification of a root label), we need to subtract 2
# bytes from the 255 byte maximum when looking at dot-deliminated FQDN
# with the trailing dot removed
# see RFC 1035
if len(hostname) > 253:
return False
# must not be all-numeric, so that it can't be confused with an ip-address
if re.match(r"[\d.]+$", hostname):
return False
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split(".")) | 31f16d1c648a230de3eb0f3158be42e0841db5a4 | 27,901 |
def _mgmtalgomac(rack, chassis, slot, idx, prefix=2):
""" Returns the string representation of an algorithmic mac address """
return "%02x:%02x:%02x:%02x:%02x:%02x" % (prefix, rack >> 8, rack & 0xFF, chassis, slot, idx << 4) | ea90898d50d5946abb6e0d6c678e876aa8b5f8cf | 27,902 |
def _model_insert_new_function_name(model):
"""Returns the name of the function to insert a new model object into the database"""
return '{}_insert_new'.format(model.get_table_name()) | bd3079813b266a4e792ea323ab59eb3ef377159e | 27,905 |
def create_dataverse_url(base_url, identifier):
"""Creates URL of Dataverse.
Example: https://data.aussda.at/dataverse/autnes
Parameters
----------
base_url : str
Base URL of Dataverse instance
identifier : str
Can either be a dataverse id (long), a dataverse alias (more
robust), or the special value ``:root``.
Returns
-------
str
URL of the dataverse
"""
assert isinstance(base_url, str)
assert isinstance(identifier, str)
base_url = base_url.rstrip("/")
url = "{0}/dataverse/{1}".format(base_url, identifier)
assert isinstance(url, str)
return url | 8dcaacf58c7ca8b601ed2543f8d8de20bbcbc8a2 | 27,906 |
def isinsetf(s):
"""
Returns a function which tests whether an element is in a set `s`.
Examples
--------
>>> colors = ['red', 'green', 'blue']
>>> f = isinsetf(colors)
>>> map(f, ['yellow', 'green'])
[False, True]
"""
s = set(s)
return lambda e: e in s | 492f5381a66ef42670e5dd229c41a5481290114a | 27,910 |
import math
def f(n):
"""
Define f(n) as the sum of the digit factorials for given number n.
For example:
f(342) = 3! + 4! + 2! = 32
:param n: number
:return: sum digit factorial
"""
return sum(math.factorial(int(ch)) for ch in str(n)) | 334ca97a936876d79643cad70994c3da8cbee98e | 27,914 |
def createc_fbz(stm):
"""
Function returning Createc channel feedback z value
Parameters
----------
stm : createc.CreatecWin32
Createc instance
Returns
-------
value : str
"""
# from createc.Createc_pyCOM import CreatecWin32
# stm = CreatecWin32()
return stm.client.getdacvalfb() | affda33fd1050fdf865544cfc66e3899788fccc2 | 27,922 |
def similar(x,y):
"""
function that checks for the similarity between the words of
two strings.
:param x: first string
:param y: second string
:return: returns a float number which is the result of the
division of the length of the intersection between the two strings'
words by the length of their union.
"""
result = ''
x_list = x.split() # convert string to a list for parsing
x_list = [word.lower() for word in x_list] # transform all words to lowercase
x_list = list(set(x_list)) # unique words that appear at least once in x
y_list = y.split() # convert string to a list for parsing
y_list = [word.lower() for word in y_list] # transform all words to lowercase
y_list = list(set(y_list)) # unique words that appear at least once in y
intersection = [word for word in x_list if word in y_list] # obtain the common words between x_list and y_list
union = list(set(x_list).union(y_list)) # words that appear in either list
result = float(len(intersection) / len(union) ) # find the coefficient of their similarity
return result | 92edaf8ebcedcbfbb1adf2b87c8d00f159b3ccc8 | 27,923 |
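For example, the word-level Jaccard similarity of two short phrases:
print(similar("the quick brown fox", "The quick red fox"))
# intersection {'the', 'quick', 'fox'} has 3 words, union has 5 -> 0.6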
from datetime import datetime
def convert_time(ts):
"""converts timestamps from time.time() into reasonable string format"""
return datetime.fromtimestamp(ts).strftime("%Y-%m-%d::%H:%M:%S") | 195124dac4c4c145c397fe8e4fd10d3ab3d6700f | 27,926 |
def get_nr_to_check(selection, line_scores):
"""
Gets the number of checks the annotators should do given a selection and a line_score
:param selection: selection of the lines to check
:param line_scores: the lines with the given score
:return: the number of checks that still need to be performed
"""
total_checks = 0
maximum_checks = 0
for (name, lines), (name2, (lines_with_score)) in zip(selection, line_scores):
score = sum([score for (line, score) in lines_with_score if line in lines])
max_score = len(lines) * 2
total_checks += score
maximum_checks += max_score
return maximum_checks - total_checks | 4560a1f6a8ab3671b73513e6eab193dd5300ec82 | 27,930 |
def read_pair_align(read1, read2):
""" Extract read pair locations as a fragment oriented in increasing chromosome coordinates
:param read1: read #1 of pair in pysam AlignedSegment format
:param read2: read #2 of pair in pysam AlignedSegment format
:return 4-item array in the following format: [fragA-start, fragA-end, fragB-start, fragB-end]
with monotonically increasing chromosome coordinates
"""
r1pos = [x+1 for x in read1.positions]
r2pos = [x+1 for x in read2.positions]
if read1.mate_is_reverse and r1pos[0] < r2pos[0]: # read1 is earlier
read = [r1pos[0], r1pos[-1], r2pos[0], r2pos[-1]]
elif read2.mate_is_reverse and r2pos[0] < r1pos[0]: # read2 is earlier
read = [r2pos[0], r2pos[-1], r1pos[0], r1pos[-1]]
else:
read = []
# print("Skipping read pair from error in alignment.")
# print("%s--%s> <%s--%s" % tuple(read))
return read | f9d1476330a8cf1c9e836654d67a8bcda9e18eb7 | 27,933 |
def getaxeslist(pidevice, axes):
"""Return list of 'axes'.
@type pidevice : pipython.gcscommands.GCSCommands
@param axes : Axis as string or list of them or None for all axes.
@return : List of axes from 'axes' or all axes or empty list.
"""
axes = pidevice.axes if axes is None else axes
if not axes:
return []
if not hasattr(axes, '__iter__'):
axes = [axes]
return axes | 6a01538eb46a7f19efcc2bfb737bf1945ec4db52 | 27,942 |
import mimetypes
def is_html(path: str) -> bool:
"""
Determine whether a file is an HTML file or not.
:param path: the path to the file
:return: True or False
"""
(mime_type, _) = mimetypes.guess_type(path)
return mime_type in ('application/xhtml+xml', 'text/html') | bfd570f19c78447adf2ab28b2d94f1119922b97d | 27,945 |
def _next_set(args):
"""
Deterministically take one element from a set of sets
"""
# no dupes, deterministic order, larger sets first
items = sorted(list(map(frozenset, args)), key=lambda x: -len(x))
return items[0], set(items[1:]) | 37d1fdf1796d2b0b455f638bc8e03de030d668f0 | 27,951 |
def _get_figure_size(numaxes):
"""
Return the default figure size.
Width: 8 units
Height: 3 units per subplot, clamped between 6 and 10 units
Return
------
(width, height)
The figure size in inches.
"""
figure_width = 8
figure_height = max(6, min(numaxes * 3, 10))
return (figure_width, figure_height) | bb6f3a08b974cac2d5da2b69eac8653e9b41411e | 27,957 |
def _simpsons_inner(f, a, f_a, b, f_b):
"""Calculate the inner term of the adaptive Simpson's method.
Parameters
----------
f : callable
Function to integrate.
a, b : float
Lower and upper bounds of the interval.
f_a, f_b : float
Values of `f` at `a` and `b`.
Returns
-------
m : float
Midpoint (the mean of `a` and `b`).
f_m : float
Value of `f` at `m`.
whole : float
Simpson's method result over the interval [`a`, `b`].
"""
# pprint({k: format(v, '0.3f') for k, v in locals().items() if k != 'f'})
m = (a + b) / 2
f_m = f(m)
return (m, f_m, abs(b - a) / 6 * (f_a + 4 * f_m + f_b)) | e0e9170b8030f8f5c2f66927b91b034d9cd4a82f | 27,958 |
def upload_to_dict(upload):
"""Creates a Python dict for an Upload database entity.
This is an admin-only function that exposes more database information than
the method on Upload.
"""
return dict(
id=upload.id,
flake=upload.flake,
filename=upload.filename,
mimetype=upload.mimetype,
uri=upload.uri,
uploader=upload.user.to_dict(),
post=upload.post.to_dict()
) | c6fdc5b53dbbc1fa28e64fb574c5a3919f5e780e | 27,963 |
def wrong_adjunction(left, right, cup):
""" Wrong adjunction error. """
return "There is no {0}({2}, {3}) in a rigid category. "\
"Maybe you meant {1}({2}, {3})?".format(
"Cup" if cup else "Cap", "Cap" if cup else "Cup", left, right) | 263684e737a3212a1d44fcd88ba719fc9f1c07a1 | 27,965 |
def magnify_contents(contents, features):
"""
Create additional features in each entry by replicating some column
of the original data. In order for the colums to differ from the
original data append a suffix different for each new additional
artificial column.
"""
magnified_contents = []
for entry in contents:
magnified_entry = entry[:-1] + [entry[feature]+str(i) for i, feature
in enumerate(features)] + [entry[-1]]
magnified_contents.append(magnified_entry)
return magnified_contents | ec2a43cdb280da74b44a6fec96d0708c90d03f18 | 27,978 |
def binary_search(arr, first, last, element):
"""
Function to search an element in a given sorted list.
The function returns the index of the first occurrence of an element in the list.
If the element is not present, it returns -1.
Arguments
arr : list of elements
first : position of the first element
last : position of the last element
element : element that is to be searched
"""
mid = (first + last) // 2
if first <= last:
if element == arr[mid]:
return arr.index(element)
elif element > arr[mid]:
return binary_search(arr, mid+1, last, element)
elif element < arr[mid]:
return binary_search(arr, first, mid-1, element)
else:
return -1 | d006f751bf13efe04d55ab72e166ea279bef9d3d | 27,979 |
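A usage sketch on a sorted list (the bounds are inclusive indices):
data = [2, 4, 4, 7, 9, 12]
print(binary_search(data, 0, len(data) - 1, 4))   # 1 (first occurrence, via arr.index)
print(binary_search(data, 0, len(data) - 1, 10))  # -1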
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr] | 3af09d6aae798be53d4f99fb63f17a3fd8e0f3ed | 27,980 |
def right_digit(x):
"""Returns the right most digit of x"""
return int(x%10) | 3f52393e9241714839e97a41f858753485cc5c89 | 27,983 |
import random
def generate_string(length: int) -> str:
"""Generates a random string of a given lentgh."""
symbols: str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
final_string: str = ""
for i in range(length):
final_string += symbols[random.randint(0, len(symbols) - 1)]
return final_string | 9e6d4cbccf52f8abb6adf462a9a37b384a707ca3 | 27,985 |
def is_number(n):
"""
Return True if the value can be parsed as a float.
"""
try:
float(n)
return True
except ValueError as _:
return False | d9a2f8e4893b7379c2dcabf24f7f5f731423a753 | 27,987 |
from pathlib import Path
def create_flag_file(filepath: str) -> str:
"""
Create a flag file in order to avoid concurrent build of same previews
:param filepath: file to protect
:return: flag file path
"""
flag_file_path = "{}_flag".format(filepath)
Path(flag_file_path).touch()
return flag_file_path | 80ad8e181574600fcb1b9ded6e5c64c3c0d5b457 | 27,996 |
def _get_range_clause(column, value, bucket_interval):
"""Returns an SQL clause specifying that column is in the range
specified by value. Uses bucket_interval to avoid potentially
ambiguous ranges such as 1.0B-1.9B, which really means [1B, 2B).
"""
if value[0] == '-':
# avoid minus sign with split
arr = value[1:].split('-', 1)
arr[0] = '-' + arr[0]
else:
arr = value.split('-', 1)
if len(arr) > 1:
low = arr[0]
high = arr[1]
else:
return column + " = " + value
if low.endswith('M'):
low = int(round(float(low[:-1]) * 1000000))
high = low + bucket_interval
elif low.endswith('B'):
low = int(round(float(low[:-1]) * 1000000000))
high = low + bucket_interval
elif '.' not in low:
low = int(low)
high = low + bucket_interval
# low is inclusive, high is exclusive
# See https://github.com/elastic/elasticsearch-dsl-py/blob/master/elasticsearch_dsl/faceted_search.py#L125
return column + " >= " + str(low) + " AND " + column + " < " + str(high) | 7b0e9da8fa1ac9365e93ccd1137d519f08dadbed | 28,000 |
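For example (the column names are made up; the bucket interval matches the 'M' suffix):
print(_get_range_clause("population", "1M-2M", 1000000))
# population >= 1000000 AND population < 2000000
print(_get_range_clause("score", "42", 10))
# score = 42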
def createStructuringElement(radius=1, neighborhood="8N"):
"""Create a structuring element function based on the neighborhood and the radius.
Args:
radius (integer): The radius of the structuring element excluding the center pixel.
neighborhood (string): 4N or 8N neighborhood definition around the center pixel.
Returns:
getStructuringElement (function): A function, which returns the neighborhood for a given center based on the configured radius and neighboorhood definition.
"""
def getStructuringElement(center):
"""Create a set of pixel coordinates for all neighbor elements.
Args:
center (number tuple): A pixel coordinate tuple of the center pixel of the structuring element.
Returns:
setImg (number tuple set): A set of the foreground pixel coordinate tuples that make up the neighboorhood for the given center.
"""
neighbors = set()
if neighborhood == "4N":
for x in range(center[0]-radius, center[0]+radius+1):
for y in range(center[1]-radius, center[1]+radius+1):
if abs(center[0] - x) + abs(center[1] - y) <= radius:
neighbors.add((x, y))
else:
for x in range(center[0]-radius, center[0]+radius+1):
for y in range(center[1]-radius, center[1]+radius+1):
neighbors.add((x, y))
return neighbors
# Use partial application of function arguments to dynamically calculate the neighborhood based on previous constraints.
return getStructuringElement | f99601729155fb6993a63a6317454d9359c4fd69 | 28,008 |
def get_id_update(update: dict) -> int:
"""Функция для получения номера обновления.
Описание - получает номер обновления из полученного словаря
Parameters
----------
update : dict
словарь, который содержит текущий ответ от сервера телеграм
Returns
-------
update['update_id'] : int
номер текущего обновления
"""
return update['update_id'] | 68672ff86cda83a11d557ff25f1a206bd1e974b3 | 28,010 |
def extract_some_key_val(dct, keys):
"""
Gets a sub-set of a :py:obj:`dict`.
:param dct: Source dictionary.
:type dct: :py:obj:`dict`
:param keys: List of subset keys, which to extract from ``dct``.
:type keys: :py:obj:`list` or any iterable.
:rtype: :py:obj:`dict`
"""
edct = {}
for k in keys:
v = dct.get(k, None)
if v is not None:
edct[k] = v
return edct | 80dff136ada8cfd754e1a02423e7eef364223a48 | 28,013 |
def ddiff_pf_contact(phi):
""" Double derivative of phase field contact. """
return -3.*phi/2. | 53150d05e6c2b6399da503b87c6ff83f2585483b | 28,018 |
def removeSpaces(string):
"""Returns a new string with spaces removed from the original string
>>> string = '1 173'
>>> removeSpaces(string)
'1173'
"""
return ''.join([char for char in string if char != ' ']) | ce00687c43ce521c14b578105bd9412c31b9817a | 28,019 |
from typing import Any
def do_nothing_collate(batch: Any) -> Any:
"""
Returns the batch as is (without any collation).
Args:
batch: input batch (typically a sequence, mapping or mixture of those).
Returns:
Any: the batch as given to this function
"""
return batch | 45cd76fb2ab1e4ad11053041a70ae9eb9c1948ec | 28,020 |
def _load_table_data(table_file):
"""Load additional data from a csv table file.
Args:
table_file: Path to the csv file.
Returns:
header: a list of headers in the table.
data: 2d array of data in the table.
"""
with open(table_file, encoding="utf-8") as f:
lines = f.readlines()
header = lines[0].strip().split(",")
data = [line.strip().split(",") for line in lines[1:]]
return header, data | c1f1ee84c2f04a613616897b897a01ee2364b98c | 28,030 |
def backlog_color(backlog):
"""Return pyplot color for queue backlog."""
if backlog < 5:
return 'g'
if backlog > 24:
return 'r'
return 'y' | 551413b28c9c9736ea19e63c740f9c28613784ee | 28,032 |
import requests
def get_url_content(url):
"""
Return the content of the web page at the given url, for analysing and extracting useful content.
:param url: web page address
:return: the HTML content of the page at url
"""
return requests.get(url).text | 07f2e7ce8c365e601fd7ed4329f04e6ae56e214f | 28,035 |