content | sha1 | id
---|---|---|
import socket
import struct
def int2ip(addr: int) -> str:
"""convert an IP int to a string
"""
return socket.inet_ntoa(struct.pack("!I", addr)) | 57de2a840a1f1b25ce9de835d29aa88f9955f8ed | 26,167 |
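A minimal usage sketch for `int2ip`; the sample address and the inverse helper `ip2int` are illustrative additions:
# 3232235777 == 0xC0A80101 == 192.168.1.1
assert int2ip(3232235777) == "192.168.1.1"
def ip2int(ip: str) -> int:
    # Inverse of int2ip: pack the dotted quad and unpack as a big-endian unsigned int
    return struct.unpack("!I", socket.inet_aton(ip))[0]
assert ip2int("192.168.1.1") == 3232235777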
def phase_LogLinear(phase, slope=0.04):
"""A logLinear phase function, roughly appropriate for cometary nuclei.
An H-G phase function is likely a better approximation.
Parameters
----------
phase : float or array
Phase angle (degrees)
slope : float, optional
The slope for the phase function. Default 0.04.
Returns
-------
phi : float or array
Phase function evaluated at phase
"""
return 10**(-0.4 * slope * phase) | 1c5cdbf4a41387244d38a0fde368af3ecf224f52 | 26,169 |
def _is_section(tag):
"""Check if `tag` is a sphinx section (linkeable header)."""
return (
tag.tag == 'div' and
'section' in tag.attributes.get('class', [])
) | 8605d8d95e9344c80e91dd1735bec79072507f7b | 26,172 |
def event_asn_org(event):
"""Get asn org from event."""
asn_org = event.get("details", {}).get("asn_org")
if not asn_org:
return "UNKOWN"
return asn_org | 61ad3a5ee61a9ce0a088724c199fe0c432ec900b | 26,174 |
import platform
def is_windows() -> bool:
"""is the script runnning on a windows system?"""
return platform.system() == 'Windows' | a8469c16b4942ec07b8e5c6072327d7eac66ed79 | 26,175 |
from typing import Callable
def exp_decay_with_warmup(warmup: int, gamma: float, min_val: float = 1e-8) -> Callable:
"""Returns exponential decay with warmup function.
The function increases linearly from ``min_val`` to 1.0 until ``step`` is equal
to warmup. For a ``step`` larger than ``warmup``, the function decays with a
given ``gamma`` (last_val * gamma).
Parameters
----------
warmup
The number of steps until which the function increases linearly.
gamma
        The decay parameter in (0, 1); values closer to 1 give slower decay.
min_val
The minimum lr factor that is used for the 0-th step, a small number > 0.
Returns
    -------
A function taking the current step as single argument.
"""
def f(x):
return min_val + x * (1.0 - min_val) / warmup if x < warmup else gamma ** (x - warmup)
return f | b26488aab2844521c4fdc9377bb6c90454531984 | 26,176 |
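A short sketch of how the returned schedule behaves, with values computed directly from the definition above:
f = exp_decay_with_warmup(warmup=10, gamma=0.9)
f(0)   # == 1e-8 (min_val): start of the linear ramp
f(5)   # ~= 0.5: halfway through warmup
f(10)  # == 1.0: gamma ** 0 at the end of warmup
f(20)  # == 0.9 ** 10 ~= 0.349: exponential decay afterwards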
def name_matches(name,pattern):
"""Simple wildcard matching of project and sample names
Matching options are:
- exact match of a single name e.g. pattern 'PJB' matches 'PJB'
- match start of a name using trailing '*' e.g. pattern 'PJ*' matches
'PJB','PJBriggs' etc
- match using multiple patterns by separating with comma e.g. pattern
'PJB,IJD' matches 'PJB' or 'IJD'. Subpatterns can include trailing
'*' character to match more names.
Arguments
name: text to match against pattern
pattern: simple 'glob'-like pattern to match against
Returns
True if name matches pattern; False otherwise.
"""
for subpattern in pattern.split(','):
if not subpattern.endswith('*'):
# Exact match required
if name == subpattern:
return True
else:
if name.startswith(subpattern.rstrip('*')):
return True
    return False | eb8ceead45cc0766af0aec92ca02b37f387c3311 | 26,177 |
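A few illustrative calls covering the three matching modes described in the docstring:
assert name_matches('PJB', 'PJB')         # exact match
assert name_matches('PJBriggs', 'PJ*')    # trailing-wildcard match
assert name_matches('IJD', 'PJB,IJD')     # comma-separated alternatives
assert not name_matches('XYZ', 'PJB,IJD')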
def path_in_cc(path, cc):
"""Determines whether all vertices of a given path belong to a given
connected component.
:param path:
:param cc: list of vertices representing a connected component in a graph
:return: True if the path vertices are found in the connected component,
False otherwise
"""
for node in path:
if node not in cc:
return False
return True | 9d549234f2a1294380a3e416fea93cdf7944d8b2 | 26,181 |
import urllib.request
import urllib.parse
import re
def search(query, results: int = 10) -> list:
"""
Search:
-
Will search `youtube.com` and fetch the number of requested results (default is `10`)
    If that many results are not available, it will return as many as it can find,
    i.e. if only `6` results exist but you requested `10`,
    it will return `as many results as possible`, or `6` in this case.
Note:
-
- The results is a `list` of (video) links
"""
tmp = list()
format_of_link = r"watch\?v=(\S{11})"
raw_query = query
    query = urllib.parse.quote(raw_query)
    html = urllib.request.urlopen(f"https://www.youtube.com/results?search_query={query}")
    videos = re.findall(format_of_link, html.read().decode())
for video in videos:
tmp.append(f'https://www.youtube.com/watch?v={video}')
    # list slicing never over-runs, so simply return up to `results` links
    return tmp[:results] | 8350e9b0fb4b545a5e51df256cb6cd95a8cd355c | 26,186 |
def find_point(coords, point_list):
"""
Coordinates represent either the source or destination [x,y] coordinates of a line.
Given a list of unique points, map one to the given coordinates.
"""
for p in point_list:
if p["coordinates"] == coords:
return p["idx"]
print("Couldn't find the point -- {}!\n".format(str(point_list))) | 2d6cb286b630dc49e86532111bba7d0a00f385cc | 26,187 |
def twolens_efl(efl1, efl2, separation):
"""Use thick lens equations to compute the focal length for two elements separated by some distance.
Parameters
----------
efl1 : `float`
EFL of the first lens
efl2 : `float`
EFL of the second lens
separation : `float`
separation of the two lenses
Returns
-------
`float`
focal length of the two lens system
"""
phi1, phi2, t = 1 / efl1, 1 / efl2, separation
phi_tot = phi1 + phi2 - t * phi1 * phi2
return 1 / phi_tot | dfcaff2e30f2b6249ec5dc9ec1a4c443fdccd2f5 | 26,191 |
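A worked example of the thick-lens formula above, using two 100 mm lenses separated by 50 mm (numbers chosen purely for illustration):
# phi_tot = 1/100 + 1/100 - 50 * (1/100) * (1/100) = 0.015
efl = twolens_efl(100.0, 100.0, 50.0)
print(round(efl, 3))  # 66.667 -> the pair acts as a ~66.7 mm lens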
def read_xyz_charge_mult(file):
"""
Reads charge and multiplicity from XYZ files. These parameters should be defined
in the title lines as charge=X and mult=Y (i.e. FILENAME charge=1 mult=1 Eopt -129384.564)
"""
charge_xyz,mult_xyz = None,None
# read charge and mult from xyz files
with open(file, "r") as F:
lines = F.readlines()
for line in lines:
for keyword in line.strip().split():
if keyword.lower().find('charge') > -1:
charge_xyz = int(keyword.split('=')[1])
elif keyword.lower().find('mult') > -1:
mult_xyz = int(keyword.split('=')[1])
elif charge_xyz is not None and mult_xyz is not None:
break
if charge_xyz is None:
charge_xyz = 0
if mult_xyz is None:
mult_xyz = 1
return charge_xyz,mult_xyz | 9048ff64f5385206ccb8d926776ed5d1ff1c4dc6 | 26,195 |
import base64
def convert_image_file_to_b64_string(filename):
"""Convert a image file to the corresponding b64_string
This function will all the b64encode function from the base64 module and
convert the specified image file to b64_string for saving in data bse and
transmission. The image file could be either JPEG file or PNG file.
Args:
filename (str): The name (path) of image you want to process
Returns:
str : The b64_string that was generated
"""
with open(filename, "rb") as image_file:
b64_bytes = base64.b64encode(image_file.read())
b64_string = str(b64_bytes, encoding="utf-8")
return b64_string | 3036abf52a38ea7ef3534de4929334a221176fe8 | 26,196 |
import uuid
def validate_uuid_string(uuid_obj, uuid_version=4):
""" Checks whether the provided string is a valid UUID string
:param uuid_obj: A string or stringable object containing the UUID
:param uuid_version: The UUID version to be used
"""
uuid_string = str(uuid_obj).lower()
try:
uuid.UUID(uuid_string, version=uuid_version)
except ValueError:
# If it's a value error, then the string
# is not a valid hex code for a UUID.
return False
return True | c83ca62527e05473973d034e03c3eb3b4abec3ad | 26,197 |
def arrangeByType(service_list, preferred_types):
"""Rearrange service_list in a new list so services are ordered by
types listed in preferred_types. Return the new list."""
def bestMatchingService(service):
"""Return the index of the first matching type, or something
higher if no type matches.
This provides an ordering in which service elements that
contain a type that comes earlier in the preferred types list
come before service elements that come later. If a service
element has more than one type, the most preferred one wins.
"""
for i, t in enumerate(preferred_types):
            if t in service.type_uris:
return i
return len(preferred_types)
# Build a list with the service elements in tuples whose
# comparison will prefer the one with the best matching service
prio_services = sorted((bestMatchingService(s), orig_index, s) for (orig_index, s) in enumerate(service_list))
# Now that the services are sorted by priority, remove the sort
# keys from the list.
    return [s for (_, _, s) in prio_services] | b65999d4fbaa4e6839482c018b1233bbeb301bf1 | 26,199 |
def read_byte(bus, i2caddr, adr):
"""
Read a single byte from the bus
"""
return bus.read_byte_data(i2caddr, adr) | 2205d26977a92fb6cf6db6226cbf748addb71f5f | 26,201 |
def represents_int(s):
"""Returns boolean value if parameter can be cast to int"""
try:
int(s)
return True
except ValueError:
return False | 356ddc018e5d605e4219cb6356aa8c11437d275b | 26,204 |
def _store_dir_and_cache(tmpdir_factory):
"""Returns the directory where to build the mock database and
where to cache it.
"""
store = tmpdir_factory.mktemp('mock_store')
cache = tmpdir_factory.mktemp('mock_store_cache')
return store, cache | 01ef0527e63cb62003c6f0ccb0e63c629c2f1e42 | 26,205 |
def resize_keypoint(keypoint, in_size, out_size):
"""Change values of keypoint according to paramters for resizing an image.
Args:
keypoint (~numpy.ndarray): Keypoints in the image.
The shape of this array is :math:`(K, 2)`. :math:`K` is the number
of keypoint in the image.
The last dimension is composed of :math:`y` and :math:`x`
coordinates of the keypoints.
in_size (tuple): A tuple of length 2. The height and the width
            of the image before being resized.
out_size (tuple): A tuple of length 2. The height and the width
            of the image after being resized.
Returns:
~numpy.ndarray:
Keypoint rescaled according to the given image shapes.
"""
keypoint = keypoint.copy()
y_scale = float(out_size[0]) / in_size[0]
x_scale = float(out_size[1]) / in_size[1]
keypoint[:, 0] = y_scale * keypoint[:, 0]
keypoint[:, 1] = x_scale * keypoint[:, 1]
return keypoint | b299a1e2e0031e6ae9111d2261dd98b9d6ce0660 | 26,209 |
def to_bytes(value):
""" returns 8-byte big-endian byte order of provided value """
return value.to_bytes(8, byteorder='big', signed=False) | d32fb88c24274d3bdf3fbd8b8c5917e4a0f3bcca | 26,211 |
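A round-trip sketch for `to_bytes`; `int.from_bytes` is the standard-library inverse:
b = to_bytes(1)
print(b)  # b'\x00\x00\x00\x00\x00\x00\x00\x01'
assert int.from_bytes(b, byteorder='big') == 1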
def abs_fold_change(row, fold_change_column):
"""Add absolute fold change value for two group comparison.
# Arguments:
row: certain row of peak table (pandas dataframe).
fold_change_column: column name in the peak table of the fold change value.
# Returns:
absolute fold change value.
"""
return abs(row[fold_change_column]) | 1189b7411af6c37bede3c601d9325f7a10efa528 | 26,213 |
def without_fixed_prefix(form, prefix_length):
""" Return a new form with ``prefix_length`` chars removed from left """
word, tag, normal_form, score, methods_stack = form
return (word[prefix_length:], tag, normal_form[prefix_length:],
score, methods_stack) | b34736da1ce95daeac09a865b6750aea24b52dcd | 26,214 |
def convert_dms_to_dd(dms):
"""
    Convert values expressed in DMS (degrees, minutes, seconds) to decimal degrees.
Parameters
----------
dms: string
DMS value without special symbols as well as hemisphere location
Returns
-------
dd: float
Decimal degree value
"""
# Split DMS into different components
dms_split = dms.split(' ')
degrees = int(dms_split[0])
minutes = int(dms_split[1])
seconds = int(dms_split[2])
hemisphere = dms_split[3]
# Calculate decimal degree value using the DMS
dd = degrees + minutes/60 + seconds/3600
# Calculate the sign of the decimal degree value based on the hemisphere
if hemisphere == 'N' or hemisphere == 'E':
dd = abs(dd)
if hemisphere == 'S' or hemisphere == 'W':
dd = -abs(dd)
return dd | 4cc84d572ec445ac2ae586f3151edd10f2d3d611 | 26,215 |
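An illustrative conversion; the input string follows the space-separated "degrees minutes seconds hemisphere" format the parser above expects:
dd = convert_dms_to_dd('37 46 30 N')
print(dd)  # 37.775  (37 + 46/60 + 30/3600)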
def process_small_clause(graph, cycle, cut=True):
"""
Match a cycle if there is a small clause relationship: verb with outgoing edge ARG3/H to a preposition node, the
preposition node has an outgoing edge ARG1/NEQ, and
1) an outgoing edge ARG2/NEQ, or
2) an outgoing edge ARG2/EQ to a noun;
ARG2/NEQ or ARG2/EQ edge is removed if cut is set to True.
:param graph: DmrsGraph object
:param cycle: Set of Node objects in the cycle
:param cut: If True and cycle is matched, the cycle is broken by removing a target edge
:return: True if cycle is matched, otherwise False
"""
verb_nodes = [node for node in cycle if node.pos == 'v']
if len(verb_nodes) == 0:
return False
for verb_node in verb_nodes:
outgoing_edges = [edge for edge in graph.get_outgoing_node_edges(verb_node) if edge.to_node in cycle]
outgoing_labels = dict((edge.label, edge.to_node) for edge in outgoing_edges)
if 'ARG3_H' not in outgoing_labels:
continue
prep_node = outgoing_labels['ARG3_H']
if prep_node.pos != 'p':
continue
prep_outgoing_labels = [edge.label for edge in graph.get_outgoing_node_edges(prep_node) if edge.to_node in cycle]
if 'ARG1_NEQ' not in prep_outgoing_labels:
continue
if 'ARG2_NEQ' in outgoing_labels:
if cut:
arg2_neq_edge = [edge for edge in outgoing_edges if edge.label == 'ARG2_NEQ'][0]
graph.edges.remove(arg2_neq_edge)
return True
if 'ARG2_EQ' in outgoing_labels and outgoing_labels['ARG2_EQ'].pos == 'n':
if cut:
arg2_eq_edge = [edge for edge in outgoing_edges if edge.label == 'ARG2_EQ'][0]
graph.edges.remove(arg2_eq_edge)
return True
return False | 4cc714838efc47b2de8c35821763249286039299 | 26,216 |
def localhost_url(url, local_hostname):
"""Return a version of the url optimized for local development.
If the url includes the string `localhost`, it will be replaced by
the `local_hostname`.
Parameters
----------
url : str
        The url to check
    local_hostname : str
        The hostname to substitute for 'localhost'
Returns
-------
str : The url, possibly converted to use a different local hostname
"""
return url.replace('localhost', local_hostname) | dc2cba2acc89fe4ad7da30da9e6a3a1c16465731 | 26,217 |
def weighted_average(cols):
"""Given tuples of (weight, value),
return weighted average.
>>> weighted_average(((100, 1), (200, 2), (100, 5)))
2.5
"""
return sum(w * v for (w, v) in cols) / sum(w for (w, v) in cols) | ef1607dc4e12fc23558dbd642e8fcc6e1692e31f | 26,224 |
import json
def get_best_hyperparams(hyperparams_dict, fit_params_dict, best, file_name=None):
"""
Helper function to extract the numerical values of best hyperparameters from hyperopt into a more easily usable format.
:param hyperparams_dict: Dictionary of hyperparameter values
:param fit_params_dict: Dictionary of fit parameter values
:param best: The best hyperparameters as returned by hyperopt
:param file_name: Directory plus name of the file you want to save the best parameters to. File name must end in .json as this is the expected output format
:return: Parameter dictionary. Contains both model hyperparameters and epochs parameter for model fit.
"""
# Extract hyperparameters for the model
best_params = {}
for key, val in best.items():
if key in hyperparams_dict:
input_ = hyperparams_dict[key]
if input_[0] == 'choice':
best_params[key] = input_[1][val]
else:
best_params[key] = val
# The only other parameter I need to get out is the number of epochs to train for.
# I'll put it all into the best_params dictionary, but I'll need to pop it out before defining the model
best_params['num_epochs'] = fit_params_dict['num_epochs'][1][best['num_epochs']]
if file_name is not None:
        json_out = json.dumps(best_params)
        with open(file_name, "w") as f:
            f.write(json_out)
return best_params | 1bac0d463ba5cf5a69912673d1235794d9a448ff | 26,226 |
def get(x, key, default=None):
"""
Get the value associated with the provided key from a `dict`-like object
:param x: dictlike
Any object with `__contains__()` or `__iter__()` and `__getitem__()`
methods which accept `key`
:param key:
The key to extract from the provided `dict`-like object`
:param default:
A default value to return if the provided key is not present
:return:
Either the value associated with the key or the default if the key
is missing. If he object does not implement the required methods an
error will be thrown.
"""
return x[key] if key in x else default | 7e54520fe5a0ec8e56bfa0e55fbca6060282b838 | 26,230 |
def calc_csr(sigma_veff, sigma_v, pga, rd, gwl, depth):
"""
    Cyclic stress ratio from CPT, Eq 2.2.
    Note: the `gwl` and `depth` arguments are currently unused.
"""
return 0.65 * (sigma_v / sigma_veff) * rd * pga | becedff4526031f5047e68a0a2d51476bf56ca9b | 26,232 |
def gen_dots(val):
"""Generate dots from real data
val = dict (x:y)
return ox, oy lists """
oy = []
ox = []
for x in sorted(val.keys()):
ox.append(int(x[:-1]))
if val[x][0] != 0:
oy.append(1.0/val[x][0])
else:
oy.append(0)
return ox, oy | cbd38754f696cd39b21fea1ae307f62819f4f7ee | 26,240 |
def bgr2rgb(img):
"""Converts an RGB image to BGR and vice versa
"""
return img[..., ::-1] | ece41c92d036ccf28b27d18019f0e0fc1b6d315b | 26,243 |
def find_min(nums):
"""
Find minimum element in rotated sorted array
:param nums: given array
:type nums: list[int]
:return: minimum element
:rtype: int
"""
left, right = 0, len(nums) - 1
while left + 1 < right:
mid = (left + right) // 2
if nums[mid] < nums[right]:
right = mid
elif nums[mid] > nums[right]:
left = mid
else:
            # we cannot determine which side is the sorted subarray
            # when nums[mid] == nums[right]
            # so just move the right pointer one step back
right -= 1
if nums[left] < nums[right]:
return nums[left]
else:
return nums[right] | 7b5c48e599e74c396617b3981d7b2e83061da05c | 26,249 |
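Two illustrative calls, the second exercising the duplicate case handled by the right-pointer decrement:
assert find_min([4, 5, 6, 7, 0, 1, 2]) == 0   # classic rotated sorted array
assert find_min([2, 2, 2, 0, 2]) == 0         # duplicates trigger the right -= 1 fallback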
def parse_processing_parents(processings, parent_keys):
"""Return a dictionary relating each processing identifier to its parent.
Parameters
----------
processings : dict
A dictionary of processing data, whose keys are processing identifiers
and values are dictionaries containing corresponding processing data.
This sort of dictionary is generated by reading the JSON file containing
processing/artifact metadata derived from the processing network/tree on
Qiita.
parent_keys : ordered iterable of str
An ordered collection of strings that are keys that will be
sequentially used to find parent processing identifiers in the
processing data of the `processings` argument.
Returns
-------
dict
Dictionary whose keys are processing identifiers and values are the
identifiers of a parent processing.
"""
processing_parents = {}
for proc_id, proc_data in processings.items():
for key in parent_keys:
try:
parent_id = proc_data[key]
except KeyError:
# Processings can also have no parents, in which case the for
# loop will simply exhaust all keys.
pass
else:
processing_parents[proc_id] = parent_id
break
return processing_parents | 7a7940371648f7236ba73c519cebe8610c0a7b8e | 26,250 |
def chunk_string(string, length):
"""
Split a string into chunks of [length] characters, for easy human readability.
Source: https://stackoverflow.com/a/18854817
"""
return (string[0 + i:length + i] for i in range(0, len(string), length)) | 48d3c406ae9577cf3eb72c44398290eaca40821e | 26,252 |
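Note that `chunk_string` returns a generator, so wrap it in `list()` to materialize the chunks:
print(list(chunk_string('abcdefgh', 3)))  # ['abc', 'def', 'gh']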
import math
def depth(n):
"""Tree depth (distance from root), used to calculate node spacing."""
return int(math.log(n + 1, 2)) | 80f49914786f2ba322d7a9321cc75ee4fdb1f01a | 26,255 |
from typing import Iterable
from typing import Callable
from typing import Generator
def argmap(functions: Iterable[Callable], args: Iterable) -> Generator:
"""Maps the same argument(s) to multiple functions.
>>> inc = lambda x:x+1
>>> dec = lambda x:x-1
>>> list(argmap([inc, dec],[1]))
    [2, 0]
you can even map multiple arguments
>>> add = lambda a,b: a+b
>>> sub = lambda a,b: a-b
>>> list(argmap([add, sub], [2, 1])) # two arguments
[3, 1]
Added in version: 0.1.0
"""
return (f(*args) for f in functions) | 56b5d28ecd6daeef8e78df0c8e774ee4aedafe09 | 26,260 |
def dot_t(inputa,inputb):
"""Dot product for four vectors"""
return inputa[:,0]*inputb[:,0] - inputa[:,1]*inputb[:,1] - inputa[:,2]*inputb[:,2] - inputa[:,3]*inputb[:,3] | 5983a3da541d2884832154dac4a8fb11a583fa3e | 26,262 |
def SummarizeWindow(states):
"""Collapse the set or list of values in a particular temporal activity window
into a single summary. Valid values are ambiguous, inactive, activation,
inhibition.
"""
# Verify that all states are recognized
validStates = set(["ambiguous", "inactive", "activation", "inhibition"])
for state in states:
if state not in validStates:
raise RuntimeError("Invalid temporal activity state: %s" % state)
# If any are ambiguous, the entire window is ambiguous
if "ambiguous" in states:
return "ambiguous"
# If all are activation or inhibition, return that state
if all([s == "activation" for s in states]):
return "activation"
if all([s == "inhibition" for s in states]):
return "inhibition"
# A combination of activation and inhibition is ambiguous, regardless
# of whether there is also inactive
if "activation" in states and "inhibition" in states:
return "ambiguous"
# If all inactive, return inactive
if all([s == "inactive" for s in states]):
return "inactive"
# Otherwise the states are a mix of inactive and activation or inhibition
# so activation/inhibition dominates
if "activation" in states:
return "activation"
if "inhibition" in states:
return "inhibition"
raise RuntimeError("Invalid case reached") | 3d9ff71beddd0f38f0d5a98a4ab0a4e2aac047aa | 26,265 |
def uniq(seq):
"""Return unique elements in the input collection, preserving the order.
:param seq: sequence to filter
:return: sequence with duplicate items removed
"""
    seen = set()
    # set.add returns None, so `or seen.add(x)` records x while keeping the test falsy
    return [x for x in seq if not (x in seen or seen.add(x))] | 2180b6aac4760d31503a9d4c7f2b3cf528d2a4f2 | 26,267 |
def set_verbosity(level=1):
"""Set logging verbosity level, 0 is lowest."""
global verbosity
verbosity = level
return verbosity | 4588991bbfb9f52041e46fb0c302ded7f1a83667 | 26,273 |
def get_connection_config(configuration, connection):
"""
Extracts information for a specified connection from configuration.
Parameters:
configuration (dict): Configuration dictionary containing a 'connections' key
connection (string): Name of a connection to extract configuration for
Returns:
dict: Configuration associated with the specified connection
"""
    if connection not in configuration['connections']:
raise RuntimeError('connection "%s" not found in config.connections' % connection)
return configuration['connections'][connection] | 772ffdcdea363d9adf5493fff01a060835471753 | 26,275 |
def subset_1d(da, dim, domain):
"""Subsets data along a single dimension.
Parameters
----------
da : xarray.DataArray
Data to subset.
dim : str
Name of dimension to subset along.
domain : bcdp.Domain
1D Domain object with .min and .max accessors.
Returns
-------
xarray.DataArray
Subsetted data.
"""
coord = da[dim]
dmin, dmax = domain.min, domain.max
if dim == 'time':
coord = coord.astype(str)
dmin, dmax = str(dmin), str(dmax)
selection = (coord >= dmin) & (coord <= dmax)
return da.isel(**{dim: selection}) | 29e13c4262f0f693c2d0ca16d4a2e235b37e400b | 26,276 |
import random
def data_p4(local_jobs_list, local_data_no):
"""
This function will generate data for p4
:param local_jobs_list: list of all the job titles
:param local_data_no: total number of data entries in the dict
:return: a dataset in dictionary format
"""
# create an empty dict to begin with
local_data_dict = {}
# generate a data_no of entries in the dict
for i in range(local_data_no):
local_person_id = random.randint(10000, 99999)
local_job = random.choice(local_jobs_list)
local_age = random.randint(0, 120)
local_area = random.choice([True, False])
        # add the generated entry to the dict
local_data_dict[local_person_id] = [local_job, local_age, local_area]
return local_data_dict | 3d93719346620bf5429a6b2f4d5d215c90bf2ca5 | 26,285 |
def pyeapi_result(output):
"""Return the 'result' value from the pyeapi output."""
return output[0]['result'] | d4af079c3776ec7bfb6fcdcfd396836b2edc58fb | 26,288 |
import itertools
def generate_jobs(entry):
"""
Generate a list of job configurations by varying the parameters in the given entry.
In practice, computes the cartesian product of all the lists at the toplevel of
the input dictionary, and generates one job for each resulting instance.
Args:
entry (dict): Input job configurations
Returns:
list: List of jobs with list parameters replaced by actual values
"""
keys = [k for k, v in entry.items() if isinstance(v, list)]
all_vals = itertools.product(
*[v for __, v in entry.items() if isinstance(v, list)])
all_jobs = []
for vals in all_vals:
job = entry.copy()
for k, v in zip(keys, vals):
job[k] = v
all_jobs.append(job)
return all_jobs | ee376c0a2e3817784b8aac20ffa0b1144138e852 | 26,293 |
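A small sketch of the cartesian expansion: only list-valued keys are varied, and scalar values are copied into every job (the entry below is illustrative):
entry = {'lr': [0.1, 0.01], 'batch_size': [32, 64], 'model': 'resnet'}
jobs = generate_jobs(entry)
print(len(jobs))  # 4: one job per (lr, batch_size) combination
print(jobs[0])    # {'lr': 0.1, 'batch_size': 32, 'model': 'resnet'}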
import torch
import warnings
def safe_cholesky(covariance_matrix, jitter=1e-6):
"""Perform a safe cholesky decomposition of the covariance matrix.
If cholesky decomposition raises Runtime error, it adds jitter to the covariance
matrix.
Parameters
----------
covariance_matrix: torch.Tensor.
Tensor with dimensions batch x dim x dim.
jitter: float, optional.
Jitter to add to the covariance matrix.
"""
try:
return torch.cholesky(covariance_matrix)
except RuntimeError:
dim = covariance_matrix.shape[-1]
if jitter > 1:
# When jitter is too big, then there is some numerical issue and this avoids
# stack overflow.
warnings.warn("Jitter too big. Maybe some numerical issue somewhere.")
return torch.eye(dim)
return safe_cholesky(
covariance_matrix + jitter * torch.eye(dim), jitter=10 * jitter
) | 94c91a1b34908e7e7b62be89e6ea0a3bc7432bac | 26,295 |
import re
def recognize_delivery_service(tracking_code: str):
"""Infer the parcel carrier for a tracking code.
Can be used as a quick validation."""
service = None
# Strip whitespace
tracking_code = re.sub(r"\s+", "", tracking_code)
usps_pattern = [
"^(94|93|92|94|95)[0-9]{20}$",
"^(94|93|92|94|95)[0-9]{22}$",
"^(70|14|23|03)[0-9]{14}$",
"^(M0|82)[0-9]{8}$",
"^([A-Z]{2})[0-9]{9}([A-Z]{2})$",
]
ups_pattern = [
"^(1Z)[0-9A-Z]{16}$",
"^(T)+[0-9A-Z]{10}$",
"^[0-9]{9}$",
"^[0-9]{26}$",
]
fedex_pattern = ["^[0-9]{20}$", "^[0-9]{15}$", "^[0-9]{12}$", "^[0-9]{22}$"]
usps = "(" + ")|(".join(usps_pattern) + ")"
fedex = "(" + ")|(".join(fedex_pattern) + ")"
ups = "(" + ")|(".join(ups_pattern) + ")"
    if re.match(usps, tracking_code) is not None:
        service = "USPS"
    elif re.match(ups, tracking_code) is not None:
        service = "UPS"
    elif re.match(fedex, tracking_code) is not None:
        service = "FedEx"
    else:
        raise ValueError("Unable to determine service for %s" % tracking_code)
return service | ff30a95a0f0bdbcaec25a8f8b6db1ac6346800ca | 26,297 |
import re
def replace_urls(string_input: str, replace_by: str = "URL"):
"""
Replace url's in a string by replace_by
:param string_input: string input
:param replace_by: string, what we want to replace the url with
:return: string, with urls replaced by replaced_by
"""
return re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', replace_by,
string_input) | 56e66ef06580e608dc09c2ba690179d820819c9a | 26,298 |
def target_title(target):
"""The title of a Target instance in text representations"""
return 'Target #%d @ %fs' % (target.targetNumber, target.frameGrabTime) | a755aeb428c94ce6b79ac066877be1130112cba5 | 26,301 |
from datetime import datetime
def parse_session(line):
"""Parse H4 line into session."""
if not line.startswith("H4"):
raise ValueError("Not H4 line for session parser!")
return {
"data" : None,
"start" : datetime(
year = int(line[6:10]),
month = int(line[11:13]),
day = int(line[14:16]),
hour = int(line[17:19]),
minute = int(line[20:22]),
second = int(line[23:25])
),
"end" : datetime(
year = int(line[26:30]),
month = int(line[31:33]),
day = int(line[34:36]),
hour = int(line[37:39]),
minute = int(line[40:42]),
second = int(line[43:45])
),
"troposphere_corrected" : int(line[49]),
"CoM_corrected" : int(line[51]),
"receive_amplitude_corrected" : int(line[53]),
"station_delay_corrected" : int(line[55]),
"spacecraft_delay_corrected" : int(line[57]),
"range_type" : int(line[59]),
"data_quality" : int(line[61])
} | 3fa735495b296e5e3b6c5e4b60f23a48dfceada6 | 26,303 |
def add_storage_mappings_arguments_to_parser(parser):
""" Given an `argparse.ArgumentParser` instance, add the arguments required
for the 'storage_mappings' field for both Migrations and Replicas:
* '--default-storage-backend' will be under 'default_storage_backend'
* '--disk-storage-mapping's will be under 'disk_storage_mappings'
* '--storage-backend-mapping's will be under 'storage_backend_mappings'
"""
parser.add_argument(
"--default-storage-backend",
dest='default_storage_backend',
help="Name of a storage backend on the destination platform to "
"default to using.")
# NOTE: arparse will just call whatever 'type=' was supplied on a value
# so we can pass in a single-arg function to have it modify the value:
def _split_disk_arg(arg):
disk_id, dest = arg.split('=')
return {
"disk_id": disk_id.strip('\'"'),
"destination": dest.strip('\'"')}
parser.add_argument(
"--disk-storage-mapping", action='append', type=_split_disk_arg,
dest='disk_storage_mappings',
help="Mappings between IDs of the source VM's disks and the names of "
"storage backends on the destination platform as seen by running "
"`coriolis endpoint storage list $DEST_ENDPOINT_ID`. "
"Values should be fomatted with '=' (ex: \"id#1=lvm)\"."
"Can be specified multiple times for multiple disks.")
def _split_backend_arg(arg):
src, dest = arg.split('=')
return {
"source": src.strip('\'"'),
"destination": dest.strip('\'"')}
parser.add_argument(
"--storage-backend-mapping", action='append', type=_split_backend_arg,
dest='storage_backend_mappings',
help="Mappings between names of source and destination storage "
"backends as seen by running `coriolis endpoint storage "
"list $DEST_ENDPOINT_ID`. Values should be fomatted with '=' "
"(ex: \"id#1=lvm)\". Can be specified multiple times for "
"multiple backends.") | c79cdd273b561e6605bc7322092f289251816d76 | 26,304 |
def remove_outliers(df):
"""
For INS and OUTS, remove implausibly values
:param df: dataframe
:return: dataframe
"""
df = df[df['INS'] < 200000]
df = df[df['INS'] >= 0]
df = df[df['OUTS'] < 200000]
df = df[df['OUTS'] >= 0]
return df | c9f0002992b971dc31839e9803049a4db5b1702e | 26,307 |
def construct_path(vertex, reverse_paths):
"""Returns the shortest path to a vertex using the reverse_path mapping."""
path = []
while vertex is not None:
path.append(vertex)
vertex = reverse_paths[vertex]
return list(reversed(path)) | 9ed1aa226a2055c16c22743f7930c4981f2ac16c | 26,308 |
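An illustrative call: `reverse_paths` maps each vertex to its predecessor, with the source mapped to None:
reverse_paths = {'A': None, 'B': 'A', 'C': 'B'}
print(construct_path('C', reverse_paths))  # ['A', 'B', 'C']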
def _learning_rate_schedule(global_step_value, max_iters, initial_lr):
"""Calculates learning_rate with linear decay.
Args:
global_step_value: int, global step.
max_iters: int, maximum iterations.
initial_lr: float, initial learning rate.
Returns:
lr: float, learning rate.
"""
lr = initial_lr * (1.0 - global_step_value / max_iters)
return lr | c0adcf9d64d83e9917b278fe7b49dd152cff9a47 | 26,310 |
def read_pid(pid_file):
"""
Read PID from given PID file.
    :param str pid_file: Name of the PID file to read from.
:return: PID from given PID file.
:rtype: int
"""
with open(pid_file, 'r') as pidfd:
return int(pidfd.readline().strip()) | 61df745a73483bd9a9dd16b722ca53d559f07539 | 26,312 |
def bytechr(i):
"""Return bytestring of one character with ordinal i; 0 <= i < 256."""
if not 0 <= i < 256:
if not isinstance(i, int):
raise TypeError('an integer is required')
else:
raise ValueError('bytechr() arg not in range(256)')
return chr(i).encode('latin1') | 0949f417ec521edb4e3edef103e099ef43057869 | 26,313 |
def macro(name):
"""Replaces :func:`~flask_admin.model.template.macro`, adding support for using
macros imported from another file. For example:
.. code:: html+jinja
{# templates/admin/column_formatters.html #}
{% macro email(model, column) %}
{% set address = model[column] %}
<a href="mailto:{{ address }}">{{ address }}</a>
{% endmacro %}
.. code:: python
class FooAdmin(ModelAdmin):
column_formatters = {
'col_name': macro('column_formatters.email')
}
Also required for this to work, is to add the following to the top of your
master admin template:
.. code:: html+jinja
{# templates/admin/master.html #}
{% import 'admin/column_formatters.html' as column_formatters with context %}
"""
def wrapper(view, context, model, column): # skipcq: PYL-W0613 (unused arg)
if '.' in name:
macro_import_name, macro_name = name.split('.')
m = getattr(context.get(macro_import_name), macro_name, None)
else:
m = context.resolve(name)
if not m:
return m
return m(model=model, column=column)
return wrapper | 622aec9bd44e5cdb412e4fd92b07fdf4f2cb8aa7 | 26,314 |
from typing import Tuple
def _get_level_percentiles(level: float) -> Tuple[float, float]:
"""Convert a credibility level to percentiles.
Similar to the highest-density region of a symmetric, unimodal distribution
(e.g. Gaussian distribution).
    For example, a credibility level of `95` will be converted to
`(2.5, 97.5)`.
Parameters
----------
level:
The credibility level used to calculate the percentiles. For example,
`[95]` for a 95% credibility interval. These levels are split
symmetrically, e.g. `95` corresponds to plotting values between the
2.5% and 97.5% percentiles, and are equivalent to highest-density
regions for a normal distribution. For skewed distributions, asymmetric
percentiles may be preferable, but are not yet implemented.
Returns
-------
The percentiles, with the lower percentile first.
"""
lower_percentile = (100 - level) / 2
return lower_percentile, 100 - lower_percentile | 7b7ff713e4b5c95c6e38c5807ee3706bcb01bc0f | 26,315 |
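A quick check of the symmetric split described above:
print(_get_level_percentiles(95))  # (2.5, 97.5)
print(_get_level_percentiles(50))  # (25.0, 75.0)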
def hysteresis(im, weak, strong=255):
"""Transforms weak pixel into strong ones if at least 1 pixel around the one being processed is a strong one
Parameters
----------
im
input image
weak
weak pixel that has intensity value that is not enough to be considered strong ones, but not non-relevant
strong
pixel with very high intensity
Returns
-------
im
output result image
"""
row, col = im.shape
for i in range(1, row-1):
for j in range(1, col-1):
if (im[i,j] == weak):
if ((im[i+1, j-1] == strong) or (im[i+1, j] == strong) or (im[i+1, j+1] == strong)
or (im[i, j-1] == strong) or (im[i, j+1] == strong)
or (im[i-1, j-1] == strong) or (im[i-1, j] == strong) or (im[i-1, j+1] == strong)):
im[i, j] = strong
else:
im[i, j] = 0
return im | 4fe1f6c7728f69cef393432a6298744e5a4c383d | 26,316 |
def print_tree(ifaces):
"""
    Returns a string representation of the tree for the given ifaces.
"""
return " ".join(i.get_tree() for i in ifaces) | 27d117b3454420580a836a6419300b918b08b135 | 26,317 |
import importlib
import warnings
def import_or_raise(library, error_msg=None, warning=False):
"""Attempts to import the requested library by name. If the import fails, raises an ImportError or warning.
Args:
library (str): The name of the library.
        error_msg (str): Error message to return if the import fails.
warning (bool): If True, import_or_raise gives a warning instead of ImportError. Defaults to False.
Returns:
Returns the library if importing succeeded.
Raises:
ImportError: If attempting to import the library fails because the library is not installed.
Exception: If importing the library fails.
"""
try:
return importlib.import_module(library)
except ImportError:
if error_msg is None:
error_msg = ""
msg = f"Missing optional dependency '{library}'. Please use pip to install {library}. {error_msg}"
if warning:
warnings.warn(msg)
else:
raise ImportError(msg)
except Exception as ex:
msg = f"An exception occurred while trying to import `{library}`: {str(ex)}"
if warning:
warnings.warn(msg)
else:
raise Exception(msg) | 51d890fde3cbc9740299fd262b544d09acfe7bdc | 26,321 |
from bs4 import BeautifulSoup
def parse_response(response):
"""
Convert a valid response form the https://www.myshiptracking.com/vessels/ website
into a dictionary containing the information parsed from the 'vessels_table2'
:param response:
:return: dict
"""
soup = BeautifulSoup(response.text, "html.parser")
tables = soup.find_all("table", {"class": "vessels_table2"})
data = []
for table in tables:
rows = table.findAll(lambda tag: tag.name == "tr")
for row in rows:
cols = row.find_all("td")
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols if ele])
ans = {x[0]: x[1] for x in data if len(x) == 2}
return ans | cd08616001d10de4c45b7240b4df5b71393ac68f | 26,324 |
def bool_to_str(boolean: bool) -> str:
"""Converts a bool such as True to 'true'."""
return 'true' if boolean else 'false' | 515701bf0b8d60a875ac7d49446df7ea62a0abdb | 26,326 |
def read_input(filename):
"""read input file and return list of raw intcodes."""
with open(filename, "r") as infile:
raw_intcodes = infile.readlines()[0].strip().split(",")
return raw_intcodes | 6ba22122472d69b7adf067d652b8da67092f87d4 | 26,332 |
from typing import List
def dsv_line_to_list(line: str, *, delimiter=',', quote='"') -> List[str]:
"""
Splits line into fields on delimiter ignoring delimiters in fields that
start and end with quote
NB: Empty fields produce an empty string
:param line: The line to be split
:param delimiter: The delimiter to use to split the fields
:param quote: The quote char to surround fields that contain the delimiter
:return: a list of the fields found
"""
result = []
within_quoted_field = False
at_start_of_field = True
last_was_quote = False # Used to see if quote is not at end of field
field = ''
def new_field():
nonlocal field, within_quoted_field, at_start_of_field, last_was_quote
result.append(field)
within_quoted_field = False
at_start_of_field = True
last_was_quote = False
field = ''
for char in line:
if at_start_of_field:
at_start_of_field = False
# Check for quote
if char == quote:
within_quoted_field = True
continue # Skip quote do not include in field
if within_quoted_field:
if char == quote:
last_was_quote = True
continue # May not want to add this char if end of field
if last_was_quote:
if char == delimiter:
new_field()
continue
else:
field += quote
last_was_quote = False
field += char
else:
if char == delimiter:
new_field()
else:
field += char
    # Add the last field that was being filled (empty if the line ended with a delimiter)
result.append(field)
return result | d0e1248152ecbe95d68e57de15bf7c0e22be7c7f | 26,335 |
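Illustrative calls showing quoted-field handling and empty fields:
print(dsv_line_to_list('a,"b,c",d'))  # ['a', 'b,c', 'd']
print(dsv_line_to_list('a,,d'))       # ['a', '', 'd']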
def flatten(x):
"""Build a flat list out of any iter-able at infinite depth"""
result = []
for el in x:
        # Recursively call itself until a non-iterable is found
if hasattr(el, "__len__") and not isinstance(el, str):
flt = flatten(el)
result.extend(flt)
else:
result.append(el)
return result | c77950ae9e3839a450b797be1aaee9bff8a1f164 | 26,336 |
def combine_points(points_list):
"""Combine list of points (`ImagePoints`, `Points`, etc).
List must be nonempty.
Returns:
combined_points
"""
cls = type(points_list[0])
return cls.combine(points_list) | bddc4262057359ff65d1b8356eacf445a78449e8 | 26,337 |
from typing import List
import collections
def unique(list_: List) -> List:
"""Remove duplicate entries from list, keeping it in its original order
>>> unique([1, 2, 2, 3, 4, 6, 2, 5])
[1, 2, 3, 4, 6, 5]
>>> unique(['bb', 'aa', 'aa', 'aa', 'aa', 'aa', 'bb'])
['bb', 'aa']
"""
return list(collections.OrderedDict.fromkeys(list_)) | 8707e2d2dbf6b77f8818ad39282b113da9d22707 | 26,339 |
def read_reqn_file(path_to_file):
"""
Reads the contents of a file and returns it as a list of lines.
Parameters
----------
path_to_file : str
Path to file that is to read in
Returns
-------
list of str
The file contents as separate strings in a list
"""
with open(path_to_file) as f:
lines = f.readlines()
return lines | 3df65ff2b6475ffbdceb9c2727684ab69c146da4 | 26,341 |
def weekDay (obj):
"""
return the weekDay from a obj with a datetime 'date' field
weekDay 0 is monday
"""
return obj['date'].weekday() | f67d086076e99727e2b39f6a52608144ac24165d | 26,342 |
def get_bl_data(adni_comp, clin_data, scan_data):
"""This function extracts the data from the baseline visit only for each patient.
Supply the three dataframes adni_comp, clin_data, and scan_data as input.
"""
# extract the baseline data only
adni_bl = adni_comp[adni_comp.EXAMDATE == adni_comp.EXAMDATE_bl]
clin_bl = clin_data[clin_data.EXAMDATE == clin_data.EXAMDATE_bl]
scan_bl = scan_data[scan_data.EXAMDATE == scan_data.EXAMDATE_bl]
# return the three dataframes
return adni_bl, clin_bl, scan_bl | fad3b7b422a23e597b2f37b8e2f2a9a702b1af0a | 26,344 |
import math
def formatElapsedSeconds(seconds):
"""
Returns a string of the form "mm:ss" or "hh:mm:ss" or "n days",
representing the indicated elapsed time in seconds.
"""
sign = ''
if seconds < 0:
seconds = -seconds
sign = '-'
# We use math.floor() instead of casting to an int, so we avoid
# problems with numbers that are too large to represent as
# type int.
seconds = math.floor(seconds)
hours = math.floor(seconds / (60 * 60))
if hours > 36:
days = math.floor((hours + 12) / 24)
return "%s%d days" % (sign, days)
seconds -= hours * (60 * 60)
    minutes = int(seconds / 60)
seconds -= minutes * 60
if hours != 0:
return "%s%d:%02d:%02d" % (sign, hours, minutes, seconds)
else:
return "%s%d:%02d" % (sign, minutes, seconds) | de8ec8614dd534871a3628c15f3d89f3f7a87d6f | 26,345 |
def from_keyed_iterable(iterable, key, filter_func=None):
"""Construct a dictionary out of an iterable, using an attribute name as
the key. Optionally provide a filter function, to determine what should be
kept in the dictionary."""
generated = {}
for element in iterable:
try:
k = getattr(element, key)
except AttributeError:
raise RuntimeError("{} does not have the keyed attribute: {}".format(
element, key
))
if filter_func is None or filter_func(element):
if k in generated:
generated[k] += [element]
else:
generated[k] = [element]
return generated | e2d349876f446b378d2b6b5b535f9119e648cfbe | 26,346 |
import hashlib
def check_sum(fl_name, tid=None):
"""
compute checksum for generated file of `fl_name`
"""
hasher = hashlib.md5()
with open(fl_name, 'rb') as fin:
for chunk in iter(lambda: fin.read(4096), b""):
hasher.update(chunk)
ret = hasher.hexdigest()
if tid is not None:
ret = ret + str(tid)
return ret | 842f6de54827b524f1d5cf18d4c5ad18b8ad8b59 | 26,352 |
def _get_class_with_reference(visible_name: str, ref: str) -> str:
"""
Return the name of the class with a valid reference to be used by sphinx.
"""
return f"\\ :class:`{visible_name} <{ref}>`\\" | 4a31fdcffcd2b295deecba062a683bb1cab91bee | 26,354 |
def int_median_cutter(lower_bound: int, upper_bound: int, value: int):
"""
Simple function for cutting values to fit between bounds
Args:
lower_bound (int): lower cutting bound
upper_bound (int): upper cutting bound
value (int): value to fit between bounds
Returns:
value cut to fit into bounds (as integer)
"""
return int(max(min(value, upper_bound), lower_bound)) | dc6ebea3876f35470b2289312596f83ab8ba5fed | 26,365 |
import random
def get_framesets(cls, maximum=10, pattern=None):
""" Gather FrameSet objects from either Frames or Bars.
If `pattern` is set to a compiled regex pattern,
return all FrameSets matching the pattern.
Otherwise, return up to `maximum` random FrameSets.
"""
frametypes = set()
framenames = cls.names()
if pattern is None:
        # NB: assumes at least `maximum` distinct frame types exist,
        # otherwise this loop would never terminate
        while len(frametypes) < maximum:
            frametypes.add(cls.get_by_name(random.choice(framenames)))
else:
frametypes.update(
cls.get_by_name(s)
for s in framenames
if pattern.search(s) is not None
)
return frametypes | f6f094b67243685f352ea624e2bf45b62693e38d | 26,375 |
def psi2(ctx, z):
"""Shortcut for psi(2,z) (the tetragamma function)"""
return ctx.psi(2, z) | 68b3ade0f3844cf67c57b37b95744ffc09b46e52 | 26,378 |
def remove_char(string,
iterable):
"""
Return str without given elements from the iterable. More convenient than
chaining the built-in replace methods.
Parameters
-------
string: str
String from which the characters from the iterable are removed.
iterable: str, list, tuple, set
Iterable with characters that are removed from the string.
Returns
-------
str
Without elements from the iterable.
"""
for i in iterable:
string = string.replace(i, "")
return string | a7106236fc15adf7b7aa4cdc7e4f3b6b86e6a889 | 26,379 |
def parse_command(cmd_str):
"""
# the line has one word for the command and n pairs that go to key, value (separator is space)
:param cmd_str: string with name of command and pairs of params and values
:return: cmd : str (name of the command)
cmd_par: dictionary {par_name: str(par_value)} with the parameters for the command
"""
split_cmd = cmd_str.split(' ')
assert (len(split_cmd) % 2)
cmd_par = {split_cmd[i]: split_cmd[i + 1] for i in range(1, len(split_cmd), 2)}
cmd = split_cmd[0]
return cmd, cmd_par | ac48d05bcd88c7eb5e04cedeb26c5d5278bbc3bd | 26,382 |
def central_slice(k):
"""Return central slice objects (last 2 dimensions)."""
if k < 1:
return ..., slice(None), slice(None)
return ..., slice(k, -k), slice(k, -k) | a69a18adf07c9e841f58328c1869e44bd0de24e2 | 26,385 |
from typing import List
from typing import Tuple
def concatenate_shapes(shapes: List[Tuple[int]], axis: int):
"""Concatenate shapes along axis"""
out = list(shapes[0])
out[axis] = sum(list(s)[axis] for s in shapes)
return tuple(out) | 959a66fec11fa7d67218f2fb1b76d5bcf990d463 | 26,386 |
def GetModSaveMetaDataFileName () -> str:
"""
Get the file name of every mod save meta data file.
"""
return "Meta_Data.json" | 68af6a2a4428242aedf7f1b313322d8a4e148e78 | 26,391 |
def support(transactions, itemsets):
"""Returns the percentages of transactions that contain the itemsets.
Parameters
----------
transactions : list of list
itemsets : list of frozenset
Returns
-------
dict
Key of each item is the itemset and the value is the itemset's support
"""
counts = {}
for itemset in itemsets:
counts[itemset] = 0
for transaction in transactions:
for itemset in itemsets:
if itemset.issubset(transaction):
counts[itemset] += 1
supports = {}
total_transactions = len(transactions)
for itemset, count in counts.items():
supports[itemset] = count / total_transactions
return supports | f6d9751b2560dd8ac636dac965d912595ccb9831 | 26,394 |
import functools
def compose(*functions):
"""
Compose multiple functions into a single function
Create a function that calls a series of functions, passing the output of
one function as the input of the next.
See https://mathieularose.com/function-composition-in-python/
"""
return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x) | 32190bf758f4e7198fff96b684001bf50229b4c2 | 26,395 |
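A short sketch of the composition order: the right-most function runs first, as in mathematical notation f(g(x)):
inc = lambda x: x + 1
double = lambda x: x * 2
print(compose(inc, double)(5))  # inc(double(5)) == 11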
def extended_GCD(a, b):
"""
The extended Euclidean algorithm computes the greatest common divisor and the Bézout
coefficients s, t.
Returns (remainder, (s, t))
"""
(r, rP) = (a, b)
(s, sP) = (1, 0)
(t, tP) = (0, 1)
while rP != 0:
q = r // rP
(r, rP) = (rP, r - q * rP)
(s, sP) = (sP, s - q * sP)
(t, tP) = (tP, t - q * tP)
return (r, (s, t)) | ca963ab6fff79e8cd375d4ce6a866cf28fd94f3b | 26,400 |
def find_digits_in_str(string: str) -> str:
"""Find digits in a given string.
Args:
string: str, input string with the desired digits
Returns:
digits: str, found in the given string
"""
return "".join(x for x in string if x.isdigit()) | 7b9e824f8100d6289a8ed135b50e10d3b3046ed1 | 26,402 |
from pathlib import Path
def exists( fileName:str ) -> bool:
"""
    Check whether a pickle file named `fileName` + ".pkl" exists
"""
fname = fileName + ".pkl"
my_file = Path(fname)
return my_file.exists() | 5173fbe936564c67f9f32b33ba0a7f85bb172ec8 | 26,404 |
def _format_defaults(value):
"""
Format value to CLI syntax.
Example usage: if default value of a parameter is ['gene'], the description
in CLI would be::
(default: ['gene'])
After using this function the description is tailored to CLI usage::
(default: gene)
"""
if isinstance(value, list):
return ' '.join(value)
else:
return value | e665ba6cf28ccb03db5a3ee69721bf03dfa4b41e | 26,405 |
import ast
def parseTypeFromString(value):
"""
Parse a string representation of a variable into a true, typed, python variable
"""
return ast.literal_eval(value) | 2a7664af015a60a9070e3090772c73af4ef76fb5 | 26,409 |
def number_to_digits(number, base):
"""Convert a positive number to its digit representation in base."""
digits = []
while number > 0:
digits.insert(0, number % base)
number = number // base
return digits | d04091511cbf2c1a86a315239321612b60a27a2d | 26,414 |
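Illustrative calls; note the helper assumes a positive input, so 0 yields an empty list:
print(number_to_digits(255, 16))  # [15, 15]
print(number_to_digits(5, 2))     # [1, 0, 1]
print(number_to_digits(0, 10))    # [] -- the loop body never runs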
def select_bboxes(selection_bbox: dict, page_bboxes: list, tolerance: int = 10) -> list:
"""
Filter the characters bboxes of the document page according to their x/y values.
The result only includes the characters that are inside the selection bbox.
:param selection_bbox: Bounding box used to select the characters bboxes.
:param page_bboxes: Bounding boxes of the characters in the document page.
:param tolerance: Tolerance for the coordinates values.
:return: Selected characters bboxes.
"""
selected_char_bboxes = [
char_bbox
for char_bbox in page_bboxes
if int(selection_bbox["x0"]) - tolerance <= char_bbox["x0"]
and int(selection_bbox["x1"]) + tolerance >= char_bbox["x1"]
and int(selection_bbox["y0"]) - tolerance <= char_bbox["y0"]
and int(selection_bbox["y1"]) + tolerance >= char_bbox["y1"]
]
return selected_char_bboxes | 6b3a091aafc0b2af7045e964daf02237ae18f1f4 | 26,415 |
def merge(data, delimiter=","):
"""
Merge rows with an equal starting index from an array of CSV data rows.
Args:
data (list): input list of string rows or row values as a list to merge.
delimiter (str): delimiter of the CSV format to use for value splitting.
Returns:
merged list of rows in string format.
"""
data_merged = []
row_merged = []
# Register an empty field.
id = None
for row in data:
# Convert our string row into a list of strings if it's not already one.
values = row.split(delimiter) if type(row) is str else row
# Assign a value if this is the first run.
if not id:
id = values[0]
row_merged.append(id)
# If our identifier does not match up with the last, append the row and reset.
if values[0] != id:
data_merged.append(row_merged)
row_merged = []
id = values[0]
row_merged.append(id)
# Begin iteration over values skipping our identifier.
for value in values[1:]:
row_merged.append(value)
# If this is the last row append it.
if row == data[-1]:
data_merged.append(row_merged)
return data_merged | 121f3b5d3057b7e8aa639fad9063fff82e7033ab | 26,417 |
def sum_ratios_to_percentage(ratios):
"""Sums ratios and converts to two-decimal rounded percentage"""
return round(sum(ratios) * 100, 2) | 2341436755fbf67559b164919bbab275d9a935ea | 26,418 |
import re
def _remove_whitespace(string):
"""Return a version of the input string with whitespace removed"""
whitespace_re = re.compile(r"\s+")
return whitespace_re.sub('', string) | 0da9083b1f4d4e4c8cb4a375b2e70cd3b70a4564 | 26,422 |
def count(s, value):
"""
    For-loop version of traversal
    :param s: the linked list to traverse
    :param value: the value to search for
    :return: the number of occurrences of value
"""
total = 0
for elem in s:
if elem == value:
total = total + 1
return total | bbe0ff7d00943a45c912d02f8e6e52748f9725e3 | 26,424 |
def _split_attrs(attrs, second_part_keys=None):
"""Split `attrs` dictionary into two parts:
* Dict with keys that are not in `second_part_keys`
* Remainder dict with keys in `second_part_keys`
"""
    dict_1 = {k: v for k, v in attrs.items() if k not in second_part_keys}
    dict_2 = {k: v for k, v in attrs.items() if k in second_part_keys}
return dict_1, dict_2 | ed19608ec7bc5bf5b40a0a1315e952e7af103dc7 | 26,425 |
from urllib.parse import urlparse, urlencode, parse_qs, unquote
def merge_url_query(url, _doseq=True, **kwargs):
"""Add (new) query params to url, overwriting existing query parameters.
Note: Python 3.5+ only
Args:
url (str): A URL, e.g. 'http://stackoverflow.com/search?q=question'
_doseq (bool): Wether to urlencode using doseq
kwargs (dict): Query parameters to add to url, e.g. {'q': ['a', 'b']}
Returns:
str: Modified URL, e.g. 'http://stackoverflow.com/search?q=a&q=b'
"""
parsed = urlparse(unquote(url))
return parsed._replace(
query=urlencode({**parse_qs(parsed.query), **kwargs}, doseq=_doseq)
).geturl() | fad16c498b9aa8fcac10cbcd995166a5f443a59d | 26,427 |
from typing import Iterable
from typing import Callable
def count_nice_words(all_words: Iterable[str], checker: Callable[[str], bool]) -> int:
"""
Count the number of words which are nice, using the given nice-word-checking function.
"""
return sum(1 for word in all_words if checker(word)) | 53d7de35aad33d159c985acadad1a68fbb5ef11e | 26,428 |
def getAngleStatus(angle):
""" returns status based on the angle. Status codes:
0 - straight (-10 to +10)
1 - left or down (less than -10)
2 - right or up (greater than 10)
"""
if angle < -10:
return 1
elif angle > 10:
return 2
return 0 | 235c988bc285be4fbfbb3f3113030b79585c1976 | 26,430 |