content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---|
def format_address(msisdn):
"""
Format a normalized MSISDN as a URI that ParlayX will accept.
"""
if not msisdn.startswith('+'):
raise ValueError('Only international format addresses are supported')
return 'tel:' + msisdn[1:] | f5a5cc9f8bcf77f1185003cfd523d7d6f1212bd8 | 5,320 |
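A quick usage sketch (the MSISDN below is a made-up example):
assert format_address('+15551234567') == 'tel:15551234567'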
def get_nag_statistics(nag):
"""Return a report containing all NAG statistics"""
report = """Constants: {0}
Inputs: {1}
NANDs: {2}
Outputs: {3}
Min. I/O distance: {4}
Max. I/O distance: {5}""".format(
nag.constant_number,
nag.input_number,
nag.nand_number,
nag.output_number,
nag.input_to_output_min_distance,
nag.input_to_output_max_distance)
return report | 44d3f32bc0b05d8b1d81c3b32dc140af4fd20aa0 | 5,321 |
def mean(image):
"""The mean pixel value"""
return image.mean() | 176dd8d483008fa1071f0f0be20c4b53ad0e2a5f | 5,322 |
import math
def ellipse_properties(x, y, w):
"""
    Given the (x, y) locations of the foci of the ellipse and the width, return
the center of the ellipse, width, height, and angle relative to the x-axis.
:param double x: x-coordinates of the foci
:param double y: y-coordinates of the foci
:param double w: width of the ellipse
:rtype: tuple of doubles
:returns: (center_coordinates, width, height, angle_in_rads)
"""
p1 = [x[0], y[0]]
p2 = [x[1], y[1]]
#center point
xy = [(p1[0] + p2[0])/2, (p1[1] + p2[1])/2]
#distance between points
d = ((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)**(0.5)
    #theta to positive x-axis; atan2 avoids division by zero for vertically aligned foci
    angle = math.atan2(p1[1] - p2[1], p1[0] - p2[0])
#width will be the axis the points lie on
width = 2*((0.5*d)**2 + (0.5*w)**2)**(0.5)
height = w
return (xy, width, height, angle*180/math.pi) | 95864eac0feb9c34546eefed5ca158f330f88e3d | 5,324 |
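A small usage sketch with horizontally aligned foci, worked out from the formulas above:
center, width, height, angle_deg = ellipse_properties([4.0, 0.0], [0.0, 0.0], 2.0)
# center == [2.0, 0.0], width == 2*sqrt(5) ~= 4.472, height == 2.0, angle_deg == 0.0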
import os
def dir_exists(path):
    """Test if path exists and is a directory"""
    return os.path.exists(path) and os.path.isdir(path)
import requests
def fetch_data(full_query):
"""
Fetches data from the given url
"""
url = requests.get(full_query)
    # Parse the JSON data so it can be used as a normal dict
raw_data = url.json()
# It's a good practice to always close opened urls!
url.close()
return raw_data | 576b2548c1b89827e7586542e4d7e3f0cc89051d | 5,326 |
def _get_precision_type(network_el):
"""Given a network element from a VRP-REP instance, returns its precision type:
floor, ceil, or decimals. If no such precision type is present, returns None.
"""
if 'decimals' in network_el:
return 'decimals'
if 'floor' in network_el:
return 'floor'
if 'ceil' in network_el:
return 'ceil'
return None | b3b451a26ec50ce5f2424ea7a3652123ae96321d | 5,327 |
import hashlib
def md5_str(content):
"""
    Compute the MD5 digest of a string
    :param content: input string
    :return: hex digest of the MD5 hash
"""
m = hashlib.md5(content.encode('utf-8'))
return m.hexdigest() | affe4742c2b44a60ef6dafa52d7a330594a70ed9 | 5,328 |
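A usage sketch with a well-known MD5 test value:
assert md5_str('hello') == '5d41402abc4b2a76b9719d911017c592'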
def update_params(old_param, new_param, errors="raise"):
""" Update 'old_param' with 'new_param'
"""
# Copy old param
updated_param = old_param.copy()
for k,v in new_param.items():
if k in old_param:
updated_param[k] = v
else:
if errors=="raise":
raise Exception(f"Parameters {k} not recognized as a default parameter for this estimator")
else:
pass
return updated_param | 95de4e8e1278b07d2bd8ccc61af4e2dc43f87ca2 | 5,329 |
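A usage sketch with made-up estimator parameters:
defaults = {"lr": 0.1, "depth": 3}
print(update_params(defaults, {"lr": 0.01}))  # {'lr': 0.01, 'depth': 3}
# update_params(defaults, {"typo": 1}) would raise, since "typo" is not a default key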
def collections(id=None):
"""
Return Collections
Parameters
----------
id : STR, optional
        The default is None, which returns all known collections.
        You can provide an ICOS URI or DOI to filter for a specific collection
Returns
-------
query : STR
A query, which can be run against the SPARQL endpoint.
"""
if not id:
        coll = ''  # insert an empty string into the SPARQL query
else:
coll = ''.join(['FILTER(str(?collection) = "' + id+ '" || ?doi = "' + id + '") .'])
query = """
prefix cpmeta: <http://meta.icos-cp.eu/ontologies/cpmeta/>
prefix dcterms: <http://purl.org/dc/terms/>
select * where{
?collection a cpmeta:Collection .
%s
OPTIONAL{?collection cpmeta:hasDoi ?doi} .
?collection dcterms:title ?title .
OPTIONAL{?collection dcterms:description ?description}
FILTER NOT EXISTS {[] cpmeta:isNextVersionOf ?collection}
}
order by ?title
""" % coll
return query | 0cd1704d2ac43f34d6e83a3f9e9ead39db390c2e | 5,331 |
def sanitize_bvals(bvals, target_bvals=[0, 1000, 2000, 3000]):
"""
Remove small variation in bvals and bring them to their closest target bvals
"""
for idx, bval in enumerate(bvals):
bvals[idx] = min(target_bvals, key=lambda x: abs(x - bval))
    return bvals
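A usage sketch; note that the input list is also modified in place:
print(sanitize_bvals([5, 995, 2010, 2998]))  # [0, 1000, 2000, 3000]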
from typing import Dict
import requests
import logging
def get_estate_urls(last_estate_id: str) -> Dict:
"""Fetch urls of newly added estates
Args:
last_estate_id (str): estate_id of the most recent estate added (from last scrape)
Returns:
Dict: result dict in format {estate_id_1: {estate_url_1}, ... estate_id_N: {estate_url_N}}
"""
# Calculate number of API pages based on result size and estates per page
base_url = 'https://www.sreality.cz/api/'
res = requests.get(base_url + 'cs/v2/estates?per_page=1&page=1')
    # Round up so a final partial page is not dropped
    num_pages = -(-res.json()['result_size'] // 500)
# Obtain url suffix for each estate up until the newest from last scrape
estate_urls = {}
    for page in range(1, num_pages + 1):
url = base_url + f'cs/v2/estates?per_page=500&page={page}'
# EAFP
try:
res = requests.get(url)
res.raise_for_status()
except requests.exceptions.HTTPError as error:
            logging.error(error)
            continue
# Some API responses are missing the content
# which causes the entire scraper to fail
res = res.json().get("_embedded")
if res is None:
continue
estates = res["estates"]
for estate in estates:
estate_url = estate["_links"]["self"]["href"]
estate_id = estate_url.split("/")[-1]
# Break once we hit an estate from last scraping
already_scraped = estate_id == last_estate_id
if already_scraped:
return estate_urls
estate_urls[estate_id] = estate_url
return estate_urls | d93299002204edc9d26b3c77e2dff1f56f4b93d8 | 5,333 |
def bib_to_string(bibliography):
""" dict of dict -> str
Take a biblatex bibliography represented as a dictionary
and return a string representing it as a biblatex file.
"""
string = ''
for entry in bibliography:
string += '\n@{}{{{},\n'.format(
bibliography[entry]['type'],
bibliography[entry]['id']
)
for field in bibliography[entry]:
if field != 'id' and field != 'type':
string += '\t{} = {{{}}},\n'.format(
field,
bibliography[entry][field]
)
string = string[:-2] + '}\n'
return string | c8fc4247210f74309929fdf9b210cd6f1e2ece3f | 5,335 |
def check_prio_and_sorted(node):
"""Check that a treap object fulfills the priority requirement and that its sorted correctly."""
if node is None:
return None # The root is empty
else:
if (node.left_node is None) and (node.right_node is None): # No children to compare with
pass # Do nothing
elif node.left_node is None: # No left child
assert node.prio <= node.right_node.prio # Check priority of right child and node
assert node.data < node.right_node.data # Check sorting
elif node.right_node is None: # No right child
assert node.prio <= node.left_node.prio # Check priority of left child and node
assert node.data > node.left_node.data # Check sorting
else: # Two children
            assert node.prio <= node.left_node.prio and node.prio <= node.right_node.prio # Check priority of both left and right child with node
assert (node.data > node.left_node.data) and (node.data < node.right_node.data) # Check sorting
check_prio_and_sorted(node.left_node) # Recursion. Goes down the left tree first
check_prio_and_sorted(node.right_node) # Recursion. Goes down the right tree next | 64100fd4ba9af699ab362d16f5bbf216effa2da5 | 5,336 |
import pickle
async def wait_for_msg(channel):
"""Wait for a message on the specified Redis channel"""
while await channel.wait_message():
pickled_msg = await channel.get()
return pickle.loads(pickled_msg) | dca398cb3adeb778458dd6be173a53cdd204bcb9 | 5,337 |
def get_tile_prefix(rasterFileName):
"""
Returns 'rump' of raster file name, to be used as prefix for tile files.
rasterFileName is <date>_<time>_<sat. ID>_<product type>_<asset type>.tif(f)
where asset type can be any of ["AnalyticMS","AnalyticMS_SR","Visual","newVisual"]
The rump is defined as <date>_<time>_<sat. ID>_<product type>
"""
return rasterFileName.rsplit("_", 1)[0].rsplit("_AnalyticMS")[0] | 15b517e5ba83b2cfb5f3b0014d800402c9683815 | 5,339 |
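A usage sketch with a hypothetical raster file name:
print(get_tile_prefix("20180101_093015_1003_AnalyticMS_SR.tif"))  # 20180101_093015_1003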
def preprocess(df):
"""Preprocess the DataFrame, replacing identifiable information"""
# Usernames: <USER_TOKEN>
username_pattern = r"(?<=\B|^)@\w{1,18}"
    df.text = df.text.str.replace(username_pattern, "<USERNAME>", regex=True)
# URLs: <URL_TOKEN>
url_pattern = (
r"https?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]"
r"|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
    df.text = df.text.str.replace(url_pattern, "<URL>", regex=True)
# Email: <EMAIL_TOKEN>
email_pattern = r"[-.+\w]+@[-\w]+\.[-.\w]+"
    df.text = df.text.str.replace(email_pattern, "<EMAIL>", regex=True)
# Replace tokens in Wikipedia Talk dataset
df.text = df.text.str.replace("NEWLINE;?_TOKEN", "\n")
df.text = df.text.str.replace("TAB_TOKEN", "\t")
return df | d592d9e56af9ec17dcebede31d458dfdc001c220 | 5,340 |
def layout(mat,widths=None,heights=None):
"""layout"""
ncol=len(mat[0])
nrow=len(mat)
arr=[]
list(map(lambda m: arr.extend(m),mat))
rscript='layout(matrix(c(%s), %d, %d, byrow = TRUE),' %(str(arr)[1:-1],nrow,ncol)
if widths:
rscript+='widths=c(%s),' %(str(widths)[1:-1])
if heights:
rscript+='heights=c(%s),' %(str(heights)[1:-1])
rscript=rscript[:-1]+')\n'
return rscript | 813fb351b4e09d4762255ecbbe6f9ee7e050efd0 | 5,341 |
import re
def MatchNameComponent(key, name_list, case_sensitive=True):
"""Try to match a name against a list.
This function will try to match a name like test1 against a list
like C{['test1.example.com', 'test2.example.com', ...]}. Against
this list, I{'test1'} as well as I{'test1.example'} will match, but
not I{'test1.ex'}. A multiple match will be considered as no match
at all (e.g. I{'test1'} against C{['test1.example.com',
'test1.example.org']}), except when the key fully matches an entry
(e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
@type key: str
@param key: the name to be searched
@type name_list: list
@param name_list: the list of strings against which to search the key
@type case_sensitive: boolean
@param case_sensitive: whether to provide a case-sensitive match
@rtype: None or str
@return: None if there is no match I{or} if there are multiple matches,
otherwise the element from the list which matches
"""
if key in name_list:
return key
re_flags = 0
if not case_sensitive:
re_flags |= re.IGNORECASE
key = key.upper()
name_re = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)
names_filtered = []
string_matches = []
for name in name_list:
if name_re.match(name) is not None:
names_filtered.append(name)
if not case_sensitive and key == name.upper():
string_matches.append(name)
if len(string_matches) == 1:
return string_matches[0]
if len(names_filtered) == 1:
return names_filtered[0]
return None | ad522feba9cabb3407e3b8e1e8c221f3e9800e16 | 5,342 |
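A usage sketch illustrating the matching rules from the docstring:
names = ['test1.example.com', 'test2.example.com']
print(MatchNameComponent('test1', names))     # 'test1.example.com'
print(MatchNameComponent('test1.ex', names))  # None: not a component boundary
print(MatchNameComponent('test1', ['test1.example.com', 'test1.example.org']))  # None: ambiguous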
def _non_overlapping_chunks(seq, size):
"""
This function takes an input sequence and produces chunks of chosen size
    that strictly do not overlap. This is a much faster implementation than
    _overlapping_chunks and should be preferred if running on a very large seq.
Parameters
----------
seq : tuple or list
Sequence of integers.
size : int
Length of each produced chunk.
Returns
-------
zip
zip object that produces chunks of specified size, one at a time.
"""
return zip(*[iter(seq)] * size) | 15b5d2b4a7d8df9785ccc02b5369a3f162704e9e | 5,344 |
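A usage sketch; the zip-of-repeated-iterator trick drops any trailing partial chunk:
print(list(_non_overlapping_chunks([1, 2, 3, 4, 5, 6, 7], 3)))  # [(1, 2, 3), (4, 5, 6)]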
def neighbour(x,y,image):
"""Return 8-neighbours of image point P1(x,y), in a clockwise order"""
img = image.copy()
    x_1, y_1, x1, y1 = x-1, y-1, x+1, y+1
return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1]] | 8e645f7634d089a0e65335f6ea3363d4ed66235b | 5,348 |
def gravity_effect(position, other_position):
"""Return effect other_position has on position."""
if position == other_position:
return 0
elif position > other_position:
return -1
return 1 | 25130c253cb888057e9b52817cac9cf3778a4c69 | 5,350 |
import fnmatch
def ignore_paths(path_list, ignore_patterns, process=str):
"""
Go through the `path_list` and ignore any paths that match the patterns in `ignore_patterns`
:param path_list: List of file/directory paths.
:param ignore_patterns: List of nukeignore patterns.
:param process: Function to apply to every element in the path list before performing match.
:return: The updated path list
"""
for pattern in ignore_patterns:
path_list = [
n for n in path_list if not fnmatch.fnmatch(process(n), pattern)
]
return path_list | 63196e54eb4505cbe12ebf77d2a42fede68c1d0b | 5,351 |
def reachable(Adj, s, t):
"""
Adj is adjacency list rep of graph
Return True if edges in Adj have directed path from s to t.
Note that this routine is one of the most-used and most time-consuming
of this whole procedure, which is why it is passed an adjacency list
rep rather than a list of vertices and edges, since the adjacency list
rep is easy to update when a new edge is committed to in RP.
"""
# search for path
Q = [ s ] # vertices to expand
R = set([s]) # reachable
while Q:
i = Q.pop()
for j in Adj[i]:
if j == t:
return True
if j not in R:
R.add(j)
Q.append(j)
return False | dc0ea0c6d2314fa1c40c3f3aa257a1c77892141f | 5,353 |
def concatenate_unique(la, lb):
"""Add all the elements of `lb` to `la` if they are not there already.
The elements added to `la` maintain ordering with respect to `lb`.
Args:
la: List of Python objects.
lb: List of Python objects.
Returns:
`la`: The list `la` with missing elements from `lb`.
"""
la_set = set(la)
for l in lb:
if l not in la_set:
la.append(l)
la_set.add(l)
return la | 307fde291233727c59e2211afc3e0eed7c8ea092 | 5,354 |
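A usage sketch showing that the ordering of the appended elements from lb is preserved:
print(concatenate_unique([1, 2, 3], [3, 4, 2, 5]))  # [1, 2, 3, 4, 5]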
import os
import inspect
def _find_path(image):
"""Searches for the given filename and returns the full path.
Searches in the directory of the script that called (for example)
detect_match, then in the directory of that script's caller, etc.
"""
if os.path.isabs(image):
return image
# stack()[0] is _find_path;
# stack()[1] is _find_path's caller, e.g. detect_match;
# stack()[2] is detect_match's caller (the user script).
for caller in inspect.stack()[2:]:
caller_image = os.path.join(
os.path.dirname(inspect.getframeinfo(caller[0]).filename),
image)
if os.path.isfile(caller_image):
return os.path.abspath(caller_image)
# Fall back to image from cwd, for convenience of the selftests
return os.path.abspath(image) | 402a4ee96229db6a94ce77bfca73749509fbd714 | 5,357 |
def create_feature_indices(header):
"""
Function to return unique features along with respective column indices
for each feature in the final numpy array
Args:
header (list[str]): description of each feature's possible values
Returns:
feature_indices (dict): unique feature names as keys with value
        types (discrete or continuous) and data column indices where present
"""
feature_indices = {}
for i, head in enumerate(header):
current = head.split("->")
str_name = current[0].replace(" ", "_")
if current[0] == "mask":
feature_indices["presence_" +
current[1].replace(" ", "_")] = ["discrete", i]
elif feature_indices == {} or str_name not in feature_indices:
if len(current) > 1:
feature_indices[str_name] = ["discrete", i]
else:
feature_indices[str_name] = ["continuous", i]
elif str_name in feature_indices:
feature_indices[str_name].extend([i])
return feature_indices | a29d8c4c8f3a31ad516216756b7eba7eb4110946 | 5,364 |
def lower(word):
"""Sets all characters in a word to their lowercase value"""
return word.lower() | f96b1470b3ab1e31cd1875ad9cbf9ed017aa0158 | 5,365 |
def _in_dir(obj, attr):
"""Simpler hasattr() function without side effects."""
return attr in dir(obj) | f95e265d278e3014e8e683a872cd3b70ef6133c9 | 5,366 |
import os
def file_exists(work_dir, path):
"""
goal: check if file exists
type: (string, string) -> bool
"""
    prev_dir = os.getcwd()
    try:
        os.chdir(work_dir)
        return os.path.exists(path) and os.path.isfile(path)
    except Exception:
        return False
    finally:
        # restore the original working directory on every path
        os.chdir(prev_dir)
def handler_good():
"""Return True for a good event handler."""
return True | 302ea021276cb9be2d5e98c2a09776f4ee53cc97 | 5,369 |
import argparse
def optional_list():
"""Return an OptionalList action."""
class OptionalList(argparse.Action):
"""An action that supports an optional list of arguments.
        This is the list equivalent of supplying a const value with nargs='?', which by itself only allows a single
        optional value.
"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values if values else self.const)
return OptionalList | 8e6f84e75c75893862dfbbb93d2d9c75ce229c68 | 5,371 |
def validate_comma_separated_list(argument):
"""Convert argument to a list."""
if not isinstance(argument, list):
argument = [argument]
last = argument.pop()
items = [i.strip(u' \t\n') for i in last.split(u',') if i.strip(u' \t\n')]
argument.extend(items)
return argument | bdf68db95d6070be4ffb5a74a646f5c730c726b4 | 5,372 |
import argparse
def setup():
"""
Parse command line arguments
Returns parsed arguments
"""
parser = argparse.ArgumentParser(description='Search Reddit Thing')
parser.add_argument(
'subreddit',
help="Enter the name of the subreddit to search.")
parser.add_argument(
'query',
help=("Enter search query. See {} for information on how to form search"
" queries.".format("https://www.reddit.com/wiki/search")))
parser.add_argument(
'-p', '--print', action="store_true", default=False,
help=("Pass this argument to print out detailed data."))
parser.add_argument(
'-t', '--title', action="store_true", default=False,
help=("Pass this argument to include post titles in data."))
parser.add_argument(
'-l', '--limit', action="store", type=int, default=None,
metavar='AMOUNT',
help=("Number of posts to grab. Default is as many as possible."))
parser.add_argument(
'-e', '--export', action="store", type=str, default=None,
metavar='FILENAME',
help=("Filename to export data to."))
parser.add_argument(
'-g', '--graph', action="store", type=str,
metavar='FILENAME',
help=("Export a graph of the data.")
)
return parser.parse_args() | 49748a64532fcedf3fcc96c8a56de224e6daac43 | 5,373 |
def get_holdout_set(train, target_column):
"""This is a sample callable to demonstrate how the Environment's `holdout_dataset` is evaluated. If you do provide a
callable, it should expect two inputs: the train_dataset (pandas.DataFrame), and the target_column name (string). You should
return two DataFrames: a modified train_dataset, and a holdout_dataset. What happens in between is up to you, perhaps split
apart a portion of the rows, but the idea is to remove part of train_dataset, and turn it into holdout_dataset. For this
example, we'll just copy train_dataset, which is a VERY BAD IDEA in practice. Don't actually do this"""
return train, train.copy() | ba2ea647c287f11f37bc4557ef389ed288b0bb02 | 5,374 |
def getprop(obj, string):
"""
    Follow a dotted attribute path on obj, for example 'position.x'
    :param obj: the object to read the attribute from
    :param string: dotted attribute path
    :return: the value at the end of the path
"""
tab = string.split('.')
curr_val = obj
    for attr_name in tab:
        curr_val = getattr(curr_val, attr_name)
return curr_val | c82f869395129a8b69d1a89cde97ce36fe5affd9 | 5,376 |
import argparse
def get_options():
"""Parses options."""
parser = argparse.ArgumentParser(
description="Dynamic inventory for Decapod.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-l", "--list",
help="List all inventory.",
action="store_true",
default=False
)
group.add_argument(
"-s", "--host",
help="List host specific variables",
default=None
)
return parser.parse_args() | c4efc1978dfb735e6e08060400d3752357e924a0 | 5,377 |
import warnings
def tracks(track):
"""
Check if the submitted RGTs are valid
Arguments
---------
track: ICESat-2 reference ground track (RGT)
"""
#-- string length of RGTs in granules
track_length = 4
#-- total number of ICESat-2 satellite RGTs is 1387
all_tracks = [str(tr + 1).zfill(track_length) for tr in range(1387)]
if track is None:
return ["????"]
else:
if isinstance(track, (str,int)):
assert int(track) > 0, "Reference Ground Track must be positive"
track_list = [str(track).zfill(track_length)]
elif isinstance(track, list):
track_list = []
for t in track:
assert int(t) > 0, "Reference Ground Track must be positive"
track_list.append(str(t).zfill(track_length))
else:
            raise TypeError(
                "Reference Ground Track must be a list or string"
            )
#-- check if user-entered RGT is outside of the valid range
if not set(all_tracks) & set(track_list):
warnings.filterwarnings("always")
warnings.warn("Listed Reference Ground Track is not available")
return track_list | 306b213fc040dbaabf515dfeff7efe45db656549 | 5,378 |
import random
def random_sampling(predictions, number):
"""
    This method returns the next values that we need to label
    for our training, using random prioritisation
Args:
predictions : A matrix of probabilities with all the predictions
for the unlabelled data
number : The number of indexes that we need to return
Returns:
        The indexes that we need to label and enter into the training set
"""
return random.sample(range(len(predictions)), number) | 22a1b13122bdf5c1b95d2b039458d27a62544f6d | 5,379 |
def get_element_parts(
original_list: list, splitter_character: str, split_index: int
) -> list:
"""
Split all elements of the passed list on the passed splitter_character.
Return the element at the passed index.
Parameters
----------
original_list : list
List of strings to be split.
splitter_character : str
Character to split the strings on.
split_index : int
Index of the element to be returned.
Returns
-------
list
List with the elements at the passed index.
"""
new_list = []
for element in original_list:
temp_element = element.rsplit(splitter_character)[split_index] # split element
temp_element = temp_element.strip() # clean data
temp_element = temp_element.casefold() # force lower case
new_list.append(temp_element)
return new_list | 8c663fd64ebb1b2c53a64a17f7d63e842b457652 | 5,380 |
def create_inference_metadata(object_type, boundary_image, boundary_world):
"""
    Create metadata for **each** detected object
    :param object_type: Type of the object | int
    :param boundary_image: Boundary of the object in image coordinates
    :param boundary_world: Boundary of the object in GCS - shape: 2(x, y) x points | np.array
    :return: JSON object of each detected object ... python dictionary
"""
obj_metadata = {
"obj_type": object_type,
"obj_boundary_image": boundary_image
}
object_boundary = "POLYGON (("
for i in range(boundary_world.shape[1]):
object_boundary = object_boundary + str(boundary_world[0, i]) + " " + str(boundary_world[1, i]) + ", "
object_boundary = object_boundary + str(boundary_world[0, 0]) + " " + str(boundary_world[1, 0]) + "))"
# print("object_boundary: ", object_boundary)
obj_metadata["obj_boundary_world"] = object_boundary # string in wkt
# print("obj_metadata: " ,obj_metadata)
return obj_metadata | b13f1ad4abc22f3eaca2c81c56ab9cb0eae80aa9 | 5,381 |
from typing import Tuple
from typing import Dict
def parse_line_protocol_stat_key(key: str) -> Tuple[str, Dict[str, str]]:
"""Parseline protocolish key to stat prefix and key.
Examples:
SNMP_WORKER;hostname=abc.com,worker=snmp-mti
will become:
("SNMP_WORKER", {"hostname": "abc.com", "worker": "snmp-mti"})
"""
try:
prefix, raw_labels = key.split(";", 1)
labels = dict(raw_label.split("=", 1) for raw_label in raw_labels.split(","))
return prefix, labels
except ValueError:
return key, {} | a6806f7dd67fb2a4734caca94bff3d974923f4b2 | 5,382 |
def add_without_punctuation(line, punctuation):
"""Returns the line cleaned of punctuation.
Param:
line (unicode)
Returns:
        False if there is no punctuation to remove
        The corrected line otherwise
"""
cleaned_line = line.translate(str.maketrans('', '', punctuation))
if line != cleaned_line:
return cleaned_line
else:
return False | 20dafde21efad966f8ea1be0da928594e2ee5cc4 | 5,384 |
from typing import Callable
def there_is_zero(
f: Callable[[float], float], head: float, tail: float, subint: int
) -> bool:
"""
Checks if the function has a zero in [head, tail], looking at subint
subintervals
"""
length = tail - head
step = length / subint
t = head
a = f(head)
for i in range(1, subint + 1):
t += step
if a * f(t) <= 0:
return True
return False | dd80c55d4be5fed2e3100672ea63862014b0f8cc | 5,385 |
def acknowledgements():
"""Provides acknowlegements for the JRO instruments and experiments
Returns
-------
ackn : str
String providing acknowledgement text for studies using JRO data
"""
ackn = ' '.join(["The Jicamarca Radio Observatory is a facility of the",
"Instituto Geofisico del Peru operated with support from",
"the NSF AGS-1433968 through Cornell University."])
return ackn | 013727319d43baaec57461995af8a683b5f02278 | 5,386 |
def list_vmachines(vdc):
"""
Returns:
list: vmachines info
"""
return vdc.to_dict()["vmachines"] | b3ce74c5b6f7d6d9f109a884f0c050ffae840e70 | 5,387 |
import os
def abspaths(paths):
"""
    A wrapper around os.path.abspath to process multiple paths.
"""
return [os.path.abspath(p) for p in paths] | ae3591a11ee152a0ca2eb342644749c669c8d78e | 5,389 |
def zero_at(pos, size=8):
"""
Create a size-bit int which only has one '0' bit at specific position.
:param int pos: Position of '0' bit.
:param int size: Length of value by bit.
:rtype: int
"""
assert 0 <= pos < size
return 2**size - 2**(size - pos - 1) - 1 | 7ebdcc1ac9db4ad934108f67a751b336b4f18011 | 5,391 |
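A usage sketch showing the produced bit patterns:
print(format(zero_at(0), '08b'))  # 01111111
print(format(zero_at(3), '08b'))  # 11101111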
import torch
def get_bert_model():
"""
Load uncased HuggingFace model.
"""
bert_model = torch.hub.load('huggingface/pytorch-transformers',
'model',
'bert-base-uncased')
return bert_model | 51b49255fe4b1291d538251c8c199bd570fb1a31 | 5,392 |
def md5s_loaded(func):
"""Decorator which automatically calls load_md5s."""
def newfunc(self, *args, **kwargs):
        if self.md5_map is None:
self.load_md5s()
return func(self, *args, **kwargs)
return newfunc | 9eba943b939c484280b6dca79cf79fc04337f0ab | 5,393 |
def get_categories(categories_file):
""" Group categories by image
"""
# map each category id to its name
id_to_category = {}
for category in categories_file['categories']:
id_to_category[category['id']] = category['name']
image_categories = {}
for category in categories_file['annotations']:
if category['image_id'] not in image_categories:
image_categories[category['image_id']] = []
if id_to_category[category['category_id']] not in image_categories[category['image_id']]:
image_categories[category['image_id']].append(id_to_category[category['category_id']])
return image_categories | 10377ea688c2e33195f137cc9470cadd6eb2b9e7 | 5,396 |
def unpack_uid(uid):
"""
Convert packed PFile UID to standard DICOM UID.
Parameters
----------
uid : str
packed PFile UID as a string
Returns
-------
uid : str
unpacked PFile UID as string
"""
return ''.join([str(i-1) if i < 11 else '.' for pair in [(ord(c) >> 4, ord(c) & 15) for c in uid] for i in pair if i > 0]) | cb131f3df386c40382cf70ddee5125f901de5fa8 | 5,398 |
import os
def read_links(ls):
"""Returns list of objects with source and target"""
return list(map(lambda el: {"source": el, "target": os.readlink(el)}, ls)) | 9768106043c706ed37b90de9289325f0574096db | 5,399 |
from typing import Counter
def generate_samples(n_samples, func, *args, **kwargs):
"""Call a function a bunch of times and count the results.
Args:
        n_samples: Number of times to call the function.
        func: The function results are counted from.
        *args: Positional arguments to pass to func.
        **kwargs: Keyword arguments to pass to func.
Returns:
Counter containing results.
"""
samples = Counter()
for _ in range(n_samples):
res = func(*args, **kwargs)
samples[res] += 1
return samples | 625c2bf6713420e26704d2c2842504343be09434 | 5,400 |
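A usage sketch simulating die rolls (counts will vary run to run):
import random
print(generate_samples(1000, random.randint, 1, 6))  # e.g. Counter({4: 179, 1: 172, ...})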
def capitalize_1(string):
"""
Capitalizes a string using a combination of the upper and lower methods.
:author: jrg94
:param string: any string
:return: a string with the first character capitalized and the rest lowercased
"""
return string[0].upper() + string[1:].lower() | 9ad830a6d38e19b195cd3dff9a38fe89c49bd5c8 | 5,401 |
def get_url_and_token(string):
""" extract url and token from API format """
try:
[token, api] = string.split(":", 1)
[_, _, addr, _, port, proto] = api.split("/", 5)
url = f"{proto}://{addr}:{port}/rpc/v0"
except Exception:
raise ValueError(f"malformed API string : {string}")
return (url, token) | f3abd327c9de2d098100e539f701bf2fff1742f5 | 5,403 |
import json
def handle_error(ex, hed_info=None, title=None, return_as_str=True):
"""Handles an error by returning a dictionary or simple string
Parameters
----------
ex: Exception
The exception raised.
hed_info: dict
A dictionary of information.
title: str
A title to be included with the message.
return_as_str: bool
If true return as string otherwise as dictionary
Returns
-------
str or dict
"""
if not hed_info:
hed_info = {}
if hasattr(ex, 'error_type'):
error_code = ex.error_type
else:
error_code = type(ex).__name__
if not title:
title = ''
if hasattr(ex, 'message'):
message = ex.message
else:
message = str(ex)
hed_info['message'] = f"{title}[{error_code}: {message}]"
if return_as_str:
return json.dumps(hed_info)
else:
return hed_info | 4b7bc24c9b4fd83d39f4447e29e383d1769e6b0f | 5,405 |
def getLineagesFromChangeo(changeodb, print_summary):
"""subsets the changeo_db output by bracer by only those cells which are within lineages (non singletons)"""
df = changeodb
_df = df[df.CLONE != "None"] # get rid of unassigned cells (no BCR reconstructed)
_df = (df.CLONE.value_counts() > 1) #find clones with more than 1 member
    if print_summary:
print( "There are", len(_df[_df == 1]), "lineages with more than one member")
CHANGEO_confidentlineages = df[df.CLONE.isin(_df[_df == 1].index)].sort_values('CLONE')
CHANGEO_confidentlineages = CHANGEO_confidentlineages[CHANGEO_confidentlineages.CLONE != 'None']
    if print_summary:
print("number of cells in original dataframe", df.shape[0])
print("number of distinct Clones in original dataframe", df.drop_duplicates('CLONE').shape[0] -1) #subtract 1 for the 'None' entry
        print(CHANGEO_confidentlineages.shape[0]/df.shape[0], 'fraction of cells in a lineage')
return CHANGEO_confidentlineages | 1a497b084118ce0993cf6509889831cab78d2a36 | 5,407 |
def command(cmd, label, env={}):
"""Create a Benchpress command, which define a single benchmark execution
This is a help function to create a Benchpress command, which is a Python `dict` of the parameters given.
Parameters
----------
cmd : str
The bash string that makes up the command
label : str
The human readable label of the command
env : dict
        The Python dictionary of environment variables to define before execution
Returns
-------
command : dict
The created Benchpress command
"""
return {'cmd': cmd,
'label': label,
'env': env} | 487e7b8518ae202756177fc103561ea03ded7470 | 5,409 |
import re
def get_text(string):
"""
    Normalize whitespace and strip HTML markup.
"""
    text = re.sub(r'\s+', ' ', string)
    text = re.sub(r'<.*?>', ' ', text)
return text | 5ef04effe14ee9b0eee90de791b3e3f3be6c15e3 | 5,410 |
import struct
def decode_ia(ia: int) -> str:
""" Decode an individual address into human readable string representation
>>> decode_ia(4606)
'1.1.254'
See also: http://www.openremote.org/display/knowledge/KNX+Individual+Address
"""
if not isinstance(ia, int):
ia = struct.unpack('>H', ia)[0]
return '{}.{}.{}'.format((ia >> 12) & 0x1f, (ia >> 8) & 0x07, (ia) & 0xff) | 6f107f47110a59ca16fe8cf1a7ef8f061bf117c7 | 5,411 |
from pathlib import Path
def _validate_magics_flake8_warnings(actual: str, test_nb_path: Path) -> bool:
"""Validate the results of notebooks with warnings."""
expected = (
f"{str(test_nb_path)}:cell_1:1:1: F401 'random.randint' imported but unused\n"
f"{str(test_nb_path)}:cell_1:2:1: F401 'IPython.get_ipython' imported but unused\n"
f"{str(test_nb_path)}:cell_3:6:21: E231 missing whitespace after ','\n"
f"{str(test_nb_path)}:cell_3:11:10: E231 missing whitespace after ','\n"
)
return actual == expected | 4baa419ad4e95bf8cc794298e70211c0fa148e5b | 5,412 |
def _split_uri(uri):
"""
Get slash-delimited parts of a ConceptNet URI.
Args:
uri (str)
Returns:
List[str]
"""
uri = uri.lstrip("/")
if not uri:
return []
return uri.split("/") | 91b48fff83041fe225a851a9e3016e3722bd9771 | 5,413 |
import time
def current_date(pattern="%Y-%m-%d %H:%M:%S"):
"""
    Get the current date/time as a formatted string
    :param pattern: format string for the returned date
    :return: string such as "2020-06-15 14:57:23"
"""
return time.strftime(pattern, time.localtime(time.time())) | 9a554e91e0842fe52f822f4403366695c92a609b | 5,414 |
def isValidPasswordPartTwo(firstIndex: int, secondIndex: int, targetLetter: str, password: str) -> int:
"""
Takes a password and returns 1 if valid, 0 otherwise. Second part of the puzzle
"""
bool1: bool = password[firstIndex - 1] == targetLetter
bool2: bool = password[secondIndex - 1] == targetLetter
return 1 if bool1 ^ bool2 else 0 | 81f85c3848909b5037f13ed641ec3a1b77dff3b1 | 5,415 |
def pre_process(cpp_line):
"""预处理"""
# 预处理
cpp_line = cpp_line.replace('\t', ' ')
cpp_line = cpp_line.replace('\n', '')
cpp_line = cpp_line.replace(';', '')
return cpp_line | 4c0db8ae834286106472aba425c45a8eeded3183 | 5,416 |
import os
def _safe_clear_dirflow(path):
"""
    Safely check that the path contains ONLY folders of png files;
    on any other structure it will simply ERROR out.
Parameters
----------
path: str
        path to the folders of data images to be checked
"""
print("Clearing {}...".format(path))
assert os.path.isdir(path), "Didn't pass a folder to be cleaned"
list_dir = [f for f in os.listdir(path) if not f.startswith('.')]
for folder in list_dir:
cat_folder = os.path.join(path, folder)
assert os.path.isdir(cat_folder), \
"Dir contains Non-Folder File!"
cat_folder_item = [f for f in os.listdir(cat_folder)
if not f.startswith('.')]
for file in cat_folder_item:
# For every file, confirm is PNG or error.
# DONT DELETE YET, IN CASE OF ERRORS!
assert ".png" in file, "Folder has Non PNG Contents!"
# If we got though that with no error, then now we can delete!
# for folder in os.listdir(the_path):
# cat_folder = os.path.join(the_path, folder)
# for file in os.listdir(cat_folder):
# os.remove(os.path.join(cat_folder, file))
# os.rmdir(cat_folder)
# os.rmdir(the_path)
return True | af92454143fee21c497e1dca2c268c9cb915dbd2 | 5,418 |
from functools import cmp_to_key
def order(list, cmp=None, key=None, reverse=False):
""" Returns a list of indices in the order as when the given list is sorted.
For example: ["c","a","b"] => [1, 2, 0]
This means that in the sorted list, "a" (index 1) comes first and "c" (index 0) last.
"""
if cmp and key:
f = lambda i, j: cmp(key(list[i]), key(list[j]))
elif cmp:
f = lambda i, j: cmp(list[i], list[j])
elif key:
f = lambda i, j: int(key(list[i]) >= key(list[j])) * 2 - 1
else:
f = lambda i, j: int(list[i] >= list[j]) * 2 - 1
return sorted(range(len(list)), key=cmp_to_key(f), reverse=reverse) | 7bcc6f44f02be4fb329b211b5caadf057d6d9b9a | 5,419 |
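A usage sketch matching the docstring example:
print(order(["c", "a", "b"]))          # [1, 2, 0]
print(order([3, 1, 2], reverse=True))  # [0, 2, 1]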
def numentries(arrays):
"""
    Counts the number of entries in an arrays dict from a ROOT file
    by looking at the length of the array under the first key
"""
return arrays[list(arrays.keys())[0]].shape[0] | e3c9f2e055f068f12039741ff9bb1091716263d5 | 5,420 |
import re
def split_range_str(range_str):
"""
    Split the range string into unit, start, and end.
    :param range_str: Range request string
    :return: tuple of (unit, start, end) or None
"""
re_matcher = re.fullmatch(r'([a-z]+)=(\d+)?-(\d+)?', range_str)
if not re_matcher or len(re_matcher.groups()) != 3:
return None
unit, start, end = re_matcher.groups()
    start = int(start) if start is not None else None
    end = int(end) if end is not None else None
return unit, start, end | a6817017d708abf774277bf8d9360b63af78860d | 5,428 |
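A usage sketch with HTTP Range header values:
print(split_range_str("bytes=0-499"))  # ('bytes', 0, 499)
print(split_range_str("bytes=500-"))   # ('bytes', 500, None)
print(split_range_str("0-499"))        # None, the unit is missing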
import sys
import os
import asyncio
def runner(fun, *args):
"""
Generic asyncio.run() equivalent for Python >= 3.5
"""
if sys.version_info >= (3, 7):
if os.name == "nt" and sys.version_info < (3, 8):
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
return asyncio.run(fun(*args))
if os.name == "nt":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
asyncio.get_child_watcher().attach_loop(loop)
result = loop.run_until_complete(fun(*args))
loop.close()
return result | 1893c46c40e1d42fc4927b0fb3d240c209405318 | 5,430 |
def read_custom_enzyme(infile):
"""
Create a list of custom RNase cleaving sites from an input file
"""
outlist = []
with open(infile.rstrip(), 'r') as handle:
for line in handle:
if '*' in line and line[0] != '#':
outlist.append(line.rstrip())
return outlist | 144c6de30a04faa2c9381bfd36bc79fef1b78443 | 5,431 |
def min_equals_max(min, max):
"""
    Return True if minimum value equals maximum value
Return False if not, or if maximum or minimum value is not defined
"""
return min is not None and max is not None and min == max | 1078e9ed6905ab8b31b7725cc678b2021fc3bc62 | 5,433 |
def infer_app_url(headers: dict, register_path: str) -> str:
"""
ref: github.com/aws/chalice#485
:return: The Chalice Application URL
"""
host: str = headers["host"]
scheme: str = headers.get("x-forwarded-proto", "http")
app_url: str = f"{scheme}://{host}{register_path}"
return app_url | f3c0d1a19c8a78a0fd2a8663e2cb86427ff8f61b | 5,435 |
import random
def mutationShuffle(individual, indpb):
"""
Inputs : Individual route
             Probability of mutation between (0,1)
Outputs : Mutated individual according to the probability
"""
size = len(individual)
for i in range(size):
if random.random() < indpb:
swap_indx = random.randint(0, size - 2)
if swap_indx >= i:
swap_indx += 1
individual[i], individual[swap_indx] = \
individual[swap_indx], individual[i]
return individual, | dea67e03b2905f1169e1c37b3456364fb55c7174 | 5,436 |
import json
import os
import subprocess
def adjust(*args, stdin=None):
"""run 'adjust' with the current directory set to the dir where this module is found.
Return a tuple (exitcode, parsed_stdout), the second item will be None if stdout was empty. An exception is raised if the process cannot be run or parsing stdout as json fails. A non-zero exit code from the process does not trigger an exception."""
if stdin is not None and not isinstance(stdin, bytes):
stdin = json.dumps(stdin)
stdin = stdin.encode("UTF-8")
mydir = os.path.dirname(os.path.abspath(__file__))
old_dir = os.getcwd()
try:
os.chdir(mydir)
r = subprocess.run(["./adjust"]+list(args), input=stdin, stdout=subprocess.PIPE, check=False)
finally:
os.chdir(old_dir)
# on success, parse the output from the subprocess (if not empty)
if r.stdout:
# take only the last line, if there are many (this discards any 'progress' lines)
stdout = r.stdout.strip().split(b"\n")[-1]
# return r.returncode, json.loads(stdout) # direct json.loads() of bytes doesn't work before py 3.6
# print(">>>", stdout.decode("UTF-8"), "<<<")
return r.returncode, json.loads(stdout.decode("UTF-8"))
else:
return r.returncode, None | 262d757520ff97a666df62b0700391acc03a42ba | 5,437 |
import os
def read_STAR_Logfinalout(Log_final_out):
"""
Log_final_out -- Folder or Log.final.out produced by STAR
"""
if os.path.isdir(Log_final_out):
Log_final_out = Log_final_out.rstrip('/')
dirfiles = os.listdir(Log_final_out)
if 'Log.final.out' in dirfiles:
Log_final_out += '/Log.final.out'
else:
raise RuntimeError("No Log.final.out file in directory")
Mapping = {}
for line in open(Log_final_out):
if '|' in line:
left, right = line.split('|')
left = left.strip()
right = right.strip()
if 'Number of input reads' == left:
Mapping['input'] = int(right)
elif 'Average input read length' == left:
Mapping['length'] = int(right)
elif 'Uniquely mapped reads number' == left:
Mapping['uniq_num'] = int(right)
elif 'Number of reads mapped to multiple loci' == left:
Mapping['mult_num'] = int(right)
elif 'Number of reads mapped to too many loci' == left:
Mapping['toomany_num'] = int(right)
unmapped = Mapping['input'] - Mapping['uniq_num'] - Mapping['mult_num'] - Mapping['toomany_num']
Mapping['unmap'] = unmapped
return Mapping | 9599a71624f5b65092cf67fd28c9dd3d7f44e259 | 5,438 |
import socket
def validate_args(args):
""" Checks if the arguments are valid or not. """
# Is the number of sockets positive ?
if not args.number > 0:
print("[ERROR] Number of sockets should be positive. Received %d" % args.number)
exit(1)
# Is a valid IP address or valid name ?
try:
servers = socket.getaddrinfo(args.address, args.port, proto=socket.IPPROTO_TCP)
return servers[0]
except socket.gaierror as error:
print(error)
print("Please, provide a valid IPv4, IPv6 address or a valid domain name.")
exit(1) | 37a77f59ae78e3692e08742fab07f35cf6801e54 | 5,439 |
from typing import Any
def ifnone(x: Any, y: Any):
"""
    Returns x if x is not None, otherwise returns y
"""
val = x if x is not None else y
return val | f2c7cf335ff919d610a23fac40d6af61e6a1e595 | 5,441 |
import re
def _rst_links(contents: str) -> str:
"""Convert reStructuredText hyperlinks"""
links = {}
def register_link(m: re.Match[str]) -> str:
refid = re.sub(r"\s", "", m.group("id").lower())
links[refid] = m.group("url")
return ""
def replace_link(m: re.Match[str]) -> str:
text = m.group("id")
refid = re.sub(r"[\s`]", "", text.lower())
try:
return f"[{text.strip('`')}]({links[refid]})"
except KeyError:
return m.group(0)
# Embedded URIs
contents = re.sub(
r"`(?P<text>[^`]+)<(?P<url>.+?)>`_", r"[\g<text>](\g<url>)", contents
)
# External Hyperlink Targets
contents = re.sub(
r"^\s*..\s+_(?P<id>[^\n:]+):\s*(?P<url>http\S+)",
register_link,
contents,
flags=re.MULTILINE,
)
contents = re.sub(r"(?P<id>[A-Za-z0-9_\-.:+]|`[^`]+`)_", replace_link, contents)
return contents | c7c937cdc04f9d5c3814538978062962e6407d65 | 5,442 |
def fibonacci(n: int) -> int:
"""
Calculate the nth Fibonacci number using naive recursive implementation.
:param n: the index into the sequence
:return: The nth Fibonacci number is returned.
"""
if n == 1 or n == 2:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2) | 08de1ff55f7cada6a940b4fb0ffe6ba44972b42d | 5,443 |
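A usage sketch; the naive recursion is exponential, so keep n small:
print([fibonacci(n) for n in range(1, 10)])  # [1, 1, 2, 3, 5, 8, 13, 21, 34]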
import subprocess
def get_current_SSID():
"""Helper function to find the WiFi SSID name.
Returns:
str: Wifi SSID name("" on Exception).
"""
try:
p = subprocess.Popen(["iwgetid", "-r"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return out.strip().decode("utf-8")
except Exception:
return "" | f88021770d292cf21d7de7221efc4c0aef0099b5 | 5,444 |
def compute_reference_gradient_siemens(duration_ms, bandwidth, csa=0):
"""
Description: computes the reference gradient for exporting RF files
to SIEMENS format, assuming the gradient level curGrad is desired.
Theory: the reference gradient is defined as that gradient for which
a 1 cm slice is excited for a 5.12 ms pulse. Demanding the product
Slicethickness * gamma * gradient * duration
to be equal in both cases (reference and current), one obtains
gamma*refGrad*(10 mm)*(5.12 ms) = gamma*curGrad*curThickness*pulse.tp
However, gamma*curGrad*curThickness = the pulses's bandwidth, pulseBW, so
refGrad = pulseBW*pulse.tp / (gamma*(10 mm)*(5.12 ms))
In general, the formula is,
(pulse_duration[ms]*pulse_bandwidth[kHz])
Ref_grad [mT/m] = --------------------------------
(Gyr[kHz/mT] * Ref_slice_thickness[m]* Ref_pulse_duration[ms])
Input Variables
Variables Name Units Description
------------------------------------
duration_ms ms Duration of pulse
bandwidth kHz Bandwidth of current pulse
csa kHz Chemical shift artifact "immunity" - see below.
Optional, set to 0 if not present.
Output Variables
Variables Name Units Description
------------------------------------
ref_grad mT/m Reference gradient
Chemical Shift Artifact immunity:
Since different chemical shifts shift the excitation region, it follows
the if we want to excite a range [-x,+x], we will not actually excite
that range for any offset other than 0 if we calibrate our gradient
for 0 offset. However, we CAN calibrate our gradient for 0 offset BUT
excite a larger range [-x-dx, x+dx] such that the pulse will affect
all chemical shifts equally. This of course comes at the price of
exciting a larger region which might have unwanted signals. This however
is good for:
1. Cases in which there are not external unwanted signals.
2. For dual-band suppression pulses, one sometimes uses the PASSBAND,
which is also the VOI, to calibrate the pulse. If we don't want
any spins in the VOI affected despite their varying chemical shifts
we can grant them immunity, at the cost of pushing away the
suppression bands - this works if, e.g., we're interested in killing
off fat away from the VOI, so we don't care if a bit of signal comes
from the region close to the VOI.
To use, set CSA to the range of +-chemical shifts you want to feel the
pulse. e.g., if you want all spins +-100 Hz from resonance to be affected
equally within the VOI, set CSA = 0.1.
"""
ref_slice_thickness = 0.01 # in meters
ref_duration = 5.12 # ms
gyromagnetic_ratio = 42.57 # kHz/milliTesla
ref_grad = ((bandwidth-2*csa)*duration_ms)/(gyromagnetic_ratio*ref_slice_thickness*ref_duration)
return ref_grad | 65cf8bd8e805e37e5966170daeae90594e45595e | 5,445 |
def read_config(lines):
"""Read the config into a dictionary"""
d = {}
current_section = None
for i, line in enumerate(lines):
line = line.strip()
if len(line) == 0 or line.startswith(";"):
continue
if line.startswith("[") and line.endswith("]"):
current_section = line[1:-1]
d[current_section] = {}
else:
if "=" not in line:
raise ValueError("No = in line: {}".format(line))
key, val = line.split("=", maxsplit=1)
if key in d[current_section]:
old_val = d[current_section][key]
if type(old_val) == list:
old_val.append(val)
else:
d[current_section][key] = [old_val, val]
else:
d[current_section][key] = val
return d | 613ed9291ab6546700b991fc9a5fc301c55ae497 | 5,446 |
import logging
def _filter_all_warnings(record) -> bool:
"""Filter out credential error messages."""
if record.name.startswith("azure.identity") and record.levelno == logging.WARNING:
message = record.getMessage()
if ".get_token" in message:
            return False  # drop credential-related .get_token warnings
return True | f16490ef39f9e3a63c791bddcba1c31176b925b7 | 5,447 |
def _parse_slice_str(slice_str):
"""Parses the given string as a multidimensional array slice and returns a
list of slice objects and integer indices."""
is_valid = False
if len(slice_str) > 2:
is_valid = slice_str[0] == "[" and slice_str[-1] == "]"
sliced_inds = []
if is_valid:
slice_str_list = [x.strip() for x in slice_str[1:-1].split(",")]
for s in slice_str_list:
parts = s.split(":")
if len(parts) > 3:
is_valid = False
break
if len(parts) == 1:
try:
sliced_inds.append(int(s))
                except ValueError:
is_valid = False
break
else:
try:
start = int(parts[0]) if len(parts[0]) > 0 else None
stop = int(parts[1]) if len(parts[1]) > 0 else None
if len(parts) == 3:
step = int(parts[2]) if len(parts[2]) > 0 else None
else:
step = None
                except ValueError:
is_valid = False
break
sliced_inds.append(slice(start, stop, step))
if not is_valid:
raise ValueError("Invalid slice specified: %s" % slice_str)
return sliced_inds | 6eb7a6b5d1dc2ee57e878b37be70e1e75d7d6ecc | 5,449 |
def AverageZComparison(x, y):
""" Take the average of second and third element in an array and compare
which is bigger. To be used in conjunction with the sort function. """
xsum = x[1]+x[2]
ysum = y[1]+y[2]
if xsum < ysum:
return -1
if xsum > ysum:
return 1
return 0 | 84c9e7b92df4b3e4914c769293f71790def5e4dd | 5,450 |
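As an old-style cmp function, under Python 3 this would be used via functools.cmp_to_key; a sketch with made-up triples:
from functools import cmp_to_key
points = [[0, 5, 5], [1, 1, 1], [2, 3, 2]]
print(sorted(points, key=cmp_to_key(AverageZComparison)))  # [[1, 1, 1], [2, 3, 2], [0, 5, 5]]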
from pathlib import Path
def parse_taxid_names(file_path):
"""
Parse the names.dmp file and output a dictionary mapping names to taxids
(multiple different keys) and taxids to scientific names.
Parameters
----------
file_path : str
The path to the names.dmp file.
Returns
-------
name2taxid : dict
Keys are all possible names and values are taxids.
taxid2name : dict
Keys are taxids and values are scientific names.
"""
names = Path(file_path)
with names.open() as f:
lines_processed = 0
name2taxid = {}
taxid2name = {}
for line in f:
lines_processed += 1
if lines_processed % 1000000 == 0:
print('processing line', str(lines_processed))
entries = [entry.strip() for entry in line.split('|')]
name2taxid[entries[1]] = entries[0]
if 'scientific name' in line:
taxid2name[entries[0]] = entries[1]
return name2taxid, taxid2name | 1d136f73a56ac8d3c02fd53c6e7928a39440e27a | 5,451 |
def convert_dict_id_values_to_strings(dict_list):
"""This function ensures that the ``id`` keys in a list of dictionaries use string values.
:param dict_list: List (or tuple) of dictionaries (or a single dictionary) containing API object data
:type dict_list: list, tuple, dict, None
:returns: A new dictionary list with properly formatted ``id`` values
:raises: :py:exc:`TypeError`
"""
dict_list = [dict_list] if isinstance(dict_list, dict) else dict_list
new_dict_list = []
for single_dict in dict_list:
if not isinstance(single_dict, dict):
raise TypeError("The 'dict_list' argument must be a dictionary or a list of dictionaries.")
if 'id' in single_dict and not isinstance(single_dict.get('id'), str):
single_dict['id'] = str(single_dict.get('id'))
new_dict_list.append(single_dict)
return new_dict_list | 7d1348910e5802c928b94bc74d71f3ce35770215 | 5,453 |
import os
def FileExtensionMatch(filePath, supportedFileTypeList):
"""
Check whether the file extension matches any of the supported file types.
Parameters
----------
filePath : string
File path
supportedFileTypeList : list
List of supported file extensions
"""
return (os.path.splitext(filePath)[1] in supportedFileTypeList) | bdab68917ead387269f52a51465f500a581967f6 | 5,454 |
from typing import List
def count_pairs(array: List[int], difference: int) -> int:
"""
Given an array of integers, count the number of unique pairs of integers that have a given difference.
These pairs are stored in a set in order to remove duplicates.
Time complexity: O(n^2).
:param array: is the array to count.
:param difference: is the difference between two elements.
:return: the number of unique pairs of integers that have a given difference.
"""
pairs = set()
for i in range(len(array)):
for j in range(len(array)):
if array[i] - array[j] == difference:
pairs.add((array[i], array[j]))
return len(pairs) | e027e8885f4c4531da9b7dab7de8e84a7004c913 | 5,457 |
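A usage sketch; pairs are counted as ordered (larger, smaller) tuples:
print(count_pairs([1, 7, 5, 9, 2, 12, 3], 2))  # 4: (3, 1), (5, 3), (7, 5), (9, 7)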
from json import dumps
def GetJson(data):
"""
    Convert an object to JSON
    @data the object to convert (dict/list/str/int...)
    """
    if isinstance(data, bytes): data = data.decode('utf-8')
return dumps(data) | 372b501f5ada7254efab10447dcbdc91c8799408 | 5,458 |
def binary_tail(n: int) -> int:
""" The last 1 digit and the following 0s of a binary representation, as a number """
return ((n ^ (n - 1)) + 1) >> 1 | 63460cef7b39b7e7ee2ec880810ff71d82be01e9 | 5,459 |
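A usage sketch showing the isolated lowest set bit (the same value as n & -n):
for n in (0b10100, 0b1011, 0b1000):
    print(bin(n), '->', bin(binary_tail(n)))  # 0b10100 -> 0b100, 0b1011 -> 0b1, 0b1000 -> 0b1000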
from datetime import datetime
def time_left(expire_date):
"""Return remaining days before feature expiration or 0 if expired."""
today_dt = datetime.today()
expire_dt = datetime.strptime(expire_date, "%d-%b-%Y")
# Calculate remaining days before expiration
days_left_td = expire_dt - today_dt
days_left = days_left_td.days
if days_left <= 0:
days_left = 0
return days_left | 652acd27b0d4fa9b21321df4ff8ce6ce15b97ed6 | 5,460 |
def video_id(video_id_or_url):
"""
Returns video id from given video id or url
Parameters:
-----------
video_id_or_url: str - either a video id or url
Returns:
--------
the video id
"""
if 'watch?v=' in video_id_or_url:
return video_id_or_url.split('watch?v=')[1]
else:
# assume we already have an video id
return video_id_or_url | 9f680ac621e1f5c6314a6a3e97093d786fa7ea33 | 5,461 |
import typing
def cast_to_str(some_value: typing.Any, from_type: typing.Any) -> typing.Any:
"""Just helper for creating suitable test assets."""
if from_type == bytes:
return some_value.decode()
return str(some_value) | 9157873a74d0d02b919d047710c1d4ccee4121a6 | 5,462 |
def sum_errors(dic):
"""Helper function to sum up number of failed jobs per host.
Assumes that dic is in the form
:param dict dic: {"error_code1":count1, "error_code2":count2, etc.}
:return int: Sum of all values in dic
"""
    return sum(value for key, value in dic.items())  # .iteritems() is Python 2 only
def alternate(*iterables):
"""
[a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]
>>> alternate([1,4], [2,5], [3,6])
[1, 2, 3, 4, 5, 6]
"""
items = []
for tup in zip(*iterables):
items.extend([item for item in tup])
return items | ed3b0c8a32de8d88fc24b8bb08012a0900b37823 | 5,464 |
def tex_initial_states(data):
"""Initial states are texed."""
initial_state = []
initial_state = [''.join(["\lstick{\ket{", str(data['init'][row]),"}}"]) for row in range(len(data['init']))]
return data, initial_state | cd1758b594ee854cfb7854ec742dc177a43b54b7 | 5,465 |
def FanOut(num):
"""Layer construction function for a fan-out layer."""
init_fun = lambda rng, input_shape: ([input_shape] * num, ())
apply_fun = lambda params, inputs, **kwargs: [inputs] * num
return init_fun, apply_fun | 7e6d07319be600dabf650a4b87f661bf20832455 | 5,466 |
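A usage sketch in the stax init_fun/apply_fun convention, assuming plain Python values as inputs:
init_fun, apply_fun = FanOut(3)
shapes, params = init_fun(None, (2, 5))  # shapes == [(2, 5), (2, 5), (2, 5)], params == ()
print(apply_fun(params, 'x'))            # ['x', 'x', 'x']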