content (string, 39–14.9k chars) | sha1 (string, 40 chars) | id (int64, 0–710k)
---|---|---
def binary_search(sorted_list, item):
"""
Implements a Binary Search, O(log n).
    If item is in the list, returns the number of steps taken.
    If item is not in the list, returns None.
"""
steps = 0
start = 0
end = len(sorted_list)
while start < end:
steps += 1
mid = (start + end) // 2
# print("#", mid)
if sorted_list[mid] == item:
return steps
        # If the item is less than the middle element
        # item == 3 and sorted_list == [1, 2, 3, 4, 5, 6, 8]
        # the END of the search range becomes the middle index (3), excluding all items from the middle to the end
        # end == 3
        # next time, when mid = (start + end) // 2 executes, mid == 1
if sorted_list[mid] > item:
end = mid
        # If the item is greater than the middle element
        # item == 8 and sorted_list == [1, 2, 3, 4, 5, 6, 8]
        # the START of the search range becomes the middle index (3) plus 1, excluding all items from the middle to the beginning
        # start == 4
        # next time, when mid = (start + end) // 2 executes, mid == 5
if sorted_list[mid] < item:
start = mid + 1
return None | 30b1bba330752455d932b4c6cf1ad4dab5969db3 | 707,987 |
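# Illustrative check of binary_search above (hypothetical usage; values hand-traced):
data = [1, 2, 3, 4, 5, 6, 8]
assert binary_search(data, 4) == 1   # found at the first midpoint (index 3)
assert binary_search(data, 8) == 3   # midpoints visited: indices 3, 5, 6
assert binary_search(data, 7) is None  # absent values return None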
def unpack_batch(batch, use_cuda=False):
""" Unpack a batch from the data loader. """
input_ids = batch[0]
input_mask = batch[1]
segment_ids = batch[2]
boundary_ids = batch[3]
pos_ids = batch[4]
rel_ids = batch[5]
knowledge_feature = batch[6]
    bio_ids = batch[7]
# knowledge_adjoin_matrix = batch[7]
# know_segment_ids = batch[6]
# know_input_ids = batch[7]
# know_input_mask = batch[8]
# knowledge_feature = (batch[6], batch[7], batch[8])
    return input_ids, input_mask, segment_ids, boundary_ids, pos_ids, rel_ids, knowledge_feature, bio_ids | 6bc8bc9b3c8a9e2b40ac08e67c9fbcf84914e2eb | 707,989 |
def truncate(text: str, length: int = 255, end: str = "...") -> str:
"""Truncate text.
Parameters
    ----------
text : str
length : int, default 255
Max text length.
end : str, default "..."
The characters that come at the end of the text.
Returns
-------
truncated text : str
Examples
--------
.. code-block:: html
<meta property="og:title" content="^^ truncate(title, 30) ^^">"""
return f"{text[:length]}{end}" | f14605542418ca95e4752be7ec2fea189b9454ce | 707,990 |
def get_parameter_by_name(device, name):
""" Find the given device's parameter that belongs to the given name """
for i in device.parameters:
if i.original_name == name:
return i
return | 9669262a9bcac8b4c054e07b2c04b780b5f84f87 | 707,994 |
import mpmath
def pdf(x, nu, sigma):
"""
PDF for the Rice distribution.
"""
if x <= 0:
return mpmath.mp.zero
with mpmath.extradps(5):
x = mpmath.mpf(x)
nu = mpmath.mpf(nu)
sigma = mpmath.mpf(sigma)
sigma2 = sigma**2
p = ((x / sigma2) * mpmath.exp(-(x**2 + nu**2)/(2*sigma2)) *
mpmath.besseli(0, x*nu/sigma2))
return p | b2d96bc19fb61e5aaf542b916d06c11a0e3dea46 | 708,002 |
def make_adder(n):
"""Return a function that takes one argument k and returns k + n.
>>> add_three = make_adder(3)
>>> add_three(4)
7
"""
def adder(k):
return k + n
return adder | 64808cb857f7bd17c8c81bfd749ed96efcc88a9f | 708,004 |
import torch
from typing import Union
from typing import Tuple
def groupby_apply(
keys: torch.Tensor, values: torch.Tensor, bins: int = 95, reduction: str = "mean", return_histogram: bool = False
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Groupby apply for torch tensors
Args:
keys: tensor of groups (``0`` to ``bins``)
values: values to aggregate - same size as keys
bins: total number of groups
reduction: either "mean" or "sum"
return_histogram: if to return histogram on top
Returns:
tensor of size ``bins`` with aggregated values and optionally with counts of values
"""
if reduction == "mean":
reduce = torch.mean
elif reduction == "sum":
reduce = torch.sum
else:
raise ValueError(f"Unknown reduction '{reduction}'")
uniques, counts = keys.unique(return_counts=True)
groups = torch.stack([reduce(item) for item in torch.split_with_sizes(values, tuple(counts))])
reduced = torch.zeros(bins, dtype=values.dtype, device=values.device).scatter(dim=0, index=uniques, src=groups)
if return_histogram:
hist = torch.zeros(bins, dtype=torch.long, device=values.device).scatter(dim=0, index=uniques, src=counts)
return reduced, hist
else:
return reduced | 711acc0cf2eb30e978f7f30686dbf67644d51fb0 | 708,005 |
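# Illustrative usage (assumes keys are sorted by group, as split_with_sizes requires):
keys = torch.tensor([0, 0, 1, 2, 2, 2])
values = torch.tensor([1.0, 3.0, 5.0, 2.0, 4.0, 6.0])
means, hist = groupby_apply(keys, values, bins=4, reduction="mean", return_histogram=True)
# means -> tensor([2., 5., 4., 0.]), hist -> tensor([2, 1, 3, 0])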
from typing import Tuple
def load_forcings_gauge_metadata(path: str) -> Tuple[float, float, float]:
"""
Loads gauge metadata from the header of a CAMELS-USE forcings file.
Parameters
----------
path: str
Path to the forcings file.
Returns
-------
tuple
(gauge latitude, gauge elevation, basin area [m²])
"""
with open(path, 'r') as file:
latitude = float(file.readline())
elevation = float(file.readline())
area = float(file.readline())
return latitude, elevation, area | c91c3bafb83709967d6dd480afd8e53ac9f94445 | 708,008 |
def append_artist(songs, artist):
"""
    When the list of songs gathered from the description contains only
    song titles, it usually means the video is an artist's album.
    If an artist was provided, prepends the artist to each song title
    using a hyphen (artist - song).
:param list songs: List of song titles (only song title)
:param str artist: Artist to search for with the song names
:return list: song titles along with the artist
"""
songs_complete = []
for song in songs:
song_complete = f'{artist} - {song}'
songs_complete.append(song_complete)
return songs_complete | b3fbda311849f68ab01c2069f44ea0f694365270 | 708,010 |
def get_missing_columns(missing_data):
"""
    Returns column names, as a list, for columns that contain missing data
:param
missing_data : return of missing_data(df)
:return
list: list containing columns with missing data
"""
missing_data = missing_data[missing_data['percent'] > 0]
missing_columns = missing_data.index.tolist()
return missing_columns | 80feccec6148a417b89fb84f4c412d9ea4d0dd37 | 708,011 |
def get_function_name(fcn):
"""Returns the fully-qualified function name for the given function.
Args:
fcn: a function
Returns:
the fully-qualified function name string, such as
"eta.core.utils.function_name"
"""
return fcn.__module__ + "." + fcn.__name__ | ae186415225bd5420de7f7b3aef98480d30d59f8 | 708,012 |
def clean_cases(text):
"""
Makes text all lowercase.
:param text: the text to be converted to all lowercase.
:type: str
:return: lowercase text
:type: str
"""
return text.lower() | 9b0c931336dbf762e5e3a18d103706ddf1e7c14f | 708,013 |
def gradient_of_rmse(y_hat, y, Xn):
"""
Returns the gradient of the Root Mean Square error with respect to the
parameters of the linear model that generated the prediction `y_hat'.
Hence, y_hat should have been generated by a linear process of the form
Xn.T.dot(theta)
    Args:
        y_hat (np.array of shape N,): The predictions of the linear model
        y (np.array of shape N,): The "ground-truth" values.
        Xn (np.array of shape (D, N)): The design matrix that produced y_hat.
    Returns:
        The gradient of the loss with respect to the model parameters.
"""
N = y.shape[0]
assert N > 0, ('At least one sample is required in order to compute the '
'RMSE loss')
losses = y - y_hat
gradient = - 2 * Xn.T.dot(losses) / N
return gradient | 73a46197f90cf1b9c0a90a8ce2d2eae006c6d002 | 708,016 |
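# Worked example (hypothetical data) for a linear model y_hat = Xn.T @ theta:
import numpy as np
Xn = np.array([[1.0, 1.0, 1.0],    # shape (D, N): a bias row ...
               [0.0, 1.0, 2.0]])   # ... and one feature row
theta = np.array([0.5, 1.0])
y = np.array([1.0, 2.0, 3.0])
grad = gradient_of_rmse(Xn.T.dot(theta), y, Xn)
# losses = [0.5, 0.5, 0.5], so grad = -2 * Xn @ losses / 3 = [-1., -1.]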
def align_down(x: int, align: int) -> int:
"""
Align integer down.
:return:
``y`` such that ``y % align == 0`` and ``y <= x`` and ``(x - y) < align``
"""
return x - (x % align) | 8144309badf601999f4c291ee3af5cfbd18397ea | 708,017 |
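# Example: align offsets down to an 8-byte boundary.
assert align_down(13, 8) == 8
assert align_down(16, 8) == 16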
import glob
import csv
def write_colocated_data_time_avg(coloc_data, fname):
"""
Writes the time averaged data of gates colocated with two radars
Parameters
----------
coloc_data : dict
dictionary containing the colocated data parameters
fname : str
file name where to store the data
Returns
-------
fname : str
        the name of the file where the data has been written
"""
    fieldnames = [
        'rad1_time', 'rad1_ray_ind', 'rad1_rng_ind', 'rad1_ele',
        'rad1_azi', 'rad1_rng', 'rad1_dBZavg', 'rad1_PhiDPavg',
        'rad1_Flagavg', 'rad2_time', 'rad2_ray_ind', 'rad2_rng_ind',
        'rad2_ele', 'rad2_azi', 'rad2_rng', 'rad2_dBZavg',
        'rad2_PhiDPavg', 'rad2_Flagavg']
    # write the comment block and header only when the file does not exist yet
    write_header = not glob.glob(fname)
    with open(fname, 'w' if write_header else 'a', newline='') as csvfile:
        if write_header:
            csvfile.write('# Colocated radar gates data file\n')
            csvfile.write('# Comment lines are preceded by "#"\n')
            csvfile.write('#\n')
        writer = csv.DictWriter(csvfile, fieldnames)
        if write_header:
            writer.writeheader()
        for i, rad1_time in enumerate(coloc_data['rad1_time']):
            writer.writerow({
                'rad1_time': rad1_time.strftime('%Y%m%d%H%M%S'),
                'rad1_ray_ind': coloc_data['rad1_ray_ind'][i],
                'rad1_rng_ind': coloc_data['rad1_rng_ind'][i],
                'rad1_ele': coloc_data['rad1_ele'][i],
                'rad1_azi': coloc_data['rad1_azi'][i],
                'rad1_rng': coloc_data['rad1_rng'][i],
                'rad1_dBZavg': coloc_data['rad1_dBZavg'][i],
                'rad1_PhiDPavg': coloc_data['rad1_PhiDPavg'][i],
                'rad1_Flagavg': coloc_data['rad1_Flagavg'][i],
                'rad2_time': (
                    coloc_data['rad2_time'][i].strftime('%Y%m%d%H%M%S')),
                'rad2_ray_ind': coloc_data['rad2_ray_ind'][i],
                'rad2_rng_ind': coloc_data['rad2_rng_ind'][i],
                'rad2_ele': coloc_data['rad2_ele'][i],
                'rad2_azi': coloc_data['rad2_azi'][i],
                'rad2_rng': coloc_data['rad2_rng'][i],
                'rad2_dBZavg': coloc_data['rad2_dBZavg'][i],
                'rad2_PhiDPavg': coloc_data['rad2_PhiDPavg'][i],
                'rad2_Flagavg': coloc_data['rad2_Flagavg'][i]})
return fname | 2e786c6df8a617f187a7b50467111785342310c5 | 708,019 |
def get_trail_max(self, rz_array=None):
"""
Return the position of the blob maximum. Either in pixel or in (R,Z) coordinates if rz_array
is passed.
"""
if (rz_array is None):
return self.xymax
# Remember xycom[:,1] is the radial (X) index which corresponds to R
return rz_array[self.xymax[:,0].astype('int'), self.xymax[:,1].astype('int'), :] | 5456c95ba4cb02352aa69398f9fa5307f3dc8e06 | 708,027 |
from typing import Tuple
from typing import List
def create_annotation(annotation_id: int, image_id: int, category_id: int, is_crowd: int, area: float,
                      bounding_box: Tuple[float, float, float, float], segmentation: List[Tuple[int, int]]) -> dict:
"""
Converts input data to COCO annotation information storing format.
    :param int annotation_id: unique identifier of the annotation
    :param int image_id: identifier of the related image
    :param int category_id: identifier of the related category (annotation class)
:param int is_crowd:
"iscrowd": 0 if your segmentation based on polygon (object instance)
"iscrowd": 1 if your segmentation based uncompressed RLE (crowd)
:param float area: area occupied by segmentation in pixels
:param Tuple[float, float, float, float] bounding_box:
coordinates of bbox in format (x,y,w,h)
:param list segmentation: polygon coordinates
:return: dict of the annotation information in COCO format
"""
return {
"id": annotation_id,
"image_id": image_id,
"category_id": category_id,
"iscrowd": is_crowd,
"area": area, # float
"bbox": bounding_box, # [x,y,width,height]
"segmentation": segmentation # [polygon]
} | 715a6204ed5dd9b081ac6e87541df3cd46d329a1 | 708,029 |
import zipfile
def zip_to_gdal_path(filepath):
"""
    Takes in a zip file path and, if the zip contains ASCII (.asc) files,
    prepends '/vsizip/' to each contained path so that GDAL can open them
    without extraction.
"""
zip_file_list = []
if zipfile.is_zipfile(filepath):
try:
zip_file = zipfile.ZipFile(filepath)
zip_file_contents = ['/vsizip/{0}/{1}'.format(filepath, zip_info_object.filename) for zip_info_object in zip_file.filelist if zip_info_object.filename.endswith('.asc')]
zip_file_list.extend(zip_file_contents)
zip_file.close()
except zipfile.BadZipfile:
pass
return zip_file_list | 9e9e44d6eb3022ebe982cc44284da76f56a4ddeb | 708,034 |
def is_eval_epoch(cfg, cur_epoch):
"""
Determine if the model should be evaluated at the current epoch.
Args:
cfg (CfgNode): configs. Details can be found in
sgs/config/defaults.py
cur_epoch (int): current epoch.
"""
return (
cur_epoch + 1
) % cfg.TRAIN.EVAL_PERIOD == 0 or cur_epoch + 1 == cfg.SOLVER.MAX_EPOCH | d8abb04409879b88bdfd32cf323bcbea037ae630 | 708,035 |
def bin2hexstring(bin_str):
"""
    Convert a binary string to a hexadecimal string (4 binary digits per hex digit).
    :param bin_str: binary string
    :return: hexadecimal string
"""
bin_len = len(bin_str)
left = 0
right = 4
re_str = hex(int(bin_str[left:right], 2))[2:]
for i in range(right, bin_len, 4):
left = right
right += 4
re_str += hex(int(bin_str[left:right], 2))[2:]
return re_str | 823ba4ef86ebcf7e30a29c3718768c6a654acad5 | 708,040 |
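# Example (assumes the input length is a multiple of 4 bits):
assert bin2hexstring('1010') == 'a'
assert bin2hexstring('11110000') == 'f0'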
def prepare_hex_string(number, base=10):
"""
Gets an int number, and returns the hex representation with even length padded to the left with zeroes
"""
int_number = int(number, base)
hex_number = format(int_number, 'X')
# Takes the string and pads to the left to make sure the number of characters is even
justify_hex_number = hex_number.rjust((len(hex_number) % 2) + len(hex_number), '0')
return justify_hex_number | e6efeca87d5f0a603c8fdb65fd7e2d07cc491766 | 708,046 |
def plotly_figure(figure, id: str):
"""
:param figure: plotly graph object or px figure
    :param id: unique id string of format 'id_xxx' with x representing a number
:return: html style string containing a plotly figure
"""
json_figure = figure.to_json()
html = """
<div id="""+id+"""></div>
<script>
var plotly_data = {}
Plotly.react("""+id+""", plotly_data.data, plotly_data.layout);
</script>
"""
local_text = html.format(json_figure)
return local_text | 949415c70d467c48ee3aa1f028c9e3539099febf | 708,047 |
def _add_resources_to_vault_obj(obj, data, columns):
"""Add associated resources to column and data tuples
"""
    for i, resource in enumerate(obj.resources):
        if resource.id:
            name = 'resource_id_' + str(i + 1)
            data += (resource.id,)
            columns = columns + (name,)
            name = 'resource_type_' + str(i + 1)
            data += (resource.type,)
            columns = columns + (name,)
return data, columns | 3a6dd7541ac853a7c62b638abf4d0eeb21bb6cb2 | 708,048 |
def classify_helmet_belt_worn(x):
"""
    This function returns a string representation of the int value of the field which specifies whether the
    person was wearing a seatbelt or a helmet. This specification is from the Road Crash Statistics Victoria, 2013 Edition
    document.
:param x: int value representing the classify helmet belt worn field
:return: string representation of the integer value
"""
if x == 1:
return 'Seatbelt Worn'
elif x == 2:
return 'Seatbelt Not Worn'
elif x == 3:
return 'Child Restraint Worn'
elif x == 4:
return 'Child Restraint Not Worn'
elif x == 5:
return 'Seatbelt/restraint Not fitted'
elif x == 6:
return 'Crash Helmet Worn'
elif x == 7:
return 'Crash Helmet Not Worn'
elif x == 8:
return 'Not Appropriate'
else:
return 'Not Known' | cba05be8d03c933e767a75400032d07e296e0ec3 | 708,049 |
import collections
def sort_dataset_by_len(dataset):
"""
returns a dict mapping length -> list of items of that length
    an OrderedDict is used so that the mapping is sorted from smallest to largest
"""
sorted_dataset = collections.OrderedDict()
lengths = sorted(list(set(len(x[1]) for x in dataset)))
for l in lengths:
sorted_dataset[l] = []
for item in dataset:
sorted_dataset[len(item[1])].append(item)
return sorted_dataset | 1e67da963c6d968fba39730cc33e100242fcafca | 708,050 |
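# Illustrative usage on a hypothetical dataset of (label, sequence) pairs:
dataset = [('a', [1, 2]), ('b', [1]), ('c', [3, 4]), ('d', [1, 2, 3])]
by_len = sort_dataset_by_len(dataset)
# keys appear in order 1, 2, 3; by_len[2] == [('a', [1, 2]), ('c', [3, 4])]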
import click
def optional_tools_or_packages_arg(multiple=False):
""" Decorate click method as optionally taking in the path to a tool
or directory of tools or a Conda package. If no such argument is given
the current working directory will be treated as a directory of tools.
"""
name = "paths" if multiple else "path"
nargs = -1 if multiple else 1
return click.argument(
name,
metavar="TARGET",
nargs=nargs,
) | 4a34da51b4a644df70c5ce3ea8afb8b86ae2281d | 708,052 |
def compare_files(file_name1, file_name2):
"""
Compare two files, line by line, for equality.
Arguments:
file_name1 (str or unicode): file name.
file_name2 (str or unicode): file name.
Returns:
bool: True if files are equal, False otherwise.
"""
    from itertools import zip_longest
    with open(file_name1) as file1, open(file_name2) as file2:
        # zip_longest pads the shorter file with None, so extra lines compare unequal
        for line1, line2 in zip_longest(file1, file2):
            if line1 != line2:
                return False
    return True | 3f77cf177ba60ddd121b95648379fff845d9877b | 708,055 |
import collections
def sort_dict(d, key=None, reverse=False):
"""
Sorts a dict by value.
Args:
d: Input dictionary
key: Function which takes an tuple (key, object) and returns a value to
compare and sort by. By default, the function compares the values
of the dict i.e. key = lambda t : t[1]
reverse: Allows to reverse sort order.
Returns:
OrderedDict object whose keys are ordered according to their value.
"""
kv_items = list(d.items())
# Sort kv_items according to key.
if key is None:
kv_items.sort(key=lambda t: t[1], reverse=reverse)
else:
kv_items.sort(key=key, reverse=reverse)
# Build ordered dict.
return collections.OrderedDict(kv_items) | 9ca904a5e0df3e3c50b29967adfe9061e778dfc9 | 708,058 |
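# Example: sort by value (default), then by key length, descending:
d = {'pear': 3, 'fig': 1, 'apple': 2}
assert list(sort_dict(d)) == ['fig', 'apple', 'pear']
assert list(sort_dict(d, key=lambda t: len(t[0]), reverse=True)) == ['apple', 'pear', 'fig']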
def getReviewRedirect(entity, params):
"""Returns the redirect to review the specified entity.
"""
return '/%s/review/%s' % (
params['url_name'], entity.key().id_or_name()) | 959ff6d0297ec54248ee725e93a79702512d00d7 | 708,059 |
def read_number(dtype, prompt='', floor=None, ceil=None, repeat=False):
""" Reads a number within specified bounds. """
while True:
try:
result = dtype(input(prompt))
if floor is not None and result < floor:
raise ValueError(f'Number must be no less than {floor}.')
if ceil is not None and result > ceil:
raise ValueError(f'Number must be no greater than {ceil}.')
except ValueError as e:
print(e)
result = None
if result is not None or not repeat:
return result | a528b1f5912ba4bab0b87c87004311778eaa8187 | 708,066 |
def isID(value):
"""Checks if value looks like a Ulysses ID; i.e. is 22 char long.
    Not an exact science, but good enough to prevent most mistakes.
"""
return len(value) == 22 | 527db9446adc2b88c2117bd35c74474c3e7bad24 | 708,070 |
def decrement(x):
"""Given a number x, returns x - 1 unless that would be less than
zero, in which case returns 0."""
x -= 1
if x < 0:
return 0
else:
return x | 56b95324c147a163d3bdd0e9f65782095b0a4def | 708,073 |
def dump_tuple(tup):
"""
Dump a tuple to a string of fg,bg,attr (optional)
"""
return ','.join(str(i) for i in tup) | ffa4838e2794da9d525b60f4606633f8940480bb | 708,078 |
import json
def json_formatter(result, verbose=False, indent=4, offset=0):
"""Format result as json."""
string = json.dumps(result, indent=indent)
string = string.replace("\n", "\n" + " "*offset)
return string | 512847722fa36eff408ac28d6e3dc8fde5c52af1 | 708,080 |
from typing import Any
from typing import Counter
def calc_proportion_identical(lst: Any) -> float:
"""
Returns a value between 0 and 1 for the uniformity of the values
in LST, i.e. higher if they're all the same.
"""
def count_most_common(lst):
"""
Find the most common item in LST, and count how many times it occurs.
"""
# Counter(['a', 'b', 'a']).most_common(2) -> [
# ('a', 2),
# ('b', 1),
# ]
# so this gives the count of the most common (in this case 2 occurrences of 'a')
return Counter(lst).most_common(1)[0][1]
most_common = count_most_common(lst)
if most_common == 1:
return 0
else:
return most_common / len(lst) | adf467eba11694c5ea4583d7b53029110e59e25a | 708,081 |
def _singleton(name):
"""Returns a singleton object which represents itself as `name` when printed,
but is only comparable (via identity) to itself."""
return type(name, (), {'__repr__': lambda self: name})() | b07003e1716115864bf1914d4b523b36d0f0471f | 708,084 |
import pickle
def fetch_pickle(filename):
"""
Fetches any variable saved into a picklefile with the given filename.
Parameters:
filename (str): filename of the pickle file
Returns:
variable (any pickle compatible type): variable that was saved into the picklefile.
"""
with open(filename, 'rb') as picklefile:
variable = pickle.load(picklefile)
return variable | 172c18520619d102b520658949d2464d5ecfb05c | 708,085 |
def format_stats(stats):
"""Format statistics for printing to a table"""
result = ''
for key, value in stats.items():
result += f'{key} - {value}\n'
return result[:-1] | 2d01b6c48b83f8e8810f4609183b39fad871f942 | 708,087 |
def timestamp2str(ts):
""" Converts Timestamp object to str containing date and time
"""
date = ts.date().strftime("%Y-%m-%d")
time = ts.time().strftime("%H:%M:%S")
return ' '.join([date, time]) | 0e847a8af0cbbacf18df911e3070ac7c70e504b7 | 708,088 |
def parse_test_config(doc):
""" Get the configuration element. """
test_config = doc.documentElement
if test_config.tagName != 'configuration':
raise RuntimeError('expected configuration tag at root')
return test_config | c61c2f4e43c5501c461bb92b63609162b2918860 | 708,093 |
import textwrap
def _get_control_vars(control_vars):
"""
Create the section of control variables
Parameters
----------
control_vars: str
Functions to define control variables.
Returns
-------
text: str
Control variables section and header of model variables section.
"""
text = textwrap.dedent("""
##########################################################################
# CONTROL VARIABLES #
##########################################################################
def _init_outer_references(data):
for key in data:
__data[key] = data[key]
def time():
return __data['time']()
""")
text += control_vars
text += textwrap.dedent("""
##########################################################################
# MODEL VARIABLES #
##########################################################################
""")
return text | 614a6ca5bc8ac7354f63bfceabaff4eb4b93208a | 708,094 |
def rgb2hex(rgb):
"""Converts an RGB 3-tuple to a hexadeximal color string.
EXAMPLE
-------
>>> rgb2hex((0,0,255))
'#0000FF'
"""
return ('#%02x%02x%02x' % tuple(rgb)).upper() | 4c3323e34fcd2c1b4402ebe5f433c5fd9320cce9 | 708,098 |
def create_table(p, table_name, schema):
"""Create a new Prism table.
Parameters
----------
p : Prism
Instantiated Prism class from prism.Prism()
table_name : str
The name of the table to obtain details about. If the default value
of None is specified, details regarding first 100 tables is returned.
schema : list
A list of dictionaries containing the schema
Returns
-------
If the request is successful, a dictionary containing information about
the table is returned.
"""
p.create_bearer_token()
table = p.create_table(table_name, schema=schema)
return table | 43c8c789d4e212d2d98d68f4f22e3f0fb0a97552 | 708,100 |
def bdev_nvme_add_error_injection(client, name, opc, cmd_type, do_not_submit, timeout_in_us,
err_count, sct, sc):
"""Add error injection
Args:
name: Name of the operating NVMe controller
opc: Opcode of the NVMe command
cmd_type: Type of NVMe command. Valid values are: admin, io
do_not_submit: Do not submit commands to the controller
timeout_in_us: Wait specified microseconds when do_not_submit is true
err_count: Number of matching NVMe commands to inject errors
sct: NVMe status code type
sc: NVMe status code
Returns:
True on success, RPC error otherwise
"""
params = {'name': name,
'opc': opc,
'cmd_type': cmd_type}
if do_not_submit:
params['do_not_submit'] = do_not_submit
if timeout_in_us:
params['timeout_in_us'] = timeout_in_us
if err_count:
params['err_count'] = err_count
if sct:
params['sct'] = sct
if sc:
params['sc'] = sc
return client.call('bdev_nvme_add_error_injection', params) | 3833256e71f47a49eef2643bf8c244308795a0b1 | 708,108 |
def prune_deg_one_nodes(sampled_graph):
""" prune out degree one nodes from graph """
deg_one_nodes = []
for v in sampled_graph.nodes():
if sampled_graph.degree(v) == 1:
deg_one_nodes.append(v)
for v in deg_one_nodes:
sampled_graph.remove_node(v)
return sampled_graph | c4df72a66c6fb57d5d42a1b877a846338f32f42a | 708,110 |
def run_sgd(model, epochs):
"""
Runs SGD for a predefined number of epochs and saves the resulting model.
"""
print("Training full network")
weights_rand_init = model.optimize(epochs=epochs)
# weights_rand_init = model.optimize(epochs=epochs, batch_size=55000, learning_rate=0.1)
print("Model optimized!!!")
return [model.get_model_weights(), weights_rand_init] | 14c6fd1ffa8aab3a783b5738093d69771d036411 | 708,111 |
def processed_transcript(df):
"""
    Cleans the Transcript table by splitting the value field and replacing NaN values, drops extra columns
PARAMETERS:
transcript dataframe
RETURNS:
Cleaned transcript dataframe
"""
    #expand the dictionary to columns (reward, amount, offer id) from the value field
df['offer_id'] = df['value'].apply(lambda x: x.get('offer_id'))
df['offer id'] = df['value'].apply(lambda x: x.get('offer id'))
df['reward'] = df['value'].apply(lambda x: x.get('reward'))
df['amount'] = df['value'].apply(lambda x: x.get('amount'))
#move 'offer id' values into 'offer_id'
df['offer_id'] = df.apply(lambda x : x['offer id'] if x['offer_id'] == None else x['offer_id'], axis=1)
#drop 'offer id' column
df.drop(['offer id' , 'value'] , axis=1, inplace=True)
#replace nan
df.fillna(0 , inplace=True)
return df | 452668d6d9616ca382f7968e0ac4dd52658be9f6 | 708,113 |
def matrixmult (A, B):
"""Matrix multiplication function
This function returns the product of a matrix multiplication given two matrices.
Let the dimension of the matrix A be: m by n,
let the dimension of the matrix B be: p by q,
multiplication will only possible if n = p,
thus creating a matrix of m by q size.
Parameters
----------
A : list
First matrix, in a 2D array format.
B : list
Second matrix, in a 2D array format.
Returns
-------
C : list
The product of the matrix multiplication.
Examples
--------
>>> from .pycgmStatic import matrixmult
>>> A = [[11,12,13],[14,15,16]]
>>> B = [[1,2],[3,4],[5,6]]
>>> matrixmult(A, B)
[[112, 148], [139, 184]]
"""
    C = [[0 for col in range(len(B[0]))] for row in range(len(A))]  # result has dimensions m by q
for i in range(len(A)):
for j in range(len(B[0])):
for k in range(len(B)):
C[i][j] += A[i][k]*B[k][j]
return C | 98065981c8047d927bacb07877dbf173ba379159 | 708,116 |
def next_code(value: int, mul: int = 252533, div: int = 33554393) -> int:
"""
Returns the value of the next code given the value of the current code
The first code is `20151125`.
After that, each code is generated by taking the previous one, multiplying it by `252533`,
and then keeping the remainder from dividing that value by `33554393`
"""
return (value * mul) % div | a9e5183e405574cc56a138a244f14de08ea68d00 | 708,117 |
def read_csv_to_lol(full_path, sep=";"):
"""
Read csv file into lists of list.
    Make sure to have an empty line at the bottom
"""
with open(full_path, 'r') as ff:
# read from CSV
data = ff.readlines()
# New line at the end of each line is removed
data = [i.replace("\n", "") for i in data]
# Creating lists of list
data = [i.split(sep) for i in data]
return data | e53c46c6a8eabaece788111530fbf859dd23133f | 708,118 |
import random
import string
def generate_random_string(N):
"""
Generate a random string
Parameters
-------------
N
length of the string
Returns
-------------
random_string
Random string
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N)) | 3e2e672140e18546260a0882fa6cf06073bdf8e7 | 708,121 |
import re
def extract_charm_name_from_url(charm_url):
"""Extract the charm name from the charm url.
E.g. Extract 'heat' from local:bionic/heat-12
:param charm_url: Name of model to query.
:type charm_url: str
:returns: Charm name
:rtype: str
"""
charm_name = re.sub(r'-[0-9]+$', '', charm_url.split('/')[-1])
return charm_name.split(':')[-1] | 9905d6b5c7a2f5047bc939d1b6e23d128ee8984d | 708,122 |
def class_name(service_name: str) -> str:
"""Map service name to .pyi class name."""
return f"Service_{service_name}" | b4bed8a677f9eedfcd66d6d37078075b0967ea20 | 708,123 |
def is_number(s):
"""
Check if it is a number.
Args:
s: The variable that needs to be checked.
Returns:
bool: True if float, False otherwise.
"""
try:
float(s)
return True
except ValueError:
return False | 071aeac26a5a907caf1764dc20d7de1c6408714b | 708,125 |
def _GetTombstoneData(device, tombstone_file):
"""Retrieve the tombstone data from the device
Args:
device: An instance of DeviceUtils.
tombstone_file: the tombstone to retrieve
Returns:
A list of lines
"""
return device.old_interface.GetProtectedFileContents(
'/data/tombstones/' + tombstone_file) | 99322ea3d67e150f4433c713159eb7bc8069271f | 708,127 |
import random
def giveHint(indexValue, myBoard):
"""Return a random matching card given the index of a card
and a game board"""
validMatches = []
card = myBoard[indexValue]
for c in myBoard:
if (card[0] == c[0]) and (myBoard.index(c) != indexValue):
validMatches.append(myBoard.index(c))
return random.choice(validMatches) | e578f40e7d7e2e17ddac53f9cfdc219e47c861cd | 708,129 |
def _parse_none(arg, fn=None):
"""Parse arguments with support for conversion to None.
Args:
arg (str): Argument to potentially convert.
fn (func): Function to apply to the argument if not converted to None.
Returns:
Any: Arguments that are "none" or "0" are converted to None;
otherwise, returns the original value.
"""
if arg.lower() in ("none", "0"):
return None
return arg if fn is None else fn(arg) | 4ebd283eb9e2218e523ba185c4500c9879d5719d | 708,135 |
def get_unique_tokens(texts):
"""
Returns a set of unique tokens.
>>> get_unique_tokens(['oeffentl', 'ist', 'oeffentl'])
{'oeffentl', 'ist'}
"""
unique_tokens = set()
for text in texts:
for token in text:
unique_tokens.add(token)
return unique_tokens | f9c174b264082b65a328fd9edf9421e7ff7808a2 | 708,140 |
def moved_in(nn_orig, nn_proj, i, k):
"""Determine points that are neighbours in the projection space,
but were not neighbours in the original space.
nn_orig
neighbourhood matrix for original data
nn_proj
neighbourhood matrix for projection data
i
index of the point considered
k
size of the neighbourhood considered
Return a list of indices for points which are 'moved in' to point i
"""
pp = list(nn_proj[i, 1:k + 1])
oo = list(nn_orig[i, 1:k + 1])
    # drop points that were already neighbours in the original space
    for j in oo:
        if j in pp:
            pp.remove(j)
return pp | b63a9b0f53554032fc920aeaf6d3d76b93dd8ab3 | 708,141 |
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
val: float or int
src: tuple
dst: tuple
example: print(scale(99, (0.0, 99.0), (-1.0, +1.0)))
"""
return (float(val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0] | 26cfaccaeea861ccecb36697838710c0ab706520 | 708,145 |
def generate_paths(data, path=''):
"""Iterate the json schema file and generate a list of all of the
XPath-like expression for each primitive value. An asterisk * represents
an array of items."""
paths = []
if isinstance(data, dict):
if len(data) == 0:
paths.append(f'{path}')
else:
for key, val in data.items():
if key == 'type':
if isinstance(val, list):
types = set(val)
else:
types = {val}
if types.isdisjoint({'object', 'array'}):
paths.append(f'{path}')
elif key == 'properties':
paths.extend(generate_paths(val, path))
else:
if key == 'items':
key = '*'
paths.extend(generate_paths(val, f'{path}/{key}'))
return paths | 367f244b44c254b077907ff8b219186bd820fccd | 708,155 |
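# Illustrative run on a tiny, hypothetical JSON schema:
schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "tags": {"type": "array", "items": {"type": "string"}},
    },
}
assert generate_paths(schema) == ['/name', '/tags/*']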
def make1d(u, v, num_cols=224):
"""Make a 2D image index linear.
"""
return (u * num_cols + v).astype("int") | 1f37c7ae06071ce641561eadc1d0a42a0b74508d | 708,158 |
import copy
def permutationwithparity(n):
"""Returns a list of all permutation of n integers, with its first element being the parity"""
if (n == 1):
result = [[1,1]]
return result
else:
result = permutationwithparity(n-1)
newresult = []
for shorterpermutation in result:
for position in range(1,n+1):
parity = shorterpermutation[0]
for swaps in range(n-position):
parity = - parity
newpermutation = copy.deepcopy(shorterpermutation)
newpermutation.insert(position,n)
newpermutation[0] = parity
newresult.append(newpermutation)
return newresult | 218b728c2118a8cca98c019dff036e0ae2593974 | 708,161 |
def to_list(obj):
"""List Converter
Takes any object and converts it to a `list`.
If the object is already a `list` it is just returned,
If the object is None an empty `list` is returned,
Else a `list` is created with the object as it's first element.
Args:
obj (any object): the object to be converted
Returns:
A list containing the given object
"""
if isinstance(obj, list):
return obj
elif isinstance(obj, tuple):
return list(obj)
elif obj is None:
return []
else:
return [obj, ] | 3ca373867ea3c30edcf7267bba69ef2ee3c7722e | 708,163 |
def train_test_split(df, frac):
"""
Create a Train/Test split function for a dataframe and return both
the Training and Testing sets.
Frac refers to the percent of data you would like to set aside
for training.
"""
frac = round(len(df)*frac)
train = df[:frac]
test = df[frac:]
return train, test | 8e233e017a261141f57f7b2bff9a527e275d2ed9 | 708,169 |
from typing import Dict
from typing import Any
from typing import Optional
def _add_extra_kwargs(
kwargs: Dict[str, Any], extra_kwargs: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Safely add additional keyword arguments to an existing dictionary
Parameters
----------
kwargs : dict
Keyword argument dictionary
extra_kwargs : dict, default None
Keyword argument dictionary to add
Returns
-------
dict
Keyword dictionary with added keyword arguments
Notes
-----
There is no checking for duplicate keys
"""
if extra_kwargs is None:
return kwargs
else:
kwargs_copy = kwargs.copy()
kwargs_copy.update(extra_kwargs)
return kwargs_copy | cfc4c17f608c0b7fe1ae3046dc220d385c890caa | 708,171 |
import torch
def exp2(input, *args, **kwargs):
"""
Computes the base two exponential function of ``input``.
Examples::
>>> import torch
>>> import treetensor.torch as ttorch
>>> ttorch.exp2(ttorch.tensor([-4.0, -1.0, 0, 2.0, 4.8, 8.0]))
tensor([6.2500e-02, 5.0000e-01, 1.0000e+00, 4.0000e+00, 2.7858e+01, 2.5600e+02])
>>> ttorch.exp2(ttorch.tensor({
... 'a': [-4.0, -1.0, 0, 2.0, 4.8, 8.0],
... 'b': {'x': [[-2.0, 1.2, 0.25],
... [16.0, 3.75, -2.34]]},
... }))
<Tensor 0x7ff90a4c3af0>
├── a --> tensor([6.2500e-02, 5.0000e-01, 1.0000e+00, 4.0000e+00, 2.7858e+01, 2.5600e+02])
└── b --> <Tensor 0x7ff90a4c3be0>
└── x --> tensor([[2.5000e-01, 2.2974e+00, 1.1892e+00],
[6.5536e+04, 1.3454e+01, 1.9751e-01]])
"""
return torch.exp2(input, *args, **kwargs) | 17cbc0917acf19932ec4d3a89de8d78545d02e31 | 708,179 |
import pprint
import json
def tryJsonOrPlain(text):
"""Return json formatted, if possible. Otherwise just return."""
try:
return pprint.pformat( json.loads( text ), indent=1 )
    except (ValueError, TypeError):
return text | 2431479abf6ab3c17ea63356ec740840d2d18a74 | 708,180 |
def Returns1(target_bitrate, result):
"""Score function that returns a constant value."""
# pylint: disable=W0613
return 1.0 | 727e58e0d6d596cf4833ca3ca1cbcec6b9eedced | 708,193 |
import re
def error_038_italic_tag(text):
"""Fix the error and return (new_text, replacements_count) tuple."""
backup = text
(text, count) = re.subn(r"<(i|em)>([^\n<>]+)</\1>", "''\\2''", text, flags=re.I)
if re.search(r"</?(?:i|em)>", text, flags=re.I):
return (backup, 0)
else:
return (text, count) | b0c2b571ade01cd483a3ffdc6f5c2bbb873cd13c | 708,199 |
def get_A2_const(alpha1, alpha2, lam_c, A1):
"""Function to compute the constant A2.
Args:
alpha1 (float): The alpha1 parameter of the WHSCM.
alpha2 (float): The alpha2 parameter of the WHSCM.
lam_c (float): The switching point between the
two exponents of the double power-laws
in the WHSCM.
A1 (float): The A1 constant of the WHSCM.
Returns:
A2 (float): The A2 constant of the WHSCM.
"""
A2 = A1 * (lam_c**(alpha2 - alpha1))
return A2 | 16fe12e9ef9d72cfe7250cf840e222512409d377 | 708,205 |
def unique_list(a_list, unique_func=None, replace=False):
"""Unique a list like object.
- collection: list like object
- unique_func: the filter functions to return a hashable sign for unique
- replace: the following replace the above with the same sign
Return the unique subcollection of collection.
Example:
data = [(1, 2), (2, 1), (2, 3), (1, 2)]
unique_func = lambda x: tuple(sorted(x))
unique(data) -> [(1, 2), (2, 1), (2, 3)]
unique(data, unique_func) -> [(1, 2), (2, 3)]
unique(data, unique_func, replace=True) -> [(2, 1), (2, 3)]
"""
unique_func = unique_func or (lambda x: x)
result = {}
for item in a_list:
hashable_sign = unique_func(item)
if hashable_sign not in result or replace:
result[hashable_sign] = item
return list(result.values()) | 8d7957a8dffc18b82e8a45129ba3634c28dd0d52 | 708,206 |
from typing import Sequence
from typing import Any
def find(sequence: Sequence, target_element: Any) -> int:
"""Find the index of the first occurrence of target_element in sequence.
Args:
sequence: A sequence which to search through
target_element: An element to search in the sequence
Returns:
The index of target_element's first occurrence, -1 if it was not found or the sequence is empty
"""
if not sequence:
return -1
try:
return sequence.index(target_element)
except ValueError:
return -1 | 20edfae45baafa218d8d7f37e0409e6f4868b75d | 708,209 |
def format_non_date(value):
"""Return non-date value as string."""
return_value = None
if value:
return_value = value
return return_value | 9a7a13d7d28a14f5e92920cfef7146f9259315ec | 708,214 |
import functools
import math
def gcd_multiple(*args) -> int:
"""Return greatest common divisor of integers in args"""
return functools.reduce(math.gcd, args) | c686b9495cd45ff047f091e31a79bedcd61f8842 | 708,215 |
from typing import Dict
def merge(source: Dict, destination: Dict) -> Dict:
"""
Deep merge two dictionaries
Parameters
----------
source: Dict[Any, Any]
Dictionary to merge from
destination: Dict[Any, Any]
Dictionary to merge to
Returns
-------
Dict[Any, Any]
New dictionary with fields in destination overwritten
with values from source
"""
new_dict = {**destination}
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = new_dict.get(key, {})
new_dict[key] = merge(value, node)
else:
new_dict[key] = value
return new_dict | 4ffba933fe1ea939ecaa9f16452b74a4b3859f40 | 708,218 |
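# Example: nested defaults overridden by user-supplied values:
defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"host": "prod.example.com"}, "debug": True}
assert merge(overrides, defaults) == {"db": {"host": "prod.example.com", "port": 5432}, "debug": True}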
import ast
def is_string_expr(expr: ast.AST) -> bool:
"""Check that the expression is a string literal."""
return (
isinstance(expr, ast.Expr)
and isinstance(expr.value, ast.Constant)
and isinstance(expr.value.value, str)
) | f61418b5671c5e11c1e90fce8d90c583659d40e3 | 708,220 |
import re
def get_raw_code(file_path):
"""
Removes empty lines, leading and trailing whitespaces, single and multi line comments
:param file_path: path to .java file
:return: list with raw code
"""
raw_code = []
multi_line_comment = False
with open(file_path, "r") as f:
for row in f:
# remove leading and trailing whitespaces
line = row.strip()
# remove '/* comments */'
line = re.sub(r'''
^ # start of string
/\* # "/*" string
.* # any character (except line break) zero or more times
\*/ # "*/" string
\s* # zero or many whitespaces
''', '', line, 0, re.VERBOSE)
# remove '//comments'
line = re.sub(r'''
^ # start of string
// # "//" string
.* # any character (except line break) zero or more times
$ # end of string
''', '', line, 0, re.VERBOSE)
# ignore empty lines
if line != '':
# skip multi-line comments (/*)
if re.search(r'''
^ # start of string
/\* # "/*" string
.* # any character (except line break) zero or more times
''', line, re.VERBOSE):
multi_line_comment = True
continue
# check if multi-line comment was closed (*/)
elif re.search(r'''
.* # any character (except line break) zero or more times
\*/ # "*/" string
$ # end of string
''', line, re.VERBOSE):
multi_line_comment = False
line = re.sub(r'''
.* # any character (except line break) zero or more times
\*/ # "*/" string
\s* # zero or many whitespaces
''', '', line, 0, re.VERBOSE)
if line == '':
continue
# add line if it's not multi-line comment
if not multi_line_comment:
raw_code.append(line)
return raw_code | 6654a0423f024eaea3067c557984c3aa5e9494da | 708,222 |
import re
def format_ipc_dimension(number: float, decimal_places: int = 2) -> str:
"""
Format a dimension (e.g. lead span or height) according to IPC rules.
"""
    formatted = '{:.{prec}f}'.format(number, prec=decimal_places)
stripped = re.sub(r'^0\.', '', formatted)
return stripped.replace('.', '') | 60001f99b5f107faba19c664f90ee2e9fb61fe68 | 708,224 |
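# Example: 0.82 mm -> '82', 1.05 mm -> '105' (leading '0.' stripped, dot removed):
assert format_ipc_dimension(0.82) == '82'
assert format_ipc_dimension(1.05) == '105'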
def num_in_row(board, row, num):
"""True if num is already in the row, False otherwise"""
return num in board[row] | ca9ab9de4514740e25e0c55f3613d03b2844cdb8 | 708,225 |
def mod(a1, a2):
"""
Function to give the remainder
"""
return a1 % a2 | f5c03a952aed373e43933bafe37dbc75e796b74d | 708,227 |
def encode_string(s):
"""
Simple utility function to make sure a string is proper
to be used in a SQL query
EXAMPLE:
That's my boy! -> N'That''s my boy!'
"""
res = "N'"+s.replace("'","''")+"'"
res = res.replace("\\''","''")
res = res.replace("\''","''")
return res | 814822b9aa15def24f98b2b280ab899a3f7ea617 | 708,228 |
def get_motif_proteins(meme_db_file):
""" Hash motif_id's to protein names using the MEME DB file """
motif_protein = {}
for line in open(meme_db_file):
a = line.split()
if len(a) > 0 and a[0] == 'MOTIF':
if a[2][0] == '(':
motif_protein[a[1]] = a[2][1:a[2].find(')')]
else:
motif_protein[a[1]] = a[2]
return motif_protein | 88e42b84314593a965e7dd681ded612914e35629 | 708,231 |
def get_book_url(tool_name, category):
"""Get the link to the help documentation of the tool.
Args:
tool_name (str): The name of the tool.
category (str): The category of the tool.
Returns:
str: The URL to help documentation.
"""
prefix = "https://jblindsay.github.io/wbt_book/available_tools"
url = "{}/{}.html#{}".format(prefix, category, tool_name)
return url | daf6c8e0832295914a03b002b548a82e2949612a | 708,237 |
import hashlib
def game_hash(s):
"""Generate hash-based identifier for a game account based on the
text of the game.
"""
def int_to_base(n):
alphabet = "BCDFGHJKLMNPQRSTVWXYZ"
base = len(alphabet)
if n < base:
return alphabet[n]
return int_to_base(n // base) + alphabet[n % base]
return int_to_base(
int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16)
)[-7:] | c218a2607390916117921fe0f68fc23fedd51fc3 | 708,238 |
def determine_issues(project):
"""
Get the list of issues of a project.
:rtype: list
"""
issues = project["Issue"]
if not isinstance(issues, list):
return [issues]
return issues | 7b8b670e4ad5a7ae49f3541c87026dd603406c9f | 708,245 |
def genus_species_name(genus, species):
"""Return name, genus with species if present.
Copes with species being None (or empty string).
"""
# This is a simple function, centralising it for consistency
assert genus and genus == genus.strip(), repr(genus)
if species:
assert species == species.strip(), repr(species)
return f"{genus} {species}"
else:
return genus | 1fed57c5c87dfd9362262a69429830c7103b7fca | 708,247 |
def N(u,i,p,knots):
"""
u: point for which a spline should be evaluated
i: spline knot
p: spline order
knots: all knots
Evaluates the spline basis of order p defined by knots
at knot i and point u.
"""
if p == 0:
if knots[int(i)] < u and u <=knots[int(i+1)]:
return 1.0
else:
return 0.0
else:
try:
k = ((float((u-knots[int(i)])) / float((knots[int(i+p)] - knots[int(i)]) ))
* N(u,i,p-1,knots))
except ZeroDivisionError:
k = 0.0
try:
q = ((float((knots[int(i+p+1)] - u)) / float((knots[int(i+p+1)] - knots[int(i+1)])))
* N(u,i+1,p-1,knots))
except ZeroDivisionError:
q = 0.0
return float(k + q) | 0cd0756d558ee99b0ed32350860bc27f023fa88b | 708,248 |
def is_sim_f(ts_kname):
""" Returns True if the TSDist is actually a similarity and not a distance
"""
return ts_kname in ('linear_allpairs',
'linear_crosscor',
'cross_correlation',
'hsdotprod_autocor_truncated',
'hsdotprod_autocor_cyclic') | 11c18983d8d411714ba3147d4734ad77c40ceedf | 708,253 |
def modify_color(hsbk, **kwargs):
"""
Helper function to make new colors from an existing color by modifying it.
:param hsbk: The base color
:param hue: The new Hue value (optional)
:param saturation: The new Saturation value (optional)
:param brightness: The new Brightness value (optional)
:param kelvin: The new Kelvin value (optional)
"""
return hsbk._replace(**kwargs) | ecc5118873aaf0e4f63bad512ea61d2eae0f7ead | 708,258 |
def get_diff_list(small_list, big_list):
"""
Get the difference set of the two list.
:param small_list: The small data list.
:param big_list: The bigger data list.
:return: diff_list: The difference set list of the two list.
"""
    # elements that are in big_list but not in small_list
diff_list = list(set(big_list).difference(set(small_list)))
return diff_list | f92d20e6edd1f11ca6436a3ada4a6ba71da37457 | 708,267 |
def get_common_count(list1, list2):
"""
Get count of common between two lists
:param list1: list
:param list2: list
:return: number
"""
return len(list(set(list1).intersection(list2))) | c149b49e36e81237b775b0de0f19153b5bcf2f99 | 708,275 |
def parse_mimetype(mimetype):
"""Parses a MIME type into its components.
:param str mimetype: MIME type
:returns: 4 element tuple for MIME type, subtype, suffix and parameters
:rtype: tuple
Example:
>>> parse_mimetype('text/html; charset=utf-8')
('text', 'html', '', {'charset': 'utf-8'})
"""
if not mimetype:
return '', '', '', {}
parts = mimetype.split(';')
params = []
for item in parts[1:]:
if not item:
continue
key, value = item.split('=', 1) if '=' in item else (item, '')
params.append((key.lower().strip(), value.strip(' "')))
params = dict(params)
fulltype = parts[0].strip().lower()
if fulltype == '*':
fulltype = '*/*'
mtype, stype = fulltype.split('/', 1) \
if '/' in fulltype else (fulltype, '')
stype, suffix = stype.split('+', 1) if '+' in stype else (stype, '')
return mtype, stype, suffix, params | a9abfde73528e6f76cca633efe3d4c881dccef82 | 708,282 |
import operator
def calculate_seat_district(district_deputy_number, parties, votes):
"""
Calculate seats for each party in list of parties for a district
Params:
- district_deputy_number: the number of seats for this district
- parties: list of parties
- votes: list of votes for each party in this district
Assume that parties and votes parameters have the same size
Return:
- A tuple represents number of seats for each party. This tuple has same size with parameter 'parties'
"""
party_count = len(parties)
# Each party has been initially allocated 0 seat
# Initialize a list with initial value is 0
# For example, if party_count = 5
# seats will be seats = [0, 0, 0, 0, 0]
seats = [0] * party_count
# N value for each party
# N= V/(s + 1)
# Init N as a copy of votes list
N = votes[:]
while sum(seats) < district_deputy_number:
# Get the maximum value in list of N value and the index of that maximum value
# Note: this below line uses the Python's builtin operator
max_index, max_value = max(enumerate(N), key=operator.itemgetter(1))
# Update the seats list
# increase the seat of the party that has maximum by 1
seats[max_index] += 1
        # Update the largest N with its new value
        # using the formula: N = V / (s + 1)
N[max_index] = votes[max_index] / (seats[max_index] + 1)
# return as tuple
# Note: It can be returned as list, however, the tuple is better because it's immutable
return tuple(seats) | 035a167c623d14857dcefe01e4304523959857a6 | 708,287 |
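# Worked example of this highest-averages (D'Hondt-style) allocation, with hypothetical votes:
assert calculate_seat_district(5, ('A', 'B', 'C'), [100, 80, 30]) == (3, 2, 0)
# quotients claimed in order: 100, 80, 50, 40, 33.3 -> A gets 3 seats, B gets 2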
def get_clean_url(url):
""" Get a url without the language part, if i18n urls are defined
:param url: a string with the url to clean
:return: a string with the cleaned url
"""
url = url.strip('/')
url = '/' if not url else url
return '/'.join(url.split('/')[1:]) | 9e5d396086d6cc5169c26f6d1645dafd23a3b8d7 | 708,288 |
from datetime import datetime
def cid_to_date(cid):
"""Converts a cid to date string YYYY-MM-DD
Parameters
----------
cid : int
A cid as it is generated by the function ``utils.create_cid()``
Returns
-------
str
A string formated date (e.g. YYYY-MM-DD, 2018-10-01)
"""
return datetime.utcfromtimestamp(
cid/10000000.0
).strftime("%Y-%m-%d") | ab919f9cfd5c56f6fb6b65cbae8731687fc42faf | 708,289 |
def get_num_channels(inputs):
""" Get number of channels in one tensor. """
return inputs.shape[1] | 6fb42e60714dc81f03b29ad87b73b41027056472 | 708,295 |
import math
def round_vzeros(v,d=10) :
"""Returns input vector with rounded to zero components
which precision less than requested number of digits.
"""
prec = pow(10,-d)
vx = v[0] if math.fabs(v[0]) > prec else 0.0
vy = v[1] if math.fabs(v[1]) > prec else 0.0
vz = v[2] if math.fabs(v[2]) > prec else 0.0
return vx,vy,vz | aa16175bf1176383ef255460767502104be2566e | 708,296 |
def get_specific_label_dfs(raw_df, label_loc):
"""
Purpose: Split the instances of data in raw_df based on specific labels/classes
and load them to a dictionary structured -> label : Pandas Dataframe
Params: 1. raw_df (Pandas Dataframe):
- The df containing data
2. label_loc (String):
- The location where the output labels are stored in 1. raw_df
Returns: A dictionary structured -> label : Pandas Dataframe
"""
labels = list(raw_df[label_loc].unique())
# a list of dataframes storing only instances of data belonging to one specific class/label
label_dataframes = {}
for label in labels:
label_dataframes[label] = raw_df.loc[raw_df[label_loc] == label]
return label_dataframes | 756f03f845da64f6fd5534fb786966edb8610a13 | 708,298 |
def user_dss_clients(dss_clients, dss_target):
"""
Fixture that narrows down the dss clients to only the ones that are relevant considering the curent DSS target.
Args:
        dss_clients (fixture): All the instantiated dss clients for each user and dss target
        dss_target (fixture): The considered DSS target for the test to be executed
Returns:
A dict of dss client instances for the current DSS target and each of its specified users.
"""
return dss_clients[dss_target] | 7d418b49b68d7349a089046837f3c8351c0dcc67 | 708,306 |