content (string, 35–416k chars) | sha1 (string, 40 chars) | id (int64, 0–710k) |
---|---|---|
def _filter_out_disabled(d):
"""
Helper to remove Nones (actually any false-like type) from the scrubbers.
This is needed so we can disable global scrubbers on a per-model basis.
"""
return {k: v for k, v in d.items() if v} | 64a4577d6e5998e647ef82f126c50360388aba9a | 704,219 |
def points(start, end):
"""
Bresenham's Line Drawing Algorithm in 2D
"""
l = []
x0, y0 = start
x1, y1 = end
dx = abs(x1 - x0)
dy = abs(y1 - y0)
if x0 < x1:
sx = 1
else:
sx = -1
if y0 < y1:
sy = 1
else:
sy = -1
err = dx - dy
while True:
l.append((x0, y0))
if x0 == x1 and y0 == y1:
break
e2 = 2 * err
if e2 > -dy:
# take a step in the x direction
err = err - dy
x0 = x0 + sx
if e2 < dx:
# take a step in the y direction
err = err + dx
y0 = y0 + sy
return l | ffa8be5eb09e2b454242e4095883bfee239e5319 | 704,222 |
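A quick usage sketch (assuming the `points` function above is in scope):
points((0, 0), (3, 2))  # -> [(0, 0), (1, 1), (2, 1), (3, 2)]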
def ab_from_mv(m, v):
"""
estimate beta parameters (a,b) from given mean and variance;
return (a,b).
Note, for uniform distribution on [0,1], (m,v)=(0.5,1/12)
"""
phi = m*(1-m)/v - 1 # phi = 2 for the uniform distribution
return (phi*m, phi*(1-m)) | 0326c165e44c1ab9df091e0344f12b9fab8c0e19 | 704,223 |
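A worked sanity check (assuming `ab_from_mv` above is in scope) using the uniform-distribution moments noted in the docstring:
ab_from_mv(0.5, 1/12)  # -> (1.0, 1.0) up to floating-point rounding; Beta(1, 1) is uniform on [0, 1]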
def fds_remove_crc_gaps(rom):
"""Remove each block's CRC padding so it can be played by FDS
https://wiki.nesdev.org/w/index.php/FDS_disk_format
"""
offset = 0x0
def get_block(size, crc_gap=2):
nonlocal offset
block = rom[offset : offset + size]
offset += size + crc_gap
return block
disk_info_block = get_block(0x38)
file_amount_block = get_block(0x2)
assert file_amount_block[0] == 0x02
n_files = file_amount_block[1]
blocks = [disk_info_block, file_amount_block]
for i in range(n_files):
file_header_block = get_block(0x10)
assert file_header_block[0] == 3
blocks.append(file_header_block)
file_size = int.from_bytes(file_header_block[13 : 13 + 2], "little")
file_data_block = get_block(file_size + 1)
blocks.append(file_data_block)
out = b"".join(blocks)
# Zero pad to be 65500 bytes long
padding = b"\x00" * (65500 - len(out))
out += padding
return out | 935ecb4ac01c1256ec074f6888704fdd1db63ea4 | 704,224 |
def truncatewords(base, length, ellipsis="..."):
"""Truncate a string by words"""
# note: split() collapses runs of whitespace,
baselist = base.split()
lenbase = len(baselist)
if length >= lenbase:
return base
# so the original spacing is not preserved when rejoining
return " ".join(baselist[:length]) + ellipsis | f6472c7511e7e9abf03d4da3ed10c94ef070f78a | 704,225 |
import random
def random_permutation(iterable, r=None):
"""Random selection from itertools.permutations(iterable, r)"""
pool = tuple(iterable)
if r is None:
r = len(pool)
return list(random.sample(pool, r)) | 09e9f22def2c1125bf0ffc50db73659eaac65105 | 704,226 |
def get_hms(t_sec):
"""Converts time in seconds to hours, minutes, and seconds.
:param t_sec: time in seconds
:return: time in hours, minutes, and seconds
:rtype: tuple
"""
h = t_sec//3600
m = (t_sec - h*3600)//60
s = t_sec%60
return h,m,s | f873ea04905ebcc5b41a394a4dd880a566623c83 | 704,227 |
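A usage sketch (hypothetical value):
get_hms(3725)  # -> (1, 2, 5): 1 hour, 2 minutes, 5 seconds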
def current_velocity(x_new, x_prev, h):
""" returns current velocity of a particle from next
position at timestep. """
"""
parameters
----------
x_new : array
new x-position of particle
x_prev : array
previous x-position of particle
h : float
simulation timestep
"""
vel = (x_new - x_prev) / 2*h
return vel | 33d47f901be44fed20613957459aca1eecd5ea2c | 704,229 |
import csv
def csvReadCallback(inputFile, **kw):
"""Read callback for CSV data"""
inputFile.readline() # skip header
reader = csv.reader(inputFile, lineterminator='\n', **kw)
return [row for row in reader] | e36c92e5792e905da22438a58c8ce810c2a22e2a | 704,230 |
def load_word():
""" return the sql and values of the insert queuery."""
sql = """
INSERT INTO Spanglish_Test.Word
(
`word`, `language_id`, `category_id`
)
VALUES (%s, %s, %s)
"""
values = [
(
'Ir', 2, 1
),
(
'Lunes', 2, 2
),
(
'Hola', 2, 3
),
(
'Ver', 2, 1
),
(
'Comer', 2, 1
),
(
'Saber', 2, 1
),
]
return {
'sql': sql,
'values': values
} | f0d836f5ca912865f9d75a5e6c10c13cf554674b | 704,231 |
import torch
def reconstruct_from_patches_2d(patches, img_shape, step=[1.0,1.0], batch_first=False):
"""Given patches generated from extract_patches_2d function, creates the original unpatched image. We keep track of the
overlapped regions and average them in the end.
Parameters:
patches - Patches from a larger image
img_shape - Shape of the original image (2d)
step - step size along the x and y directions
batch_first - a flag to indicate whether the batch dimension comes first in the patches tensor
Returns:
A Tensor (Torch.Tensor) which contains a reconstructed image from the patches.
"""
if(batch_first):
patches = patches.permute(1,0,2,3,4)
patch_H, patch_W = patches.size(3), patches.size(4)
img_size = (patches.size(1), patches.size(2),max(img_shape[0], patch_H), max(img_shape[1], patch_W))
step_int = [0,0]
step_int[0] = int(patch_H*step[0]) if(isinstance(step[0], float)) else step[0]
step_int[1] = int(patch_W*step[1]) if(isinstance(step[1], float)) else step[1]
nrow, ncol = 1 + (img_size[-2] - patch_H)//step_int[0], 1 + (img_size[-1] - patch_W)//step_int[1]
r_nrow = nrow + 1 if((img_size[2] - patch_H) % step_int[0] != 0) else nrow
r_ncol = ncol + 1 if((img_size[3] - patch_W) % step_int[1] != 0) else ncol
patches = patches.reshape(r_nrow,r_ncol,img_size[0],img_size[1],patch_H,patch_W)
img = torch.zeros(img_size, device = patches.device)
overlap_counter = torch.zeros(img_size, device = patches.device)
for i in range(nrow):
for j in range(ncol):
img[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += patches[i,j,]
overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += 1
if((img_size[2] - patch_H) % step_int[0] != 0):
for j in range(ncol):
img[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += patches[-1,j,]
overlap_counter[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += 1
if((img_size[3] - patch_W) % step_int[1] != 0):
for i in range(nrow):
img[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += patches[i,-1,]
overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += 1
if((img_size[2] - patch_H) % step_int[0] != 0 and (img_size[3] - patch_W) % step_int[1] != 0):
img[:,:,-patch_H:,-patch_W:] += patches[-1,-1,]
overlap_counter[:,:,-patch_H:,-patch_W:] += 1
img /= overlap_counter
if(img_shape[0]<patch_H):
num_padded_H_Top = (patch_H - img_shape[0])//2
num_padded_H_Bottom = patch_H - img_shape[0] - num_padded_H_Top
img = img[:,:,num_padded_H_Top:-num_padded_H_Bottom,]
if(img_shape[1]<patch_W):
num_padded_W_Left = (patch_W - img_shape[1])//2
num_padded_W_Right = patch_W - img_shape[1] - num_padded_W_Left
img = img[:,:,:,num_padded_W_Left:-num_padded_W_Right]
return img | 35b03ee61dd5a2749601e839d7ade75d1b686383 | 704,232 |
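A minimal shape check (hypothetical tensor; assumes the patches are laid out row-major, as a matching extract_patches_2d would produce):
import torch
patches = torch.randn(1, 4, 1, 2, 2)  # (batch, n_patches, channels, 2, 2)
img = reconstruct_from_patches_2d(patches, (4, 4), step=[1.0, 1.0], batch_first=True)
assert img.shape == (1, 1, 4, 4)  # four non-overlapping 2x2 patches tile a 4x4 image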
def _LJ_rminepsilon_to_ab(coeffs):
"""
Convert rmin/epsilon representation to AB representation of the LJ
potential
"""
A = coeffs['epsilon'] * coeffs['Rmin']**12.0
B = 2 * coeffs['epsilon'] * coeffs['Rmin']**6.0
return {"A": A, "B": B} | 0963c0e8b949d35842660a499ce80a388485773f | 704,233 |
def calculate_SI(aggregated_df):
""" calculates suspicion of infection as per Sepsis-3 on aggregated hourly dataframe and saves it under the column `suspicion_of_infection`.
Note:
aggregated_df must contain `antibiotics` and `microbio-sample` columns.
"""
df = aggregated_df[['hadm_id', 'hour', 'antibiotics', 'microbio-sample']].copy() # reduce data, speeds up computation
df['antibiotics'] = df['antibiotics'].fillna(0)
def _fix_columns(antibiotics_window_df):
"""Fixes resulting columns/index from GroupBy.rolling so that there are just hadm_id, hour, and antibiotics cols"""
if 'hadm_id' in antibiotics_window_df.index.names and 'hadm_id' in df.columns:
antibiotics_window_df.drop(columns='hadm_id', inplace=True)
if 'hour' in antibiotics_window_df.index.names and 'hour' in df.columns:
antibiotics_window_df.drop(columns='hour', inplace=True)
antibiotics_window_df = antibiotics_window_df.reset_index()[['hadm_id', 'hour', 'antibiotics']]
return antibiotics_window_df
antibiotics_last_24h = df.groupby('hadm_id').rolling(on='hour', window=24, min_periods=1).antibiotics.sum()
antibiotics_last_24h = _fix_columns(antibiotics_last_24h)
antibiotics_last_24h = antibiotics_last_24h.rename(columns={'antibiotics': 'antibiotics_last_24h'})
antibiotics_next_72h = df[::-1].groupby('hadm_id').rolling(on='hour', window=72, min_periods=1).antibiotics.sum()[::-1]
antibiotics_next_72h = _fix_columns(antibiotics_next_72h)
antibiotics_next_72h = antibiotics_next_72h.rename(columns={'antibiotics': 'antibiotics_next_72h'})
df = df.merge(antibiotics_last_24h, on=['hadm_id', 'hour'])
df = df.merge(antibiotics_next_72h, on=['hadm_id', 'hour'])
microbio_sample = df['microbio-sample'] == 1
suspicion_of_infection = microbio_sample & (df['antibiotics_last_24h'] > 0)
suspicion_of_infection |= microbio_sample & (df['antibiotics_next_72h'] > 0)
aggregated_df['suspicion_of_infection'] = suspicion_of_infection
return aggregated_df | 88f0fb6285c3fc2826168f01416e1e825b2ed4cc | 704,235 |
def _get_reduce_batch_axis(axis, x_dim, x_ndim):
"""get batch_axis for reduce* operation."""
if not isinstance(axis, tuple):
axis = (axis,)
batch_axis = ()
if axis:
for index in axis:
if index < x_dim:
batch_axis = batch_axis + (index,)
else:
batch_axis = batch_axis + (index + 1,)
else:
batch_axis_list = [index for index in range(x_ndim)]
del batch_axis_list[x_dim]
batch_axis = tuple(batch_axis_list)
return batch_axis | b2a41f5e03c0388c70d2690793329d922f2d3248 | 704,236 |
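A usage sketch (hypothetical arguments):
_get_reduce_batch_axis((0, 2), x_dim=1, x_ndim=3)  # -> (0, 3): indices at or past x_dim shift by one
_get_reduce_batch_axis((), x_dim=1, x_ndim=3)      # -> (0, 2): every axis except the batch dim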
def evalrawexp(context, mapping, arg):
"""Evaluate given argument as a bare template object which may require
further processing (such as folding generator of strings)"""
func, data = arg
return func(context, mapping, data) | dc443da540bef0fe1198b12c0205921f0de66b2e | 704,237 |
import os
def get_file_list(path):
"""
Recursively collect all files under a folder and return them as a list.
:param path: directory to walk
:return: list of file names; subdirectories appear as {name: nested list}
"""
file_paths = []
get_dir = os.listdir(path)
for dir in get_dir:
tmp_path = os.path.join(path,dir)
if os.path.isdir(tmp_path):
file_paths.append({str(dir):get_file_list(tmp_path)})
else:
file_paths.append(dir)
return file_paths | 47f183479fb9304d33677fc811509f1801fa0130 | 704,238 |
def find_stab(state, xs, zs):
"""
Find a stabilizer in the stabilizer group.
Args:
state: stabilizer state providing `stabs` and `destabs`.
xs: qubits carrying an X component of the candidate stabilizer.
zs: qubits carrying a Z component of the candidate stabilizer.
Returns:
(found, antidestabs): whether the stabilizer is in the group, and the
destabilizer generators that anticommute with it.
"""
stabs = state.stabs
destabs = state.destabs
# Find the destabilizer generators that anticommute with the stabilizer indicated by xs and zs.
# First the destabilizer generators that could *possibly* anticommute:
possible_antidestabs = set([])
for q in xs:
possible_antidestabs.update(destabs.col_z[q])
for q in zs:
possible_antidestabs.update(destabs.col_x[q])
# Now we will confirm if they anticommute or not.
antidestabs = set([])
for d in possible_antidestabs:
if (len(xs & destabs.row_z[d]) + len(zs & destabs.row_x[d])) % 2 == 1:
# They anticommute an odd number of times.
antidestabs.add(d)
# Now we will confirm that the supplied stabilizer is actually in the stabilizer group.
confirm_xs = set([])
confirm_zs = set([])
for d in antidestabs:
confirm_xs ^= stabs.row_x[d]
confirm_zs ^= stabs.row_z[d]
found = confirm_xs == xs and confirm_zs == zs
return found, antidestabs | 07987377192cb4a4fa8cc35bd13d7566a838778c | 704,239 |
def CycleTarget_to_c(self):
"""Syntax for a target of a cycle."""
return f"cycle_{self.targetID}: continue;" | 12cc7a57e5a24a62aba43ac99879d5a5d364ee29 | 704,240 |
def disable_layer_logging():
"""
Disable the shape logging for all layers from this moment on. Can be
useful when creating multiple towers.
"""
class ContainEverything:
def __contains__(self, x):
return True
# _LAYER_LOGGED lives at module scope, so it must be replaced via globals()
globals()['_LAYER_LOGGED'] = ContainEverything() | e05785f1ade46903c2e66efc35d4fc5f0e9d4fbd | 704,241 |
def _remap_keypoints(keypoints, padded_w, padded_h, expand, data_shape, ratio):
"""
Remap keypoints given as (x, y) pairs back into the input image space
Parameters
----------
keypoints
padded_w
padded_h
expand
data_shape
ratio
Returns
-------
"""
keypoints[:, 0::2] *= padded_w / (data_shape * ratio)
keypoints[:, 1::2] *= padded_h / data_shape
keypoints[:, 0::2] -= expand[0]
keypoints[:, 1::2] -= expand[1]
return keypoints | 1b8d2520f0df1847967e8db9c565598d6b5ee2b6 | 704,242 |
def get_pipeline_lines(input_pipeline):
"""Returns a list with the lines in the .cppipe file"""
with open(input_pipeline) as f:
lines = f.readlines()
return lines | 403e7531b1cadfe25f519d2b176b97ac344cde6b | 704,243 |
def maxmin(*args):
"""
Returns timed ((t,max),(t,min)) values from a (t,v) dataset
When used to filter an array the window will have to be doubled to
allocate both values (or just keep the one with max absdiff from previous).
"""
data = args[0]
t = sorted((v,t) for t,v in data)
mn,mx = (t[0][1],t[0][0]),(t[-1][1],t[-1][0])
return sorted((mx,mn)) | 8f76ee029d04a4a35688054512d0a0212adbfede | 704,244 |
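A usage sketch (hypothetical (t, v) samples):
maxmin([(0, 2.0), (1, -1.0), (2, 5.0)])  # -> [(1, -1.0), (2, 5.0)], the min and max ordered by time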
def from_none(exc):
"""raise from_none(ValueError('a')) == raise ValueError('a') from None"""
exc.__cause__ = None
exc.__suppress_context__ = True
return exc | 86e45ba2df0020c85f13b85d8a98ee693422e922 | 704,245 |
def get_interface_type(interface):
"""Gets the type of interface
"""
if interface.upper().startswith("ET"):
return "ethernet"
elif interface.upper().startswith("VL"):
return "svi"
elif interface.upper().startswith("LO"):
return "loopback"
elif interface.upper().startswith("MG"):
return "management"
elif interface.upper().startswith("MA"):
return "management"
elif interface.upper().startswith("PO"):
return "portchannel"
elif interface.upper().startswith("NV"):
return "nve"
else:
return "unknown" | f770a3ef1c43574d22630a5c4fff2f25d4975279 | 704,246 |
def rgb2gray(image):
"""Convert 3-channel RGB image into grayscale"""
if image.ndim == 3:
return (0.299 * image[:, :, 0] + 0.587 * image[:, :, 1] +
0.114 * image[:, :, 2])
elif image.ndim == 4:
return (0.299 * image[:, :, :, 0] + 0.587 * image[:, :, :, 1] +
0.114 * image[:, :, :, 2]) | f87ed301dfd9c13ebfbabf99ad4b56c959a91e46 | 704,247 |
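A usage sketch (hypothetical image; the weights sum to 1.0, so an all-ones RGB image stays all-ones):
import numpy as np
gray = rgb2gray(np.ones((4, 4, 3)))  # gray.shape == (4, 4), every pixel 1.0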
def create_variable(workflow_stat):
"""
Generates the javascript variables used to generate the chart.
@param workflow_stat the WorkflowInfo object reference
"""
number_of_jobs = workflow_stat.total_job_instances
# Adding variables
var_str = "<script type='text/javascript'>\nvar initMaxX = " + str(workflow_stat.workflow_run_time) + ";\n"
var_str +="var bar_spacing = 20;\n\
var inner_bar_margin = 4;\n\
var line_width =2;\n\
var inner_bar_width = bar_spacing-2*inner_bar_margin;\n\
var nameMargin = 400;\n\
var scaleMargin = 15;\n"
var_str += "var initMaxY = "+str(number_of_jobs) + "*bar_spacing;\n"
color_name_str = "var color =['darkblue','yellow','orange' ,'steelblue', 'purple'"
desc_name_str = "var desc=['pre script','condor job','resource delay', 'job runtime as seen by dagman','post script '"
for k,v in workflow_stat.transformation_color_map.items():
if k in workflow_stat.transformation_statistics_dict:
color_name_str += ",'"+v +"'"
desc_name_str +=",'"+k +"'"
color_name_str += "];\n"
desc_name_str +="];\n"
var_str += color_name_str
var_str += desc_name_str
if number_of_jobs < 5:
var_str +="var h = " +str(number_of_jobs) +"*bar_spacing*2 + scaleMargin + bar_spacing;\n"
else:
var_str +="var h = 840;\n"
var_str +="var w = 1460;\n\
var toolbar_width = 550;\n\
var containerPanelPadding = 20;\n\
var chartPanelWidth = w+ containerPanelPadding*2;\n\
var chartPanelHeight = h + containerPanelPadding*2;\n\
var curX = 0;\n\
var curY = 0;\n\
var curEndX = initMaxX;\n\
var curEndY = initMaxY;\n\
var xScale = pv.Scale.linear(curX, curEndX).range(0, w-nameMargin);\n\
var yScale = pv.Scale.linear(curY, curEndY).range(0, h -scaleMargin);\n\
var xLabelPos = containerPanelPadding + nameMargin;\n\
var yLabelPos = 40;\n\
var labelWidth = 200;\n\
var panXFactor = 10;\n\
var panYFactor = 10;\n\
var isNewWindow = false;\n\
var condorTime = false;\n\
var kickstart = false;\n\
var condorRuntime = false;\n\
var resourceDelay = false;\n\
var preScript = false;\n\
var postScript = false;\n\
var showName = false;\n\
var headerPanelWidth = w+ containerPanelPadding*2;\n\
var headerPanelHeight = 100;\n\
var footerPanelWidth = w+ containerPanelPadding*2;\n\
var footerPanelHeight = "+ str(45 + len(workflow_stat.transformation_statistics_dict)//3*15) + ";\n\
</script>\n"
return var_str | d0b55b00952f28242775a297fedab6cf1a69169e | 704,248 |
def aten_le(mapper, graph, node):
""" 构造对比大小的PaddleLayer。
TorchScript示例:
%80 : bool = aten::le(%78, %79)
参数含义:
%80 (bool): 输出,第一个元素是否小于等于第二个元素。
%78 (-): 需对比的输入1。
%79 (-): 需对比的输入2。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# get the list of outputs of the current node
current_outputs = [output_name]
# process input 0, i.e. %78
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
layer_inputs["x"] = inputs_name[0]
# process input 1, i.e. %79
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
layer_inputs["y"] = inputs_name[1]
# get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer("prim.le", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
return current_inputs, current_outputs | 45797abf1ec5a97619578dd734c1d9d7fb448aec | 704,249 |
from pathlib import Path
def filter_files(names: list[Path]) -> list[Path]:
"""只要文件,不要文件夹"""
return [x for x in names if x.is_file()] | 25c1258891e2df7c35f700a26cadf01013329337 | 704,250 |
def move_ship_waypoint(instructions: list) -> list:
"""Move the ship using the waypoint movement rules
:param instructions: List of movement instructions
:return: Final position of the ship
"""
waypoint = [10, 1]
ship = [0, 0]
for instruction in instructions:
cmd, val = instruction
if cmd == 'F':
ship[0] += val * waypoint[0]
ship[1] += val * waypoint[1]
if cmd == 'N':
waypoint[1] += val
elif cmd == 'S':
waypoint[1] -= val
elif cmd == 'E':
waypoint[0] += val
elif cmd == 'W':
waypoint[0] -= val
elif cmd == 'L' or cmd == 'R':
rotation = (2 * (cmd == 'L') - 1) * val % 360
if rotation == 90:
waypoint[0], waypoint[1] = -waypoint[1], waypoint[0]
elif rotation == 180:
waypoint[0] *= -1
waypoint[1] *= -1
elif rotation == 270:
waypoint[0], waypoint[1] = waypoint[1], -waypoint[0]
return ship | 7202392e4826d522287455d94f7b06c0e2f931ee | 704,251 |
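A usage sketch with the example route from Advent of Code 2020 day 12:
route = [('F', 10), ('N', 3), ('F', 7), ('R', 90), ('F', 11)]
move_ship_waypoint(route)  # -> [214, -72]; Manhattan distance 286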
import itertools
def get_hyperparams_combinations(hyperparams):
"""Get list of hyperparmeter (dict) combinations."""
# transforms tuning hyperparams to a list of dict params for each option
return [
{k:v for k,v in zip(hyperparams.keys(), hypms)}
for hypms
in itertools.product(*[vals for vals in hyperparams.values()])
] | e5f52a8eddb8a2a476e0daa47f63161d440263f2 | 704,252 |
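A usage sketch (hypothetical grid):
grid = {'lr': [0.1, 0.01], 'batch_size': [32, 64]}
get_hyperparams_combinations(grid)
# -> [{'lr': 0.1, 'batch_size': 32}, {'lr': 0.1, 'batch_size': 64},
#     {'lr': 0.01, 'batch_size': 32}, {'lr': 0.01, 'batch_size': 64}]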
from datetime import datetime
import json
import logging
def convert_from_poolfile_to_sequence_set_and_back(inp_fp_path,
op_path, conversion_type, description="", run_id=None):
"""
In this function we take either pool file or Sequence Set
and convert from one to the other. Sequence Set is output
and input as a JSON file, pool file is output and input
as a TSV file. Conversion_type is an int:
0 -> going from pool file to Sequence Set
1 -> going from Sequence Set to pool file
Args:
inp_fp_path: Path to either pool file (TSV) or Sequence Set (JSON)
op_path: Output path to the conversion output
conversion_type: (int) from [0,1]. 0 -> Pool file to Sequence Set
1 -> Sequence Set to Pool File.
description: (string) Optional string describing the set
Poolfile:
barcode(str):rcbarcode(str):nTot(int):n(int):scaffold(str):strand(str +/-):pos(int):
n2(int):scaffold2(int):strand2(str +/-):pos2(int):nPastEnd(int)
"""
if conversion_type not in [0,1]:
raise Exception("Cannot recognize conversion type: Must be int." + \
"Val: {}".format(conversion_type))
if conversion_type == 0:
# Going from poolfile to Sequence Set
if run_id is None:
run_id = "MapTnSeq_Barcodes_run_on_" + \
str(datetime.now()).replace(' ', '_'),
# output dict
sequence_set = {
"sequence_set_id": run_id,
"description": "MapTnSeq (RBTNSEQ) mapping of barcodes to a " + \
"genome. Explanations of values: 'nTot' is total " + \
"number of times this barcode was counted." + \
" 'n' is number of times this barcode was counted" + \
" at this location. 'scf' is scaffold name." + \
" 'strand' is strand (+ or -). 'pos' is nucleotide" + \
" position. 'n2' is number of times found at second" + \
" highest counted location. 'scf2' is second highest" + \
" location scaffold, 'strand2' similarly, etc." + \
" 'nPastEnd' means number of times this barcode was" + \
" found next to the next part of the plasmid (barcode" + \
" wasn't inserted into the genome without the rest " + \
" of the plasmid).\n" + \
" User Description (optional): {}".format(description)
}
sequence_list = []
pool_FH = open(inp_fp_path, "r")
header = pool_FH.readline().rstrip()
c_line = pool_FH.readline()
i = 1
while c_line != "":
c_lst = c_line.split('\t')
nPastEnd = c_lst[-1].rstrip()
barcode, rcbarcode, nTot, n, scf, strnd, pos = c_lst[:7]
n2, scf2, strnd2, pos2 = c_lst[7:-1]
# desc_str holds all the information needed to reconstruct pool file
desc_str = "nTot:{};n:{};scf:{};strand:{};pos:{};".format(
nTot, n, scf, strnd, pos)
desc_str += "n2:{};scf2:{};strnd2:{};pos2:{};".format(
n2, scf2, strnd2, pos2)
desc_str += "nPastEnd:" + nPastEnd
sequence_list.append(
{
"sequence_id": "MapTnSeq_barcode_" + str(i),
"description": desc_str,
"sequence": barcode
})
c_line = pool_FH.readline()
i += 1
pool_FH.close()
sequence_set["sequences"] = sequence_list
with open(op_path, "w") as g:
g.write(json.dumps(sequence_set, indent=2))
logging.info("Wrote Sequence Set JSON file to " + op_path)
elif conversion_type == 1:
# Going from Sequence Set to Pool File
if inp_fp_path.split(".")[-1] != "json":
raise Exception("Sequence Set indicated but not JSON file")
sequence_set_d = json.loads(open(inp_fp_path).read())
out_pool_FH = open(op_path, "w")
out_pool_FH.write("barcode\trcbarcode\tnTot\tn\tscaffold\tstrand\tpos\t" + \
"n2\tscaffold2\tstrand2\tpos2\tnPastEnd\n")
seq_list = sequence_set_d["sequences"]
for seq in seq_list:
desc_str = seq["description"]
barcode = seq["sequence"]
tsl_d = {"A":"T", "T":"A", "G":"C", "C":"G"}
rcbc1 = [tsl_d[x] for x in list(barcode)]
rcbc1.reverse()
rcbarcode = "".join(rcbc1)
out_pool_FH.write(barcode + "\t" + rcbarcode + "\t")
items = [x.split(":")[1] for x in desc_str.split(";")]
out_pool_FH.write("\t".join(items) + "\n")
out_pool_FH.close()
logging.info("Wrote Pool File from Sequence Set at " + op_path)
return None | ad46eb491840aa75764b013495a113ae746144ba | 704,253 |
from typing import List
def read_basis_format(basis_format: str) -> List[int]:
"""Read the basis set using the specified format."""
s = basis_format.replace('[', '').split(']')[0]
fss = list(map(int, s.split(',')))
fss = fss[4:] # cp2k coefficient formats start in column 5
return fss | 9701309ab43eb7a0227aa141653688dbdce40811 | 704,254 |
def p_to_stars(p, thres=(0.1, 0.05, 0.01)):
"""Return stars for significance values."""
stars = []
for t in thres:
if p < t:
stars.append("*")
return "".join(stars) | d88c2fd6c1b4e2d75a9cb664dfc10fab308bc6ee | 704,255 |
def correct_dcm(dcm):
""" Correct DCM image which were actually signed data, but were treated as unsigned """
x = dcm.pixel_array + 1000
px_mode = 4096
x[x>=px_mode] = x[x>=px_mode] - px_mode
dcm.PixelData = x.tobytes()
dcm.RescaleIntercept = -1000
return dcm.pixel_array, dcm.RescaleIntercept | 0186ab3fc4b606902da3a50a5835eb227a1b7733 | 704,256 |
def factorial_r(number):
"""
Calculates the factorial of a number, using a recursive process.
:param number: The number.
:return: n!
"""
# Check to make sure the argument is valid.
if number < 0:
raise ValueError
# This is the recursive part of the function.
if number == 0:
# If the argument is 0, then we simply return the value of 0!, which is 1.
return 1
else:
# Otherwise, return n multiplied by (n - 1)!
return number * factorial_r(number - 1) | e5c28edac93b965f438bd61c5bb1c0a935c96700 | 704,257 |
def make_perturbed_cmtsolution(py, src_frechet_directory, cmtsolution_directory, output_directory):
"""
make the perturbed cmtsolution based on src_frechet.
"""
script = f"ibrun -n 1 {py} -m seisflow.scripts.source_inversion.make_perturbed_cmtsolution --src_frechet_directory {src_frechet_directory} --cmtsolution_directory {cmtsolution_directory} --output_directory {output_directory}; \n"
return script | 07bb69751ddaee9d7aa6389c6cab9bc6021758ed | 704,258 |
def weight_correct_incorrect(rslt):
"""Return a pair of floating-point numbers denoting the weight of
(correct, incorrect) instances in EvaluationResult rslt.
>>> listInstCorrect = [Instance([],True,0.25)]
>>> listInstIncorrect = [Instance([],False,0.50)]
>>> rslt = EvaluationResult(listInstCorrect, listInstIncorrect, None)
>>> weight_correct_incorrect(rslt)
(0.25, 0.5)"""
correctInst = sum([inst.dblWeight for inst in rslt.listInstCorrect])
incorrectInst = sum([inst.dblWeight for inst in rslt.listInstIncorrect])
return (correctInst , incorrectInst) | 5a7ef1d338821f10b58ba06224059e532180c50d | 704,259 |
import pathlib
import csv
def get_data_info(path):
"""
Get metadata of the iamges.
"""
samples = []
# the data is in subfolders
parent = pathlib.Path(path)
for csv_file in parent.glob('**/*.csv'):
with open(str(csv_file), 'r') as f:
reader = csv.reader(f)
for line in reader:
try:
samples.append({'img_center': str(csv_file.resolve().parent.joinpath('IMG',pathlib.Path(line[0]).name)),
'img_left': str(csv_file.resolve().parent.joinpath('IMG',pathlib.Path(line[1]).name)),
'img_right': str(csv_file.resolve().parent.joinpath('IMG',pathlib.Path(line[2]).name)),
'steering_angle': float(line[3]),
'throttle': float(line[4]),
'brake': float(line[5]),
'speed': float(line[6])})
except Exception as e:
print(e)
return samples | 53f3dd1b6ff18d43a656f4a3f6da26ab1e60a6c2 | 704,260 |
import random
def shuffle(x, y):
""" Shuffle the datasets. """
for n in range(len(x) - 1):
rnd = random.randint(0, (len(x) - 1))
x1 = x[rnd]
x2 = x[rnd - 1]
y1 = y[rnd]
y2 = y[rnd - 1]
x[rnd - 1] = x1
x[rnd] = x2
y[rnd - 1] = y1
y[rnd] = y2
return x, y | c9f198d3796c5d64eba818753701957ea1a0e924 | 704,261 |
def get_data_names(data, data_names):
"""
Get default names for data fields if none are given based on the data.
Examples
--------
>>> import numpy as np
>>> east, north, up = [np.arange(10)]*3
>>> get_data_names((east,), data_names=None)
('scalars',)
>>> get_data_names((east, north), data_names=None)
('east_component', 'north_component')
>>> get_data_names((east, north, up), data_names=None)
('east_component', 'north_component', 'vertical_component')
>>> get_data_names((up, north), data_names=('ringo', 'george'))
('ringo', 'george')
"""
if data_names is not None:
if len(data) != len(data_names):
raise ValueError(
"Data has {} components but only {} names provided: {}".format(
len(data), len(data_names), str(data_names)
)
)
return data_names
data_types = [
("scalars",),
("east_component", "north_component"),
("east_component", "north_component", "vertical_component"),
]
if len(data) > len(data_types):
raise ValueError(
" ".join(
[
"Default data names only available for up to 3 components.",
"Must provide custom names through the 'data_names' argument.",
]
)
)
return data_types[len(data) - 1] | e2097d6dbf2c8cc52fd4a60124727cad5fe9fbc4 | 704,262 |
import pandas
def get_labels_from_file(filename):
"""Get labels on the last column from file.
Args:
filename: file name
Returns:
List[str]: label list
"""
data_frame = pandas.read_csv(filename)
labels = data_frame['summary'].tolist()
return labels | 605e9a464eb9fc007d2421fadcab362b7c22ebf5 | 704,263 |
from typing import Any
def create_result_scalar(name: str, item_type: str, value: Any) -> dict:
"""
Create a scalar result for posting to EMPAIA App API.
:param name: Name of the result
:param item_type: Type of result
:param value: Value of the result
"""
result = {"name": name, "type": item_type, "value": value}
return result | 3fb16c540cc8c76cfc42e4a906e4be280346b802 | 704,264 |
import sympy
def replace_heaviside(formula):
"""Set Heaviside(0) = 0
Differentiating sympy Min and Max is giving Heaviside:
Heaviside(x) = 0 if x < 0 and 1 if x > 0, but
Heaviside(0) needs to be defined by user.
We set Heaviside(0) to 0 because in general there is no sensitivity. This
is done by setting the second argument of Heaviside to zero.
"""
if not isinstance(formula, sympy.Expr):
return formula
w = sympy.Wild("w")
return formula.replace(sympy.Heaviside(w), sympy.Heaviside(w, 0)) | d1aff5e4a2dd68ba53ced487b665e485dab4b54d | 704,265 |
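A usage sketch (assuming sympy is installed):
import sympy
x = sympy.Symbol('x')
expr = replace_heaviside(sympy.Heaviside(x))
expr.subs(x, 0)  # -> 0, since Heaviside(0) is now pinned to 0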
import os
def is_dicom(filename):
"""Returns True if the file in question is a DICOM file, else False. """
# Per the DICOM specs, a DICOM file starts with 128 reserved bytes
# followed by "DICM".
# ref: DICOM spec, Part 10: Media Storage and File Format for Media
# Interchange, 7.1 DICOM FILE META INFORMATION
if os.path.isfile(filename):
    with open(filename, "rb") as f:
        s = f.read(132)
    pattern = "DICM"
    binary_pattern = pattern.encode()
    return s.endswith(binary_pattern)
else:
return False | 014c15481224413d6757c950a0888fb60e0f94d5 | 704,266 |
def exact_change_recursive(amount,coins):
""" Return the number of different ways a change of 'amount' can be
given using denominations given in the list of 'coins'
>>> exact_change_recursive(10,[50,20,10,5,2,1])
11
>>> exact_change_recursive(100,[100,50,20,10,5,2,1])
4563
"""
assert amount >= 0
if amount==0:
# no amount left -> 1 solution exists with no coins
return 1
elif not coins:
# amount>0, no coins left -> no solution possible
return 0
else:
current_denomination = coins[0]
remaining_coins = coins[1:]
running_total = 0
solutions_count = 0
# Create solutions with increasing numbers of coins of the current_denomination
while running_total<= amount:
# remaining_amount needs to be achieved with remaining_coins
remaining_amount = amount - running_total
solutions_count += exact_change_recursive(remaining_amount,remaining_coins)
running_total += current_denomination
return solutions_count | f18cd10802ba8e384315d43814fcb1dcd6472d78 | 704,267 |
def knapsack(val,wt,W,n):
"""
Consider W=5,n=4
wt = [5, 3, 4, 2]
val = [60, 50, 70, 30]
So, for any value we'll consider between maximum of taking wt[i] and not taking it at all.
taking 0 to W in column 'line 1' taking wt in rows 'line 2'
two cases ->
* cur_wt<=total wt in that column otherwise matrix[cur_wt][max_wt]= matrix[cur_wt-1][max_wt] # not taking wt[cur_wt] at all
* take matrix[cur_wt][max_wt] = max(matrix[cur_wt-1][max_wt-wt[cur_wt]] , matrix[cur_wt-1][max_wt]) # max b/w not taking wt[cur_wt] & considering it
* ^> means taken just above value
| | 0 | 1 | 2 | 3 | 4 | 5 |
| 5 | 0 | 0 | 0 | 0 | 0 | 60 |
| 3 | ^>0 | ^>0 | ^>0 | max(0,0+50) | 50 | max(0+50,60)=60 |
| 4 | ^>0 | ^>0 | ^>0 | ^50 | max(50,0+70) | max(60,0+70) |
| 2 | ^>0 | ^>0 | max(0,0+30) | max(50,0+30) | max(70,0+30) | max(70,50+30) |
ans = max(70,50+30)=80
"""
matrix = [[0 for i in range(W+1)] for i in range(n)]
for i in range(W+1):
if wt[0]<=i:
matrix[0][i]=val[0]
else:
matrix[0][i]=0
for i in range(1,n): #line 2
for j in range(W+1): # line 1
if j==0:
matrix[i][j]=0
continue
if j>=wt[i]:
matrix[i][j] = max(matrix[i-1][j],matrix[i-1][j-wt[i]]+val[i])
else:
matrix[i][j]=matrix[i-1][j]
return matrix[n-1][W] | d030a57e8c7040cbd1f7a3556f21d599ac049428 | 704,268 |
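A usage sketch with the example from the docstring:
val = [60, 50, 70, 30]
wt = [5, 3, 4, 2]
knapsack(val, wt, 5, 4)  # -> 80: items of weight 3 and 2 give 50 + 30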
def filter_graph(graph, n_epochs):
"""
Filters graph, so that no entry is too low to yield at least one sample during optimization.
:param graph: sparse matrix holding the high-dimensional similarities
:param n_epochs: int Number of optimization epochs
:return:
"""
graph = graph.copy()
graph.data[graph.data < graph.data.max() / float(n_epochs)] = 0
graph.eliminate_zeros()
return graph | 04af2804e208b8ce582440b2d0306fe651a583b0 | 704,269 |
def rev_find(revs, attr, val):
"""Search from a list of TestedRev"""
for i, rev in enumerate(revs):
if getattr(rev, attr) == val:
return i
raise ValueError("Unable to find '{}' value '{}'".format(attr, val)) | 6b9488023d38df208013f51ed3311a28dd77d9b8 | 704,270 |
def untempering(p):
"""
see https://occasionallycogent.com/inverting_the_mersenne_temper/index.html
>>> mt = MersenneTwister(0)
>>> mt.tempering(42)
168040107
>>> untempering(168040107)
42
"""
e = p ^ (p >> 18)
e ^= (e << 15) & 0xEFC6_0000
e ^= (e << 7) & 0x0000_1680
e ^= (e << 7) & 0x000C_4000
e ^= (e << 7) & 0x0D20_0000
e ^= (e << 7) & 0x9000_0000
e ^= (e >> 11) & 0xFFC0_0000
e ^= (e >> 11) & 0x003F_F800
e ^= (e >> 11) & 0x0000_07FF
return e | 4118b55fd24008f9e96a74db937f6b41375484c3 | 704,271 |
def multiplicar(a, b):
"""
MULTIPLICAR computes the product of two numbers
Parameters
----------
a : float
    First numeric value `a`.
b : float
    Second numeric value `b`.
Returns
-------
float
    Returns the product `a` * `b`
"""
return a*b | 2d1a56924e02f05dcf20d3e070b17e4e602aecf6 | 704,272 |
def pascal_row(n):
"""returns the pascal triangle row of the given integer n"""
def triangle(n, lst):
if lst ==[]:
lst = [[1]]
if n == 1:
return lst
else:
oldRow = lst[-1]
def helpRows(lst1, lst2):
if lst1 == [] or lst2 == []:
return []
return [(lst1[0], lst2[0])] + helpRows(lst1[1:], lst2[1:])
def summation(lst):
if lst == []:
return []
return [sum(lst[0])] + summation(lst[1:])
newRow = [1] + summation(helpRows(oldRow, oldRow[1:])) + [1]
return triangle(n - 1, lst + [newRow])
return triangle(n + 1, [])[n] | 030fe3e574f4261c862a882e7fdeee836a1dffb7 | 704,273 |
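A usage sketch:
pascal_row(4)  # -> [1, 4, 6, 4, 1]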
def can_comment(request, entry):
"""Check if current user is allowed to comment on that entry."""
return entry.allow_comments and \
(entry.allow_anonymous_comments or
request.user.is_authenticated()) | 04bcd019af083cff0367e236e720f4f7b00f7a65 | 704,274 |
def single(mjd, hist=[], **kwargs):
"""cadence requirements for single-epoch
Request: single epoch
mjd: float or int, the MJD of the requested epoch
hist: list, list of previous MJDs
"""
# return len(hist) == 0
sn = kwargs.get("sn", 0)
return sn <= 1600 | 8734916221f0976d73386ac662f6551c25accfc3 | 704,276 |
def accuracies(diffs, FN, FP, TN, TP):
"""INPUT:
- np.array (diffs), label - fault probability
- int (FN, FP, TN, TP) for keeping track of false negatives, false positives, true negatives and true positives"""
for value in diffs:
if value < 0:
if value < -0.5:
FP+=1
else:
TN +=1
else:
if value < 0.5:
TP+=1
else:
FN+=1
return FN, FP, TN, TP | 001cebd169589f9f1494d9833c1fc49d8ba9964b | 704,277 |
def data_scaling(Y):
"""Scaling of the data to have pourcent of baseline change columnwise
Parameters
----------
Y: array of shape(n_time_points, n_voxels)
the input data
Returns
-------
Y: array of shape(n_time_points, n_voxels),
the data after mean-scaling, de-meaning and multiplication by 100
mean: array of shape(n_voxels), the data mean
"""
mean = Y.mean(0)
Y = 100 * (Y / mean - 1)
return Y, mean | 94b550386b8411a96b9ccd3f5e93098560c327e1 | 704,278 |
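A small worked example (hypothetical data):
import numpy as np
Y = np.array([[ 90., 110.],
              [110.,  90.]])
Y_scaled, mean = data_scaling(Y)
# mean == [100., 100.]; Y_scaled == [[-10., 10.], [10., -10.]]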
import json
def load_json(path: str):
"""Load json file from given path and return data"""
with open(path) as f:
data = json.load(f)
return data | d165d087c78a0ba88d318a6dbe8b2ac8f9a8c4b5 | 704,279 |
def _get_int_val(val, parser):
"""Get a possibly `None` single element list as an `int` by using
the given parser on the element of the list.
"""
if val is None:
return 0
return parser.parse(val[0]) | d2e029657b3424027e83ee8e1e2be76e3abf8fda | 704,280 |
def gcd(a, b):
"""Compute greatest common divisor of a and b.
This function is used in some of the functions in PyComb module.
"""
r = a % b
while r != 0:
a = b
b = r
r = a % b
return b | 10f09e979b525dffe480ca870726459ad0420c0d | 704,281 |
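A usage sketch:
gcd(48, 18)  # -> 6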
def get_n_p(A_A, n_p_in='指定しない'):
"""付録 C 仮想居住人数
Args:
A_A(float): 床面積
n_p_in(str): 居住人数の入力(「1人」「2人」「3人」「4人以上」「指定しない」)
Returns:
float: 仮想居住人数
"""
if n_p_in is not None and n_p_in != '指定しない':
return {
'1人': 1.0,
'2人': 2.0,
'3人': 3.0,
'4人以上': 4.0
}[n_p_in]
if A_A < 30:
return 1.0
elif A_A < 120:
return A_A / 30
else:
return 4.0 | db257abdb76ee35f16b07e5baccec82211737971 | 704,282 |
def make_mmvt_boundary_definitions(cv, milestone):
"""
Take a Collective_variable object and a particular milestone and
return an OpenMM Force() object that the plugin can use to monitor
crossings.
Parameters
----------
cv : Collective_variable()
A Collective_variable object which contains all the information
for the collective variable describine this variable. In fact,
the boundaries are contours of the function described by cv.
This variable contains information like the groups of atoms
involved with the CV, and the expression which describes the
function.
milestone : Milestone()
A Milestone object which describes the boundary between two
Voronoi cells. This variable contains information like the
values of the variables which will be entered into the
Force() object.
Returns
-------
myforce : openmm.Force()
An OpenMM force object which does not affect atomic motion, but
allows us to conveniently monitor a function of atomic
position.
"""
myforce = cv.make_force_object()
myforce.setForceGroup(1)
variable_names_list = cv.add_parameters(myforce)
cv.add_groups_and_variables(myforce, cv.get_variable_values_list(
milestone))
return myforce | 45baaaa70ea24cb564c529cd885597415561a25d | 704,283 |
def is_leaf_module(module):
"""Utility function to determine if the given module is a leaf module - that is, does not have children modules
:return:
True if the module is a leaf, False otherwise
"""
module_list = list(module.modules())
return bool(len(module_list) == 1) | f34cbd4e961a467117a980ab0b7829f4a8245d2f | 704,284 |
def _is_fix_comment(line, isstrict):
""" Check if line is a comment line in fixed format Fortran source.
References
----------
:f2008:`3.3.3`
"""
if line:
if line[0] in '*cC!':
return True
if not isstrict:
i = line.find('!')
if i!=-1:
start = line[:i].lstrip()
if not start:
if i==5:
# line continuation
return False
return True
else:
# inline comment or ! is used in character context
# inline comments are handled elsewhere
pass
elif line=='':
return True
return False | 8ac7f74f2b4e57b9fb65183a46ed3dbfc0f7ef79 | 704,285 |
def parseConfigFile(configFilePath):
"""
:param configFilePath:
:return: a hash map of the parameters defined in the given file.
Each entry is organized as <parameter name, parameter value>
"""
# parse valid lines
lines = []
with open(configFilePath) as f:
for line in f:
line = line.strip()
if line == '': # ignore empty line
continue
if line.startswith('#'): # ignore the comment in config file
continue
lines.append(line)
params = {}
for line in lines:
if not line.__contains__("="):
raise Exception("Invalid parameter definition as \"" + line + "\" in file " + configFilePath)
paramName = line.split("=")[0].strip()
value = line.split("=")[1].strip()
params[paramName] = value
return params | aee6a1da052f4c2ef907bf41b2cfaa4b93612a5e | 704,286 |
def has_annotations(doc):
""" Check if document has any mutation mention saved. """
for part in doc.values():
if len(part['annotations']) > 0:
return True
return False | 6b57893bc35af45950ec2eeb5008b663028d48bf | 704,287 |
def DEFAULT_APPLICANT_SCRUBBER(raw):
"""Remove all personal data."""
return {k: v for k, v in raw.items() if k in ("id", "href", "created_at")} | 16fa853551cd03bcf1124639e23b0c72ec9db75d | 704,288 |
def partition(my_list: list, part: int) -> list:
""" Function which performs Partition """
begin = 0
end = len(my_list) - 1
while begin < end:
check_lower = my_list[begin] < part
check_higher = my_list[end] >= part
if not check_lower and not check_higher:
# Swap
my_list[begin], my_list[end] = my_list[end], my_list[begin]
else:
if check_lower:
begin += 1
if check_higher:
end -= 1
return my_list | 754a039eede5e143400b0fd58b622d57e083a671 | 704,289 |
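A usage sketch (hypothetical list): values below the pivot end up before values at or above it, though the order within each side is arbitrary:
partition([5, 1, 4, 2, 3], 3)  # -> [2, 1, 4, 5, 3]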
def _sparse_elm_mul(spmat_csr, col):
"""
spmat (n, m)
col (n,)
"""
for i in range(spmat_csr.shape[0]):
i0, i1 = spmat_csr.indptr[i], spmat_csr.indptr[i+1]
if i1 == i0:
continue
spmat_csr.data[i0:i1] *= col[i]
return spmat_csr | 55c7ca7f848989eaa95a5917cfa40edf2d7e1372 | 704,290 |
import os
def make_file_path(file, args):
"""Create any directories and subdirectories needed to store data in the specified file,
based on inputs_dir and inputs_subdir arguments. Return a pathname to the file."""
# extract extra path information from args (if available)
# and build a path to the specified file.
path = os.path.join(args.get('inputs_dir', ''), args.get('inputs_subdir', ''))
if path != '' and not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, file)
return path | 58ac390734f60daf67adcd6e05b3bf721f4b2383 | 704,292 |
import hashlib
def calc_checksum(filename):
"""
Calculates a checksum of the contents of the given file.
:param filename:
:return:
"""
try:
f = open(filename, "rb")
contents = f.read()
m = hashlib.md5()
m.update(contents)
checksum = m.hexdigest()
return checksum
except IOError:
return None | 080e3686279ae126951cd1b66efdb9a0d2448011 | 704,295 |
def __filter_card_id(cards: list[str]):
"""Filters an list with card ids to remove
repeating ones and non-ids"""
ids = list()
for c in cards:
try:
int(c)
except ValueError:
continue
else:
if c not in ids:
ids.append(c)
return ids | 53f7cfa979ac8c7bc5357216eb903f5fe5abc02b | 704,296 |
import time
def get_elapsed_time(start_time) -> str:
""" Gets nicely formatted timespan from start_time to now """
end = time.time()
hours, rem = divmod(end-start_time, 3600)
minutes, seconds = divmod(rem, 60)
return "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds) | d75a1873254e1b1cc9ffc714e65d3a9ed95e5803 | 704,297 |
import time
def gmt_time():
"""
Return the time in the GMT timezone
@rtype: string
@return: Time in GMT timezone
"""
return time.strftime('%Y-%m-%d %H:%M:%S GMT', time.gmtime()) | 6009f0c3bc185bca9209827f2c89e39917c1e418 | 704,298 |
import os
def is_subdirectory(path_a, path_b):
"""Returns True if `path_a` is a subdirectory of `path_b`."""
path_a = os.path.realpath(path_a)
path_b = os.path.realpath(path_b)
try:
relative = os.path.relpath(path_a, path_b)
except ValueError:
# Different mounts on Windows:
# ValueError: path is on mount 'c:', start on mount 'd:'
#
return False
return not relative.startswith(os.pardir + os.sep) | 935a5897ff447cc3c6e757d6528f795732bed56f | 704,300 |
def clean_string_columns(df):
"""Clean string columns in a dataframe."""
try:
df.email = df.email.str.lower()
df.website = df.website.str.lower()
except AttributeError:
pass
str_columns = ["name", "trade_name", "city", "county"]
for column in str_columns:
try:
df[column] = df[column].astype(str).str.title()
df[column] = df[column].astype(str).str.replace("Llc", "LLC")
df[column] = df[column].astype(str).str.replace("L.L.C.", "LLC")
df[column] = df[column].astype(str).str.strip()
except (AttributeError, KeyError):
pass
return df | eb9aaa474fe517b346eaa8cd93e669b3fcc3459d | 704,302 |
def ordinal(n: int) -> str:
"""
from: https://codegolf.stackexchange.com/questions/4707/outputting-ordinal-numbers-1st-2nd-3rd#answer-4712
"""
result = "%d%s" % (n, "tsnrhtdd"[((n / 10 % 10 != 1) * (n % 10 < 4) * n % 10)::4])
return result.replace('11st', '11th').replace('12nd', '12th').replace('13rd', '13th') | 97eecb539ae37c89ea3e4e76c5c913877fbf2365 | 704,304 |
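A usage sketch:
ordinal(1), ordinal(22), ordinal(11)  # -> ('1st', '22nd', '11th')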
def part_2():
"""Function which calculates the solution to part 2
Arguments
---------
Returns
-------
"""
return None | 7b454841494c9f717868eda727b6273fabbf8222 | 704,305 |
import glob
def expand_files(files):
"""Expands a wildcard to a list of paths for Windows compatibility"""
# Split at whitespace
files = files.split()
# Handle wildcard expansion
if len(files) == 1 and '*' in files[0]:
files = glob.glob(files[0])
# Convert to Path objects
return files | 46e2d6e7ee1609c144d04a2d429dc07ff1786cf1 | 704,306 |
def split_exon(exon, cds):
"""Takes an exon and a CDS, and returns a map of regions for each
feature (UTR5/3, CDS) that may be inferred from the arguments.
Note that the CDS is simply returned as is, to simplify
downstream handling of these features."""
results = [cds]
if exon["start"] < cds["start"]:
utr = dict(exon)
utr.update(
end=cds["start"] - 1, feature=(exon["strand"] == "+" and "UTR5" or "UTR3")
)
results.append(utr)
if exon["end"] > cds["end"]:
utr = dict(exon)
utr.update(
start=cds["end"] + 1, feature=(exon["strand"] == "+" and "UTR3" or "UTR5")
)
results.append(utr)
return results | e2bb12a688bbe3e5c79039c2a9cce4e5aa9e9a1b | 704,307 |
def state_dict_to_cpu(state_dict):
"""Make a copy of the state dict onto the cpu."""
# .state_dict() references tensors, so we detach and copy to cpu
return {key: par.detach().cpu() for key, par in state_dict.items()} | 2d1fcc07ab8eac192a846cbcdb8d7363ffd8e9e8 | 704,308 |
import unicodedata
def normalize_str(text):
"""
Normalizes unicode input text (for example remove national characters)
:param text: text to normalize
:type text: unicode
"""
# unicodedata NFKD doesn't properly convert the Polish ł
trans_dict = {
u'ł': u'l',
u'Ł': u'L'
}
trans = dict((ord(k), ord(v)) for k, v in trans_dict.items())
text = text.translate(trans)
return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore') | 40c8f77cdbf08b12a3867cd4a9d9bb91b323b50b | 704,309 |
def get_context_data(data) -> dict:
"""Look for 'context' item in 'queries' item."""
if "queries" in data:
if "context" in data["queries"]:
if isinstance(data["queries"], list):
return data["queries"][0]["context"]
else:
return data["queries"]["context"]
return dict() | 4e05d3d9041a8199f32b4201dcfc69d3adef1034 | 704,310 |
import copy
def merge(src: list, dst: list) -> list:
"""Merge `src` into `dst` and return a copy of `dst`."""
# Avoid side effects.
dst = copy.deepcopy(dst)
def find_dict(data: list, key: str) -> dict:
"""Find and return the dictionary in `data` that has the `key`."""
tmp = [_ for _ in data if isinstance(_, dict) and set(_.keys()) == {key}]
assert len(tmp) == 1
return tmp[0]
def _update(src, dst):
# Add all string keys from `src` to `dst` if they are not yet in `dst`.
str_keys = [_ for _ in src if isinstance(_, str) and _ not in dst]
dst.extend(str_keys)
# Find the all dicts and their one and only key.
dict_src = {tuple(_.keys())[0] for _ in src if isinstance(_, dict)}
dict_dst = {tuple(_.keys())[0] for _ in dst if isinstance(_, dict)}
# Recursively merge the dictionaries.
for key in dict_src:
if key not in dict_dst:
# `dst` does not have the dict at all - just copy it.
dst.append(find_dict(src, key))
else:
# `dst` already has a dict for `key` -> recursively update it.
src_val = find_dict(src, key)[key]
dst_val = find_dict(dst, key)[key]
_update(src_val, dst_val)
# Sort the list alphabetically (if the entry is a dict then use its one
# and only key as the comparative element).
dst.sort(key=lambda _: _ if isinstance(_, str) else tuple(_.keys())[0])
_update(src, dst)
return dst | 84976322fda7306d6bc15507750314b6b4fcad44 | 704,311 |
def grab_receivers(apk) :
"""
@param apk : an APK instance
@rtype : the android:name attribute of all receivers
"""
return apk.get_elements("receiver", "android:name") | 632d6903b63ca9d815a9f9d81ff19b8d6dc12a84 | 704,312 |
def itos(x):
"""Converts intergers to strings"""
if type(x) != int:
raise ValueError("Input value not an integer!")
return '{}'.format(x) | 96efe311cade41b37c4f671ed0b7e5e2a74f3d0b | 704,313 |
import json
import re
def translate_reference_entities(ref_entities, mappings=None):
"""Transform MaaS reference data for comparison with test deployment.
Positional arguments:
ref_entities -- the reference entity data
Keyword arguments:
mappings -- describe the relationship between the reference and the test
To compare the entities being tested with the reference data, the reference
data must be manipulated so that the hostnames used match those of the
entities being tested.
"""
if mappings is None:
return ref_entities
checks_by_host_type = {}
for mapping in mappings:
test_label, reference_labels = mapping.split(':')
reference_labels = reference_labels.split(',')
for label in reference_labels:
json_blob = json.dumps(ref_entities[label])
json_blob = re.sub(label, test_label, json_blob)
entity = json.loads(json_blob)
if test_label in checks_by_host_type:
checks = checks_by_host_type[test_label]['checks']
checks.update(entity['checks'])
else:
checks_by_host_type[test_label] = entity
return checks_by_host_type | 8e7a6144b5d51fb25908a70100e0d1e03b10b3d5 | 704,314 |
import os
def file_size_feed(filename):
"""file_size_feed(filename) -> function that returns given file's size"""
def sizefn(filename=filename,os=os):
try:
return os.stat(filename)[6]
except:
return 0
return sizefn | da6c5d15df0f3d99022f3d42c95bb33a82065e32 | 704,315 |
import logging
def getLogger(name='root') -> logging.Logger:
"""Method to get logger for tests.
Should be used to get correctly initialized logger. """
return logging.getLogger(name) | dde177b07f9d8528d216fbc4c719e5bff9c67939 | 704,316 |
def public_dict(obj):
"""Same as obj.__dict__, but without private fields."""
return {k: v for k, v in obj.__dict__.items() if not k.startswith('_')} | 2edee1a17d0dad6ab4268f80eb565406656a77b4 | 704,317 |
def wilight_to_opp_position(value):
"""Convert wilight position 1..255 to opp.format 0..100."""
return min(100, round((value * 100) / 255)) | 4f6e4298a77c29ff0375d0ce5e5fd23e77e30622 | 704,319 |
def get_task_link(task_id, task_df):
"""Get the link from the PYBOSSA task."""
try:
task = task_df.loc[int(task_id)]
except KeyError:
return None
return task['info']['link'] | d90e994d2f0a4718bbedf8fd5fd534f6d5d32549 | 704,320 |
from typing import List
from typing import Tuple
def partwise_function(function: str, parts: List[Tuple[str, str]], add_zero_otherwise: bool = True) -> str:
"""
Returns a string representing the definition a part-wise mathematical function.
**Parameters**
- `function`: str
The name of the function.
- `parts`: list
Each element is a tuple whose 1st element is the value of the function and whose 2nd is a condition stating where the 1st applies.
- `add_zero_otherwise`: bool
If True, one last part stating "0, otherwise" is added.
**Returns**
`out`: str
TeX compatible string.
"""
res = f'{function}='
res += '\\begin{cases}\n'
for p in parts:
res += f'{p[0]},& {p[1]} \\\\'
if add_zero_otherwise:
res += r'0,& \text{otherwise}'
res += r'\end{cases}'
return res | b2954a9c947add4cf4b4740ac62f4ca16d3e1d70 | 704,321 |
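A usage sketch (hypothetical part):
print(partwise_function('f(x)', [('x^2', 'x > 0')]))
# f(x)=\begin{cases}
# x^2,& x > 0 \\0,& \text{otherwise}\end{cases}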
def read_restrictions_file(file):
"""
<Purpose>
Reads in the contents of a restrictions file.
<Arguments>
file: name/path of the file to open
<Returns>
A list, where each element is a line in the file
"""
# Get the file object; universal newlines are the default in Python 3
fileo = open(file, "r")
# Read in all the contents
contents = fileo.readlines()
# Close the file object
fileo.close()
return contents | df7207ea3bab49af47fcfbdaa9cc51f54692bb85 | 704,322 |
import re
def uniescape(text: str) -> str:
"""
Escapes all non-ASCII printable characters with JavaScript Unicode escapes.
"""
def escape(match):
character = match.group(0)
assert len(character) == 1
code_point = ord(character)
assert code_point <= 0xFFFF
return f"\\u{code_point:04X}"
return re.sub("[^\u0020-\u007F]", escape, text) | 0ce57acb1dc8dc88ad366844f323e6527eb655af | 704,323 |
from typing import Dict
from typing import List
from typing import Union
def aggregate_collate_fn(insts) -> Dict[str, List[List[Union[int, str, List[int]]]]]:
"""aggregate the instance to the max seq length in batch
Args:
insts: list of sample
Returns:
"""
subtree_spans, children_spans, snts = [], [], []
for inst in insts:
subtree_spans.append(inst['subtree_span'])
snts.append(inst['snt'])
children_spans.append(inst['children_span'])
assert len(subtree_spans) == len(snts) == len(children_spans)
return {'subtree_spans': subtree_spans, 'snts': snts, 'children_spans': children_spans} | 5b3cbb71876b9814a9664f0d99396308a218c3aa | 704,324 |
import re
def clean(s):
"""
remove symbols and lowercase
"""
regex = re.compile(r'\W+')
s = regex.sub(' ', s).strip()
return s.lower() | f0f71dacac0d792c10480f9eec605bc85bf58be0 | 704,325 |
def trueifset(xeval, typematch=False):
"""return True if @xeval is set, otherwise False"""
if typematch:
if not (xeval is False or xeval is None): return True
else: return False
else:
if xeval: return True
else: return False | ff9ab55f869edc0fc784d9a34f55fe46652f22b5 | 704,326 |
def get_file_content(file: str) -> str:
"""
Get file content.
"""
try:
with open(file, 'r') as f:
content = f.read()
return content
except IOError as e:
print(e)
print('Exiting...')
exit(1) | c10407d73ba2cd2d84eb99c0f131d3895ede460d | 704,327 |
def Any(x):
"""The Any type; can also be used to cast a value to type Any."""
return x | 88abecb27317e5bf16c5bd27c306ce800c7ac760 | 704,328 |
def resource_name_for_resource_type(resource_type, row):
"""Return the resource name for the resource type.
Each returned row contains all possible changed fields. This function
returns the resource name of the changed field based on the
resource type. The changed field's parent is also populated but is not used.
Args:
resource_type: the string equivalent of the resource type
row: a single row returned from the service
Returns:
The resource name of the field that changed.
"""
resource_name = '' # default for UNSPECIFIED or UNKNOWN
if resource_type == 'AD_GROUP':
resource_name = row.change_status.ad_group.value
elif resource_type == 'AD_GROUP_AD':
resource_name = row.change_status.ad_group_ad.value
elif resource_type == 'AD_GROUP_CRITERION':
resource_name = row.change_status.ad_group_criterion.value
elif resource_type == 'CAMPAIGN':
resource_name = row.change_status.campaign.value
elif resource_type == 'CAMPAIGN_CRITERION':
resource_name = row.change_status.campaign_criterion.value
return resource_name | 500bc32be1765f1e516f4f7cd386b24c3c4f373f | 704,329 |
import re
def to_yw7(text):
"""Convert html tags to yWriter 6/7 raw markup.
Return a yw6/7 markup string.
"""
# Clean up polluted HTML code.
text = re.sub('</*font.*?>', '', text)
text = re.sub('</*span.*?>', '', text)
text = re.sub('</*FONT.*?>', '', text)
text = re.sub('</*SPAN.*?>', '', text)
# Put everything in one line.
text = text.replace('\n', ' ')
text = text.replace('\r', ' ')
text = text.replace('\t', ' ')
while '  ' in text:
    text = text.replace('  ', ' ').rstrip().lstrip()
# Replace HTML tags by yWriter markup.
text = text.replace('<i>', '[i]')
text = text.replace('<I>', '[i]')
text = text.replace('</i>', '[/i]')
text = text.replace('</I>', '[/i]')
text = text.replace('</em>', '[/i]')
text = text.replace('</EM>', '[/i]')
text = text.replace('<b>', '[b]')
text = text.replace('<B>', '[b]')
text = text.replace('</b>', '[/b]')
text = text.replace('</B>', '[/b]')
text = text.replace('</strong>', '[/b]')
text = text.replace('</STRONG>', '[/b]')
text = re.sub('<em.*?>', '[i]', text)
text = re.sub('<EM.*?>', '[i]', text)
text = re.sub('<strong.*?>', '[b]', text)
text = re.sub('<STRONG.*?>', '[b]', text)
# Remove orphaned tags.
text = text.replace('[/b][b]', '')
text = text.replace('[/i][i]', '')
text = text.replace('[/b][b]', '')
return text | 59b9b961f7a94d23e2829b9d940f63c32207600b | 704,330 |