content (stringlengths 22 to 815k) | id (int64, 0 to 4.91M) |
---|---|
def write_hdfs_site(ctx):
"""
Add required entries to conf/hdfs-site.xml
"""
hdfs_site_file = "{tdir}/apache_hadoop/conf/hdfs-site.xml".format(
tdir=teuthology.get_testdir(ctx))
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
teuthology.write_file(remote, hdfs_site_file,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
''')
log.info("wrote file: " + hdfs_site_file + " to host: " + str(remote)) | 3,800 |
def rotate(
img: torch.Tensor,
boxes: np.ndarray,
angle: float,
) -> Tuple[torch.Tensor, np.ndarray]:
"""Rotate image around the center, interpolation=NEAREST, pad with 0 (black)
Args:
img: image to rotate
boxes: array of boxes to rotate as well
angle: angle in degrees. +: counter-clockwise, -: clockwise
Returns:
A tuple of rotated img (tensor), rotated boxes (np array)
"""
rotated_img = F.rotate(img, angle=angle, fill=0) # Interpolation NEAREST by default
_boxes = deepcopy(boxes)
if boxes.dtype == int:
# Compute relative boxes
_boxes = _boxes.astype(float)
_boxes[:, [0, 2]] = _boxes[:, [0, 2]] / img.shape[2]
_boxes[:, [1, 3]] = _boxes[:, [1, 3]] / img.shape[1]
# Compute rotated bboxes: xmin, ymin, xmax, ymax --> x, y, w, h, alpha
r_boxes = rotate_boxes(_boxes, angle=angle, min_angle=0)
if boxes.dtype == int:
# Back to absolute boxes
r_boxes[:, [0, 2]] *= img.shape[2]
r_boxes[:, [1, 3]] *= img.shape[1]
return rotated_img, r_boxes | 3,801 |
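A minimal usage sketch for the helper above, assuming `rotate` and its `rotate_boxes` dependency are importable from the same module; the tensor shape and box coordinates are illustrative only.
import numpy as np
import torch

img = torch.zeros((3, 64, 128), dtype=torch.uint8)        # C, H, W
boxes = np.array([[10, 5, 40, 30]])                        # absolute xmin, ymin, xmax, ymax
rotated_img, rotated_boxes = rotate(img, boxes, angle=30.0)
# rotated_boxes rows come back as (x, y, w, h, alpha), rescaled to pixel coordinates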
def population(state_data):
    """ Sums state populations.
    Each row of ``state_data`` is expected to hold the population at index 1
    (e.g. ``(name, population)`` tuples).
    """
    population_index = 1  # column of each row that holds the population count
    sum_ = 0
    num_states = len(state_data)
    for state in range(0, num_states):
        sum_ = sum_ + state_data[state][population_index]
    print("The total population of this list of states is", sum_)
    print("There are", num_states, "states in this list of states.") | 3,802 |
def _letterbox_image(img, w_in, h_in):
"""To get the image in boxed format."""
imc, imh, imw = img.shape
if (w_in / imw) < (h_in / imh):
new_w = w_in
new_h = imh * w_in // imw
else:
new_h = h_in
new_w = imw * h_in // imh
resized = _resize_image(img, new_w, new_h)
boxed = np.full((imc, h_in, w_in), 0.5, dtype=float)
_, resizedh, resizedw = resized.shape
boxed[:, int((h_in - new_h) / 2)
:int((h_in - new_h) / 2) + resizedh, int((w_in - new_w) / 2)
:int((w_in - new_w) / 2) + resizedw] = resized
return boxed | 3,803 |
def step_get_token(context, user_key):
"""Start to hit get-token api."""
use_user_key = parse_token_clause(user_key)
perform_get_token_call(context, use_user_key) | 3,804 |
def test_value():
"""Check if values are set okay
"""
piezo = jena.NV40(port)
    for setpoint in [0, 100, 200, 300]:
piezo.set_position(setpoint)
achieved = piezo.get_position()
assert(np.isclose(achieved, setpoint,rtol=0,atol=1)) | 3,805 |
def assert_not_in(a, b, msg=None):
""" Assert that the first argument is not in second one. """
if a not in b:
return
assert False, str(a) + ' is in ' + str(b) + _get_msg(msg) | 3,806 |
def to_RRDB(**kwargs):
"""
Residual in Residual Dense Blocks
"""
kwargs["n_filer"] = (" ",) * len(kwargs["n_filer"]) # remove x label
return _Box(fill="{rgb:white,1;black,3}", **kwargs) | 3,807 |
def continuous_agg_dict_features(n, n_feats, ks):
"""Listdict-like continuous aggregated features.
Parameters
----------
n: int
the number of the elements to create their features.
n_feats: int
the number of features.
ks: int
the number of perturbations.
Returns
-------
features: list
the random features we want to compute.
"""
features = []
for k in range(ks):
features.append(continuous_dict_features(n, n_feats))
return features | 3,808 |
async def invite(ctx: commands.Context):
"""Gets a link to invite Lopez."""
perms = discord.Permissions.none()
perms.view_audit_log = True
perms.manage_roles = True
perms.manage_channels = True
perms.create_instant_invite = True
perms.send_messages = True
perms.manage_messages = True
perms.embed_links, perms.attach_files, perms.read_message_history = True, True, True
perms.external_emojis, perms.add_reactions = True, True
await ctx.send(
"Use this link to invite Lopez into your Discord server!\n"
+ discord.utils.oauth_url("436251140376494080", perms)
) | 3,809 |
def mk_table_of_point_for_point_performance(RFR_dict=None, df=None,
testset='Test set (strat. 20%)',
inc_ensemble=False,
var2use='RFR(Ensemble)',
target='Iodide'):
"""
Make a table to summarise point-for-point performance
Parameters
-------
target (str): Name of the target variable (e.g. iodide)
var2use (str): variable name to use for ensemble prediction
testset (str): Testset to use, e.g. stratified sampling over quartiles for 20%:80%
    inc_ensemble (bool): include the ensemble (var2use) in the analysis
RFR_dict (dict): dictionary of core variables and data
df (pd.DataFrame): dataframe containing target and feature variables
Returns
-------
(None)
"""
# Get data objects as dictionary and extract dataframe if not provided.
if isinstance(RFR_dict, type(None)):
RFR_dict = build_or_get_models()
if isinstance(df, type(None)):
df = RFR_dict['df']
    # Get stats on the model runs
stats = get_stats_on_models(RFR_dict=RFR_dict, df=df, analysis4coastal=True,
var2use=var2use,
inc_ensemble=inc_ensemble, verbose=False)
    # Select param values of interest (and give updated title names)
rename_titles = {u'Chance2014_STTxx2_I': 'Chance et al. (2014)',
u'MacDonald2014_iodide': 'MacDonald et al. (2014)',
var2use : var2use,
'Iodide': 'Obs.',
}
# Set the stats to use
first_columns = [
'mean', 'std', '25%', '50%', '75%',
'RMSE ({})'.format(testset), 'RMSE (all)',
]
stats = stats[first_columns]
# Rename columns (50% to median and ... )
cols2rename = {
'50%': 'median', 'std': 'std. dev.',
'RMSE ({})'.format(testset): 'RMSE (withheld)'
}
stats.rename(columns=cols2rename, inplace=True)
# Only select params of interest
stats = stats.T[rename_titles.values()].T
# Rename
stats.rename(index=rename_titles, inplace=True)
# Set filename and save detail on models
csv_name = 'Oi_prj_point_for_point_comp4tabale.csv'
stats.round(1).to_csv(csv_name) | 3,810 |
def counts_matrix(x, quantiles):
"""Count samples in strata
Get eta, the number of samples in ``x`` binned by ``quantiles`` in each
variable, for continuous variables. The shape of eta is the same as the
shape of ``x``, and the shape of ``quantiles`` should be
(``numpy.shape(x)[0] + 1``, ``numpy.shape(x)[1]``) for 2D, or
(``numpy.size(x) + 1``,) for 1D
Parameters
----------
x : :class:`numpy.ndarray` (Nx,) or (Nx, Npredictors)
The sampled predictors, with observations as rows and predictors (if
more than 1) as columns
quantiles : :class:`numpy.ndarray` (Nx + 1,) or (Nx + 1, Npredictors)
The quantiles which mark the edge of strata. The 0th axis must be
one element longer than the 0th axis of ``x``
Returns
-------
eta : :class:`numpy.ndarray`[``int``] (Nx,) or (Nx, Npredictors)
The matrix of counts in strata, with the same shape as ``x``
"""
if np.ndim(quantiles) == 1:
eta = np.histogram(np.squeeze(x), bins=quantiles)[0].astype(int)
else:
eta = np.array([
np.histogram(xj, bins=qj)[0].astype(int) for xj, qj in zip(
np.asarray(x).T, np.asarray(quantiles).T)]).T
return eta | 3,811 |
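A 1D worked example of the binning, with hand-picked quantile edges (one element longer than `x`, as the docstring requires):
import numpy as np

x = np.array([0.1, 0.4, 0.6, 0.9])
quantiles = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
eta = counts_matrix(x, quantiles)
# eta -> array([1, 1, 1, 1]): one sample lands in each stratum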
def get_xml_string(stream_pointer):
""" This function checks for valid xml in a stream
and skips bytes until it hits something that looks like
xml. In general, this 'skipping' should never be used, as
we expect to see well-formed XML from the server.
stream_pointer: input stream
returns: string of xml
"""
    # This function avoids stream_pointer.seek() for the vast majority
    # of cases (when the xml is formatted correctly), just because I don't
    # like using 'seek' (you never know when you're getting non-rewindable
    # streams).
c = stream_pointer.read(1)
count = 0
while c != '<' and c != '':
count = count + 1
c = stream_pointer.read(1)
if c == '':
stream_pointer.seek(0)
logging.error("Poorly formatted schema - no '<' found", \
extra={'xml':stream_pointer.read()})
return
xml_string = "<" + stream_pointer.read()
if count > 0:
stream_pointer.seek(0)
logging.error("Poorly formatted schema", \
extra={'xml':stream_pointer.read()})
return xml_string | 3,812 |
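A usage sketch with an in-memory stream (standard library `io`); a real caller would pass the server response stream instead:
import io

stream = io.StringIO("  \n<schema><element/></schema>")
xml = get_xml_string(stream)
# xml == "<schema><element/></schema>"; the three skipped junk characters also
# trigger the "Poorly formatted schema" error log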
def score_estimator(estimator, df_test):
"""Score an estimator on the test set."""
y_pred = estimator.predict(df_test)
print(
"MSE: %.3f"
% mean_squared_error(
df_test["Frequency"], y_pred, sample_weight=df_test["Exposure"]
)
)
print(
"MAE: %.3f"
% mean_absolute_error(
df_test["Frequency"], y_pred, sample_weight=df_test["Exposure"]
)
)
# Ignore non-positive predictions, as they are invalid for
# the Poisson deviance.
mask = y_pred > 0
if (~mask).any():
n_masked, n_samples = (~mask).sum(), mask.shape[0]
print(
"WARNING: Estimator yields invalid, non-positive predictions "
f" for {n_masked} samples out of {n_samples}. These predictions "
"are ignored when computing the Poisson deviance."
)
print(
"mean Poisson deviance: %.3f"
% mean_poisson_deviance(
df_test["Frequency"][mask],
y_pred[mask],
sample_weight=df_test["Exposure"][mask],
)
) | 3,813 |
def parse_header(header):
"""Parse header div for pub. title, authors journal, year, and doi."""
# TITLE
title = header.find('h1').text.strip()
# JOURNAL
journal = header.find('button').text.strip()
# PUBLICATION YEAR
pub_date = header.find('span', attrs={'class': "cit"}).text
year = re.search(r"(\d{4}).*?[\.;]", pub_date).group(1)
# DOI
doi_cit = header.find(attrs={'class': "citation-doi"})
doi = doi_cit.text.strip().lstrip("doi: ").rstrip(".") if doi_cit else ""
# AUTHORS
authors = [parse_author(a) for a in header.find_all(
'span', attrs={'class': "authors-list-item"})]
authors = [a for a in authors if a]
return (title, journal, year, doi, authors) | 3,814 |
def parse_arguments():
"""Parse user args
There are three subparsers, one for each mode: full, visit, and moab.
Full mode runs both the visit and moab steps. Each parser should have a
full help message, simplified usage statement, and examples.
"""
mode_examples = """
To view full options for each mode, use 'generate_isogeom MODE -h'.
Example usage:
(1) Run all the steps start to finish (full mode) starting with meshfile
'cw_mesh', scalar data 'wwn', and defining 3 values for the level
information at runtime:
generate_isogeom full cw_mesh wwn -lv 0.1 5.2 12.3
(2) Run just the first step (visit mode), generating logarithmically spaced
levels between 0.1 and 1e+14 and specifying where to write the
generated database:
generate_isogeom visit cw_mesh wwn -gl log -lx 0.1 1e14 -db my_database
(3) Run only the second step (moab mode), using the levelfile and database
        from the visit step, and specifying a file name for the file produced:
generate_isogeom moab -lf my_database/levelfile -db my_database
-g geom1.h5m
"""
mode_description = """
Use this to generate a full isosurface geometry from a starting Cartesian mesh
file containing scalar data using VisIt and MOAB. This tool can be run in three
different modes:
full: run both steps starting from the Cartesian mesh file to produce
a full DAGMC-compliant isosurface geom. This step first runs the visit
step then the moab step.
visit: run only the first step using VisIt. This will generate a database
        of individual mesh isosurfaces from the Cartesian mesh file itself.
moab: run only the second step using MOAB. This will generate a full DAGMC-
compliant isosurface geometry starting from the database generated from
the visit step.
"""
parser = argparse.ArgumentParser(description=mode_description,
usage='generate_isogeom MODE [OPTIONS]',
epilog=mode_examples,
formatter_class=formatter)
subparsers = parser.add_subparsers(title='Modes',
help='Select which steps to run for ' +
'generating the geometry.')
# set full mode options
full_description = """
Start-to-finish generation from a Cartesian mesh file to a DAGMC-compliant
geometry.
Levels information must be provided with either the -lf, -lv, or -gl option.
If using the -gl option (generate levels), then options -lx and -N must also be
provided.
"""
full_usage = \
'generate_isogeom full meshfile dataname [-lf/-lv/-gl] [OPTIONS]'
full_examples = """
Example Usage:
(1) Create an isosurface geometry called 'my_isogeom.h5m' with assigned
level values of 0.1 0.4 and 1.0, and tag the surfaces with data for
            visualization:
generate_isogeom full meshfile my_data -lv 0.1 0.4 1.0
-g my_isogeom.h5m --viz
    (2) Generate a geometry with 5 levels logarithmically spaced between 1e-5 and
        1e+3. Also tag the geometry with two metadata tags called E1 and E2 with
values of 1.0 and 10.0, respectively:
generate_isogeom full meshfile my_data -gl log -lx 1e-5 1e+3 -N 5
-t E1 1.0 -t E2 10.0
(3) Store the generated database in a different folder called 'my_isogeom/'
and read level information from a file called 'levelfile' located in
the current directory:
generate_isogeom full meshfile my_data -lf levelfile -db my_isogeom/
"""
full_parser = subparsers.add_parser('full',
description=full_description,
usage=full_usage,
epilog=full_examples,
formatter_class=formatter)
set_visit_only_options(full_parser)
set_shared_options(full_parser)
set_moab_only_options(full_parser)
full_parser.set_defaults(which='full')
# set visit only mode options
visit_description = """
Only generate the isosurface mesh file database using VisIt.
Levels information must be provided with either the -lf, -lv, or -gl option.
If using the -gl option (generate levels), then options -lx and -N must also be
provided.
"""
visit_usage = \
'generate_isogeom visit meshfile dataname [-lf/-lv/-gl] [OPTIONS]'
visit_examples = """
Example Usage:
(1) Generate a database located at 'my_database/' with assigned
level values of 0.1 0.4 and 1.0:
generate_isogeom visit meshfile my_data -lv 0.1 0.4 1.0
-db my_isogeom/
(2) Generate a database in the default location using levels between 1.0
2e+4 that are spaced with a ratio of 20:
generate_isogeom visit meshfile my_data -gl ratio -lx 1.0 2.e4 -N 20
(3) Generate a database in the default location using 15 levels between 1.0
2e+4 that are spaced logarithmically:
generate_isogeom visit meshfile my_data -gl log -lx 1.0 2.e4 -N 15
(4) Generate a database in a folder called 'my_isogeom/' and read the level
information from a file in the current directory called 'levelfile':
generate_isogeom visit meshfile my_data -lf levelfile -db my_isogeom/
"""
visit_parser = subparsers.add_parser('visit',
description=visit_description,
usage=visit_usage,
epilog=visit_examples,
formatter_class=formatter)
set_visit_only_options(visit_parser)
set_shared_options(visit_parser)
visit_parser.set_defaults(which='visit')
# set moab only mode options
moab_description = """
Only generate the DAGMC-compliant geometry with MOAB starting from the VisIt
mesh file database.
Levels information must be provided with either the -lf or -lv option.
"""
moab_usage = 'generate_isogeom moab dataname [-lf/-lv] [OPTIONS]'
moab_examples = """
Example Usage:
(1) Create an isosurface geometry called 'my_isogeom.h5m' with assigned
level values of 0.1 0.4 and 1.0, and tag the surfaces with data for
            visualization (assume default database location):
generate_isogeom moab -lv 0.1 0.4 1.0 -g my_isogeom.h5m --viz
(2) Generate a geometry from a database located in 'my_isogeom/', read the
        level info from a file called 'levelinfo', multiply all data by a
factor of 2e4, and save the file as 'my_isogeom.vtk' in a new folder
called 'output_folder/':
generate_isogeom moab -db my_isogeom/ -lf levelinfo -n 2e4
-g my_isogeom.vtk -sp output_folder/
(3) Generate a geometry from a database in the default location, read
levels from a file called 'levelfile' located in the database, tag the
        geometry with two metadata tags called E1 and E2 with values of 1.0 and
        10.0, respectively, and tag the geometry with the level information for
        visualization:
generate_isogeom moab -lf tmp/levelfile -t E1 1.0 -t E2 10.0 -v
"""
moab_parser = subparsers.add_parser('moab',
description=moab_description,
usage=moab_usage,
epilog=moab_examples,
formatter_class=formatter)
set_shared_options(moab_parser, moab=True)
set_moab_only_options(moab_parser)
moab_parser.set_defaults(which='moab')
args = parser.parse_args()
return args | 3,815 |
def put_this_into_the_db(query, param):
"""put this value into the database
see : find_by_exactly_this_query()
Arguments:
query {[type]} -- [description]
param {[type]} -- [description]
Returns:
bool -- [description]
"""
# Connect to the database
connection = pymysql.connect(host='localhost',
user='root',
password='(drElizabeth)',
db='communications',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
# Create a new record
sql = query
cursor.execute(sql, param)
# connection is not autocommit by default. So you must commit to save
# your changes.
connection.commit()
except Exception as e:
print(e)
connection.close()
return False
connection.close()
return True | 3,816 |
def _gen_new_aux_page(label: str, is_title: bool) -> str:
"""Generate latex for auxillary pages"""
page = []
if is_title:
page.append("\\thispagestyle{empty}")
page.append("\\begin{center}")
page.append("\t\\vfil")
page.append("\t\\vspace*{0.4\\textheight}\n")
page.append("\t\\Huge")
page.append(f"\t\\bf{{{label}}}\n")
page.append("\t\\normalsize")
page.append("\\end{center}")
return "\n".join(page) | 3,817 |
def expand_matrix_col(matrix, max_size, actual_size):
"""
add columns of zeros to the right of the matrix
"""
return np.append(matrix,
np.zeros((matrix.shape[0], max_size - actual_size), dtype=matrix.dtype), axis=1) | 3,818 |
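A quick shape check for the padding helper:
import numpy as np

m = np.ones((2, 3), dtype=int)
padded = expand_matrix_col(m, max_size=5, actual_size=3)
# padded.shape == (2, 5); the two appended columns are zeros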
def vms_list(access_token, config_id):
"""List FlexVM Virtual Machines"""
logging.info("--> List FlexVM Virtual Machines...")
uri = FLEXVM_API_BASE_URI + "vms/list"
headers = COMMON_HEADERS.copy()
headers["Authorization"] = f"Bearer {access_token}"
body = {"configId": config_id}
results = requests_post(uri, body, headers)
return results | 3,819 |
def add(name, value):
"""
Add a value to an ipset.
"""
futils.check_call(["ipset", "add", name, value, "-exist"]) | 3,820 |
def formulate_hvdc_flow(problem: LpProblem, angles, Pinj, rates, active, Pt, control_mode, dispatchable, r, F, T,
logger: Logger = Logger(), inf=999999):
"""
:param problem:
:param nc:
:param angles:
:param Pinj:
:param t:
:param logger:
:param inf:
:return:
"""
nhvdc, nt = rates.shape
flow_f = np.zeros((nhvdc, nt), dtype=object)
overload1 = np.zeros((nhvdc, nt), dtype=object)
overload2 = np.zeros((nhvdc, nt), dtype=object)
hvdc_control1 = np.zeros((nhvdc, nt), dtype=object)
hvdc_control2 = np.zeros((nhvdc, nt), dtype=object)
for t, i in product(range(nt), range(nhvdc)):
if active[i, t]:
_f = F[i]
_t = T[i]
hvdc_control1[i, t] = LpVariable('hvdc_control1_{0}_{1}'.format(i, t), 0, inf)
hvdc_control2[i, t] = LpVariable('hvdc_control2_{0}_{1}'.format(i, t), 0, inf)
P0 = Pt[i, t]
if control_mode[i] == HvdcControlType.type_0_free:
if rates[i, t] <= 0:
logger.add_error('Rate = 0', 'HVDC:{0} t:{1}'.format(i, t), rates[i, t])
# formulate the hvdc flow as an AC line equivalent
bk = 1.0 / r[i] # TODO: yes, I know... DC...
flow_f[i, t] = P0 + bk * (angles[_f, t] - angles[_t, t]) + hvdc_control1[i, t] - hvdc_control2[i, t]
# add the injections matching the flow
Pinj[_f, t] -= flow_f[i, t]
Pinj[_t, t] += flow_f[i, t]
# rating restriction in the sense from-to: eq.17
overload1[i, t] = LpVariable('overload_hvdc1_{0}_{1}'.format(i, t), 0, inf)
problem.add(flow_f[i, t] <= (rates[i, t] + overload1[i, t]), "hvdc_ft_rating_{0}_{1}".format(i, t))
# rating restriction in the sense to-from: eq.18
overload2[i, t] = LpVariable('overload_hvdc2_{0}_{1}'.format(i, t), 0, inf)
problem.add((-rates[i, t] - overload2[i, t]) <= flow_f[i, t], "hvdc_tf_rating_{0}_{1}".format(i, t))
elif control_mode[i] == HvdcControlType.type_1_Pset and not dispatchable[i]:
# simple injections model: The power is set by the user
flow_f[i, t] = P0 + hvdc_control1[i, t] - hvdc_control2[i, t]
Pinj[_f, t] -= flow_f[i, t]
Pinj[_t, t] += flow_f[i, t]
elif control_mode[i] == HvdcControlType.type_1_Pset and dispatchable[i]:
# simple injections model, the power is a variable and it is optimized
P0 = LpVariable('hvdc_pf_{0}_{1}'.format(i, t), -rates[i, t], rates[i, t])
flow_f[i, t] = P0 + hvdc_control1[i, t] - hvdc_control2[i, t]
Pinj[_f, t] -= flow_f[i, t]
Pinj[_t, t] += flow_f[i, t]
return flow_f, overload1, overload2, hvdc_control1, hvdc_control2 | 3,821 |
def method_mock(cls, method_name, request):
"""
Return a mock for method *method_name* on *cls* where the patch is
reversed after pytest uses it.
"""
_patch = patch.object(cls, method_name)
request.addfinalizer(_patch.stop)
return _patch.start() | 3,822 |
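This helper is typically wrapped in a pytest fixture; a sketch, assuming `method_mock` and `unittest.mock.patch` are importable as above, with a throwaway `Thing` class standing in for the class under test:
import pytest

class Thing:
    def save(self):
        ...

@pytest.fixture
def save_(request):
    # Thing.save is patched for one test and restored by the finalizer
    return method_mock(Thing, "save", request)

def test_save_is_called(save_):
    Thing().save()
    save_.assert_called_once_with()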
def get_eps_float32():
"""Return the epsilon value for a 32 bit float.
Returns
-------
_ : np.float32
Epsilon value.
"""
return np.finfo(np.float32).eps | 3,823 |
def distributions_to_params(nest):
"""Convert distributions to its parameters, keep Tensors unchanged.
Only returns parameters that have tf.Tensor values.
Args:
nest (nested Distribution and Tensor): Each Distribution will be
converted to dictionary of its Tensor parameters.
Returns:
A nest of Tensor/Distribution parameters. Each leaf is a Tensor or a
dict corresponding to one distribution, with keys as parameter name and
values as tensors containing parameter values.
"""
def _to_params(dist_or_tensor):
if isinstance(dist_or_tensor, tfp.distributions.Distribution):
params = dist_or_tensor.parameters
return {
k: params[k]
for k in params if isinstance(params[k], tf.Tensor)
}
elif isinstance(dist_or_tensor, tf.Tensor):
return dist_or_tensor
else:
raise ValueError(
"Only Tensor or Distribution is allowed in nest, ",
"got %s. nest is %s" % (dist_or_tensor, nest))
return tf.nest.map_structure(_to_params, nest) | 3,824 |
def _return_xarray_system_ids(xarrs: dict):
"""
Return the system ids for the given xarray object
Parameters
----------
xarrs
Dataset or DataArray that we want the sectors from
Returns
-------
list
system identifiers as string within a list
"""
return list(xarrs.keys()) | 3,825 |
def sub(a, b):
"""Subtracts b from a and stores the result in a."""
return "{b} {a} ?+1\n".format(a=a, b=b) | 3,826 |
def metrics_cluster(models = None, ytrain = None, ytest = None,
testlabels = None,
trainlabels = None,
Xtrain = None, Xtest = None):
"""
Calculates Metrics such as accuracy, balanced accuracy,
specificity, sensitivity, precision, True Positives,
True Negatives etc.
These metrics are calculated for each cluster:
models: predictive models trained in each cluster
ytrain: Target labels of training set
ytest: target labels of test set
    testlabels: an array with values from 0 to c-1 (c = number of clusters)
               indicating which cluster each data point of the test set
               belongs to
    trainlabels: the same as testlabels but for the training data
    Xtrain: training data
Xtest: testing data
"""
# matrix with metrics for each cluster
metricsTrain = []
#metrics for test data in each cluster
metricsTest = []
columns = ['cluster', 'size', 'high_cost%','low_cost%',
'TP', 'TN', 'FP', 'FN',
'FPR', 'specificity', 'sensitivity', 'precision',
'accuracy', 'balanced accuracy', 'f1', 'auc']
#Calculate the Metrics for Each Cluster
for cluster in np.arange( len( models ) ):
#INDEXES OF CLUSTER "cluster"
inC = np.where( trainlabels == cluster )[0]
inCT = np.where( testlabels == cluster )[0]
#predict probabilities of data in cluster "cluster"
#to be 1
probTrain = models[cluster].predict_proba(Xtrain[inC])[:, 1]
probTest = models[cluster].predict_proba(Xtest[inCT])[:, 1]
#calculate optimal tau based on F1
try:
tau = optimalTau(probTrain, ytrain[inC])
except:
tau = 0.5
print(" Warning tau setted to 0.5 due to error(s) \
in <<optimalTau>> function" )
#CALCULATE METRICS : ACCURACY, RECALL, PRECISION ,
#BALANCED ACCURACY ETC
metTrain , _= calc_metrics( custom_prob = probTrain,
y = ytrain[inC],
cluster = cluster,
tau = tau )
metTest, _ = calc_metrics( custom_prob = probTest,
y = ytest[inCT],
cluster = cluster,
tau = tau)
metricsTrain.append( metTrain )
metricsTest.append( metTest )
#Create a dataframe with metrics for better Visualization
metricsTrain = pd.DataFrame ( metricsTrain, columns = columns )
metricsTest = pd.DataFrame( metricsTest, columns = columns )
return metricsTrain, metricsTest | 3,827 |
def prune_cloud_borders (numpy_cloud, clearance=1.2 ):
"""Delete points at the clouds' borders in range of distance, restricting the x-y plane (ground)"""
# get min/max of cloud
cloud_max_x = np.max (numpy_cloud[:, 0])
cloud_min_x = np.min (numpy_cloud[:, 0])
cloud_max_y = np.max (numpy_cloud[:, 1])
cloud_min_y = np.min (numpy_cloud[:, 1])
# define 4 borders
borders = [cloud_max_x - clearance, cloud_min_x + clearance,
cloud_max_y - clearance, cloud_min_y + clearance]
# index all points within borders
numpy_cloud = numpy_cloud[numpy_cloud[:, 0] < borders[0]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 0] > borders[1]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 1] < borders[2]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 1] > borders[3]]
return numpy_cloud | 3,828 |
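A small sanity check, with points given as (x, y, z) rows:
import numpy as np

cloud = np.array([[0.0, 0.0, 1.0],
                  [5.0, 5.0, 1.0],
                  [10.0, 10.0, 1.0]])
inner = prune_cloud_borders(cloud, clearance=1.2)
# only the (5, 5, 1) point survives; the other two sit within 1.2 units
# of the cloud's x/y extremes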
def factory(name, Base, Deriveds):
"""Find the base or derived class by registered name.
Parameters
----------
Base: class
Start the lookup here.
Deriveds: iterable of (name, class)
A list of derived classes with their names.
Returns
-------
class
"""
Derived = Base
for (nm, NmCl) in Deriveds:
if nm == name:
Derived = NmCl
break
return Derived | 3,829 |
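A minimal illustration with throwaway classes (the names here are made up):
class Base:
    pass

class Special(Base):
    pass

registry = [("special", Special), ("other", Base)]
assert factory("special", Base, registry) is Special
assert factory("unknown", Base, registry) is Base   # unknown names fall back to Base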
def test_nsxt_ip_blocks_state_module(nsxt_config, salt_call_cli):
"""
Tests NSX-T IP Blocks State module to verify the present and absent state
in NSX-T Manager
"""
hostname, username, password = _get_server_info(nsxt_config)
display_name = "IP_Block_Salt_State_FT"
description = "Created from IP Block Salt State FT"
# Test present to create IP Address Block
cidr = "192.168.0.1/24"
changes, comment = _execute_present_state(
hostname, username, password, salt_call_cli, display_name, description, cidr
)
assert dict(changes)["old"] is None
assert dict(changes)["new"]["display_name"] == display_name
assert dict(changes)["new"]["description"] == description
assert comment == "Created IP Block {}".format(display_name)
# Test present to update with identical fields
changes, comment = _execute_present_state(
hostname, username, password, salt_call_cli, display_name, description, cidr
)
assert not changes
assert comment == "IP Address Block exists already, no action to perform"
# Test present to update with updated description
updated_description = "Updated from IP Block Salt State FT"
updated_cidr = "192.168.0.2/24"
changes, comment = _execute_present_state(
hostname, username, password, salt_call_cli, display_name, updated_description, updated_cidr
)
assert dict(changes)["old"]["description"] == description
assert dict(changes)["old"]["cidr"] == cidr
assert dict(changes)["new"]["description"] == updated_description
assert dict(changes)["new"]["cidr"] == updated_cidr
assert comment == "Updated IP Block {}".format(display_name)
# Test absent to delete IP Address Block
changes, comment = _execute_absent_state(
hostname, username, password, salt_call_cli, display_name
)
assert dict(changes)["new"] is None
assert dict(changes)["old"]["display_name"] == display_name
assert dict(changes)["old"]["description"] == updated_description
assert comment == "Deleted IP Block {}".format(display_name)
# Test absent to delete non existing IP Address Block
changes, comment = _execute_absent_state(
hostname, username, password, salt_call_cli, display_name
)
assert not changes
assert comment == "No IP Address Block found with name {}".format(display_name) | 3,830 |
def ex12():
""" Collect principal, rate, and term from the user
Print the principal plus interest
"""
while True:
try:
principal = decimal.Decimal(input('Enter the principal: '))
break
except decimal.InvalidOperation:
print('Enter a valid principal')
while True:
try:
rate = decimal.Decimal(input('Enter the rate of interest: '))
break
except decimal.InvalidOperation:
print('Enter a valid rate of interest')
while True:
try:
term = int(input('Enter the number of years: '))
term = decimal.Decimal(term)
break
except ValueError:
print('Enter a valid number of years')
amount = principal * (1 + ((rate / 100) * term))
print('After {} years at {}% your investment will'.format(term, rate))
print('be worth ${0:.2f}'.format(amount)) | 3,831 |
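The formula above is simple (non-compounded) interest; a worked example with hand-picked numbers:
# principal = 1500, rate = 4.3 %, term = 4 years
# amount = 1500 * (1 + (4.3 / 100) * 4) = 1500 * 1.172 = 1758.00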
def root_sum_square(values, ax_val, index, Nper, is_aper, is_phys, unit):
"""Returns the root sum square (arithmetic or integral) of values along given axis
Parameters
----------
    values: ndarray
        array to process
    ax_val: ndarray
        axis values
    index: int
        index of the axis along which to operate
    Nper: int
        number of periods to replicate
    is_aper: bool
        True if values is anti-periodic along axis
    is_phys: bool
        True if physical quantity (time/angle/z)
    unit: str
        unit of values ("dB"/"dBA" values are summed rather than integrated)
Returns
-------
values: ndarray
root sum square of values
"""
# To sum dB or dBA
if "dB" in unit:
return my_sum(values, index, Nper, is_aper, unit)
else:
if is_aper and Nper is not None:
# Remove anti-periodicity since values is squared
is_aper = False
if ax_val.size == 1: # Do not use integrate for single point axes
is_phys = False
if is_phys:
values = integrate(values ** 2, ax_val, index, Nper, is_aper, is_phys)
else:
values = my_sum(values ** 2, index, Nper, is_aper, unit)
return np.sqrt(values) | 3,832 |
def comment_on_tweet():
""""
http://127.0.0.1:5000/user/comment_on_tweet
body = {
"id": "5da61dbed78b3b2b10a53582",
"comments" : {
"commenter" : "[email protected]",
"comment" : "comments against tweet : 7"
}
}
"""
data = request.get_json()
tweet_id = data['id']
    record = tweetDB.find_one({'_id': ObjectId(tweet_id)})  # find_one returns None when no match
if record is None:
return json.dumps({'error': "No collaborations to update matched id"})
else:
try:
if 'comments' in data and isinstance(data['comments'], object):
result = tweetDB.update(
{"_id": ObjectId(tweet_id)},
{
'$push': {
"comments": data['comments']
}
}
)
return json.dumps({"success": True})
except Exception as e:
return json.dumps({"error": "Exception found"}) | 3,833 |
def generate_git_api_header(event, sig):
"""
Create header for GitHub API Request, based on header information from https://developer.github.com/webhooks/.
:param event: Name of the event type that triggered the delivery.
:param sig: The HMAC hex digest of the response body. The HMAC hex digest is generated
using the sha1 hash function and the secret as the HMAC key.
"""
return Headers([
('X-GitHub-Event', event),
('X-GitHub-Delivery', "72d3162e-cc78-11e3-81ab-4c9367dc0958"),
('X-Hub-Signature', f"sha1={sig}"),
('User-Agent', "GitHub-Hookshot/044aadd"),
('Content-Type', "application/json"),
('Content-Length', 6615)
]) | 3,834 |
def var_gaussian(r, level=5, modified=False):
"""
    Returns the parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0)) | 3,835 |
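A usage sketch, assuming `r` is a pandas Series of periodic returns and that the module's `skewness` and `kurtosis` helpers are available for the Cornish-Fisher branch:
import pandas as pd

returns = pd.Series([0.01, -0.02, 0.015, -0.005, 0.03])
var_95 = var_gaussian(returns, level=5)                      # plain Gaussian VaR
var_95_cf = var_gaussian(returns, level=5, modified=True)    # Cornish-Fisher adjusted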
def lnglat_to_tile(lon, lat, zoom):
"""Get the tile which contains longitude and latitude.
:param lon: longitude
:param lat: latitude
:param zoom: zoom level
:return: tile tuple
"""
lon, lat = truncate(lon, lat)
n = 1 << zoom
tx = int((lon + 180.0) / 360.0 * n)
ty = int((1.0 - math.asinh(math.tan(math.radians(lat))) / math.pi) / 2.0 * n)
return Tile(tx, ty, zoom) | 3,836 |
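A worked example at zoom 1, assuming `truncate` clamps the coordinates and `Tile` is a simple namedtuple-style container:
# lon = 0, lat = 0, zoom = 1  ->  n = 2
# tx = int((0 + 180) / 360 * 2) = 1
# ty = int((1 - asinh(tan(0)) / pi) / 2 * 2) = 1
# so lnglat_to_tile(0, 0, 1) == Tile(1, 1, 1), the south-east tile of the 2x2 grid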
def group_obs_annotation(
adata: AnnData,
gdata: AnnData,
*,
groups: Union[str, ut.Vector],
name: str,
formatter: Optional[Callable[[Any], Any]] = None,
method: str = "majority",
min_value_fraction: float = 0.5,
conflict: Optional[Any] = None,
inplace: bool = True,
) -> Optional[ut.PandasSeries]:
"""
Transfer per-observation data from the per-observation (cell) ``adata`` to the
per-group-of-observations (metacells) ``gdata``.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes, and the
``gdata`` containing the per-metacells summed data.
**Returns**
Observations (Cell) Annotations
``<name>``
The per-group-observation annotation computed based on the per-observation annotation.
If ``inplace`` (default: {inplace}), this is written to the ``gdata``, and the function returns
``None``. Otherwise this is returned as a pandas series (indexed by the group observation
names).
**Computation Parameters**
1. Iterate on all the observations (groups, metacells) in ``gdata``.
2. Consider all the cells whose ``groups`` annotation maps them into this group.
3. Consider all the ``name`` annotation values of these cells.
4. Compute an annotation value for the whole group of cells using the ``method``. Supported
methods are:
``unique``
All the values of all the cells in the group are expected to be the same, use this
unique value for the whole groups.
``majority``
Use the most common value across all cells in the group as the value for the whole
group. If this value doesn't have at least ``min_value_fraction`` (default:
{min_value_fraction}) of the cells, use the ``conflict`` (default: {conflict}) value
instead.
"""
group_of_cells = ut.get_o_numpy(adata, groups, formatter=ut.groups_description)
values_of_cells = ut.get_o_numpy(adata, name, formatter=formatter)
value_of_groups = np.empty(gdata.n_obs, dtype=values_of_cells.dtype)
assert method in ("unique", "majority")
if method == "unique":
with ut.timed_step(".unique"):
value_of_groups[group_of_cells] = values_of_cells
else:
assert method == "majority"
with ut.timed_step(".majority"):
for group_index in range(gdata.n_obs):
cells_mask = group_of_cells == group_index
cells_count = np.sum(cells_mask)
assert cells_count > 0
values_of_cells_of_group = values_of_cells[cells_mask]
unique_values_of_group, unique_counts_of_group = np.unique(values_of_cells_of_group, return_counts=True)
majority_index = np.argmax(unique_counts_of_group)
majority_count = unique_counts_of_group[majority_index]
if majority_count / cells_count < min_value_fraction:
value_of_groups[group_index] = conflict
else:
majority_value = unique_values_of_group[majority_index]
value_of_groups[group_index] = majority_value
if inplace:
ut.set_o_data(gdata, name, value_of_groups)
return None
return ut.to_pandas_series(value_of_groups, index=gdata.obs_names) | 3,837 |
def test_python_module_ctia_positive_attack_pattern(
module_headers, module_tool_client):
"""Perform testing for attack pattern entity of custom threat intelligence
python module
ID: CCTRI-160-86d8f8ef-fbf4-4bf4-88c2-a57f4fe6b866
Steps:
1. Send POST request to create new attack pattern entity using custom
python module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Update attack pattern entity using custom python module
6. Repeat GET request using python module and validate that entity was
updated
7. Delete entity from the system
    Expected results: Attack pattern entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same no matter you access it directly or using our tool
Importance: Critical
"""
attack_pattern = module_tool_client.private_intel.attack_pattern
payload = {
'description': (
'A bootkit is a malware variant that modifies the boot sectors of'
' a hard drive'
),
'name': 'Bootkit',
'schema_version': SERVER_VERSION,
'type': 'attack-pattern'
}
# Create new entity using provided payload
post_tool_response = attack_pattern.post(payload=payload,
params={'wait_for': 'true'})
values = {
key: post_tool_response[key] for key in [
'description',
'name',
'schema_version',
'type'
]
}
assert values == payload
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request return same data for direct access and access
# through custom python module
get_tool_response = attack_pattern.get(entity_id)
get_direct_response = ctia_get_data(
target_url=ATTACK_PATTERN,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Update entity values
put_tool_response = delayed_return(
attack_pattern.put(
id_=entity_id,
payload={
'name': 'Worm',
'description': (
'A standalone malware that replicates itself in order to'
' spread to other computers'
)
}
)
)
assert put_tool_response['name'] == 'Worm'
get_tool_response = attack_pattern.get(entity_id)
assert get_tool_response['name'] == 'Worm'
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(attack_pattern.delete(entity_id))
with pytest.raises(HTTPError):
attack_pattern.get(entity_id) | 3,838 |
def load_pipeline(path, tunables=True, defaults=True):
"""Load a d3m json or yaml pipeline."""
if not os.path.exists(path):
base_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join('templates', path)
path = os.path.join(base_path, path)
if not os.path.isfile(path):
raise ValueError('Could not find pipeline: {}'.format(path))
LOGGER.warn('Loading pipeline from %s', path)
with open(path) as pipeline:
if path.endswith('yml'):
data = yaml.safe_load(pipeline)
else:
data = json.load(pipeline)
pipeline = Pipeline.from_json_structure(data)
if tunables:
# extract tunable hyperparameters
tunable_hyperparameters = extract_tunable_hyperparams(pipeline)
return pipeline, tunable_hyperparameters
return pipeline | 3,839 |
def generate_languages(request):
"""
Returns the languages list.
"""
validate_api_secret_key(request.data.get('app_key'))
request_serializer = GenerateLanguagesRequest(data=request.data)
if request_serializer.is_valid():
get_object_or_404(TheUser, auth_token=request.data.get('user_token'))
list_of_languages = Language.objects.all()
return Response({'detail': 'successful',
'data': [language.language for language in list_of_languages]},
status=status.HTTP_200_OK)
else:
return invalid_data_response(request_serializer) | 3,840 |
def all_stocks():
"""
#查询当前所有正常上市交易的股票列表
:return:
"""
data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
return data["symbol"].values | 3,841 |
def route_transfer(host,route):
""" Save a certain route as static HTML file to production """
path = root # default path
text = urllib2.urlopen(host+route).read() # grab html codes from route
# format html code and fix css/js/anchor for static file
soup = BeautifulSoup(text).prettify()
anchors = re.compile(r'<a href="/[a-zA-Z0-9/]*"') #/ at 9
for anchor in anchors.findall(soup):
if anchor[10:-1] not in routes:
continue
if anchor[10:-1] == '':
soup = soup.replace(anchor,
(anchor[:9]+anchor[10:-1]+'index.html"'))
else:
soup = soup.replace(anchor,
(anchor[:9]+anchor[10:-1]+'.html"'))
# for '/' route, save as 'index.html'
filename = path+'index.html' if not route else path+route+'.html'
try:
html = open(filename,'w')
except: # create directory if doesn't exist
newpath = filename.split('/')
os.makedirs('/'.join(newpath[:-1]))
html = open(filename,'w')
html.write(unicode(soup).encode('utf-8')) # appropriate encode for saving
html.close() | 3,842 |
def getBotHash(userID, isCompile=False):
"""Gets the checksum of a user's bot's zipped source code"""
params = {"apiKey": API_KEY, "userID": userID}
if isCompile:
params["compile"] = 1
result = requests.get(MANAGER_URL+"botHash", params=params)
print("Getting bot hash:")
print(result.text)
return json.loads(result.text).get("hash") | 3,843 |
def next_remote_buffer_uuid(number=1):
"""Return the next uuid of a remote buffer."""
global remote_buffer_counter
if number == 1:
ret = remote_buffer_counter
else:
ret = np.arange(remote_buffer_counter, remote_buffer_counter + number)
remote_buffer_counter = (remote_buffer_counter + number) % (1 << 60)
return ret | 3,844 |
def ball_collide(i):
"""
This function will handle the ball collide interaction between brick and paddle
:param i: (int) The index of the ball to interact
:return: (Bool) If this ball collide with brick or paddle
"""
global score
collide = False
for j in range(2):
for k in range(2):
object_get = graphics.window.get_object_at(graphics.ball[i].x + graphics.ball[i].width * j,
graphics.ball[i].y + graphics.ball[i].height * k)
if object_get in graphics.brick:
# brick lose life when being hit by ball
index = graphics.brick.index(object_get)
graphics.brick_collide(index)
score += 1
collide = True
elif object_get is graphics.paddle:
collide = True
return collide | 3,845 |
def gen_sparse_graph(destination_folder: Path,
vertices_number: int,
edge_probability: float) -> Path:
"""
Generates sparse graph
:param destination_folder: directory to save the graph
:type destination_folder: Path
:param vertices_number: number of vertices in the graph
:type vertices_number: int
:param edge_probability: probability of edge existence in the graph
:type edge_probability: float
:return: path to generated graph
:rtype: Path
"""
tmp_graph = nx.generators.fast_gnp_random_graph(vertices_number, edge_probability)
output_graph = rdflib.Graph()
edges = list()
for v, to in tmp_graph.edges():
edges.append((v, 'A', to))
edges.append((v, 'AR', to))
for subj, pred, obj in tqdm(
edges,
desc=f'G{vertices_number}-{edge_probability} generation'
):
add_rdf_edge(subj, pred, obj, output_graph)
target = destination_folder / f'G{vertices_number}-{edge_probability}.xml'
write_to_rdf(target, output_graph)
return target | 3,846 |
def multivariate_hierarchical_barycentric_lagrange_interpolation(
x,
abscissa_1d,
barycentric_weights_1d,
fn_vals,
active_dims,
active_abscissa_indices_1d):
"""
Parameters
----------
x : np.ndarray (num_vars, num_samples)
The samples at which to evaluate the interpolant
abscissa_1d : [np.ndarray]
List of interpolation nodes in each active dimension. Each array
has ndim==1
barycentric_weights_1d : [np.ndarray]
List of barycentric weights in each active dimension, corresponding to
each of the interpolation nodes. Each array has ndim==1
fn_vals : np.ndarray (num_samples, num_qoi)
The function values at each of the interpolation nodes
Each column is a flattened array that assumes the nodes
were created with the same ordering as generated by
the function cartesian_product.
if active_abscissa_1d is not None the fn_vals must be same size as
the tensor product of the active_abscissa_1d.
Warning: Python code takes fn_vals as num_samples x num_qoi
but c++ code takes num_qoi x num_samples. Todo change c++ code
also look at c++ code to compute barycentric weights. min() on line 154
seems to have no effect.
active_dims : np.ndarray (num_active_dims)
The dimensions which have more than one interpolation node. TODO
check if this can be simply extracted in this function by looking
at abscissa_1d.
active_abscissa_indices_1d : [np.ndarray]
The list (over each dimension) of indices for which we will compute
barycentric basis functions. This is useful when used with
        hierarchical interpolation where the function values will be zero
at some nodes and thus there is no need to compute associated basis
functions
Returns
-------
result : np.ndarray (num_samples,num_qoi)
The values of the interpolant at the samples x
"""
num_act_dims = active_dims.shape[0]
num_abscissa_1d, num_active_abscissa_1d, shifts, abscissa_and_weights, \
active_abscissa_indices_1d = \
barycentric_lagrange_interpolation_precompute(
num_act_dims, abscissa_1d, barycentric_weights_1d,
active_abscissa_indices_1d)
try:
from pyapprox.cython.barycentric_interpolation import \
multivariate_hierarchical_barycentric_lagrange_interpolation_pyx
result = \
multivariate_hierarchical_barycentric_lagrange_interpolation_pyx(
x, fn_vals, active_dims,
active_abscissa_indices_1d.astype(np.int_),
num_abscissa_1d.astype(np.int_),
num_active_abscissa_1d.astype(np.int_),
shifts.astype(np.int_), abscissa_and_weights)
if np.any(np.isnan(result)):
raise ValueError('Error values not finite')
except (ImportError, ModuleNotFoundError) as e:
msg = 'multivariate_hierarchical_barycentric_lagrange_interpolation extension failed'
trace_error_with_msg(msg, e)
result = __multivariate_hierarchical_barycentric_lagrange_interpolation(
x, abscissa_1d, fn_vals, active_dims, active_abscissa_indices_1d,
num_abscissa_1d, num_active_abscissa_1d, shifts,
abscissa_and_weights)
return result | 3,847 |
def deaths(path):
"""Monthly Deaths from Lung Diseases in the UK
A time series giving the monthly deaths from bronchitis, emphysema and
asthma in the UK, 1974-1979, both sexes (`deaths`),
P. J. Diggle (1990) *Time Series: A Biostatistical Introduction.*
Oxford, table A.3
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `deaths.csv`.
Returns:
Tuple of np.ndarray `x_train` with 72 rows and 2 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'deaths.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/MASS/deaths.csv'
maybe_download_and_extract(path, url,
save_file_name='deaths.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | 3,848 |
def test_random_affine_exception_negative_degrees():
"""
Test RandomAffine: input degrees in negative, expected to raise ValueError
"""
logger.info("test_random_affine_exception_negative_degrees")
    try:
        _ = py_vision.RandomAffine(degrees=-15)
        assert False, "ValueError was not raised for negative degrees"
    except ValueError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert str(e) == "Input degrees is not within the required interval of (0 to inf)." | 3,849 |
def read_csv(path):
"""Reads the CSV file at the indicated path and returns a list of rows.
Parameters:
path (str): The path to a CSV file.
Returns:
list[row]: A list of rows. Each row is a list of strings and numbers.
"""
with open(path, 'rb') as f:
return decode_csv(f.read()) | 3,850 |
def obj_mask(im):
"""Computes the mask for an image with transparent background
Keyword arguments:
im -- the input image (must be RGBA)
"""
A = im.split()[-1]
T = ImageOps.invert(A)
return Image.merge("RGBA", (T, T, T, A)) | 3,851 |
def main(argv: t.List[str] = sys.argv):
"""Wrapper for pgsql-dump.bash script.
:param argv: Command line arguments, second one needs to be the uri to a configuration file.
:raises sys.SystemExit:
"""
if len(argv) < 2:
usage_message(
argv,
additional_params='[ARG1, ARG2]',
additional_line='All arguments are passed to pg_dump command'
)
config_uri = get_config_uri(argv)
request = init_websauna(config_uri)
# Export all secrets and settings
bash_env = create_settings_env(request.registry)
# subprocess.check_output([DUMP_SCRIPT] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
args = argv[2:]
cmd = [DUMP_SCRIPT] + args
logger.info("Running %s", " ".join(cmd))
with subprocess.Popen(cmd, stdout=subprocess.PIPE, bufsize=1, env=bash_env, universal_newlines=True) as p:
for line in p.stdout:
print(line, end='') | 3,852 |
def rnn(rnn_type, inputs, length, hidden_size, layer_num=1,
dropout_keep_prob=None, concat=True):
"""
Implements (Bi-)LSTM, (Bi-)GRU and (Bi-)RNN
    In this module, rnn is the main interface, so it is placed at the top.
Args:
rnn_type: the type of rnn, such as lstm
inputs: padded inputs into rnn, usually a d*p or l*p matrix
length: the valid length of the inputs,
usually the length of the sentence
hidden_size: the size of hidden units
layer_num: multiple rnn layer are stacked if layer_num > 1
dropout_keep_prob: dropout in RNN
concat: When the rnn is bidirectional, the forward outputs and backward
outputs are concatenated (such as a 2l*p matrix) if this is True,
else we add them (add two matrices).
Returns:
RNN outputs and final state (such as the state of lstm)
"""
if not rnn_type.startswith('bi'):
cell = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
        # Build the cell: it is already expanded along the z and y axes, but not yet unrolled along the x (time) axis
outputs, state = tf.nn.dynamic_rnn(cell, inputs,
sequence_length=length,
dtype=tf.float32)
        # Use dynamic_rnn to unroll the cell along the x (time) axis and feed it the inputs
        # outputs has shape hidden_size*length, state has shape hidden_size*layer_num*2
if rnn_type.endswith('lstm'):
c, h = state
state = h
            # Use the hidden state as the returned state
else: # bidirectional rnn
cell_fw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
# forward cell
cell_bw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
# backward cell
        outputs, state = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs, sequence_length=length, dtype=tf.float32
        )
        # Compared with a unidirectional RNN, a bidirectional RNN doubles the hidden_size dimension
state_fw, state_bw = state
        # First split the state into the forward state and the backward state
if rnn_type.endswith('lstm'):
c_fw, h_fw = state_fw
c_bw, h_bw = state_bw
state_fw, state_bw = h_fw, h_bw
            # For an LSTM, the state we want is the hidden state
if concat:
outputs = tf.concat(outputs, 2)
            # Concatenate the two tensors along the hidden_size dimension
state = tf.concat([state_fw, state_bw], 1)
            # The states are likewise concatenated along the hidden_size dimension
else:
outputs = outputs[0] + outputs[1]
state = state_fw + state_bw
            # Simply add the vectors (tensors) or average them
return outputs, state | 3,853 |
def find_expired(bucket_items, now):
"""
If there are no expired items in the bucket returns
empty list
>>> bucket_items = [('k1', 1), ('k2', 2), ('k3', 3)]
>>> find_expired(bucket_items, 0)
[]
>>> bucket_items
[('k1', 1), ('k2', 2), ('k3', 3)]
Expired items are returned in the list and deleted from
the bucket
>>> find_expired(bucket_items, 2)
['k1']
>>> bucket_items
[('k2', 2), ('k3', 3)]
"""
expired_keys = []
for i in range(len(bucket_items) - 1, -1, -1):
key, expires = bucket_items[i]
if expires < now:
expired_keys.append(key)
del bucket_items[i]
return expired_keys | 3,854 |
def find_sprites(image=None, background_color=None):
""" Find sprites
@image: MUST be an Image object
    @background_color: optional, either a tuple (RGB/RGBA) or an int (grayscale)
"""
def find_sprites_corners(sprite, label_map, numpy_array):
columns = set()
rows = set()
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = label_map[row_index][column_index]
if current_pixel.label == sprite:
columns.add(current_pixel.column)
rows.add(current_pixel.row)
return min(columns), min(rows), max(columns), max(rows)
def collect_sprites(exist_sprites_label, label_map, numpy_array):
""" Return A dictionary with key:the label of a sprite and value:it's Sprite object
"""
sprites = {}
for sprite in exist_sprites_label:
top_left_column, top_left_row, bottom_right_column, bottom_right_row = find_sprites_corners(sprite, label_map, numpy_array)
sprites[sprite] = Sprite(sprite, top_left_column, top_left_row, bottom_right_column, bottom_right_row)
return sprites
def search_exist_sprites_label(pixels_to_sprites):
""" Return a set of exist sprite's label inside the map
"""
exist_sprites = set()
for key in pixels_to_sprites:
exist_sprites.add(pixels_to_sprites[key])
return exist_sprites
def unify_sprites(pixels_to_sprites, unified_matrix, numpy_array):
""" Unify all pixels that are in a same sprite
Return a 2D-array map of sprites
"""
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = pixels_matrix[row_index][column_index]
current_label = current_pixel.label
# Ignore background pixels
if current_label == 0 or current_label not in pixels_to_sprites:
continue
current_pixel.label = pixels_to_sprites[current_label]
return unified_matrix
def analyze_connected_sprites(connected_sprites):
""" Find all pixels that are connected (belong to a same sprite)
Return a dict:
key: pixel'label
value: sprite's label that key belong to
"""
pixels_to_sprites = {}
for key in list(connected_sprites.keys()):
if key not in connected_sprites or len(connected_sprites[key]) == 1:
continue
in_progress = True
old_length = len(connected_sprites[key])
while in_progress:
for value in connected_sprites[key]:
if value not in connected_sprites:
continue
connected_sprites[key] = connected_sprites[key] | connected_sprites[value]
if value in connected_sprites and value != key:
del connected_sprites[value]
if old_length == len(connected_sprites[key]):
in_progress = False
else:
old_length = len(connected_sprites[key])
for key in connected_sprites:
for value in connected_sprites[key]:
pixels_to_sprites[value] = key
return pixels_to_sprites
def is_new_sprite(current_row, current_column, pixels_matrix, background_color):
""" Return False if there is a non-background pixel adjacent to current pixel
Ignores background pixels.
"""
neighbor_coordinates = [(-1, -1), (-1, 0), (-1, 1), (0, -1)]
current_pixel = pixels_matrix[current_row][current_column]
is_new_sprite = True
# Ignore background pixels
if current_pixel.is_background_pixel:
return False
# Check 4 neighbor of current pixels
for coordinate in neighbor_coordinates:
neighbor_row = current_row + coordinate[0]
neighbor_column = current_column + coordinate[1]
if 0 <= neighbor_row < image_height and 0 <= neighbor_column < image_width:
neighbor_pixel = pixels_matrix[neighbor_row][neighbor_column]
if neighbor_pixel.label == 0:
continue
if current_pixel.label != 0 and current_pixel.label != neighbor_pixel.label:
connected_sprites.setdefault(current_pixel.label, set()).add(neighbor_pixel.label)
else:
pixels_matrix[current_row][current_column].label = neighbor_pixel.label
is_new_sprite = False
return is_new_sprite
def is_ignored_pixel(current_pixel, numpy_array):
""" Check if that pixel is considered background pixel
Return False by default
"""
if (background_color == (0,0,0,0) and current_pixel[-1] == 0) or (current_pixel == array(background_color)).all() or (image.mode == "L" and current_pixel == background_color):
return True
return False
def analyze_numpy_array(background_color):
""" Convert image to numpy array then analyze each pixel
@background_color: RGBA or RGB or grayscale formats
Return Maps of pixels under format matrix and numpy array (multi-dimensional)
"""
numpy_array = array(image)
pixels_matrix = zeros(numpy_array.shape, dtype=int).tolist()
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = numpy_array[row_index, column_index]
pixels_matrix[row_index][column_index] = Pixel(row_index, column_index, is_ignored_pixel(current_pixel, numpy_array))
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
if is_new_sprite(row_index, column_index, pixels_matrix, background_color):
new_label = sprites_label[-1] + 1
pixels_matrix[row_index][column_index].label = new_label
sprites_label.append(new_label)
connected_sprites.setdefault(new_label, set()).add(new_label)
return pixels_matrix, numpy_array
def is_valid_background_color():
""" Check if arg @background_color is valid
Return True by default
"""
# Not int or tuple
if type(background_color) not in (int, tuple):
return False
# Invalid grayscale format
if type(background_color) == int:
if not 255 >= background_color >= 0 or image.mode != "L":
return False
# Invalid RGB/ RGBA format
if type(background_color) == tuple:
if len(background_color) not in (3,4) or image.mode == "L":
return False
for element in background_color:
if type(element) != int or not 255 >= element >= 0:
return False
return True
if background_color:
pass
elif image.mode == "RGBA":
background_color = (0,0,0,0)
else:
background_color = find_most_common_color(image)
# Check validation of arg background_color
if not is_valid_background_color() or not image:
print("Invalid arguments! Please try again!")
return
image_width, image_height = image.size
# Store all connected sprites that can be unified latter
connected_sprites = {}
# List of pixels label exist inside the map
sprites_label = [0]
# Maps of pixels under format matrix and numpy array
pixels_matrix, numpy_array = analyze_numpy_array(background_color)
# Dict of pixels'label corresponding to sprite's label
pixels_to_sprites = analyze_connected_sprites(connected_sprites)
# Map of sprites under format 2D-matrix
label_map = unify_sprites(pixels_to_sprites, pixels_matrix, numpy_array)
# Set of sprite-label that exist inside the map
exist_sprites_label = search_exist_sprites_label(pixels_to_sprites)
    # A dictionary with key: the label of a sprite and value: its Sprite object
sprites = collect_sprites(exist_sprites_label, label_map, numpy_array)
return (sprites, label_map) | 3,855 |
def get_java_package(path):
"""Extract the java package from path"""
segments = path.split("/")
# Find different root start indecies based on potential java roots
java_root_start_indecies = [_find(segments, root) for root in ["java", "javatests"]]
# Choose the root that starts earliest
start_index = min(java_root_start_indecies)
if start_index == len(segments):
fail("Cannot find java root: " + path)
return ".".join(segments[start_index + 1:]) | 3,856 |
def generate_seekr2_model_and_filetree(model_input, force_overwrite):
"""
Using the Model_input from the user, prepare the Model
object and the filetree. Then prepare all building files
for each anchor and serialize the Model to XML.
"""
model = common_prepare.model_factory(model_input)
common_prepare.prepare_model_cvs_and_anchors(model, model_input)
root_directory = os.path.expanduser(model_input.root_directory)
xml_path = os.path.join(root_directory, "model.xml")
if os.path.exists(xml_path):
# then a model file already exists at this location: update
# the anchor directories.
old_model = base.Model()
old_model.deserialize(xml_path)
common_prepare.modify_model(old_model, model, root_directory,
force_overwrite)
filetree.generate_filetree(model, root_directory)
filetree.copy_building_files(model, model_input, root_directory)
common_prepare.generate_bd_files(model, root_directory)
model.serialize(xml_path)
return model, xml_path | 3,857 |
def get_plugin():
"""Return the filter."""
return TextFilter | 3,858 |
def caltech256(root):
"""Caltech256 dataset from http://www.vision.caltech.edu/Image_Datasets/Caltech256
Pictures of objects belonging to 256 categories.
About 80 to 800 images per category.
Collected in September 2003 by Fei-Fei Li, Marco Andreetto,
and Marc 'Aurelio Ranzato.
The size of each image is roughly 300 x 200 pixels.
We have carefully clicked outlines of each object in these pictures;
these are included under the 'Annotations.tar'.
There is also a matlab script to view the annotations, 'show_annotations.m'.
Attention: if the directory `root/caltech256` already exists, the api will delete it and recreate it.
Data storage directory:
root = `/user/.../mydata`
caltech256 data:
`root/caltech256/train/007.bat/xx.jpg`
`root/caltech256/train/010.beer-mug/xx.ipg`
`root/caltech256/train/064.elephant-101/xx.jpg`
Args:
root: str, Store the absolute path of the data directory.
example:if you want data path is `/user/.../mydata/caltech256`,
root should be `/user/.../mydata`.
Returns:
Store the absolute path of the data directory, is `root/caltech256`.
"""
start = time.time()
task_path = assert_dirs(root, 'caltech256', make_root_dir=False)
url = "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar"
rq.files(url, gfile.path_join(root, url.split('/')[-1]))
un_tar(gfile.path_join(root, url.split('/')[-1]), task_path)
gfile.rename(gfile.path_join(task_path, '256_ObjectCategories'), gfile.path_join(task_path, 'train'))
gfile.remove(gfile.path_join(root, '256_ObjectCategories.tar'))
print('caltech256 dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return task_path | 3,859 |
def make_highlight(sel, *, highlight_kwargs):
"""
Create a highlight for a `Selection`.
This is a single-dispatch function; implementations for various artist
classes follow.
"""
warnings.warn(
f"Highlight support for {type(sel.artist).__name__} is missing") | 3,860 |
def draw__mask_with_edge(cv2_image: np.ndarray, edge_size: int = 10) -> np.ndarray:
"""
From a color image, get a black-and-white image with each instance separated by a border.
1. Change the color image to a black-and-white image.
2. Get edge image from `cv2_image`, then invert it to separate instance by a border.
3. Merge 1 and 2.
.. image:: https://i.imgur.com/YAHVVSl.png
:width: 2496px
:height: 1018px
:scale: 25%
:alt: mask_with_edge
:align: center
Parameters
----------
cv2_image : np.ndarray
BGR color Image
edge_size : int
Edge size, by default 10
Returns
-------
np.ndarray
Grayscale image with each instance separated by a border.
Examples
--------
>>> cv2_img: np.ndarray = cv2.imread("...")
>>> edge_masked_image: np.ndarray = draw__mask_with_edge(cv2_img, edge_size=10)
"""
img_edge = draw__edge_only(cv2_image, edge_size)
not_img_edge = cv2.bitwise_not(img_edge)
bw_image = img_color_to_bw(cv2_image)
return mask_image(bw_image, mask_image=not_img_edge) | 3,861 |
def test_stacer_binary_exists(host):
"""
Tests if stacer binary file exists.
"""
assert host.file(PACKAGE_BINARY).exists | 3,862 |
def test_list_id_length_nistxml_sv_iv_list_id_length_1_5(mode, save_output, output_format):
"""
Type list/ID is restricted by facet length with value 5.
"""
assert_bindings(
schema="nistData/list/ID/Schema+Instance/NISTSchema-SV-IV-list-ID-length-1.xsd",
instance="nistData/list/ID/Schema+Instance/NISTXML-SV-IV-list-ID-length-1-5.xml",
class_name="Out",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 3,863 |
def cba_post_process(output_dir, curtailment_tolerance=0.0001):
"""Perform post-processing of CBA tables."""
add_curtailment_columns(output_dir, curtailment_tolerance) | 3,864 |
def get_functions(pdb_file):
"""Get the offset for the functions we are interested in"""
methods = {'ssl3_new': 0,
'ssl3_free': 0,
'ssl3_connect': 0,
'ssl3_read_app_data': 0,
'ssl3_write_app_data': 0}
try:
# Do this the hard way to avoid having to load
# the types stream in mammoth PDB files
pdb = pdbparse.parse(pdb_file, fast_load=True)
pdb.STREAM_DBI.load()
pdb._update_names()
pdb.STREAM_GSYM = pdb.STREAM_GSYM.reload()
if pdb.STREAM_GSYM.size:
pdb.STREAM_GSYM.load()
pdb.STREAM_SECT_HDR = pdb.STREAM_SECT_HDR.reload()
pdb.STREAM_SECT_HDR.load()
# These are the dicey ones
pdb.STREAM_OMAP_FROM_SRC = pdb.STREAM_OMAP_FROM_SRC.reload()
pdb.STREAM_OMAP_FROM_SRC.load()
pdb.STREAM_SECT_HDR_ORIG = pdb.STREAM_SECT_HDR_ORIG.reload()
pdb.STREAM_SECT_HDR_ORIG.load()
except AttributeError:
pass
try:
sects = pdb.STREAM_SECT_HDR_ORIG.sections
omap = pdb.STREAM_OMAP_FROM_SRC
except AttributeError:
sects = pdb.STREAM_SECT_HDR.sections
omap = DummyOmap()
gsyms = pdb.STREAM_GSYM
if not hasattr(gsyms, 'globals'):
gsyms.globals = []
#names = []
for sym in gsyms.globals:
try:
name = sym.name.lstrip('_').strip()
if name.startswith('?'):
end = name.find('@')
if end >= 0:
name = name[1:end]
#names.append(name)
if name in methods:
off = sym.offset
virt_base = sects[sym.segment-1].VirtualAddress
addr = omap.remap(off+virt_base)
if methods[name] == 0:
methods[name] = addr
else:
methods[name] = -1
except IndexError:
pass
except AttributeError:
pass
#with open('names.txt', 'wb') as f_out:
# for name in names:
# f_out.write(name + "\n")
return methods | 3,865 |
def empty_iterable() -> typing.Iterable:
"""
Return an empty iterable, i.e., an empty list.
:return: an iterable
:Example:
>>> from flpy.iterators import empty_iterable
>>> empty_iterable()
[]
"""
return list() | 3,866 |
def create_namespace(ctx, config):
"""
Updates kubernetes deployment to use specified version
"""
settings_dict = get_settings()
config_dict = settings_dict['configs'][config]
set_context(ctx, config)
ctx.run('kubectl create namespace {namespace}'
.format(namespace=config_dict['namespace']),
echo=True) | 3,867 |
def nearest_pow_2(x):
"""
Finds the nearest integer that is a power of 2.
In contrast to :func:`next_pow_2` also searches for numbers smaller than
the input and returns them if they are closer than the next bigger power
of 2.
"""
a = M.pow(2, M.ceil(M.log(x, 2)))
b = M.pow(2, M.floor(M.log(x, 2)))
if abs(a - x) < abs(b - x):
return int(a)
else:
return int(b) | 3,868 |
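A quick check of the rounding behaviour (a minimal sketch; the defining module is assumed to import the math module as M):
# assuming the defining module imports math as M
print(nearest_pow_2(5))     # 4  (4 is closer than 8)
print(nearest_pow_2(7))     # 8  (8 is closer than 4)
print(nearest_pow_2(1000))  # 1024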
def timestamp_preprocess(ds, column, name):
"""This function takes the timestamp in the dataset and create from it features according to the settings above
Args:
ds ([dataframe]): dataset
column ([integer]): column index
name ([string]): column name
Returns:
[dataframe]: dataset after transformation
"""
ts = pd.to_datetime(ds[name])
for feature in TIMESTAMP_FEATURES.keys():
if TIMESTAMP_FEATURES[feature] is not None:
if feature == "timestamp":
ds[feature] = ts
elif feature == "day_of_week":
ds[feature] = ts.apply(lambda X: X.day_of_week)
elif feature == "day_of_month":
ds[feature] = ts.apply(lambda X: X.day)
elif feature == "month":
ds[feature] = ts.apply(lambda X: X.month)
elif feature == "hour":
ds[feature] = ts.apply(lambda X: X.hour)
elif feature == "minute":
ds[feature] = ts.apply(lambda X: X.minute)
elif feature == "year":
ds[feature] = ts.apply(lambda X: X.year)
return ds | 3,869 |
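A minimal driving sketch, assuming a module-level TIMESTAMP_FEATURES mapping along the lines below (the real settings are defined elsewhere and not shown here):
import pandas as pd

# hypothetical settings: None disables a feature, anything else enables it
TIMESTAMP_FEATURES = {"timestamp": True, "day_of_week": True, "hour": True,
                      "day_of_month": None, "month": None, "minute": None, "year": None}

ds = pd.DataFrame({"ts": ["2021-05-03 14:30:00", "2021-05-04 09:15:00"]})
ds = timestamp_preprocess(ds, column=0, name="ts")   # the column index is unused by the body
print(ds[["day_of_week", "hour"]])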
def test_001_echo(expect):
"""
This case is about receiving parameters in URLs.
URL example: http://localhost:8888/echo?msg=hello
"""
response = requests.get(URL + '/echo', params={'msg': expect})
response.raise_for_status()
assert expect == response.content.decode() | 3,870 |
def make_taubin_loss_function(x, y):
"""closure around taubin_loss_function to make
surviving pixel positions available inside.
x, y: positions of pixels surviving the cleaning
should not be quantities
"""
def taubin_loss_function(xc, yc, r):
"""taubin fit formula
reference : Barcelona_Muons_TPA_final.pdf (slide 6)
"""
upper_term = (((x - xc) ** 2 + (y - yc) ** 2 - r ** 2) ** 2).sum()
lower_term = (((x - xc) ** 2 + (y - yc) ** 2)).sum()
return np.abs(upper_term) / np.abs(lower_term)
return taubin_loss_function | 3,871 |
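A sketch of fitting a circle with the returned closure; the optimizer choice below is an assumption, and any minimizer over (xc, yc, r) would do:
import numpy as np
from scipy.optimize import minimize

# noisy points on a circle of radius 2 centred at (1, -1)
phi = np.linspace(0, 2 * np.pi, 50)
x = 1 + 2 * np.cos(phi) + np.random.normal(0, 0.05, phi.size)
y = -1 + 2 * np.sin(phi) + np.random.normal(0, 0.05, phi.size)

loss = make_taubin_loss_function(x, y)
result = minimize(lambda p: loss(*p), x0=[x.mean(), y.mean(), 1.0])
print(result.x)   # roughly [1, -1, 2]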
def init_rf_estimator():
"""
Instantiate a Random forest estimator with the optimized hyper-parameters.
:return: The RandomForest estimator instance.
"""
rf = RandomForestClassifier(
criterion=RF_CRIT,
min_samples_leaf=RF_MIN_SAMPLES_LEAF,
max_features='auto',
n_estimators=RF_N_ESTS,
n_jobs=-1)
return rf | 3,872 |
def dict_filter(d, exclude=()):
"""
Exclude specified keys from a nested dict
"""
def fix_key(k):
return str(k) if isinstance(k, builtin_str) else k
if isinstance(d, list):
return [dict_filter(e, exclude) for e in d]
if isinstance(d, dict):
items = ((fix_key(k), v) for k, v in d.items())
return {
k: dict_filter(v, exclude) for k, v in items if k not in exclude
}
return d | 3,873 |
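A small illustration of the recursive filtering (a sketch; `builtin_str` is assumed to be the plain str type in the defining module):
record = {
    "id": 1,
    "password": "secret",
    "children": [{"id": 2, "password": "also-secret", "name": "leaf"}],
}
print(dict_filter(record, exclude=("password",)))
# {'id': 1, 'children': [{'id': 2, 'name': 'leaf'}]}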
async def mongoengine_multiple_objects_exception_handler(request, exc):
"""
Error handler for MultipleObjectsReturned.
Logs the MultipleObjectsReturned error detected and returns the
appropriate message and details of the error.
"""
logger.exception(exc)
return JSONResponse(
Response(success=False, error_code=422, message=str(exc)).dict()
) | 3,874 |
def _get_sample_times(*traces, **kwargs):
"""Get sample times for all the traces."""
# Set the time boundaries for the DataFrame.
max_stop_time = max(
[trace.stop_time() for trace in traces if isinstance(trace, Trace)]
)
stop_time = kwargs.pop("stop_time", max_stop_time)
min_start_time = min(
[trace.start_time() for trace in traces if isinstance(trace, Trace)]
)
start_time = kwargs.pop("start_time", min_start_time)
# Get all the sample times of all the traces between the start and stop times.
times = set([start_time, stop_time])
for trace in traces:
times.update(
set(trace.get_sample_times(start_time=start_time, stop_time=stop_time))
)
# If requested, fill in additional times between sample times.
step = kwargs.pop("step", 0)
if step:
times.update(set(range(start_time, stop_time + 1, step)))
# Sort sample times in increasing order.
times = sorted(list(times))
return times | 3,875 |
def get_weak_model(op, diff_type, nonzero2nonzero_weight, zero2zero_weight=0,
zero2nonzero_weight=math.inf, nonzero2zero_weight=math.inf, precision=0):
"""Return the weak model of the given bit-vector operation ``op``.
Given the `Operation` ``op``, return the
`WeakModel` of ``op`` for the `Difference` type ``diff_type``
with given class attributes ``nonzero2nonzero_weight``,
``zero2zero_weight``,
``zero2nonzero_weight``, ``nonzero2zero_weight`` and
``precision`` (see `WeakModel`).
The returned model is a subclass of `WeakModel` and `OpModel`.
.. note::
To link the returned model ``MyModel`` to ``op``
such that ``MyModel`` is used in ``propagate``,
set the ``xor_model`` or ``rx_model`` attribute of ``op``
to ``MyModel`` (e.g., ``op.xor_model = MyModel``).
See also `differential.difference.XorDiff.propagate`
or `differential.difference.RXDiff.propagate`.
::
>>> from cascada.bitvector.core import Constant, Variable
>>> from cascada.bitvector.secondaryop import LutOperation
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.opmodel import get_weak_model
>>> class MyLut(LutOperation): pass # a 2-bit function
>>> XorWeakModelMyLut = get_weak_model(MyLut, XorDiff, decimal.Decimal(1.5), precision=1)
>>> alpha, beta = XorDiff(Variable("a", 2)), XorDiff(Variable("b", 2))
>>> f = XorWeakModelMyLut(alpha)
>>> print(f.vrepr())
XorWeakModelMyLut(XorDiff(Variable('a', width=2)))
>>> f.validity_constraint(beta)
(((a == 0b00) & (b == 0b00)) == 0b1) | ((~(a == 0b00) & ~(b == 0b00)) == 0b1)
>>> f.bv_weight(beta)
Ite(((a == 0b00) & (b == 0b00)) == 0b1, 0b00, 0b11)
>>> f.max_weight(), f.weight_width(), f.error(), f.num_frac_bits()
(3, 2, 0, 1)
"""
assert issubclass(op, operation.Operation)
if diff_type == difference.XorDiff:
prefix = "Xor"
assert zero2zero_weight == 0
# for XOR differentials with Pr. 1, an input property propagates to a unique output property
assert zero2nonzero_weight == math.inf
elif diff_type == difference.RXDiff:
prefix = "RX"
else:
raise ValueError(f"invalid diff_type {diff_type}")
_op, _diff_type = op, diff_type
_zero2zero_weight = zero2zero_weight
_nonzero2nonzero_weight = nonzero2nonzero_weight
_zero2nonzero_weight, _nonzero2zero_weight = zero2nonzero_weight, nonzero2zero_weight
_precision = precision
class MyWeakModel(abstractproperty.opmodel.WeakModel, OpModel):
op, diff_type = _op, _diff_type
zero2zero_weight = _zero2zero_weight
nonzero2nonzero_weight = _nonzero2nonzero_weight
zero2nonzero_weight = _zero2nonzero_weight
nonzero2zero_weight = _nonzero2zero_weight
precision = _precision
# def error(self): # maximum weight of a differential with n-bit input is n
# return sum(p.val.width for p in self.input_prop)
MyWeakModel.__name__ = f"{prefix}{abstractproperty.opmodel.WeakModel.__name__}{op.__name__}"
return MyWeakModel | 3,876 |
def get_temp():
"""
Return the temperature read from the sensor
"""
return sensor.t | 3,877 |
def load_clean_yield_data(yield_data_filepath):
"""
Cleans the yield data by making sure any Nan values in the columns we care about
are removed
"""
important_columns = ["Year", "State ANSI", "County ANSI", "Value"]
yield_data = pd.read_csv(yield_data_filepath).dropna(
subset=important_columns, how="any"
)
return yield_data | 3,878 |
async def test_temperature_conversion(
hass,
enable_custom_integrations,
unit_system,
native_unit,
state_unit,
native_value,
state_value,
):
"""Test temperature conversion."""
hass.config.units = unit_system
platform = getattr(hass.components, "test.sensor")
platform.init(empty=True)
platform.ENTITIES["0"] = platform.MockSensor(
name="Test",
native_value=str(native_value),
native_unit_of_measurement=native_unit,
device_class=DEVICE_CLASS_TEMPERATURE,
)
entity0 = platform.ENTITIES["0"]
assert await async_setup_component(hass, "sensor", {"sensor": {"platform": "test"}})
await hass.async_block_till_done()
state = hass.states.get(entity0.entity_id)
assert float(state.state) == approx(float(state_value))
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == state_unit | 3,879 |
def _render_pygame_frame(
surface: pygame.Surface,
screen: pygame.Surface,
orientation: task_pb2.AdbCall.Rotate.Orientation,
timestep: dm_env.TimeStep) -> None:
"""Displays latest observation on pygame surface."""
frame = timestep.observation['pixels'][:, :, :3] # (H x W x C) (RGB)
frame = utils.transpose_pixels(frame) # (W x H x C)
frame = utils.orient_pixels(frame, orientation)
pygame.surfarray.blit_array(surface, frame)
pygame.transform.smoothscale(surface, screen.get_size(), screen)
pygame.display.flip() | 3,880 |
def create_element_mapping(repnames_bedfile):
"""Create a mapping of the element names to their classes and families"""
elem_key = defaultdict(lambda : defaultdict(str))
with open(repnames_bedfile, "r") as bed:
for line in bed:
l = line.strip().split("\t")
name = l[3]
class_ = l[4]
family = l[5]
elem_key[name]["class"] = class_
elem_key[name]["family"] = family
return elem_key | 3,881 |
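A usage sketch with a hypothetical repnames BED file whose 4th-6th columns hold name, class and family (that column layout is inferred from the indexing above):
# repnames.bed (tab-separated), e.g.:
# chr1    100    450    AluY    SINE    Alu
elem_key = create_element_mapping("repnames.bed")
print(elem_key["AluY"]["class"], elem_key["AluY"]["family"])   # SINE Alu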
def _get_CRABI_iterators(captcha_dataframe,
train_indices,
validation_indices,
batch_size,
image_height,
image_width,
character_length,
categories):
"""
(HELPER FUNCTION)
Args:
captcha_dataframe (pandas.DataFrame): the dataset for training
train_indices (numpy.ndarray): indices of the CAPTCHA dataset used for training data
validation_indices (numpy.ndarray): indices of the CAPTCHA dataset used for validation data
batch_size (int): number of samples to process before the model is updated
image_height (int): height (in pixels) of expected input CAPTCHA image
image_width (int): width (in pixels) of expected input CAPTCHA image
character_length (int): number of characters in expected input CAPTCHA image
categories (int): number of possible characters in expected input
CAPTCHA image, specifying category count in the output layer
('10' for digits 0-9, '26' for alphabet, '36' for alphanumeric)
Returns:
pair of generator objects -> (training_set_iterator, validation_set_iterator)
"""
training_set_iterator = generate_CRABI_preprocessed_images(captcha_dataframe,
train_indices,
for_training=True,
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
categories=categories)
validation_set_iterator = generate_CRABI_preprocessed_images(captcha_dataframe,
validation_indices,
for_training=True,
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
categories=categories)
return training_set_iterator, validation_set_iterator | 3,882 |
def download_sql_dump(language, file, dump="latest", target_dir="."):
"""Downloads and decompresses a Wikipedia SQL dump.
Args:
language: Wikipedia name (language code).
file: File name.
dump: Dump version.
target_dir: Target directory.
"""
with urlopen(_get_url(language, dump, file)) as res, \
GzipFile(fileobj=res) as uncompressed_res, \
open(os.path.join(target_dir, _get_name(language, dump, file)), 'wb') as out_file:
copyfileobj(uncompressed_res, out_file) | 3,883 |
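A usage sketch; the exact file-name convention is handled by the unshown _get_url/_get_name helpers, so the file argument below is an assumption:
# fetch and decompress the page table of the English Wikipedia into the current directory
download_sql_dump("en", "page.sql.gz", dump="latest", target_dir=".")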
def run(request, context):
"""Creates a template.
Args:
request (orchestrate_pb2.CreateTemplateRequest): Request payload.
context: Context.
Returns:
A orchestrate_pb2.CreateTemplate with the status of the request.
"""
template = request.template
print('Orchestrate.CreateTemplate name={name} project={project}'.format(
name=template.name,
project=template.project,
))
request_id = uuid.uuid4().hex
try:
# Make sure data is valid before creating individual sizes - don't want to
# clean-up half-way or leave incomplete template families.
for size in template.sizes:
validate_metadata(template, size)
# Data checks out. let's create all template sizes.
for size in template.sizes:
create_template_size(template, size)
return orchestrate_pb2.CreateTemplateResponse(
status='CREATED',
request_id=str(request_id),
)
except errors.HttpError as exception:
if exception.resp.status == 409:
message = 'A template with name {name} already exists.'.format(
name=template.name)
raise OrchestrateTemplateCreationError(message)
else:
raise | 3,884 |
def fit_2dgaussian(data, error=None, mask=None):
"""
Fit a 2D Gaussian to a 2D image.
Parameters
----------
data : array_like
The 2D array of the image.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
result : `~astropy.modeling.functional_models.Gaussian2D` instance
The best-fitting Gaussian 2D model.
"""
if error is not None:
weights = 1.0 / error
else:
weights = None
if mask is not None:
if weights is None:
weights = np.ones_like(data)
# down-weight masked pixels
weights[mask] = 1.e-20
props = data_properties(data, mask=mask)
init_amplitude = np.ptp(data)
g_init = models.Gaussian2D(
init_amplitude, props.xcentroid.value, props.ycentroid.value,
props.semimajor_axis_sigma.value, props.semiminor_axis_sigma.value,
props.orientation.value)
fitter = LevMarLSQFitter()
y, x = np.indices(data.shape)
gfit = fitter(g_init, x, y, data, weights=weights)
return gfit | 3,885 |
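A quick synthetic check (a sketch; it assumes numpy plus the astropy/photutils imports used by the function are available):
import numpy as np

y, x = np.mgrid[0:51, 0:51]
data = 100.0 * np.exp(-((x - 30) ** 2 / (2 * 4.0 ** 2) + (y - 20) ** 2 / (2 * 6.0 ** 2)))
gfit = fit_2dgaussian(data)
print(gfit.x_mean.value, gfit.y_mean.value)   # close to 30, 20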
def generate_tf_records(c):
"""Convert imagenet images to tfrecords
"""
print("Preparing tf records")
if (
_number_img_files_in("/data/train") == 0
or _number_img_files_in("/data/validation") == 0
):
raise Exception(
"Not enough files found please make sure you have downloaded and processed the imagenet data"
)
from convert_imagenet_to_tf_records import main as convert_tf_records
convert_tf_records(
"/data/train",
"/data/validation",
"/data/tfrecords",
os.path.join(_BASE_PATH, "imagenet_class_index.json"),
) | 3,886 |
def list_dir_files(path: str, suffix: str = "") -> List[str]:
"""
Lists all files (and only files) in a directory, or return [path] if path is a file itself.
:param path: Directory or a file
:param suffix: Optional suffix to match (case insensitive). Default is none.
:return: list of absolute paths to files
"""
if suffix:
suffix = suffix.lower()
if Path(path).is_file():
files = [os.path.abspath(path)]
else:
files = []
for f in os.listdir(path):
file_path = os.path.join(path, f)
if Path(file_path).is_file():
if not suffix or f.lower().endswith(suffix):
files.append(os.path.abspath(file_path))
return list(sorted(files)) | 3,887 |
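A short usage sketch:
# all *.csv files directly inside ./data, as sorted absolute paths
for path in list_dir_files("./data", suffix=".csv"):
    print(path)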
def map_role_packages(package_map):
"""Add and sort packages belonging to a role to the role_packages dict.
:type package_map: ``dict``
"""
for k, v in ROLE_PACKAGES.items():
role_pkgs = package_map['role_packages'][k] = list()
for pkg_list in v.values():
role_pkgs.extend(pkg_list)
else:
package_map['role_packages'][k] = sorted(set(role_pkgs)) | 3,888 |
def to_roman(number):
"""
Converts an Arabic numeral in the range 1 to 4999 to the
corresponding Roman numeral. Returns None on error conditions.
"""
try:
return roman.toRoman(number)
except (roman.NotIntegerError, roman.OutOfRangeError):
return None | 3,889 |
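For example (conversion is delegated to the roman package; out-of-range values yield None):
print(to_roman(1987))   # 'MCMLXXXVII'
print(to_roman(5000))   # None, outside the supported 1-4999 range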
def GDAL_like(filename, fileout=""):
"""
GDAL_like
"""
BSx, BSy, Mb, Nb, M, N = 0,0, 0,0, 0,0
dataset1 = gdal.Open(filename, gdal.GA_ReadOnly)
dataset2 = None
if dataset1:
band1 = dataset1.GetRasterBand(1)
M, N = int(dataset1.RasterYSize), int(dataset1.RasterXSize)
B = dataset1.RasterCount
BSx, BSy = band1.GetBlockSize()
Nb = int(N / BSx) + (0 if N % BSx == 0 else 1)
Mb = int(M / BSy) + (0 if M % BSy == 0 else 1)
CO = ["BIGTIFF=YES"]
options = dataset1.GetMetadata("IMAGE_STRUCTURE")
if BSy > 1:
CO += ["TILED=YES", "BLOCKXSIZE=%d" % BSx, "BLOCKYSIZE=%d" % BSy]
for key in options:
if key == "COMPRESSION":
CO.append("COMPRESS=" + options[key])
else:
CO.append(key + "=" + options[key])
driver = gdal.GetDriverByName("GTiff")
fileout = fileout if fileout else forceext(filename, "copy.tif")
dataset2 = driver.Create(fileout, N, M, B, band1.DataType, CO)
dataset2.SetProjection(dataset1.GetProjection())
dataset2.SetGeoTransform(dataset1.GetGeoTransform())
for j in range(1, B + 1):
band1 = dataset1.GetRasterBand(j)
band2 = dataset2.GetRasterBand(j)
if band1.GetNoDataValue() != None:
band2.SetNoDataValue(band1.GetNoDataValue())
else:
band2.SetNoDataValue(np.nan)
dataset1 = None
return (dataset2, BSx, BSy, Mb, Nb, M, N) | 3,890 |
def take_attendance(methodcnt):
"""global setup_bool
if (setup_bool == False or methodcnt == False):
print ("in if statement")
setup_bool = True
else:"""
print ("checking in - F.R.")
react_with_sound(attendance_final)
client.CheckIn()
return 2 | 3,891 |
def feature_selection(data, features):
"""
Choose which features to use for training.
:param data: preprocessed dataset
:param features: list of features to use
:return: data with selected features
"""
return data[features] | 3,892 |
def randomstr(ctx, nbytes=''):
"""
generates a URL-safe text string, containing nbytes random bytes
sets it to ctx.data
"""
if nbytes:
nbytes = int(nbytes)
ctx.data = secrets.token_urlsafe(nbytes)
else:
ctx.data = secrets.token_urlsafe() | 3,893 |
def parse_docstring(docstring, line=0, filename='<string>', logger=None,
format_name=None, options=None):
# type: (str, int, Any, Optional[logging.Logger], Optional[str], Any) -> Tuple[OrderedDict[str, Arg], Optional[Arg]]
"""
Parse the passed docstring.
The OrderedDict holding parsed parameters may be sparse.
Parameters
----------
docstring : str
line : int
start line of the docstring
logger : Optional[logging.Logger]
format_name : Optional[str]
Returns
-------
params : OrderedDict[str, Arg]
results : Optional[Arg]
"""
if format_name is None or format_name == 'auto':
format_cls = guess_format(docstring)
if format_cls is None:
format_cls = RestFormat
else:
format_cls = format_map[format_name]
format = format_cls(line, filename=filename, logger=logger,
options=options)
return format.parse(docstring) | 3,894 |
def srCyrillicToLatin(cyrillic_text):
"""
Return a conversion of the given string from cyrillic to latin, using
'digraph' letters (this means that e.g. "nj" is encoded as one character). Unknown
letters remain unchanged.
CAVEAT: this will ONLY change letters from the cyrillic subset of Unicode.
For instance, the plain ASCII letter "C" (code point 0x0043) will NOT be converted
to "S", as opposed to the cyrillic letter "C" (code point 0x0421), which WILL be converted.
If you are sure that your cyrillic string does not contain latin portions (e.g. quoted text,
company names), you can "normalize" it to cyrillic by using srNormalizeToCyrillic first.
"""
return __translate_string(cyrillic_text, __cyrillic_to_latin) | 3,895 |
def from_phone(func=None):
"""来自手机的消息(给自己发的) FriendMsg"""
if func is None:
return from_phone
async def inner(ctx):
assert isinstance(ctx, FriendMsg)
if ctx.MsgType == MsgTypes.PhoneMsg:
return await func(ctx)
return None
return inner | 3,896 |
def fit_and_validate_readout(data: Iterator[Tuple[Tensor, Tensor]], regularization_constants: List[float],
get_validation_error: Callable[[Tuple[Tensor, Tensor]], float],
verbose: bool = False) -> Tuple[Tensor, Tensor]:
"""
Ridge regression for big data, with efficient regularization selection
Fits a linear model :math:`y = W x + b` with regularization.
See:
T. Zhang & B. Yang (2017). An exact approach to ridge regression for big data.
Computational Statistics, 32(3), 909–928. https://doi.org/10.1007/s00180-017-0731-5
:param data: Batch dataset of pairs (x, y) with samples on rows
:param regularization_constants: Regularization constants for ridge regression (including none)
:param get_validation_error: Evaluate validation error for a regression pair (W, b)
:param verbose: Whether to print validation info (default false)
:return: A pair of tensors (W, b)
"""
# Compute sufficient statistics for regression
x, y = next(data)
Syy = y.square().sum(dim=0) # (targets)
Sxy = x.t() @ y # (features × targets)
Sxx = x.t() @ x # (features × features)
Sy = y.sum(dim=0) # (targets)
Sx = x.sum(dim=0) # (features)
n = float(x.shape[0]) # samples
for x, y in data:
Syy += y.square().sum(dim=0)
Sxy += x.t() @ y
Sxx += x.t() @ x
Sy += y.sum(dim=0)
Sx += x.sum(dim=0)
n += x.shape[0]
# Compute ridge matrices
Vxx = Sxx.diag() - (Sx.square() / n)
Vyy = Syy - (Sy.square() / n)
XX = (Sxx - torch.outer(Sx, Sx) / n) / torch.outer(Vxx, Vxx).sqrt()
Xy = (Sxy - torch.outer(Sx, Sy) / n) / torch.outer(Vxx, Vyy).sqrt()
# Compute and select weights
best_validation_error, best_W, best_b = None, None, None
for regularization in regularization_constants:
# Compute weights
XXr = (XX + torch.eye(n=XX.shape[0]).to(XX) * regularization) if regularization else XX
Ws = torch.linalg.solve(XXr, Xy)
W = Ws * torch.sqrt(Vyy.expand_as(Ws) / Vxx.unsqueeze(-1))
b = (Sy / n) - (Sx / n) @ W
# Validate, select
validation_error = get_validation_error((W.t(), b))
if best_validation_error is None or validation_error < best_validation_error:
best_validation_error, best_W, best_b = validation_error, W.t(), b
if verbose:
print(f'{regularization:e}: {validation_error}', file=sys.stderr)
return best_W, best_b | 3,897 |
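A compact end-to-end sketch on synthetic data; the batch iterator and validation closure below are assumptions about how a caller would wire this up:
import torch

torch.manual_seed(0)
W_true = torch.randn(20, 3)                  # 20 features -> 3 targets

def make_batch(n):
    x = torch.randn(n, 20)
    return x, x @ W_true + 0.01 * torch.randn(n, 3)

train_batches = iter([make_batch(256) for _ in range(4)])
x_val, y_val = make_batch(128)

def validation_error(wb):
    W, b = wb                                # W has shape (targets x features)
    return ((x_val @ W.t() + b - y_val).square().mean().item())

W, b = fit_and_validate_readout(train_batches, [0.0, 1e-3, 1e-1], validation_error, verbose=True)
print(W.shape, b.shape)                      # torch.Size([3, 20]) torch.Size([3])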
def summarize_df(df: DataFrame) -> None:
"""Show properties of a DataFrame."""
display(
df.dtypes.rename("dtype")
.to_frame()
.merge(
df.isna().sum().rename("num_missing").to_frame(),
left_index=True,
right_index=True,
how="left",
)
.assign(num=len(df))
.merge(
df.nunique().rename("nunique").to_frame(),
left_index=True,
right_index=True,
how="left",
)
.merge(
df.dropna(how="any")
.sample(1)
.squeeze()
.rename("single_non_nan_value")
.to_frame(),
left_index=True,
right_index=True,
how="left",
)
) | 3,898 |
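A minimal call, assuming an IPython/Jupyter context where display is available:
import pandas as pd

df = pd.DataFrame({"a": [1, 2, None], "b": ["x", "y", "y"]})
summarize_df(df)   # renders dtype, num_missing, num, nunique and one sample value per column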
def create_scan_message():
"""Creates a dummy message of type v3.asset.file to be used by the agent for testing purposes.
The files used is the EICAR Anti-Virus Test File.
"""
file_content = (pathlib.Path(__file__).parents[0] / 'files/malicious_dummy.com').read_bytes()
selector = 'v3.asset.file'
msg_data = {'content': file_content, 'path': 'some/dummy/path'}
return message.Message.from_data(selector, data=msg_data) | 3,899 |