content (stringlengths 22–815k) | id (int64 0–4.91M)
---|---|
def neighborhood(index, npoints, maxdist=1):
"""
Returns the neighbourhood of the current index,
i.e. all points of the grid separated by up to
*maxdist* from the current point.
@type index: int
@type npoints: int
@type maxdist: int
@rtype: list of int
"""
return [index + i for i in range(-maxdist, maxdist + 1)
if i != 0 and 0 <= index + i <= npoints - 1] | 2,800 |
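A minimal usage sketch for the function above (assuming it is defined as shown); for index 5 on a 10-point grid with maxdist=2 it returns the in-bounds neighbours:
print(neighborhood(5, 10, maxdist=2))   # [3, 4, 6, 7]
print(neighborhood(0, 10, maxdist=2))   # [1, 2] -- indices below 0 are clipped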
def insert_node_before_node(graph: Graph,
node_to_insert: BaseNode,
last_node: BaseNode):
"""
Insert a new node to a graph before an existing node in the graph.
Check before insertion that the node (that we add the new node before) has
only a single incoming edge, so such an insertion is possible. If it is not the
case, an exception is thrown.
Args:
graph: Graph to add the new node to.
node_to_insert: Node to add.
last_node: Node to insert the new node before.
"""
first_nodes = graph.get_prev_nodes(last_node)
if len(first_nodes) != 1:
raise Exception('Can only insert if there is only one input')
first_node = first_nodes[0]
insert_node_between_two_nodes(graph, node_to_insert, first_node, last_node) | 2,801 |
def test_book_id_get_correct_auth_not_users_book_gets_404_status_code(testapp, testapp_session, one_user):
"""Test that GET to book-id route gets 404 status code for book that does not beling to user."""
book = testapp_session.query(Book).filter(Book.user_id != one_user.id).first()
data = {
'email': one_user.email,
'password': 'password',
}
res = testapp.get('/books/{}'.format(book.id), data, status=404)
assert res.status_code == 404 | 2,802 |
def data_check(data):
"""Check the data in [0,1]."""
return 0 <= float(data) <= 1 | 2,803 |
def tokenize(text):
"""
Tokenization function to process text data
Args:
text: String. disaster message.
Returns:
clean_tokens: list. token list from text message.
"""
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# get list of all urls using regex
detected_urls = re.findall(url_regex, text)
# replace each url in text string with placeholder
for url in detected_urls:
text = text.replace(url, "urlplaceholder")
# tokenize text
tokens = word_tokenize(text)
# initiate lemmatizer
lemmatizer = WordNetLemmatizer()
# iterate through each token
clean_tokens = []
for tok in tokens:
# lemmatize, normalize case, and remove leading/trailing white space
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens | 2,804 |
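A usage sketch for tokenize, assuming the imports the function relies on (re, and NLTK's word_tokenize and WordNetLemmatizer) are present at module level and the usual punkt/wordnet corpora are downloaded; the exact token list depends on the NLTK version:
import re
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
print(tokenize("Heavy rain reported at http://example.com downtown"))
# e.g. ['heavy', 'rain', 'reported', 'at', 'urlplaceholder', 'downtown']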
def log(msg, level=xbmc.LOGDEBUG, **kwargs):
"""InputStream Helper log method"""
xbmc.log(msg=from_unicode('[{addon}] {msg}'.format(addon=addon_id(), msg=msg.format(**kwargs))), level=level) | 2,805 |
def get_symbol_historical(symbol_name):
"""Returns the available historical data for a symbol as a dictionary."""
# Get the data
symbol_data = get_symbol_data(symbol_name)
# Build the response
response = symbol_data.to_dict(orient="records")
return response | 2,806 |
def create_topology1():
"""
1. Create a data center network object (DCNetwork) with monitoring enabled
"""
net = DCNetwork(monitor=True, enable_learning=False)
"""
1b. Add endpoint APIs for the whole DCNetwork,
to access and control the networking from outside.
e.g., to setup forwarding paths between compute
instances aka. VNFs (represented by Docker containers), passing through
different switches and datacenters of the emulated topology
"""
# create monitoring api endpoint for backwards compatibility with zerorpc api
mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
mon_api.connectDCNetwork(net)
mon_api.start()
"""
2. Add (logical) data centers to the topology
(each data center is one "bigswitch" in our simplified
first prototype)
"""
dc1 = net.addDatacenter("datacenter1")
dc2 = net.addDatacenter("datacenter2")
"""
3. You can add additional SDN switches for data center
interconnections to the network.
"""
s1 = net.addSwitch("s1")
"""
4. Add links between your data centers and additional switches
to define your topology.
These links can use Mininet's features to limit bw, add delay or jitter.
"""
net.addLink(dc1, s1)
net.addLink(s1, dc2)
"""
5. We want to access and control our data centers from the outside,
e.g., we want to connect an orchestrator to start/stop compute
resources aka. VNFs (represented by Docker containers in the emulated topology).
So we need to instantiate API endpoints (e.g. a zerorpc or REST
interface). Depending on the endpoint implementations, we can connect
one or more data centers to it, which can then be controlled through
this API, e.g., start/stop/list compute instances.
"""
# keep the old zeroRPC interface for the prometheus metric query test
zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
# connect data centers to this endpoint
zapi1.connectDatacenter(dc1)
zapi1.connectDatacenter(dc2)
# run API endpoint server (in another thread, don't block)
zapi1.start()
# create a new instance of a endpoint implementation
# the restapi handles all compute, networking and monitoring commands in one api endpoint
api1 = RestApiEndpoint("0.0.0.0", 5001)
# connect data centers to this endpoint
api1.connectDatacenter(dc1)
api1.connectDatacenter(dc2)
# connect total network also, needed to do the chaining and monitoring
api1.connectDCNetwork(net)
# run API endpoint server (in another thread, don't block)
api1.start()
"""
5.1. For our example, we create a second endpoint to illustrate that
this is supported by our design. This feature allows us to have
one API endpoint for each data center. This makes the emulation
environment more realistic because you can easily create one
OpenStack-like REST API endpoint for *each* data center.
This will look like a real-world multi PoP/data center deployment
from the perspective of an orchestrator.
"""
#zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
#zapi2.connectDatacenter(dc3)
#zapi2.connectDatacenter(dc4)
#zapi2.start()
"""
6. Finally we are done and can start our network (the emulator).
We can also enter the Mininet CLI to interactively interact
with our compute resources (just like in default Mininet).
But we can also implement fully automated experiments that
can be executed again and again.
"""
net.start()
net.CLI()
# when the user types exit in the CLI, we stop the emulator
net.stop() | 2,807 |
def guesses(word):
"""
return all of the first and second order guesses for this word
"""
result = list(known(*first_order_variants(word)))
result.sort()
return result | 2,808 |
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ps = pmf.Render()
if pmf.name:
options = Underride(options, label=pmf.name)
Plot(xs, ps, **options) | 2,809 |
def em(X, sf, inits, K, L, n_iter=100, n_inner_iter=50, tol=1e-5, zero_inflated=True):
"""
run EM algorithm on the given init centers
return the clustering labels with the highest log likelihood
"""
# add prepare reduced data here
print("start em algorithm")
res = _em(X, sf, inits, K, L, n_iter, n_inner_iter, tol, zero_inflated)
max_idx = np.argmax([r['llf'] for r in res])
sol = res[max_idx]
em_labels = np.argmax(sol['rho'], axis=1).flatten()
sol['labels'] = em_labels
return sol | 2,810 |
def is_disaggregate(data, raw_fuel_sectors_enduses):
"""TODO: Disaggregate fuel for sector and enduses with floor
area and GVA for sectors and enduses (IMPROVE)
#TODO: DISAGGREGATE WITH OTHER DATA
"""
is_fueldata_disagg = {}
national_floorarea_sector = 0
for region_name in data['lu_reg']:
national_floorarea_sector += sum(data['ss_sector_floor_area_by'][region_name].values())
# Iterate regions
for region_name in data['lu_reg']:
is_fueldata_disagg[region_name] = {}
# Iterate sector
for sector in data['is_sectors']:
is_fueldata_disagg[region_name][sector] = {}
# Sector specific info
reg_floorarea_sector = sum(data['ss_sector_floor_area_by'][region_name].values())
# Iterate enduse
for enduse in data['is_all_enduses']:
national_fuel_sector_by = raw_fuel_sectors_enduses[sector][enduse]
#print("national_fuel_sector_by: " + str(national_fuel_sector_by))
# ----------------------
# Disaggregating factors
# TODO: IMPROVE. SHOW HOW IS DISAGGREGATED
reg_disaggregation_factor = (1 / national_floorarea_sector) * reg_floorarea_sector
# Disaggregated national fuel
reg_fuel_sector_enduse = reg_disaggregation_factor * national_fuel_sector_by
is_fueldata_disagg[region_name][sector][enduse] = reg_fuel_sector_enduse
return is_fueldata_disagg | 2,811 |
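The disaggregation factor above is just the region's share of national floor area; a small numeric sketch of the same arithmetic, with made-up numbers:
national_floorarea_sector = 500.0   # hypothetical national total
reg_floorarea_sector = 50.0         # hypothetical regional floor area
national_fuel_sector_by = 1000.0    # hypothetical national fuel for one sector/enduse
reg_disaggregation_factor = (1 / national_floorarea_sector) * reg_floorarea_sector  # 0.1
reg_fuel_sector_enduse = reg_disaggregation_factor * national_fuel_sector_by        # 100.0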
def update_logger(evo_logger, x, fitness, memory, top_k, verbose=False):
""" Helper function to keep track of top solutions. """
# Check if there are solutions better than current archive
vals = jnp.hstack([evo_logger["top_values"], fitness])
params = jnp.vstack([evo_logger["top_params"], x])
concat_top = jnp.hstack([jnp.expand_dims(vals, 1), params])
sorted_top = concat_top[concat_top[:, 0].argsort()]
# Importantly: Params are stored as flat vectors
evo_logger["top_values"] = sorted_top[:top_k, 0]
evo_logger["top_params"] = sorted_top[:top_k, 1:]
evo_logger["log_top_1"].append(evo_logger["top_values"][0])
evo_logger["log_top_mean"].append(jnp.mean(evo_logger["top_values"]))
evo_logger["log_top_std"].append(jnp.std(evo_logger["top_values"]))
evo_logger["log_gen_1"].append(jnp.min(fitness))
evo_logger["log_gen_mean"].append(jnp.mean(fitness))
evo_logger["log_gen_std"].append(jnp.std(fitness))
evo_logger["log_sigma"].append(memory["sigma"])
evo_logger["log_gen"].append(memory["generation"])
if verbose:
print(evo_logger["log_gen"][-1], evo_logger["top_values"])
return evo_logger | 2,812 |
def do_open(args, _):
"""usage: open cluster[/role[/env/job]]
Opens the scheduler page for a cluster, role or job in the default web browser.
"""
cluster_name = role = env = job = None
if len(args) == 0:
print('Open command requires a jobkey parameter.')
exit(1)
v1_deprecation_warning("open", ["job", "open"])
args = args[0].split("/")
if len(args) > 0:
cluster_name = args[0]
if len(args) > 1:
role = args[1]
if len(args) > 2:
env = args[2]
if len(args) > 3:
job = args[3]
else:
# TODO(ksweeney): Remove this after MESOS-2945 is completed.
die('env scheduler pages are not yet implemented, please specify job')
if not cluster_name:
die('cluster is required')
api = make_client(cluster_name)
import webbrowser
webbrowser.open_new_tab(
synthesize_url(api.scheduler_proxy.scheduler_client().url, role, env, job)) | 2,813 |
def ae(nb_features,
input_shape,
nb_levels,
conv_size,
nb_labels,
enc_size,
name='ae',
prefix=None,
feat_mult=1,
pool_size=2,
padding='same',
activation='elu',
use_residuals=False,
nb_conv_per_level=1,
batch_norm=None,
enc_batch_norm=None,
ae_type='conv', # 'dense', or 'conv'
enc_lambda_layers=None,
add_prior_layer=False,
add_prior_layer_reg=0,
use_logp=True,
conv_dropout=0,
include_mu_shift_layer=False,
single_model=False, # whether to return a single model, or a tuple of models that can be stacked.
final_pred_activation='softmax',
do_vae=False):
"""
Convolutional Auto-Encoder.
Optionally Variational.
Optionally Dense middle layer
"Mostly" in that the inner encoding can be (optionally) constructed via dense features.
Parameters:
do_vae (bool): whether to do a variational auto-encoder or not.
enc_lambda_layers functions to try:
K.softsign
a = 1
longtanh = lambda x: K.tanh(x) * K.log(2 + a * abs(x))
"""
# naming
model_name = name
# volume size data
ndims = len(input_shape) - 1
if isinstance(pool_size, int):
pool_size = (pool_size,) * ndims
# get encoding model
enc_model = conv_enc(nb_features,
input_shape,
nb_levels,
conv_size,
name=model_name,
feat_mult=feat_mult,
pool_size=pool_size,
padding=padding,
activation=activation,
use_residuals=use_residuals,
nb_conv_per_level=nb_conv_per_level,
conv_dropout=conv_dropout,
batch_norm=batch_norm)
# middle AE structure
if single_model:
in_input_shape = None
in_model = enc_model
else:
in_input_shape = enc_model.output.shape.as_list()[1:]
in_model = None
mid_ae_model = single_ae(enc_size,
in_input_shape,
conv_size=conv_size,
name=model_name,
ae_type=ae_type,
input_model=in_model,
batch_norm=enc_batch_norm,
enc_lambda_layers=enc_lambda_layers,
include_mu_shift_layer=include_mu_shift_layer,
do_vae=do_vae)
# decoder
if single_model:
in_input_shape = None
in_model = mid_ae_model
else:
in_input_shape = mid_ae_model.output.shape.as_list()[1:]
in_model = None
dec_model = conv_dec(nb_features,
in_input_shape,
nb_levels,
conv_size,
nb_labels,
name=model_name,
feat_mult=feat_mult,
pool_size=pool_size,
use_skip_connections=False,
padding=padding,
activation=activation,
use_residuals=use_residuals,
final_pred_activation='linear',
nb_conv_per_level=nb_conv_per_level,
batch_norm=batch_norm,
conv_dropout=conv_dropout,
input_model=in_model)
if add_prior_layer:
dec_model = add_prior(dec_model,
[*input_shape[:-1], nb_labels],
name=model_name,
prefix=model_name + '_prior',
use_logp=use_logp,
final_pred_activation=final_pred_activation,
add_prior_layer_reg=add_prior_layer_reg)
if single_model:
return dec_model
else:
return (dec_model, mid_ae_model, enc_model) | 2,814 |
def read_authorized_keys(username=None):
"""Read public keys from specified user's authorized_keys file.
args:
username (str): username.
returns:
list: Authorised keys for the specified user.
"""
authorized_keys_path = '{0}/.ssh/authorized_keys'.format(os.path.expanduser('~{0}'.format(username)))
rnd_chars = random_string(length=RANDOM_FILE_EXT_LENGTH)
tmp_authorized_keys_path = '/tmp/authorized_keys_{0}_{1}'.format(username, rnd_chars)
authorized_keys = list()
copy_result = execute_command(
shlex.split(str('{0} cp {1} {2}'.format(sudo_check(), authorized_keys_path, tmp_authorized_keys_path))))
result_message = copy_result[0][1].decode('UTF-8')
if 'you must have a tty to run sudo' in result_message: # pragma: no cover
raise OSError("sudo is blocked by /etc/sudoers. Remove the entry: 'Defaults requiretty'.")
elif 'No such file or directory' not in result_message:
execute_command(shlex.split(str('{0} chmod 755 {1}'.format(sudo_check(), tmp_authorized_keys_path))))
with open(tmp_authorized_keys_path) as keys_file:
for key in keys_file:
authorized_keys.append(PublicKey(raw=key))
execute_command(shlex.split(str('{0} rm {1}'.format(sudo_check(), tmp_authorized_keys_path))))
return authorized_keys | 2,815 |
def get_fprime_version():
""" Gets the fprime version using setuptools_scm """
# First try to read the SCM version
try:
return get_version(root=os.sep.join([".."] * ROOT_PARENT_COUNT), relative_to=__file__)
# Fallback to a specified version when SCM is unavailable
except LookupError:
return "1.5.4" | 2,816 |
def push(player, dealer):
"""
Functions to handle end of game scenarios.
:param player:
:param dealer:
:return:
"""
print('Dealer and Player tie! It is a push.') | 2,817 |
def parser_config(p):
"""JLS file info."""
p.add_argument('--verbose', '-v',
action='store_true',
help='Display verbose information.')
p.add_argument('filename',
help='JLS filename')
return on_cmd | 2,818 |
def preprocess_raw_data():
"""
Reads raw data from local storage and applies the pre-processing necessary to use the dataset with ARIMA.
The function assumes that the date column is named 'Date'. It saves the pre-processed dataset
locally.
"""
raw_df = read_data('raw_data')
raw_df['Date'] = list(map(lambda x: pd.to_datetime(x), raw_df['Date']))
raw_df = raw_df.sort_values('Date')
procesed_df = raw_df.rename(index=str,
columns={'Daily minimum temperatures in Melbourne, '
'Australia, 1981-1990': 'y'})
for sub in procesed_df['y']:
if '?' in sub:
procesed_df.loc[procesed_df['y'] == sub, 'y'] = sub.split('?')[1]
repo_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_path = os.path.join(repo_path, 'data', 'interim', 'processed_df.csv')
os.makedirs(os.path.dirname(data_path), exist_ok=True)
procesed_df.to_csv(path_or_buf=data_path, index=False, header=True) | 2,819 |
def error_by_error_scatterplot(output_directory, file_prefix, df,
reference_series_index, x_series_index, y_series_index,
x_color, y_color,
x_series_name = None, y_series_name = None,
plot_title = '', x_axis_label = '', y_axis_label = '', similarity_range = 0.25,
add_similarity_range_annotation = True,
shape_by_category = False, shape_category_series_index = None, shape_category_title = 'Case',
label_series_index = None, label_outliers = True,
use_geom_text_repel = True,
):
""" Creates a scatterplot of error versus error intended to show which computational method (X or Y) has the least amount of error relative to a reference series.
The difference vectors (reference_series - x_series, reference_series - y_series) are created and these differences (errors)
are plotted against each other.
:param output_directory: The output directory.
:param file_prefix: A prefix for the generated files. A CSV file with the plot points, the R script, and the R output is saved along with the plot itself.
:param df: A pandas dataframe. Note: The dataframe is zero-indexed.
:param reference_series_index: The numerical index of the reference series e.g. experimental data.
:param x_series_index: The numerical index of the X-axis series e.g. predictions from a computational method.
:param y_series_index: The numerical index of the Y-axis series e.g. predictions from a second computational method.
:param x_color: The color of the "method X is better" points.
:param y_color: The color of the "method Y is better" points.
:param x_series_name: A name for the X-series which is used in the classification legend.
:param y_series_name: A name for the Y-series which is used in the classification legend.
:param plot_title: Plot title.
:param x_axis_label: X-axis label.
:param y_axis_label: Y-axis label.
:param similarity_range: A point (x, y) is considered as similar if |x - y| <= similarity_range.
:param add_similarity_range_annotation: If true then the similarity range is included in the plot.
:param shape_by_category: Boolean. If set then points are shaped by the column identified with shape_category_series_index. Otherwise, points are shaped by classification ("X is better", "Y is better", or "Similar")
:param shape_category_series_index: The numerical index of the series used to choose point shapes.
:param shape_category_title: The title of the shape legend.
:param label_series_index: The numerical index of the series label_series_index
:param label_outliers: Boolean. If set then label outliers using the column identified with label_series_index.
:param use_geom_text_repel: Boolean. If set then the ggrepel package is used to avoid overlapping labels.
This function was adapted from the Kortemme Lab covariation benchmark (https://github.com/Kortemme-Lab/covariation).
todo: I need to check that ggplot2 is respecting the color choices. It may be doing its own thing.
"""
try:
os.mkdir(output_directory)
except:
pass
assert (os.path.exists(output_directory))
if not isinstance(shape_category_series_index, int):
shape_by_category = False
if not isinstance(label_series_index, int):
label_outliers = False
assert(x_series_name != None and y_series_name != None)
df = df.copy()
headers = df.columns.values
num_categories = len(set(df.ix[:, shape_category_series_index].values))
legal_shapes = list(range(15,25+1)) + list(range(0,14+1))
if num_categories > len(legal_shapes):
colortext.warning('Too many categories ({0}) to plot using meaningful shapes.'.format(num_categories))
shape_by_category = False
else:
legal_shapes = legal_shapes[:num_categories]
df['X_error'] = abs(df[headers[reference_series_index]] - df[headers[x_series_index]])
x_error_index = len(df.columns.values) - 1
df['Y_error'] = abs(df[headers[reference_series_index]] - df[headers[y_series_index]])
y_error_index = len(df.columns.values) - 1
# Get the list of domains common to both runs
df['Classification'] = df.apply(lambda r: _classify_smallest_error(r['X_error'], r['Y_error'], similarity_range, x_series_name, y_series_name), axis = 1)
error_classification_index = len(df.columns.values) - 1
# Create the R script
boxplot_r_script = '''
library(ggplot2)
library(gridExtra)
library(scales)
library(qualV)
library(grid)'''
if use_geom_text_repel:
boxplot_r_script +='''
library(ggrepel) # install with 'install.packages("ggrepel")' inside the R interactive shell.
'''
boxplot_r_script += '''
# PNG generation
png('%(file_prefix)s.png', width=2560, height=2048, bg="white", res=600)
txtalpha <- 0.8
redtxtalpha <- 0.8
%(png_plot_commands)s
'''
xy_table_filename = '{0}.txt'.format(file_prefix)
xy_table_filepath = os.path.join(output_directory, xy_table_filename)
data_table = df.to_csv(header = True, index = False)
write_file(xy_table_filepath, data_table)
main_plot_script = '''
# Set the margins
par(mar=c(5, 5, 1, 1))
xy_data <- read.csv('%(xy_table_filename)s', header=T)
names(xy_data)[%(x_error_index)d + 1] <- "xerrors"
names(xy_data)[%(y_error_index)d + 1] <- "yerrors"
'''
if label_outliers:
main_plot_script +='''names(xy_data)[%(label_series_index)d + 1] <- "outlier_labels"'''
main_plot_script +='''
names(xy_data)[%(shape_category_series_index)d + 1] <- "categories"
xy_data[%(x_error_index)d + 1]
xy_data[%(y_error_index)d + 1]
# coefs contains two values: (Intercept) and yerrors
coefs <- coef(lm(xerrors~yerrors, data = xy_data))
fitcoefs = coef(lm(xerrors~0 + yerrors, data = xy_data))
fitlmv_yerrors <- as.numeric(fitcoefs[1])
lmv_intercept <- as.numeric(coefs[1])
lmv_yerrors <- as.numeric(coefs[2])
lm(xy_data$yerrors~xy_data$xerrors)
xlabel <- "%(x_axis_label)s"
ylabel <- "%(y_axis_label)s"
plot_title <- "%(plot_title)s"
rvalue <- cor(xy_data$yerrors, xy_data$xerrors)
# Alphabetically, "Similar" < "X" < "Y" so the logic below works
countsim <- paste("Similar =", dim(subset(xy_data, Classification=="Similar"))[1])
countX <- paste("%(x_series_name)s =", dim(subset(xy_data, Classification=="%(x_series_name)s"))[1])
countY <- paste("%(y_series_name)s =", dim(subset(xy_data, Classification=="%(y_series_name)s"))[1])
countX
countY
countsim
# Set graph limits and the position for the correlation value
minx <- min(0.0, min(xy_data$xerrors) - 0.1)
miny <- min(0.0, min(xy_data$yerrors) - 0.1)
maxx <- max(1.0, max(xy_data$xerrors) + 0.1)
maxy <- max(1.0, max(xy_data$yerrors) + 0.1)
# Create a square plot (x-range = y-range)
minx <- min(minx, miny)
miny <- minx
maxx <- max(maxx, maxy)
maxy <- maxx
xpos <- maxx / 25.0
ypos <- maxy - (maxy / 25.0)
ypos_2 <- maxy - (2 * maxy / 25.0)
plot_scale <- scale_color_manual(
"Counts",
values = c( "Similar" = '#444444', "%(x_series_name)s" = '%(x_color)s', "%(y_series_name)s" ='%(y_color)s'),
labels = c( "Similar" = countsim, "%(x_series_name)s" = countX, "%(y_series_name)s" = countY) )'''
if add_similarity_range_annotation:
main_plot_script += '''
# Polygon denoting the similarity range. We turn off plot clipping below (gt$layout$clip) so we need to be more exact than using 4 points when defining the region
boxy_mc_boxface <- data.frame(
X = c(minx - 0, maxx - %(similarity_range)f, maxx + 0, maxx + 0, 0 + %(similarity_range)f, 0),
Y = c(minx - 0 + %(similarity_range)f, maxx + 0, maxx + 0, maxx + 0 -%(similarity_range)f, 0, 0 )
)'''
else:
main_plot_script += '''
# Polygon denoting the similarity range. We turn off plot clipping below (gt$layout$clip) so we need to be more exact than using 4 points when defining the region
boxy_mc_boxface <- data.frame(
X = c(minx - 1, maxx + 1, maxx + 1, minx - 1),
Y = c(minx - 1 + %(similarity_range)f, maxx + 1 + %(similarity_range)f, maxx + 1 - %(similarity_range)f, minx - 1 - %(similarity_range)f)
)'''
if shape_by_category:
main_plot_script += '''
# Plot
p <- qplot(main="", xerrors, yerrors, data=xy_data, xlab=xlabel, ylab=ylabel, alpha = I(txtalpha), shape=factor(categories), col=factor(Classification)) +'''
else:
main_plot_script += '''
# Plot
p <- qplot(main="", xerrors, yerrors, data=xy_data, xlab=xlabel, ylab=ylabel, alpha = I(txtalpha), shape=factor(Classification), col=factor(Classification)) +'''
main_plot_script += '''
geom_polygon(data=boxy_mc_boxface, aes(X, Y), fill = "#bbbbbb", alpha = 0.4, color = "darkseagreen", linetype="blank", inherit.aes = FALSE, show.legend = FALSE) +
plot_scale +
geom_point() +
guides(col = guide_legend()) +
labs(title = "%(plot_title)s") +
theme(plot.title = element_text(color = "#555555", size=rel(0.75))) +
theme(axis.title = element_text(color = "#555555", size=rel(0.6))) +
theme(legend.title = element_text(color = "#555555", size=rel(0.45)), legend.text = element_text(color = "#555555", size=rel(0.4))) +
coord_cartesian(xlim = c(minx, maxx), ylim = c(miny, maxy)) + # set the graph limits
annotate("text", hjust=0, size = 2, colour="#222222", x = xpos, y = ypos, label = sprintf("R = %%0.2f", round(rvalue, digits = 4))) + # add correlation text; hjust=0 sets left-alignment. Using annotate instead of geom_text avoids blocky text caused by geom_text being run multiple times over the series'''
if label_outliers:
if use_geom_text_repel:
main_plot_script += '''
# Label outliers
geom_text_repel(size=1.5, segment.size = 0.15, color="#000000", alpha=0.6, data=subset(xy_data, abs(yerrors - xerrors) > maxx/3 & xerrors <= maxx / 2 & yerrors >=maxy/2), aes(xerrors, yerrors-maxy/100, label=outlier_labels)) +
geom_text_repel(size=1.5, segment.size = 0.15, color="#000000", alpha=0.6, data=subset(xy_data, abs(yerrors - xerrors) > maxx/3 & xerrors <= maxx / 2 & yerrors < maxy/2), aes(xerrors, yerrors+2*maxy/100, label=outlier_labels)) +
geom_text_repel(size=1.5, segment.size = 0.15, color="#000000", alpha=0.6, data=subset(xy_data, abs(yerrors - xerrors) > maxx/3 & xerrors > maxx / 2 & yerrors >=maxy/2), aes(xerrors, yerrors-maxy/100, label=outlier_labels)) +
geom_text_repel(size=1.5, segment.size = 0.15, color="#000000", alpha=0.6, data=subset(xy_data, abs(yerrors - xerrors) > maxx/3 & xerrors > maxx / 2 & yerrors < maxy/2), aes(xerrors, yerrors+2*maxy/100, label=outlier_labels)) +'''
else:
main_plot_script += '''
# Label outliers
geom_text(hjust = 0, size=1.5, color="#000000", alpha=0.6, data=subset(xy_data, abs(yerrors - xerrors) > maxx/3 & xerrors <= maxx / 2 & yerrors >=maxy/2), aes(xerrors, yerrors-maxy/100, label=outlier_labels)) +
geom_text(hjust = 0, size=1.5, color="#000000", alpha=0.6, data=subset(xy_data, abs(yerrors - xerrors) > maxx/3 & xerrors <= maxx / 2 & yerrors < maxy/2), aes(xerrors, yerrors+2*maxy/100, label=outlier_labels)) +
geom_text(hjust = 1, size=1.5, color="#000000", alpha=0.6, data=subset(xy_data, abs(yerrors - xerrors) > maxx/3 & xerrors > maxx / 2 & yerrors >=maxy/2), aes(xerrors, yerrors-maxy/100, label=outlier_labels)) +
geom_text(hjust = 1, size=1.5, color="#000000", alpha=0.6, data=subset(xy_data, abs(yerrors - xerrors) > maxx/3 & xerrors > maxx / 2 & yerrors < maxy/2), aes(xerrors, yerrors+2*maxy/100, label=outlier_labels)) +'''
counts_title = 'Counts'
if add_similarity_range_annotation:
counts_title += '*'
main_plot_script += '''
#geom_text(hjust = 0, size=1.5, color="#000000", alpha=0.6, data=subset(xy_data, abs(yvalues - xvalues) > 2 & xvalues <= 0), aes(xvalues, yvalues+0.35, label=Origin_of_peptide), check_overlap = TRUE) + # label outliers
#geom_text(hjust = 1, size=1.5, color="#000000", alpha=0.6, data=subset(xy_data, abs(yvalues - xvalues) > 2 & xvalues > 0), aes(xvalues, yvalues+0.35, label=Origin_of_peptide), check_overlap = TRUE) + # label outliers
scale_colour_manual('%(counts_title)s', values = c('#444444', '%(x_color)s', '%(y_color)s'),
labels = c( "Similar" = countsim, "%(x_series_name)s" = countX, "%(y_series_name)s" = countY)) +'''
if shape_by_category:
legal_shapes_str = ', '.join(map(str, legal_shapes))
main_plot_script += '''
scale_shape_manual('%(shape_category_title)s', values = c(%(legal_shapes_str)s),
labels = c( "Similar" = countsim, "%(x_series_name)s" = countX, "%(y_series_name)s" = countY))'''
else:
main_plot_script += '''
scale_shape_manual('%(counts_title)s', values = c(18, 16, 15),
labels = c( "Similar" = countsim, "%(x_series_name)s" = countX, "%(y_series_name)s" = countY))'''
if add_similarity_range_annotation:
main_plot_script += '''+
# Add a caption
annotation_custom(grob = textGrob(gp = gpar(fontsize = 5), hjust = 0, sprintf("* Similar \\u225d \\u00b1 %%0.2f", round(%(similarity_range)f, digits = 2))), xmin = maxx + (2 * maxx / 10), ymin = -1, ymax = -1)'''
main_plot_script += '''
# Plot graph
p
'''
if add_similarity_range_annotation:
main_plot_script += '''
# Code to override clipping
gt <- ggplot_gtable(ggplot_build(p))
gt$layout$clip[gt$layout$name=="panel"] <- "off"
grid.draw(gt)'''
main_plot_script +='''
dev.off()
'''
# Create the R script
plot_type = 'png'
png_plot_commands = main_plot_script % locals()
boxplot_r_script = boxplot_r_script % locals()
r_script_filename = '{0}.R'.format(file_prefix)
r_script_filepath = os.path.join(output_directory, r_script_filename)
write_file(r_script_filepath, boxplot_r_script)
# Run the R script
run_r_script(r_script_filename, cwd = output_directory) | 2,820 |
def decentralized_training_strategy(communication_rounds, epoch_samples, batch_size, total_epochs):
"""
Split one epoch into r rounds and perform model aggregation
:param communication_rounds: the communication rounds in training process
:param epoch_samples: the samples for each epoch
:param batch_size: the batch_size for each epoch
:param total_epochs: the total epochs for training
:return: batch_per_epoch, total_epochs with communication rounds r
"""
if communication_rounds >= 1:
epoch_samples = round(epoch_samples / communication_rounds)
total_epochs = round(total_epochs * communication_rounds)
batch_per_epoch = round(epoch_samples / batch_size)
elif communication_rounds in [0.2, 0.5]:
total_epochs = round(total_epochs * communication_rounds)
batch_per_epoch = round(epoch_samples / batch_size)
else:
raise NotImplementedError(
"The communication round {} illegal, should be 0.2 or 0.5".format(communication_rounds))
return batch_per_epoch, total_epochs | 2,821 |
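A minimal sketch of how the returned values scale (the numbers are hypothetical):
# r = 2 communication rounds: each epoch is split in half and the epoch count doubles
batch_per_epoch, total_epochs = decentralized_training_strategy(
    communication_rounds=2, epoch_samples=10000, batch_size=50, total_epochs=10)
print(batch_per_epoch, total_epochs)  # 100 20
# r = 0.5: aggregate only every other epoch, so the epoch count is halved
batch_per_epoch, total_epochs = decentralized_training_strategy(
    communication_rounds=0.5, epoch_samples=10000, batch_size=50, total_epochs=10)
print(batch_per_epoch, total_epochs)  # 200 5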
def create_config_file_lines():
"""Wrapper for creating the initial config file content as lines."""
lines = [
"[default]\n",
"config_folder = ~/.zettelkasten.d\n",
"\n",
"def_author = Ammon, Mathias\n",
"def_title = Config Parsed Test Title\n",
"def_location_specifier = None\n",
"\n",
"location = ~/zettelkasten\n",
"\n",
"initial_folder_structure = \n",
" lobby,\n",
" %(sources_directory)s,\n",
" _sources/audios,\n",
" _sources/images,\n",
" _sources/pdfs,\n",
" _sources/videos\n",
"\n",
"name_sep = /\n",
"\n",
"required_attributes = \n",
" uid,\n",
" category,\n",
" subcategory\n",
"\n",
"sources_directory = _sources\n",
"\n",
"styles_file = styles.cfg\n",
"\n",
"reserved_folder_names = \n",
" lobby,\n",
" %(sources_directory)s,\n",
" pytest_dir,\n",
" doctest_dir,\n",
" .zettelkasten.d\n",
"\n",
"zettelkasten_bib_file = zettelkasten.bib\n",
"\n",
"[source_file_formats]\n",
"audios = \n",
" mp3,\n",
" wav\n",
"images = \n",
" webp,\n",
" jpg,\n",
" jpeg,\n",
" png\n",
"pdfs =\n",
" pdf,\n",
" odt\n",
"videos =\n",
" mkv,\n",
" webm,\n",
" mp4\n",
"\n",
"[zettel_meta_attribute_defaults]\n",
"# required for zettel adding to work \n",
"category= None\n",
"subcategory= None\n",
"# optional\n",
"author = Mathias Ammon\n",
"topics =\n",
"tags =\n",
"doc = today\n",
"\n",
"[zettel_meta_attribute_labels]\n",
"# required for zettel adding to work\n",
"uid = #+Title:\n",
"category = #+Category:\n",
"subcategory = #+Subcategory:\n",
"# optional\n",
"author = #+Author:\n",
"doc = #+DOC:\n",
"dole = #+DOLE:\n",
"topics = #+Topics:\n",
"tags = #+Tags:\n",
]
return lines | 2,822 |
def delete_snapshots(cluster_name, path, recursive='0'):
"""删除快照数据信息
"""
if recursive == "1":
# monkey patch for delete snapshots recursively
target_path = path.rstrip("/") + "/"
del_snapshot_query = ZdSnapshot.delete().where(
(ZdSnapshot.cluster_name == cluster_name) &
((ZdSnapshot.path.startswith(target_path)) | (ZdSnapshot.path == path))
)
else:
del_snapshot_query = ZdSnapshot.delete().where(
(ZdSnapshot.cluster_name == cluster_name) &
(ZdSnapshot.path == path)
)
del_snapshot_query.execute() | 2,823 |
def get_dummies(
data: pandas.core.frame.DataFrame,
prefix: Dict[Literal["B", "A"], Literal["bar", "foo"]],
columns: List[Literal["B", "A"]],
dtype: Type[numpy.int8],
):
"""
usage.koalas: 1
"""
... | 2,824 |
def p_planes_tangent_to_cylinder(base_point, line_vect, ref_point, dist, ):
"""find tangent planes of a cylinder passing through a given point ()
.. image:: ../images/plane_tangent_to_one_cylinder.png
:scale: 80 %
:align: center
Parameters
----------
base_point : point
point M
line_vect : vector
direction of the existing bar's axis, direction [the other pt, base_pt], **direction very important!**
ref_point : point
point Q
dist : float
cylinder radius
Returns
-------
list of two [ref_point, local_y, local_x]
local x = QB
local_y // line_vect
"""
l_vect = normalize_vector(line_vect)
tangent_pts = lines_tangent_to_cylinder(base_point, line_vect, ref_point, dist)
if tangent_pts is None:
return None
base_pt, upper_tang_pt, lower_tang_pt = tangent_pts
r1 = subtract_vectors(add_vectors(base_pt, upper_tang_pt), ref_point)
r1 = normalize_vector(r1)
r2 = subtract_vectors(add_vectors(base_pt, lower_tang_pt), ref_point)
r2 = normalize_vector(r2)
return [[ref_point, l_vect, r1], [ref_point, l_vect, r2]] | 2,825 |
def BOPTools_AlgoTools3D_OrientEdgeOnFace(*args):
"""
* Get the edge <aER> from the face <aF> that is the same as the edge <aE>
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:param aER:
:type aER: TopoDS_Edge &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools3D_OrientEdgeOnFace(*args) | 2,826 |
def create_store_from_creds(access_key, secret_key, region, **kwargs):
"""
Creates a parameter store object from the provided credentials.
Arguments:
access_key {string} -- The access key for your AWS account
secret_key {string} -- The secret key for you AWS account
region {string} -- The region you wish to connect to
Keyword Arguments (Optional):
session='session' {string} -- The session token you wish to use.
Returns:
Object -- An AWS parameter store object.
"""
session = kwargs.get('session') if 'session' in kwargs else ''
store = EC2ParameterStore(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=session, #optional
region_name=region
)
return store | 2,827 |
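A usage sketch for the helper above with placeholder credentials (EC2ParameterStore is whatever class the surrounding module imports; all values here are dummies):
store = create_store_from_creds(
    access_key="AKIAXXXXXXXXXXXXXXXX",   # dummy values
    secret_key="dummy-secret",
    region="eu-west-1",
    session="optional-session-token",
)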
def comment_lgtm(pr_handle):
"""
Posts a LGTM (Looks good to me!) comment in the PR if the PR did not produce new clang-tidy warnings or errors.
"""
lgtm = 'This Pull request Passed all of clang-tidy tests. :+1:'
comments = pr_handle.get_issue_comments()
for comment in comments:
if comment.body == lgtm:
print("Already posted LGTM!!")
return
pr_handle.create_issue_comment(lgtm) | 2,828 |
def example_manual_j1939():
"""Manually enter a few frames as a list and decode using J1939 rules.
"""
timestamp = datetime.now(timezone.utc)
frames = [
{
"TimeStamp": timestamp.timestamp() * 1E9,
"ID": 0x0CF004FE,
"IDE": True,
"DataBytes": [
0x10, 0x7D, 0x82, 0xBD, 0x12, 0x00, 0xF4, 0x82
]
},
{
"TimeStamp": timestamp.timestamp() * 1E9 + 1E6,
"ID": 0x0CF004FE,
"IDE": True,
"DataBytes": [
0x10, 0x7D, 0x82, 0xBD, 0x1A, 0x00, 0xF4, 0x82
]
},
{
"TimeStamp": timestamp.timestamp() * 1E9 + 2E6,
"ID": 0x0CF004FE,
"IDE": True,
"DataBytes": [
0x10, 0x7D, 0x82, 0xBD, 0x22, 0x00, 0xF4, 0x82
]
}
]
result = can_decoder.IteratorDecoder(frames, setup_db_j1939())
for r in result:
print(r)
return | 2,829 |
def are_objects_equal(object1, object2):
"""
compare two (collections of) arrays or other objects for equality. Ignores nan.
"""
if isinstance(object1, abc.Sequence):
items = zip(object1, object2)
elif isinstance(object1, dict):
items = [(value, object2[key]) for key, value in object1.items()]
else:
items = [(object1, object2)]
# equal_nan does not exist in array_equal in old numpy
npy_major_version = tuple(int(v) for v in np.__version__.split('.')[:2])
if npy_major_version < (1, 19):
fixed = [(np.nan_to_num(a1), np.nan_to_num(a2)) for a1, a2 in items]
return np.all([np.all(a1 == a2) for a1, a2 in fixed])
try:
return np.all(
[np.array_equal(a1, a2, equal_nan=True) for a1, a2 in items]
)
except TypeError:
# np.array_equal fails for arrays of type `object` (e.g: strings)
return np.all([a1 == a2 for a1, a2 in items]) | 2,830 |
def get_recipes_from_dict(input_dict: dict) -> dict:
"""Get recipes from dict
Attributes:
input_dict (dict): recipes for an ISO 639-1 language code
Returns:
recipes (dict): collection of recipes for input language
"""
if not isinstance(input_dict, dict):
raise TypeError("Input is not type dict")
recipes = input_dict
return recipes | 2,831 |
def tasks_from_wdl(wdl):
"""
Return a dictionary of tasks contained in a .wdl file.
The values are task definitions within the wdl
"""
return scopes_from_wdl("task", wdl) | 2,832 |
def ejobs(args, bjobsargs):
"""Wrapper script with bjobs functionality."""
# handle arguments
if args.pending:
bjobsargs = ["-p"] + bjobsargs
args.groupby = "pend_reason"
for shortcutname, shortcutargs in ejobsshortcuts.items():
if getattr(args, shortcutname):
bjobsargs = shortcutargs + bjobsargs
for l in list("rsda"):
if args.__dict__[l]:
bjobsargs = ["-" + l] + bjobsargs
if args.u:
unames = map(lookupalias, args.u.split())
bjobsargs = ["-u", " ".join(unames)] + bjobsargs
if args.jid:
args.output = ["id"]
args.fast = True
args.noheader = True
if args.output:
args.output = sum([fields.split() for fields in args.output], [])
if len(args.output) == 1:
args.noheader = True
# read
jobs = readjobs(bjobsargs, fast=args.fast)
if not jobs:
return
# sort
jobs.sort(key=lambda j: j["submit_time"])
jobs.sort(key=lambda j: j["priority"], reverse=True) # can be None
jobs.sort(key=lambda j: -j["run_time"])
jobs.sort(key=lambda j: -statorder[j["stat"]])
if args.sort:
try:
jobs.sort(key=lambda j: j[args.sort])
except:
print("Unknown sorting key \"%s\"!" % args.sort, file=sys.stderr)
# no grouping
if not args.groupby or args.groupby not in jobs[0]:
if args.sum:
jobs = [sumjobs(jobs)]
printjobs(jobs, wide=args.wide, long=args.long, output=args.output,
header=not args.noheader)
return
# grouping
jobgroups = groupjobs(jobs, args.groupby)
if not args.pending:
if args.sum:
jobs = []
for title in sorted(jobgroups.keys()):
sumjob = sumjobs(jobgroups[title])
if args.groupby not in ("name", "jobname", "user"):
sumjob["title"] = title
jobs.append(sumjob)
printjobs(jobs, wide=args.wide, long=args.long, output=args.output,
header=not args.noheader)
else:
for title in sorted(jobgroups.keys()):
printjobs(jobgroups[title], wide=args.wide, long=args.long,
output=args.output, header=not args.noheader,
title=title)
return
# pending
for title in sorted(jobgroups.keys()):
jobs = jobgroups[title]
reasons = jobs[0]["pend_reason"]
resreq = jobs[0]["resreq"]
hostreq = jobs[0]["host_req"]
if not reasons or len(reasons) != 1:
title = None
else:
# use singular reason as title
reason = reasons[0]
title = reason[0]
if not isinstance(reason[1], bool):
title += ": %d" % reason[1]
if args.sum:
jobs = [sumjobs(jobs)]
printjobs(jobs, wide=args.wide, long=args.long, output=args.output,
header=not args.noheader, title=title)
if reasons and len(reasons) > 1:
# show pending reasons
for reason, count in reasons:
for pattern in pendingcolors:
if re.match(pattern, reason):
reason = color(reason, pendingcolors[pattern])
break
if count is True:
print(" " + reason)
else:
print(" %4d %s" % (count, reason))
# show potential hosts
if resreq and not args.fast:
resreq = re.sub(" && \(hostok\)", "", resreq)
resreq = re.sub(" && \(mem>\d+\)", "", resreq)
hosts = readhosts(["-R", resreq] + hostreq)
hostnames = [h["host_name"] for h in hosts]
jobs = readjobs(["-u", "all", "-r", "-m", " ".join(hostnames)])
hosts.sort(key=lambda h: h["host_name"])
printhosts(hosts, jobs, wide=args.wide,
header=not args.noheader)
if len(jobgroups) > 1:
print() | 2,833 |
def install_openstack_service_checks():
"""Entry point to start configuring the unit
Triggered if related to the nrpe-external-master relation.
Some relation data can be initialized if the application is related to
keystone.
"""
set_flag('openstack-service-checks.installed')
clear_flag('openstack-service-checks.configured') | 2,834 |
def test_phenomodel_POST_add_hpo_checkbox_to_subpanel(app, user_obj, institute_obj, hpo_checkboxes):
"""Test adding an HPO checkbox with its children to a subpanel of a phenotype model via POST request"""
# GIVEN an institute with a phenotype model
store.create_phenomodel(institute_obj["internal_id"], "Test model", "Model description")
model_obj = store.phenomodel_collection.find_one()
# containing a subpanel
model_obj["subpanels"] = {"subpanel_x": TEST_SUBPANEL}
store.update_phenomodel(model_obj["_id"], model_obj)
# GIVEN a database with the required HPO terms (one parent term and one child term)
store.hpo_term_collection.insert_many(hpo_checkboxes)
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
resp = client.get(url_for("auto_login"))
# WHEN the user creates an HPO checkbox using the endpoint
form_data = dict(
hpo_subpanel_id="subpanel_x",
hpoHasTitle="on",
hpoTermTitle="Title for term",
hpo_term=" | ".join([hpo_checkboxes[0]["_id"], hpo_checkboxes[0]["description"]]),
hpo_custom_name="Alternative HPO name",
add_hpo="",
includeChildren="on",
)
resp = client.post(
url_for(
"overview.checkbox_edit",
institute_id=institute_obj["internal_id"],
model_id=model_obj["_id"],
),
data=form_data,
)
# THEN the term should have been added to the subpanel checkboxes
updated_model = store.phenomodel_collection.find_one()
checkbox = updated_model["subpanels"]["subpanel_x"]["checkboxes"]["HP:0025190"]
assert checkbox["name"] == "HP:0025190"
assert checkbox["checkbox_type"] == "hpo"
assert checkbox["description"] == "Bilateral tonic-clonic seizure with generalized onset"
assert checkbox["term_title"] == form_data["hpoTermTitle"]
assert checkbox["custom_name"] == form_data["hpo_custom_name"]
# Additionally, the HPO term checkbox should contain a nested HPO term:
nested_hpo_term = {
"name": hpo_checkboxes[1]["_id"],
"description": hpo_checkboxes[1]["description"],
}
assert checkbox["children"] == [nested_hpo_term] | 2,835 |
def wrap_parse(content, args):
"""
Wraps a call to `parse` in a try/except block so that one can use a Pool
and still get decent error messages.
Arguments
---------
content: segments are strings
args: a namespace, see `parse`
Returns
-------
parse trees and time to parse
"""
if content.strip()=="" or content is None:
return None
try:
trees = parse(content, args)
if len(trees)!=0:
return trees
else:
return None
except:
raise Exception(''.join(traceback.format_exception(*sys.exc_info()))) | 2,836 |
async def get_category_item_route(category_id: CategoryEnum, item_id: ObjectID,
db: AsyncIOMotorClient = Depends(get_database)) -> ItemInResponse:
"""Get the details about a particular item"""
_res = await db[category_id]["data"].find_one({"_id": item_id})
if _res:
return ItemInResponse(data=_res)
raise HTTPException(
status_code=404,
detail=f'ObjectID {item_id} not found in {category_id}') | 2,837 |
def weighted_SVD(matrix, error=None, full_matrices=False):
"""
Finds the most important modes of the given matrix, weighted by the given error.
matrix: a horizontal rectangular matrix
error: weighting applied to the dimension corresponding to the rows
"""
if error is None:
error = np.ones(matrix.shape[0])
expanded_error = error[:,np.newaxis]
to_svd = matrix / expanded_error
(SVD_U, SVD_S, SVD_V_transpose) =\
la.svd(to_svd, full_matrices=full_matrices)
SVD_U = SVD_U * expanded_error
return SVD_U, SVD_S, SVD_V_transpose.T | 2,838 |
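A quick self-check sketch, assuming numpy is imported as np and numpy.linalg as la (matching the names used in the function): the weighted factors still reconstruct the original matrix.
import numpy as np
import numpy.linalg as la
rng = np.random.default_rng(0)
matrix = rng.normal(size=(4, 6))
error = np.array([1.0, 2.0, 0.5, 1.5])
U, S, V = weighted_SVD(matrix, error=error)
# U carries the error weighting, so U @ diag(S) @ V.T recovers the input
print(np.allclose(U @ np.diag(S) @ V.T, matrix))   # True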
def ingredients():
"""Route to list all ingredients currently in the database.
"""
query = request.args.get("q")
ingredients = db.get_ingredient_subset_from_db(query)
return jsonify(ingredients) | 2,839 |
def read_h5_particles(particles_file, refpart, real_particles, bucket_length, comm, verbose):
"""Read an array of particles from an HDF-5 file"""
four_momentum = refpart.get_four_momentum()
pmass = four_momentum.get_mass()
E_0 = four_momentum.get_total_energy()
p0c = four_momentum.get_momentum()
myrank = comm.get_rank()
mpisize = comm.get_size()
if myrank==0 and verbose:
print("Loading particles from h5 file: ", particles_file)
if myrank == 0:
#h5 = tables.open_file(particles_file)
h5 = h5py.File(particles_file)
# use explicit int conversion otherwise there seems to
# be a Python->C++ type mismatch of numpy.int64->int
#num_total_particles = int(h5.root.particles.shape[0])
num_total_particles = int(h5['particles'].shape[0])
if verbose:
print("Total of ", num_total_particles, " particles from file")
# broadcast num particles to all nodes
MPI.COMM_WORLD.bcast(num_total_particles, root=0)
else:
num_total_particles = None
num_total_particles = MPI.COMM_WORLD.bcast(num_total_particles, root=0)
if myrank == 0:
particles = h5['particles']
# make sure the data has the correct shape, either [n,6] without
# particles IDs or [n,7] with particle IDs.
if (particles.shape[1] != 7):
raise RuntimeError, "input data shape %s has incorrect number of particle coordinates"%repr(particles.shape)
#Note: Synergia bunch constructor updated - commit 077b99d7 - 11/17/2016
#Using old constructor throws an ArgumentError of a non-standard type.
# Using a try and except to handle both instances.
try:
# try the original constructor
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm,
bucket_length)
except Exception, e:
#look to see if it's an ArgumentError by evaluating the traceback
if (not str(e).startswith("Python argument types in")):
raise
else:
# use the new constructor
if verbose:
print("Using updated bunch constructor")
bunch = synergia.bunch.Bunch(
refpart,
num_total_particles, real_particles, comm)
# now set the new parameter 'z_period_length'
if bucket_length is not None:
bunch.set_z_period_length(bucket_length)
else:
bucket_length = 1. #fix this quantity
local_num = bunch.get_local_num()
local_particles = bunch.get_local_particles()
# Each processor will have a possibly different number of local particles.
# rank 0 has to find out how many each of them has and distribute them
n_particles_by_proc = MPI.COMM_WORLD.gather(local_num, 0)
if myrank == 0:
# copy in my particles
this_rank_start = 0
local_particles[:,:] = particles[0:local_num, :]
this_rank_start += local_num
# send particles out to other ranks
for r in range(1, mpisize):
this_rank_end = this_rank_start+n_particles_by_proc[r]
MPI.COMM_WORLD.send(obj=particles[this_rank_start:this_rank_end, :],
dest=r)
this_rank_start += n_particles_by_proc[r]
else:
# I'm not rank 0. Receive my particles
lp = MPI.COMM_WORLD.recv(source=0)
local_particles[:,:] = lp[:,:]
return bunch | 2,840 |
def readAndMapFile(path):
"""
Main file breaker - this takes a given file and breaks it into arbitrary
fragments, returning and array of fragments. For simplicity, this is breaking on
newline characters to start with. May have to be altered to work with puncuation
and/or special characters as needed.
"""
splitLines = []
def mapper(line):
strippedLine = line.strip()
if (len(strippedLine) > 0):
splitLines.append(strippedLine)
with open(path, "r", encoding=FILE_ENCODING) as f:
content = f.read()
items = content.split("\n")
for i in items:
logging.info("n-gram length = {}".format(len(i)))
mapper(i)
logging.info("Read {} lines of text from {}".format(len(splitLines), path))
return splitLines | 2,841 |
def fetch_git_logs(repo, from_date, to_date, args): # pragma: no cover
"""Fetch all logs from Gitiles for the given date range.
Gitiles does not natively support time ranges, so we just fetch
everything until the range is covered. Assume that logs are ordered
in reverse chronological order.
"""
cursor = ''
commit_date = to_date
data = []
while cursor is not None:
page = fetch_git_page(repo, cursor)
logs = page.get('log', [])
cursor = page.get('next')
for log in logs:
committer = log.get('committer', {})
commit_date = date_from_git(committer.get('time'))
if not commit_date:
continue
if commit_date > to_date:
continue
if commit_date < from_date:
break
files = set()
for entry in log.get('tree_diff', []):
files.add(entry['old_path'])
files.add(entry['new_path'])
if args.path_filter_include:
if not any(matches_path_filter(p, args.path_filter_include)
for p in files):
continue
if args.path_filter_exclude:
if any(matches_path_filter(p, args.path_filter_exclude)
for p in files):
continue
data.append({
'author': log.get('author', {}).get('email'),
'date': commit_date,
'commit-bot': bool('commit-bot' in committer.get('email', '')),
'revision': log.get('commit'),
})
if commit_date < from_date:
break
return data | 2,842 |
def _gen_bfp_op(op, name, bfp_args):
"""
Do the 'sandwich'
With an original op:
out = op(x, y)
grad_x, grad_y = op_grad(grad_out)
To the following:
x_, y_ = input_op(x, y)
Where input_op(x, y) -> bfp(x), bfp(y)
and input_op_grad(grad_x, grad_y) -> bfp(grad_x), bfp(grad_y)
out_ = op(x_, y_)
out = output_op(out)
Where output_op(out) -> bfp(out)
and output_op_grad(grad_out) -> bfp(grad_out)
This way we guarantee that everything in and out of the forward and backward operations is
properly converted to bfp
"""
name = _get_op_name(name, **bfp_args)
class NewOpIn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, w):
return (float_to_bfp_batched(x, backward=False, **bfp_args), w)
@staticmethod
def backward(ctx, grad_x, grad_w):
return (grad_x, grad_w)
NewOpIn.__name__ = name + '_In'
new_op_in = NewOpIn.apply
class NewOpOut(torch.autograd.Function):
@staticmethod
def forward(ctx, op_out):
return op_out
@staticmethod
def backward(ctx, op_out_grad):
return float_to_bfp_batched(op_out_grad, backward=True, **bfp_args)
NewOpOut.__name__ = name + '_Out'
new_op_out = NewOpOut.apply
def new_op(x, w, *args, **kwargs):
x, w = new_op_in(x, w)
out = op(x, w, *args, **kwargs)
return new_op_out(out)
return new_op | 2,843 |
def angleaxis_to_rotation_matrix(aa):
"""Converts the 3 element angle axis representation to a 3x3 rotation matrix
aa: numpy.ndarray with 1 dimension and 3 elements
Returns a 3x3 numpy.ndarray
"""
angle = np.sqrt(aa.dot(aa))
if angle > 1e-6:
c = np.cos(angle);
s = np.sin(angle);
u = np.array([aa[0]/angle, aa[1]/angle, aa[2]/angle]);
R = np.empty((3,3))
R[0,0] = c+u[0]*u[0]*(1-c); R[0,1] = u[0]*u[1]*(1-c)-u[2]*s; R[0,2] = u[0]*u[2]*(1-c)+u[1]*s;
R[1,0] = u[1]*u[0]*(1-c)+u[2]*s; R[1,1] = c+u[1]*u[1]*(1-c); R[1,2] = u[1]*u[2]*(1-c)-u[0]*s;
R[2,0] = u[2]*u[0]*(1-c)-u[1]*s; R[2,1] = u[2]*u[1]*(1-c)+u[0]*s; R[2,2] = c+u[2]*u[2]*(1-c);
else:
R = np.eye(3)
return R | 2,844 |
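A small sanity-check sketch (numpy only): a rotation of pi/2 about the z-axis should map the x-axis onto the y-axis.
import numpy as np
aa = np.array([0.0, 0.0, np.pi / 2])        # angle-axis: rotate pi/2 about z
R = angleaxis_to_rotation_matrix(aa)
print(np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0]))   # True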
def optimise_csr_matrix(csr_matrix):
"""
Performs **in place** operations to optimise csr matrix data. Returns None.
"""
# xxx todo profile performance using permutations / subsets of these
csr_matrix.sum_duplicates()
csr_matrix.eliminate_zeros()
csr_matrix.sort_indices() | 2,845 |
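A usage sketch with scipy; the three calls used above are standard scipy.sparse.csr_matrix methods:
import numpy as np
from scipy.sparse import csr_matrix
# row 0 stores column 1 twice and row 1 stores an explicit zero
data = np.array([1.0, 2.0, 0.0])
indices = np.array([1, 1, 2])
indptr = np.array([0, 2, 3])
m = csr_matrix((data, indices, indptr), shape=(2, 3))
optimise_csr_matrix(m)   # in place: duplicates summed, explicit zero dropped, indices sorted
print(m.nnz)             # 1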
def test_basic_property_of_random_matrix():
"""Check basic properties of random matrix generation"""
for name, random_matrix in all_random_matrix.items():
print(name)
check_input_size_random_matrix(random_matrix)
check_size_generated(random_matrix)
if name != "random_subsample_normalized":
check_zero_mean_and_unit_norm(random_matrix)
check_approximate_isometry(random_matrix) | 2,846 |
def profilerTool(categoryView: bool = False,collapseSelectedEvents: bool = False,collapseSelectedEventsRepetition: bool = False,cpuView: bool = False,destroy: bool = False,exists: bool = False,expandSelectedEvents: bool = False,expandSelectedEventsRepetition: bool = False,findNext: bool = False,findPrevious: bool = False,frameAll: bool = False,frameSelected: bool = False,isolateSegment: int = 1,make: bool = False,matchWholeWord: bool = False,searchEvent: str = "",segmentCount: bool = False,showAllEvent: bool = False,showCriticalPath: bool = False,showSelectedEvents: bool = False,showSelectedEventsRepetition: bool = False,threadView: bool = False,unisolateSegment: bool = False) -> None:
"""
This script is used by profilerPanel, which interacts with the Profiler Tool view (the drawing area). It can be used to control some of the Profiler Tool's behavior.
-----------------------------------------
Flags:
-----------------------------------------
categoryView (boolean): Change the view mode to category view
-----------------------------------------
collapseSelectedEvents (boolean): Hide all sub-events of the selected events so that only top-level events are shown
-----------------------------------------
collapseSelectedEventsRepetition (boolean): Hide all repetitions of the selected events' sub-events, based on their comments
-----------------------------------------
cpuView (boolean): Change the view mode to CPU view
-----------------------------------------
destroy (boolean): Destroy the Profiler Tool. Internal flag; not intended for users.
-----------------------------------------
exists (boolean): Query whether the Profiler Tool view exists. The Profiler Tool can only exist after "profilerTool -make" has been called.
-----------------------------------------
expandSelectedEvents (boolean): Show all sub-events of the selected events
-----------------------------------------
expandSelectedEventsRepetition (boolean): Show all repetitions of the selected events' sub-events, based on their comments
-----------------------------------------
findNext (boolean): This flag is used together with the -searchEvent flag.
-----------------------------------------
findPrevious (boolean): This flag is used together with the -searchEvent flag.
-----------------------------------------
frameAll (boolean): Frame all events in the profilerToolView
-----------------------------------------
frameSelected (boolean): Frame all selected events in the profilerToolView
-----------------------------------------
isolateSegment (int): Isolate the given segment. A segment is the set of events that occurred within one animation frame. The -segmentCount flag can be used to query the number of segments in the event buffer. Segment indices start at 0. An error is raised if the given segment does not exist.
-----------------------------------------
make (boolean): Create the Profiler Tool and parent it to the most recently created layout (internal flag). Not intended for users.
-----------------------------------------
matchWholeWord (boolean): Tells the Profiler Tool whether to match whole words when searching for events. The default is false.
-----------------------------------------
searchEvent (string): Search for events. -matchWholeWord can be set before using -searchEvent. If -matchWholeWord is true, the Profiler Tool searches for events whose names exactly match the string. If -matchWholeWord is false, it searches for events whose names contain the string. When this flag is used with -findNext, the Profiler Tool finds the first matching event after the currently selected event. When used with -findPrevious, it finds the first matching event before the currently selected event. If no event or more than one event is currently selected, the search starts from the first event in the profiler buffer. When used without -findNext or -findPrevious, the Profiler Tool finds all matching events. In query mode, this flag needs a value.
-----------------------------------------
segmentCount (boolean): Return the number of segments in the event buffer.
-----------------------------------------
showAllEvent (boolean): Show all events (if events are hidden by a filter) (true), or hide all events (false)
-----------------------------------------
showCriticalPath (boolean): Show the critical path of the selected frame
-----------------------------------------
showSelectedEvents (boolean): Show only the selected events (true), or hide all selected events (false)
-----------------------------------------
showSelectedEventsRepetition (boolean): Show only repetitions of the selected events, based on their comments (true), or hide all repetitions of the selected events, based on their comments (false)
-----------------------------------------
threadView (boolean): Change the view mode to thread view
-----------------------------------------
unisolateSegment (boolean): Un-isolate the currently isolated segment. Nothing happens if no segment is currently isolated.
-----------------------------------------
Return Value:
None: None. In query mode, the return type depends on the queried flag.
"""
pass | 2,847 |
def sendOrderFAK(self, orderType, price, volume, symbol, exchange, stop=False):
"""发送委托"""
if self.trading:
# If stop is True, it means placing a local stop order
req = {}
req['sid'] = self.sid
if orderType == CTAORDER_BUY:
req['direction'] = '0'
req['offset'] = '0'
elif orderType == CTAORDER_SELL:
req['direction'] = '1'
req['offset'] = '1'
elif orderType == CTAORDER_SELL_TODAY:
req['direction'] = '1'
req['offset'] = '3'
elif orderType == CTAORDER_SHORT:
req['direction'] = '1'
req['offset'] = '0'
elif orderType == CTAORDER_COVER:
req['direction'] = '0'
req['offset'] = '1'
elif orderType == CTAORDER_COVER_TODAY:
req['direction'] = '0'
req['offset'] = '3'
req['symbol'] = symbol
req['volume'] = volume
req['price'] = price
req['hedgeflag'] = '1'
req['ordertype'] = '1'
req['exchange'] = exchange
vtOrderID = ctaEngine.sendOrder(req)
return vtOrderID
else:
return None
# ---------------------------------------------------------------------- | 2,848 |
def unique(lst):
"""
:param lst: a list of lists
:return: a unique list of items appearing in those lists
"""
indices = sorted(list(range(len(lst))), key=lst.__getitem__)
indices = set(next(it) for k, it in
itertools.groupby(indices, key=lst.__getitem__))
return [x for i, x in enumerate(lst) if i in indices] | 2,849 |
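A quick sketch of the behavior of unique, assuming itertools is imported as the function requires: the first occurrence of each distinct sub-list is kept, in original order.
print(unique([[1, 2], [3], [1, 2], [3], [4]]))   # [[1, 2], [3], [4]]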
def max_frequency(sig, FS):
"""Compute max frequency along the specified axes.
Parameters
----------
sig: ndarray
input from which max frequency is computed.
FS: int
sampling frequency
Returns
-------
f_max: int
0.95 of max_frequency using cumsum.
"""
f, fs = plotfft(sig, FS, doplot=False)
t = np.cumsum(fs)
try:
ind_mag = np.where(t > t[-1]*0.95)[0][0]
except:
ind_mag = np.argmax(t)
f_max = f[ind_mag]
return f_max | 2,850 |
def split(text):
"""Turns the mobypron.unc file into a dictionary"""
map_word_moby = {}
try:
lines = text.split("\n")
for line in lines:
(word, moby) = line.split(" ", 1)
map_word_moby[word] = moby
    except ValueError as error:
        # line.split(" ", 1) raises ValueError on malformed lines; IOError cannot occur here
        print(f"Failed due to malformed line: {error}")
return map_word_moby | 2,851 |
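# Hedged usage sketch for split() above. The two input lines are made up for
# illustration; the real mobypron.unc data may differ in format details.
sample_text = "cat /k/ae/t\ndog /d/O/g"
pronunciations = split(sample_text)
assert pronunciations["cat"] == "/k/ae/t"
assert pronunciations["dog"] == "/d/O/g"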
def remove_pos(lst):
"""Takes a list of pairs where the first part of a pair is the PO number and
the second is the result 1 = disproved, 2 = proved, 3 = unresolved. Then removes
the proved and disproved outputs and returns the aig with the unresolved
outputs"""
    # initialise each list separately (chained assignment would bind all three names to one list)
    proved = []
    disproved = []
    unresolved = []
for j in range(len(lst)):
jj = lst[j]
if jj[1] == 2:
proved = proved + [jj[0]]
if (jj[1] == 1 or (jj[1] == 0)):
disproved = disproved +[jj[0]]
if jj[1] > 2:
unresolved = unresolved +[jj[0]]
    print('%d outputs proved' % len(proved))
if not proved == []:
if ((max(proved)>n_pos()-1) or min(proved)< 0):
            print(proved)
remove(proved,0) | 2,852 |
def download_as_temporary(src_path_in_s3: Path, bucket_name: str = "keng000-mlops") -> Path:
"""
This context manager downloads a file from s3 as temporary.
Args:
same as the `download` function.
Returns:
Path: temporary file path.
"""
tmp_file = tempfile.NamedTemporaryFile()
tmp_file_path = Path(tmp_file.name)
download(src_path_in_s3, tmp_file_path, bucket_name)
try:
yield tmp_file_path
finally:
logger.info(f"the temporary file removed: {tmp_file_path}")
tmp_file.close() | 2,853 |
def patch_airflow_config(airflow_config):
"""
Updates provided Airflow configuration file to include defaults for cwl-airflow.
If something went wrong, restores the original airflow.cfg from the backed up copy
"""
# TODO: add cwl section with the following parameters:
# - singularity
# - use_container
patches = [
[
"sed",
"-i",
"-e",
"s/^dags_are_paused_at_creation.*/dags_are_paused_at_creation = False/g",
airflow_config,
],
[
"sed",
"-i",
"-e",
"s/^load_examples.*/load_examples = False/g",
airflow_config,
],
[
"sed",
"-i",
"-e",
"s/^logging_config_class.*/logging_config_class = cwl_airflow.config_templates.airflow_local_settings.DEFAULT_LOGGING_CONFIG/g",
airflow_config,
],
[
"sed",
"-i",
"-e",
"s/^hide_paused_dags_by_default.*/hide_paused_dags_by_default = True/g",
airflow_config,
],
]
airflow_config_backup = airflow_config + "_backup_" + str(uuid.uuid4())
try:
shutil.copyfile(airflow_config, airflow_config_backup)
for patch in patches:
run(
patch,
shell=False, # for proper handling of filenames with spaces
check=True,
stdout=DEVNULL,
stderr=DEVNULL,
)
except (CalledProcessError, FileNotFoundError) as err:
logging.error(
f"""Failed to patch Airflow configuration file. Restoring from the backup and exiting.\n{err}"""
)
if os.path.isfile(airflow_config_backup):
shutil.copyfile(airflow_config_backup, airflow_config)
sys.exit(1)
finally:
if os.path.isfile(airflow_config_backup):
os.remove(airflow_config_backup) | 2,854 |
def merge(old, new):
"""
Merge two .properties files into 1
For properties that are common between old and new, find the values that
changed, then overwrite values in new file with those from old file
For properties that differ between the two files, just print them out
Write the new properties file to the current working directory
"""
old = os.path.abspath(os.path.expanduser(old))
new = os.path.abspath(os.path.expanduser(new))
print(f'Comparing old properties file with new one:')
contents = {}
for i, f in enumerate([old, new]):
if os.path.splitext(f)[-1] != '.properties':
# print(f'{f} must be a .properties file. Exiting!')
exit(1)
print(f)
contents[i] = {
'filepath': f,
'properties': properties_to_dict(f)
}
print()
old_props = set(contents[0]['properties'].keys())
new_props = set(contents[1]['properties'].keys())
common = old_props.intersection(new_props)
for key in common:
old_val = contents[0]['properties'][key]
new_val = contents[1]['properties'][key]
if old_val != new_val:
print(
f'Overwriting property: {key} in new file with old value. '
f'{new_val} --> {old_val}'
)
contents[1]['properties'][key] = old_val
old_only = old_props.difference(new_props)
new_only = new_props.difference(old_props)
for i, prop_set in enumerate([old_only, new_only]):
if i == 0:
print('\nProperties in old file but not in new file: ')
pprint(old_only)
else:
print('\nProperties in new file but not in old file: ')
pprint(new_only)
out_file = os.path.join(
os.getcwd(), os.path.splitext(os.path.split(new)[-1])[0] +
'-updated.properties'
)
print(f'Writing merged properties file to: {out_file}')
dict_to_properties(contents[1]['properties'], out_file) | 2,855 |
def report(key_name=None, priority=-1, **formatters):
""" Use this decorator to indicate what returns to include in the report and how to format it """
def tag_with_report_meta_data(cls):
# guard: prevent bad coding by catching bad return key
if key_name and key_name not in cls.return_keys:
raise Exception("Task %s does not specify %s using the @returns decorator. "
"It cannot be used in @report" % (cls.name, key_name))
report_entry = {
"key_name": key_name,
'priority': priority,
'formatters': formatters,
}
if not hasattr(cls, 'report_meta'):
cls.report_meta = []
cls.report_meta.append(report_entry)
return cls
return tag_with_report_meta_data | 2,856 |
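# Hedged usage sketch for the @report decorator above. ScanTask is a stand-in class
# invented here; it only needs the `name` and `return_keys` attributes that the
# decorator's guard checks, not the full task framework the original belongs to.
@report(key_name="open_ports", priority=10, joiner=", ")
class ScanTask:
    name = "scan"
    return_keys = ["open_ports", "duration"]

assert ScanTask.report_meta[0]["key_name"] == "open_ports"
assert ScanTask.report_meta[0]["formatters"] == {"joiner": ", "}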
def calc_momentum_def(x_loc, X, Y, U):
""" calc_momentum_def() : Calculates the integral momentum deficit of scalar field U stored at \
locations X,Y on a vertical line that runs nearest to x_loc. """
U_line, x_line, x_idx_line = get_line_quantity(x_loc, X, Y, U)
y_line = Y[:,x_idx_line]
return scipy.integrate.trapz(U_line*(1-U_line), y_line) | 2,857 |
def display_unit_title(unit, app_context):
"""Prepare an internationalized display for the unit title."""
course_properties = app_context.get_environ()
template = get_unit_title_template(app_context)
return template % {'index': unit.index, 'title': unit.title} | 2,858 |
def load_user(user_id):
"""Load the user object from the user ID stored in the session"""
return User.objects(pk=user_id).first() | 2,859 |
def get_complex_replay_list():
"""
For full replays that have crashed or failed to be converted
:return:
"""
return [
'https://cdn.discordapp.com/attachments/493849514680254468/496153554977816576/BOTS_JOINING_AND_LEAVING.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496153569981104129/BOTS_NO_POSITION.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496153605074845734/ZEROED_STATS.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/496180938968137749/FAKE_BOTS_SkyBot.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/497149910999891969/NEGATIVE_WASTED_COLLECTION.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/497191273619259393/WASTED_BOOST_WHILE_SUPER_SONIC.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/501630263881760798/OCE_RLCS_7_CARS.replay',
'https://cdn.discordapp.com/attachments/493849514680254468/561300088400379905/crossplatform_party.replay'
] | 2,860 |
def print_stat(label, stat_val, p_val):
"""Helper function to print out statistical tests."""
print(label + ': \t {: 5.4f} \t{: 5.4f}'.format(stat_val, p_val)) | 2,861 |
def parse_registry():
""" Parses the provided registry.dat file and returns a dictionary of chunk
file names and hashes. (The registry file is just a json dictionary containing
a list of file names and hashes.) """
registry = request.values.get("registry", None)
if registry is None:
return None
try:
ret = json.loads(registry)
except ValueError:
return abort(400)
if not isinstance(ret, dict):
return abort(400)
return ret | 2,862 |
def get_cosine_with_hard_restarts_schedule_with_warmup(optim: Optimizer,
num_warmup_step: float,
num_training_step: int,
num_cycles: float = 1.,
last_epoch: int = -1):
"""
    Get a scheduler whose learning rate warms up linearly over ``[0, num_warmup_step)`` and then decays following a cosine
    function with several hard restarts.
"""
def lr_lambda(current_step):
if current_step < num_warmup_step:
return float(current_step) / float(max(1.0, num_warmup_step))
progress = float(current_step - num_warmup_step) / float(max(1, num_training_step - num_warmup_step))
if progress >= 1.0:
return 0.
return max(0., .5 * (1. + math.cos(math.pi * ((float(num_cycles) * progress) % 1.))))
return LambdaLR(optim, lr_lambda, last_epoch) | 2,863 |
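# Hedged usage sketch, assuming PyTorch is installed. The names the function above relies on
# (math, Optimizer, LambdaLR) would normally be imported at the top of its module; they are
# repeated here so the sketch is self-contained.
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 2)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
    optim, num_warmup_step=10, num_training_step=100, num_cycles=2.0)
for _ in range(100):
    optim.step()       # normally preceded by a forward/backward pass
    scheduler.step()   # lr warms up for 10 steps, then follows two cosine cycles with hard restarts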
def main():
"""
"""
generator('arch_dependencies', arch_dependencies)
generator('arch_packages', arch_packages)
generator('aur_packages', aur_packages)
generator('yay_packages', yay_packages)
generator('python_packages', python_packages)
generator('ttf_fonts', ttf_fonts)
generator('otf_fonts', otf_fonts) | 2,864 |
def home():
"""
Render Homepage
--------------------------------------------------------------
This site should be cached, because it is the main entry point for many users.
"""
bestseller: typing.List[Device] = get_bestsellers()
specialist_manufacturers = Manufacturer.query.filter(
(Manufacturer.name == "Samsung") | (Manufacturer.name == "Huawei")
).all()
return render_template("shop/home.html", bestseller=bestseller, specialist_manufacturers=specialist_manufacturers) | 2,865 |
def reject_call():
"""Ends the call when a user does not want to talk to the caller"""
resp = twilio.twiml.Response()
resp.say("I'm sorry, Mr. Baker doesn't want to talk to you. Goodbye scum.", voice='woman', language='en-GB')
resp.hangup()
return str(resp) | 2,866 |
def table_prep(data, columns=''):
"""
Data processor for table() function.
    You can call it separately as well and in
    return get a non-prettified summary table.
    Unless columns are defined, the first three
    columns are chosen by default.
    SYNTAX EXAMPLE:
    table_prep(df, columns=['quality_score', 'influence_score', 'reach_score'])
"""
if data.shape[1] != 3:
if len(columns) != 3:
if data.shape[1] > 3:
print("showing first three columns because no columns were \
specific / data had more than 3 columns")
data = pd.DataFrame(data[data.columns[0:3]])
if data.shape[1] < 3:
print("You need at least 3 columns of data for this table")
quit()
if len(columns) == 3:
data = data[columns]
desc = pd.DataFrame({'sum': data.sum().astype('int'),
'median': data.median(),
'mean': data.mean(),
'std': data.std()})
desc = desc.round(decimals=2)
return desc | 2,867 |
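# Hedged usage sketch for table_prep() above, using a small made-up DataFrame with
# exactly three score columns so no column selection is needed.
import pandas as pd

scores = pd.DataFrame({'quality_score': [3, 5, 8],
                       'influence_score': [10, 20, 30],
                       'reach_score': [7, 7, 9]})
summary = table_prep(scores)
print(summary)  # one row per input column: sum, median, mean, std (rounded to 2 decimals)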
def test_all_find_el_are_wrapped(snapshot):
"""All find_* functions are wrapped."""
el = MockDriver().find_element_by_id("ignored")
result = []
result.append(el.find_element_by_id("ignored"))
result.append(el.find_elements_by_id("ignored"))
result.append(el.find_element_by_xpath("ignored"))
result.append(el.find_elements_by_xpath("ignored"))
result.append(el.find_element_by_link_text("ignored"))
result.append(el.find_elements_by_link_text("ignored"))
result.append(el.find_element_by_partial_link_text("ignored"))
result.append(el.find_elements_by_partial_link_text("ignored"))
result.append(el.find_element_by_name("ignored"))
result.append(el.find_elements_by_name("ignored"))
result.append(el.find_element_by_tag_name("ignored"))
result.append(el.find_elements_by_tag_name("ignored"))
result.append(el.find_element_by_class_name("ignored"))
result.append(el.find_elements_by_class_name("ignored"))
result.append(el.find_element_by_css_selector("ignored"))
result.append(el.find_elements_by_css_selector("ignored"))
result.append(el.find_element("ignored", "twice"))
result.append(el.find_elements("ignored", "twice"))
snapshot.assert_match(result)
snapshot.assert_match(el.calls) | 2,868 |
def lerp(a,b,t):
""" Linear interpolation between from @a to @b as @t goes between 0 an 1. """
return (1-t)*a + t*b | 2,869 |
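# Quick sketch of lerp() above: t=0 returns a, t=1 returns b, values in between blend linearly.
# It also works element-wise on numpy arrays since it only uses * and +.
import numpy as np

assert lerp(0.0, 10.0, 0.25) == 2.5
midpoints = lerp(np.array([0.0, 2.0]), np.array([4.0, 6.0]), 0.5)  # -> array([2., 4.])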
def convert_to_legacy_v3(
game_tick_packet: game_data_struct.GameTickPacket,
field_info_packet: game_data_struct.FieldInfoPacket = None):
"""
Returns a legacy packet from v3
:param game_tick_packet a game tick packet in the v4 struct format.
:param field_info_packet a field info packet in the v4 struct format. Optional. If this is not supplied,
none of the boost locations will be filled in.
"""
legacy_packet = GameTickPacket()
legacy_packet.numBoosts = game_tick_packet.num_boost
legacy_packet.numCars = game_tick_packet.num_cars
for i in range(game_tick_packet.num_cars):
convert_player_info(legacy_packet.gamecars[i], game_tick_packet.game_cars[i])
for i in range(game_tick_packet.num_boost):
convert_boost_info(legacy_packet.gameBoosts[i], game_tick_packet.game_boosts[i])
if field_info_packet is not None:
convert_vector(legacy_packet.gameBoosts[i].Location, field_info_packet.boost_pads[i].location)
convert_ball_info(legacy_packet.gameball, game_tick_packet.game_ball)
convert_game_info(legacy_packet.gameInfo, game_tick_packet.game_info)
return legacy_packet | 2,870 |
def load_settings():
"""Load settings and set the log level."""
settings.load()
logging.getLogger().setLevel(
logging.DEBUG if settings["debug_logging"] else logging.INFO
) | 2,871 |
def _load_audio(audio_path, sample_rate):
"""Load audio file."""
global counter
global label_names
global start
global end
logging.info("Loading '%s'.", audio_path)
try:
lbl1=Alphabet[audio_path[-6]]
lbl2 = Alphabet[audio_path[-5]]
except:
lbl1=1 + counter
lbl2=2 + counter
label_names=np.array([[lbl1,lbl2]]).astype(np.float32)
counter = counter + 1
print('label names')
print(audio_path)
#print(audio_path[-6]+audio_path[-5])
print(label_names)
beam.metrics.Metrics.counter('prepare-tfrecord', 'load-audio').inc()
with tf.io.gfile.GFile(audio_path, 'rb') as f:
audio_segment = (
pydub.AudioSegment.from_file(f)
.set_channels(1).set_frame_rate(sample_rate))
audio = np.array(audio_segment.get_array_of_samples()).astype(np.float32)
audio=audio[start:end]
audio /= 2 ** (8 * audio_segment.sample_width)
with tf.io.gfile.GFile(str(audio_path.replace("audio","audio_2")), 'rb') as sd:
audio_segment_2 = (
pydub.AudioSegment.from_file(sd)
.set_channels(1).set_frame_rate(sample_rate))
audio_2 = np.array(audio_segment_2.get_array_of_samples()).astype(np.float32)
audio_2=audio_2[start:end]
# Convert from int to float representation.
audio_2 /= 2**(8 * audio_segment_2.sample_width)
print('I am alive!')
start = start + 64000
end = end + 64000
#print(audio)
return {'audio': audio,'audio_2': audio_2} | 2,872 |
def generate_annotation_dict(annotation_file):
""" Creates a dictionary where the key is a file name
and the value is a list containing the
- start time
- end time
- bird class.
for each annotation in that file.
"""
annotation_dict = dict()
for line in open(annotation_file):
file_name, start_time, end_time, bird_class = line.strip().split('\t')
if file_name not in annotation_dict:
annotation_dict[file_name] = list()
annotation_dict[file_name].append([start_time, end_time, bird_class])
return annotation_dict | 2,873 |
def ishom(T, check=False, tol=100):
"""
Test if matrix belongs to SE(3)
:param T: SE(3) matrix to test
:type T: numpy(4,4)
:param check: check validity of rotation submatrix
:type check: bool
:return: whether matrix is an SE(3) homogeneous transformation matrix
:rtype: bool
- ``ishom(T)`` is True if the argument ``T`` is of dimension 4x4
- ``ishom(T, check=True)`` as above, but also checks orthogonality of the
rotation sub-matrix and validitity of the bottom row.
.. runblock:: pycon
>>> from spatialmath.base import *
>>> import numpy as np
>>> T = np.array([[1, 0, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]])
>>> ishom(T)
>>> T = np.array([[1, 1, 0, 3], [0, 1, 0, 4], [0, 0, 1, 5], [0, 0, 0, 1]]) # invalid SE(3)
>>> ishom(T) # a quick check says it is an SE(3)
>>> ishom(T, check=True) # but if we check more carefully...
>>> R = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> ishom(R)
:seealso: :func:`~spatialmath.base.transformsNd.isR`, :func:`~isrot`, :func:`~spatialmath.base.transforms2d.ishom2`
"""
return (
isinstance(T, np.ndarray)
and T.shape == (4, 4)
and (
not check
or (
base.isR(T[:3, :3], tol=tol)
and np.all(T[3, :] == np.array([0, 0, 0, 1]))
)
)
) | 2,874 |
def _get_stp_data(step_order=STEP_ORDER, n=N_PER_STEP):
"""Returns np.array of step-type enums data for sample data.
Parameters
----------
step_order : list of (int, char)
List of (Cycle number, step type code) for steps in sample procedure.
n : int
Number of datapoints per step.
Returns
-------
stp_data : np.array(int)
"""
return np.hstack([_get_step_stp_idx_data(step_code, n=n) for _, step_code
in step_order]) | 2,875 |
def actor_discrete_loss(actions, advantages, logits):
"""
Adapted from: http://inoryy.com/post/tensorflow2-deep-reinforcement-learning/
"""
# sparse categorical CE loss obj that supports sample_weight arg on call()
# from_logits argument ensures transformation into normalized probabilities
weighted_sparse_ce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# policy loss is defined by policy gradients, weighted by advantages
# note: we only calculate the loss on the actions we've actually taken
actions = tf.cast(actions, tf.int32)
policy_loss = weighted_sparse_ce(actions, logits, sample_weight=advantages)
# entropy loss can be calculated via CE over itself
# TODO: use this
# entropy_loss = tf.keras.losses.categorical_crossentropy(logits, logits, from_logits=True)
# here signs are flipped because optimizer minimizes
# return policy_loss - self.params['entropy']*entropy_loss
return policy_loss | 2,876 |
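# Hedged usage sketch for actor_discrete_loss() above, assuming TensorFlow 2.x is available.
# The tensors below are toy values invented for illustration: a batch of two steps with
# two possible discrete actions.
import tensorflow as tf

logits = tf.constant([[2.0, 0.5], [0.1, 1.5]])   # unnormalized action scores per step
actions = tf.constant([0, 1])                    # actions actually taken
advantages = tf.constant([1.0, -0.5])            # advantage estimates per step
loss = actor_discrete_loss(actions, advantages, logits)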
def fuse_stride_arrays(dims: Union[List[int], np.ndarray],
strides: Union[List[int], np.ndarray]) -> np.ndarray:
"""
Compute linear positions of tensor elements
of a tensor with dimensions `dims` according to `strides`.
Args:
dims: An np.ndarray of (original) tensor dimensions.
strides: An np.ndarray of (possibly permuted) strides.
Returns:
np.ndarray: Linear positions of tensor elements according to `strides`.
"""
return fuse_ndarrays([
np.arange(0, strides[n] * dims[n], strides[n], dtype=SIZE_T)
for n in range(len(dims))
]) | 2,877 |
def extract_jasmine_summary(line):
"""
Example SUCCESS karma summary line:
PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 SUCCESS (0.205 secs / 0.001 secs)
    Example FAIL karma summary line:
PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 (1 FAILED) ERROR (0.21 secs / 0.001 secs)
"""
# get totals
totals = line.split(' Executed ')[1].split(' ')
executed_tests, total_tests = int(totals[0]), int(totals[2])
# get failed
if 'SUCCESS' in line:
failed_tests = 0
else:
failed_tests = int(totals[3][1:])
return {
'total_tests': total_tests,
'executed_tests': executed_tests,
'failed_tests': failed_tests,
'passed_tests': executed_tests - failed_tests
} | 2,878 |
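# Quick check of extract_jasmine_summary() using the two sample lines from its docstring.
ok = extract_jasmine_summary(
    "PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 SUCCESS (0.205 secs / 0.001 secs)")
assert ok == {'total_tests': 1, 'executed_tests': 1, 'failed_tests': 0, 'passed_tests': 1}

failed = extract_jasmine_summary(
    "PhantomJS 2.1.1 (Linux 0.0.0): Executed 1 of 1 (1 FAILED) ERROR (0.21 secs / 0.001 secs)")
assert failed['failed_tests'] == 1 and failed['passed_tests'] == 0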
def main( argv ):
"""
Script execution entry point
@param argv Arguments passed to the script
@return Exit code (0 = success)
"""
#-------------------------------------------------------------------------
# BEGIN: Per-script Configuration
#-------------------------------------------------------------------------
# set a pattern used to match desired function names
# example of only allowing certain prefixes:
# guard_pattern = r'^demo_'
guard_pattern = None
# set up auto type-conversions for functions that expect parameters to
# be of a specified type
# note: the parameter list must be complete for any specified function.
# the default parser will pass all parameters as strings if the
# function is not listed here.
parameter_types = {
'fun1' : { 'x' : int }
}
# set up auto parameter documentation here
# note: the parameter list must be complete for any specified function.
parameter_docs = {
'fun0' : {
'a' : 'Name',
'b' : 'Desired item',
'c' : 'Number of desired items',
'd' : 'The greeting'
}
}
#-------------------------------------------------------------------------
# END: Per-script Configuration
#-------------------------------------------------------------------------
# imports when using this as a script
# note: it's probably better to put these at the top of the file, but
# we're assuming the application may not rely on these modules.
import argparse
import inspect
import re
# get the name of the current function (most likely "main")
current_name = inspect.currentframe().f_code.co_name
# create a list of functions used to test each function for exposure
tests = [
# only expose functions
inspect.isfunction,
# do not expose conventionally "private" functions
lambda f: f.__name__[ : 1 ] != '_',
# do not expose the current function
lambda f: f.__name__ != current_name
]
# if there's a guard pattern, set up a regular expression to test it
if guard_pattern is not None:
tests.append(
lambda f: re.match( guard_pattern, f.__name__ ) is not None
)
# create a filter function (in a closure) to omit unwanted functions
def create_predicate( tests ):
def predicate( function ):
for test in tests:
if test( function ) == False:
return False
return True
return predicate
test = create_predicate( tests )
# get a reference to the current module
module = sys.modules[ __name__ ]
# construct a list of functions from the module's dictionary
functions = [ m[ 1 ] for m in inspect.getmembers( module, test ) ]
# standard (improved) help argument specification
helpargs = [ '-h', '--help' ]
helpkwargs = {
'default' : argparse.SUPPRESS,
'help' : 'Display this help message and exit.',
'action' : 'help'
}
# create and configure an argument parser
parser = argparse.ArgumentParser(
description = 'Module Shell Script Example',
add_help = False
)
parser.add_argument( *helpargs, **helpkwargs )
parser.add_argument(
'-t',
'--test',
default = argparse.SUPPRESS,
help = 'Execute script self-test.',
action = 'store_true'
)
parser.add_argument(
'-v',
'--version',
default = argparse.SUPPRESS,
help = 'Display script version and exit.',
action = 'version',
version = __version__
)
# set up sub-command parsers
subparsers = parser.add_subparsers(
title = 'Functions',
help = 'The following functions are available.'
)
# add a sub-command parser for each function
for function in functions:
# shortcut for the function name
name = function.__name__
# use the function's docstring for helpful information
docstring = inspect.getdoc( function )
# create a sub-parser for this function
subparser = subparsers.add_parser(
name,
description = docstring,
help = docstring,
add_help = False,
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
# standard help switch
subparser.add_argument( *helpargs, **helpkwargs )
# argument specification of function
        fun_args = inspect.getfullargspec( function )
num_args = len( fun_args.args )
# check for argument defaults
if fun_args.defaults is not None:
defaults = list( fun_args.defaults )
else:
defaults = []
# load arguments into this sub-parser
for arg in fun_args.args:
# keyword arguments used to create the sub-parser argument
kwargs = {}
# check for default values specified in the function
if num_args == len( defaults ):
kwargs[ 'nargs' ] = '?'
kwargs[ 'default' ] = defaults.pop( 0 )
# check for specified parameter types for this function
if name in parameter_types:
kwargs[ 'type' ] = parameter_types[ name ][ arg ]
# check for specified parameter documentation for this function
if name in parameter_docs:
kwargs[ 'help' ] = parameter_docs[ name ][ arg ]
# add the specified argument to the sub-parser
subparser.add_argument( arg, **kwargs )
# decrement number of remaining arguments to add
num_args -= 1
# set the function to be called when this sub-command is issued
subparser.set_defaults( _call = function )
# parse the arguments
args = parser.parse_args( argv[ 1 : ] )
# check for self-test request
if hasattr( args, 'test' ) and args.test == True:
import os
result = 0
script = os.path.basename( __file__ )
tests = [
( script, 'fun0', 'Bob', 'waffles' ),
( script, 'fun0', 'Bob', 'waffles', 3 ),
( script, 'fun0', 'Bob', 'waffles', 4, 'Greetings' )
]
for test in tests:
try:
result = main( *test )
except:
                print( 'CAUGHT: {}'.format( sys.exc_info()[0] ) )
raise
else:
if result != 0:
return result
return result
# load arguments into a new dict instance
params = dict( vars( args ) )
# scrub things that aren't arguments to the requested function
# note: this means the function can't have parameters that begin with "_"
    for key in list( params.keys() ):
if key[ : 1 ] == '_':
del params[ key ]
# call the function that was set for the specified sub-command
result = args._call( **params )
# check return for something non-shell-like
if type( result ) is not int:
        print( result )
return 0
# return result
return result | 2,879 |
def serialize_item(item):
"""
Args:
item: an XBlock
Returns:
fields: a dictionary of an XBlock's field names and values
block_type: the name of the XBlock's type (i.e. 'course'
or 'problem')
"""
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
# convert all fields to a dict and filter out parent and children field
fields = {
field: field_value.read_from(item)
for (field, field_value) in item.fields.items()
if field not in ['parent', 'children']
}
course_key = item.scope_ids.usage_id.course_key
block_type = item.scope_ids.block_type
# set or reset some defaults
fields['edited_on'] = str(getattr(item, 'edited_on', ''))
fields['display_name'] = item.display_name_with_default
fields['org'] = course_key.org
fields['course'] = course_key.course
fields['run'] = course_key.run
fields['course_key'] = str(course_key)
fields['location'] = str(item.location)
fields['block_type'] = block_type
fields['detached'] = block_type in DETACHED_XBLOCK_TYPES
if block_type == 'course':
# prune the checklists field
if 'checklists' in fields:
del fields['checklists']
# record the time this command was run
fields['time_last_dumped_to_neo4j'] = str(timezone.now())
return fields, block_type | 2,880 |
def gen_outfile_name(args):
"""Generate a name for the output file based on the input args.
Parameters
----------
args : argparse
argparse object to print
"""
return args.outfile + gen_identifier(args) | 2,881 |
def check_vg_tags(game_id):
"""Returns a user's tags."""
if game_id:
user_id = session.get('user_id')
user_query = VgTag.query.join(Tag).filter(Tag.user_id == user_id) # Only display user's tags for a specific game.
vg_tags = user_query.filter(VgTag.game_id == game_id).all()
return vg_tags
else:
return None | 2,882 |
def mutation_swap(self: MutateMethodCall, chrom: np.array):
"""
swap gene with random n position gene according to prob_mutate
:param self:
:param chrom: 0/1 type chromosome
"""
for i in range(len(chrom)):
if random.random() < self.prob_mutate:
            n = np.random.randint(0, len(chrom))  # single random partner index (scalar, not size-1 array)
chrom[i], chrom[n] = chrom[n], chrom[i] | 2,883 |
def add_constituency_result_line(line, valid_codes=None, session=None):
"""Add in a result from a constituency. Any previous result is removed. If
there is an error, ValueError is raised with an informative message.
Session is the database session to use. If None, the global db.session is
used.
If valid_codes is non-None, it is a set containing the party codes which are
allowed in this database. If None, this set is queried from the database.
The session is not commit()-ed.
"""
session = session if session is not None else db.session
valid_codes = (
valid_codes if valid_codes is not None else
_query_valid_party_codes(session)
)
cn, results = parse_result_line(line)
# Check constituency name is non-empty
if cn == '':
raise ValueError('Constituency name cannot be empty')
# Get the constituency or create one if necessary
constituency = Constituency.query.filter(Constituency.name==cn).first()
if constituency is None:
constituency = Constituency(name=cn)
session.add(constituency)
# Delete any prior voting records for this constituency
Voting.query.filter(Voting.constituency_id==constituency.id).delete()
# Is there one result per party?
if len(results) != len(set(p for _, p in results)):
raise ValueError('Multiple results for one party')
# Now add a voting record for each result
for count, party_id in results:
if party_id not in valid_codes:
raise ValueError('Party code "{}" is unknown'.format(party_id))
session.add(Voting(
count=count, party_id=party_id, constituency=constituency)) | 2,884 |
def load_fits(path):
"""
load the fits file
Parameters
----------
path: string, location of the fits file
Output
------
data: numpy array, of stokes images in (row, col, wv, pol)
header: hdul header object, header of the fits file
"""
hdul_tmp = fits.open(f'{path}')
data = np.asarray(hdul_tmp[0].data, dtype = np.float32)
header = hdul_tmp[0].header
return data, header | 2,885 |
def get_image_path(cfg,
metadata,
prefix='diag',
suffix='image',
metadata_id_list='default',):
"""
Produce a path to the final location of the image.
The cfg is the opened global config,
metadata is the metadata dictionairy (for the individual dataset file)
"""
#####
if metadata_id_list == 'default':
metadata_id_list = ['project', 'dataset', 'mip', 'exp', 'ensemble',
'field', 'short_name', 'preprocessor',
'diagnostic', 'start_year', 'end_year', ]
path = folder(cfg['plot_dir'])
if prefix:
path += prefix + '_'
# Check that the keys are in the dict.
intersection = [va for va in metadata_id_list if va in metadata.keys()]
path += '_'.join([str(metadata[b]) for b in intersection])
if suffix:
path += '_' + suffix
image_extention = get_image_format(cfg)
if path.find(image_extention) == -1:
path += image_extention
logger.info("Image path will be: %s", path)
return path | 2,886 |
def test_init_airtable(air):
"""Test that Airtable Interface was imported successfully.
"""
assert air | 2,887 |
def ProjectNameToBinding(project_name, tag_value, location=None):
"""Returns the binding name given a project name and tag value.
Requires binding list permission.
Args:
project_name: project name provided, fully qualified resource name
tag_value: tag value to match the binding name to
location: region or zone
Returns:
binding_name
Raises:
InvalidInputError: project not found
"""
service = ServiceFns['tagBindings']()
with endpoints.CrmEndpointOverrides(location):
req = ListResourceFns['tagBindings'](parent=project_name)
response = service.List(req)
for bn in response.tagBindings:
if bn.tagValue == tag_value:
return bn.name
raise InvalidInputError(
'Binding not found for parent [{}], tagValue [{}]'.format(
project_name, tag_value)) | 2,888 |
def degrees_of_freedom(s1, s2, n1, n2):
"""
    Compute the number of degrees of freedom using the Satterthwaite formula
    @param s1 The sample standard deviation of the first sample (squared inside the formula)
    @param s2 The sample standard deviation of the second sample (squared inside the formula)
    @param n1 The number of observations in the first sample
    @param n2 The number of observations in the second sample
"""
numerator = (s1**2/n1 + s2**2/n2)**2
denominator = ((s1**2/n1)**2)/(n1-1) + ((s2**2/n2)**2)/(n2-1)
degrees_of_freedom = numerator/denominator
return degrees_of_freedom | 2,889 |
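# Worked example of the Welch-Satterthwaite formula above, with sample standard
# deviations 2.0 and 3.0 and sample sizes 12 and 15 (numbers chosen for illustration).
nu = degrees_of_freedom(s1=2.0, s2=3.0, n1=12, n2=15)
# numerator   = (4/12 + 9/15)^2            ~= 0.8711
# denominator = (4/12)^2/11 + (9/15)^2/14  ~= 0.0101 + 0.0257 = 0.0358
# nu ~= 24.3, typically rounded down before looking up a t critical value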
async def test_entry_setup_unload(hass, api_factory, gateway_id):
"""Test config entry setup and unload."""
entry = MockConfigEntry(
domain=tradfri.DOMAIN,
data={
tradfri.CONF_HOST: "mock-host",
tradfri.CONF_IDENTITY: "mock-identity",
tradfri.CONF_KEY: "mock-key",
tradfri.CONF_IMPORT_GROUPS: True,
tradfri.CONF_GATEWAY_ID: gateway_id,
},
)
entry.add_to_hass(hass)
with patch.object(
hass.config_entries, "async_forward_entry_setup", return_value=True
) as setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert setup.call_count == len(tradfri.PLATFORMS)
dev_reg = dr.async_get(hass)
dev_entries = dr.async_entries_for_config_entry(dev_reg, entry.entry_id)
assert dev_entries
dev_entry = dev_entries[0]
assert dev_entry.identifiers == {
(tradfri.DOMAIN, entry.data[tradfri.CONF_GATEWAY_ID])
}
assert dev_entry.manufacturer == tradfri.ATTR_TRADFRI_MANUFACTURER
assert dev_entry.name == tradfri.ATTR_TRADFRI_GATEWAY
assert dev_entry.model == tradfri.ATTR_TRADFRI_GATEWAY_MODEL
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as unload:
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert unload.call_count == len(tradfri.PLATFORMS)
assert api_factory.shutdown.call_count == 1 | 2,890 |
def get_requires_file(dist):
"""Get the path to the egg-info requires.txt file for a given dist."""
return os.path.join(
os.path.join(dist.location, dist.project_name + ".egg-info"),
"requires.txt",
) | 2,891 |
def get_range_to_list(range_str):
"""
    Takes a range string (e.g. 123-125) and returns the corresponding inclusive range of integers
"""
start = int(range_str.split('-')[0])
end = int(range_str.split('-')[1])
if start > end:
print("Your range string is wrong, the start is larger than the end!", range_str)
return range(start, end+1) | 2,892 |
def get_saml_assertion(server, session, access_token, id_token=None):
"""
Exchange access token to saml token to connect to VC
Sample can be found at
https://github.com/vmware/vsphere-automation-sdk-python/blob/master/samples/vsphere/oauth/exchange_access_id_token_for_saml.py
"""
stub_config = StubConfigurationFactory.new_std_configuration(
get_requests_connector(
session=session,
url=HTTP_ENDPOINT.format(server)
)
)
oauth_security_context = create_oauth_security_context(access_token)
stub_config.connector.set_security_context(oauth_security_context)
token_exchange = TokenExchange(stub_config)
exchange_spec = token_exchange.ExchangeSpec(
grant_type=token_exchange.TOKEN_EXCHANGE_GRANT,
subject_token_type=token_exchange.ACCESS_TOKEN_TYPE,
actor_token_type=token_exchange.ID_TOKEN_TYPE,
requested_token_type=token_exchange.SAML2_TOKEN_TYPE,
actor_token=id_token, subject_token=access_token)
response = token_exchange.exchange(exchange_spec)
saml_token = response.access_token
# convert saml token to saml assertion
samlAssertion = etree.tostring(
etree.XML(base64.decodebytes(
bytes(saml_token, 'utf-8')
))
).decode('utf-8')
return samlAssertion | 2,893 |
def insert_cluster_metadata(clconn, name, desc, cli, verbose=False):
"""
Insert the cluster metadata information in the SQL table and return its rowid.
This is the information that describes how the clusters were made.
:param clconn: the database connection
:param name: the name of the clustering approach
:param desc: a human readable description of the clustering
:param cli: the command line command used for the clustering
:param verbose: more output
:return: the clusterdefinition_rowid for this metadata
"""
if verbose:
sys.stderr.write(f"{color.GREEN}Adding the metadata{color.ENDC}\n")
clcur = clconn.cursor()
clcur.execute("INSERT INTO clusterdefinition(name, description, command) values (?,?,?)",
[name, desc, cli])
cd_rowid = clcur.lastrowid
clconn.commit()
return cd_rowid | 2,894 |
def display_value(id, value):
"""
Display a value in a selector-like style.
Parameters
----------
id: int
        Id of the value to be displayed
    value
        Value to be displayed
"""
return html.div(
{
"class": "py-3 pl-3 w-full border-[1px] sm:w-[48%] md:w-[121px] bg-nav rounded-[3px] md:mr-2 my-4 before:content-[''] before:border-[6px] before:border-[transparent] before:top-1/2 before:right-5 before:-translate-y-0.5 before:absolute xl:w-[14%]",
},
html.h3(
{"value": id},
value,
),
) | 2,895 |
def helmholtz_adjoint_double_layer_regular(
test_point, trial_points, test_normal, trial_normals, kernel_parameters
):
"""Helmholtz adjoint double layer for regular kernels."""
wavenumber_real = kernel_parameters[0]
wavenumber_imag = kernel_parameters[1]
npoints = trial_points.shape[1]
dtype = trial_points.dtype
factor_real = _np.empty(npoints, dtype=dtype)
factor_imag = _np.empty(npoints, dtype=dtype)
output_real = _np.empty(npoints, dtype=dtype)
output_imag = _np.empty(npoints, dtype=dtype)
diff = _np.empty((3, npoints), dtype=dtype)
dist = _np.zeros(npoints, dtype=dtype)
laplace_grad = _np.zeros(npoints, dtype=dtype)
m_inv_4pi = dtype.type(M_INV_4PI)
for i in range(3):
for j in range(npoints):
diff[i, j] = test_point[i] - trial_points[i, j]
dist[j] += diff[i, j] * diff[i, j]
for j in range(npoints):
dist[j] = _np.sqrt(dist[j])
for i in range(3):
for j in range(npoints):
laplace_grad[j] += diff[i, j] * test_normal[i]
for j in range(npoints):
laplace_grad[j] *= m_inv_4pi / (dist[j] * dist[j] * dist[j])
factor_real[j] = _np.cos(wavenumber_real * dist[j]) * laplace_grad[j]
factor_imag[j] = _np.sin(wavenumber_real * dist[j]) * laplace_grad[j]
if wavenumber_imag != 0:
for j in range(npoints):
factor_real[j] *= _np.exp(-wavenumber_imag * dist[j])
factor_imag[j] *= _np.exp(-wavenumber_imag * dist[j])
for j in range(npoints):
output_real[j] = (-1 - wavenumber_imag * dist[j]) * factor_real[
j
] - wavenumber_real * dist[j] * factor_imag[j]
output_imag[j] = wavenumber_real * dist[j] * factor_real[j] + factor_imag[j] * (
-1 - wavenumber_imag * dist[j]
)
return output_real + 1j * output_imag | 2,896 |
def compute_vad(wav_rspecifier, feats_wspecifier, opts):
"""This function computes the vad based on ltsv features.
The output is written in the file denoted by feats_wspecifier,
    and if the test_plot flag is set, it produces a plot.
    Args:
        wav_rspecifier: An ark or scp file as in Kaldi, that contains the input audio
        feats_wspecifier: An ark or scp file as in Kaldi, to which the computed VAD features are written
opts: Options. See main function for list of options
Returns:
The number of successful trials.
"""
num_utts, num_success = 0, 0
with SequentialWaveReader(wav_rspecifier) as reader, \
VectorWriter(feats_wspecifier) as writer:
for num_utts, (key, wave) in enumerate(reader, 1):
if wave.duration < opts.min_duration:
print("File: {} is too short ({} sec): producing no output.".format(key, wave.duration), file=sys.stderr)
continue
num_chan = wave.data().num_rows
if opts.channel >= num_chan:
print("File with id {} has {} channels but you specified "
"channel {}, producing no output.", file=sys.stderr)
continue
channel = 0 if opts.channel == -1 else opts.channel
fr_length_samples = int(opts.frame_window*wave.samp_freq*(10**(-3)))
fr_shift_samples = int(opts.frame_shift*wave.samp_freq*(10**(-3)))
try:
wav_data = np.squeeze(wave.data()[channel].numpy())
sample_freqs, segment_times, spec = signal.spectrogram(wav_data, fs=wave.samp_freq,
nperseg=fr_length_samples, nfft=opts.nfft,
noverlap=fr_length_samples-fr_shift_samples,
scaling='spectrum',mode = 'psd')
specT = np.transpose(spec)
spect_n = ARMA.ApplyARMA(specT, opts.arma_order)
ltsv_f = LTSV.ApplyLTSV(spect_n, opts.ltsv_ctx_window, opts.threshold,
opts.slope, opts.sigmoid_scale)
vad_feat = DCTF.ApplyDCT(opts.dct_num_cep, opts.dct_ctx_window, ltsv_f)
feats = Vector(vad_feat)
if opts.test_plot:
show_plot(segment_times, sample_freqs, spec, wave, wav_data, vad_feat)
except:
print("Failed to compute features for utterance", key,
file=sys.stderr)
continue
writer[key] = feats
num_success += 1
if num_utts % 10 == 0:
print("Processed {} utterances".format(num_utts),
file=sys.stderr)
print("Done {} out of {} utterances".format(num_success, num_utts),
file=sys.stderr)
return num_success != 0 | 2,897 |
def crt(s):
"""
Solve the system given by x == v (mod k),
where (k, v) goes over all key-value pairs of the dictionary s.
"""
x, n = 0, 1
for q, r in s.items():
x += n * ((r-x) * inverse(n, q) % q)
n *= q
return x | 2,898 |
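# Hedged usage sketch for crt() above. The helper `inverse(n, q)` is not defined in the
# snippet; it is assumed to be a modular inverse (e.g. Crypto.Util.number.inverse), and
# Python 3.8+'s pow(n, -1, q) is used here as a stand-in.
inverse = lambda a, m: pow(a, -1, m)

# x = 2 (mod 3), x = 3 (mod 5), x = 2 (mod 7)  ->  x = 23 (mod 105)
assert crt({3: 2, 5: 3, 7: 2}) == 23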
def create_new_tf_session(**kwargs):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(**kwargs)
sess.__enter__()
assert tf.get_default_session()
return sess | 2,899 |