content (string, lengths 22-815k) | id (int64, 0-4.91M)
---|---
def _resolve_link(path):
"""Internal helper function. Takes a path and follows symlinks
until we either arrive at something that isn't a symlink, or
encounter a path we've seen before (meaning that there's a loop).
"""
paths_seen = []
while islink(path):
if path in paths_seen:
# Already seen this path, so we must have a symlink loop
return None
paths_seen.append(path)
# Resolve where the link points to
resolved = os.readlink(path)
if not isabs(resolved):
dir = dirname(path)
path = normpath(join(dir, resolved))
else:
path = normpath(resolved)
return path | 3,200 |
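A minimal usage sketch on a POSIX system, assuming the bare helper names (islink, isabs, dirname, join, normpath) come from os.path, which this snippet does not show being imported:

import os
import tempfile
from os.path import islink, isabs, dirname, join, normpath

d = tempfile.mkdtemp()
open(join(d, "target"), "w").close()
os.symlink("target", join(d, "b"))   # b -> target (relative link)
os.symlink("b", join(d, "a"))        # a -> b -> target
os.symlink("c", join(d, "c"))        # c -> c, a deliberate loop

print(_resolve_link(join(d, "a")))   # .../target
print(_resolve_link(join(d, "c")))   # None, because the loop is detected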
def test_handle_diagonalization_errors(generate_workchain_ph):
"""Test `PhBaseWorkChain.handle_diagonalization_errors`."""
process = generate_workchain_ph(exit_code=PhCalculation.exit_codes.ERROR_COMPUTING_CHOLESKY)
process.setup()
process.validate_parameters()
process.prepare_process()
process.ctx.inputs.parameters['INPUTPH']['diagonalization'] = 'david'
result = process.handle_diagonalization_errors(process.ctx.children[-1])
assert isinstance(result, ProcessHandlerReport)
assert process.ctx.inputs.parameters['INPUTPH']['diagonalization'] == 'cg'
assert result.do_break
result = process.handle_diagonalization_errors(process.ctx.children[-1])
assert result.do_break
result = process.inspect_process()
assert result == PhBaseWorkChain.exit_codes.ERROR_UNRECOVERABLE_FAILURE | 3,201 |
def create(platformDetails):
"""
    This function creates a new platform in the platform list
    based on the passed-in platform data
    :param platformDetails: platform to create in platform structure
    :return: 201 on success, 406 on platform already exists
"""
# Remove id as it's created automatically
if "id" in platformDetails:
del platformDetails["id"]
# Does the platform exist already?
existing_platform = (
db.session.query(Platform)
.filter(Platform.value == platformDetails["value"])
.one_or_none()
)
if existing_platform is None:
schema = PlatformSchema()
new_platform = schema.load(platformDetails, session=db.session)
db.session.add(new_platform)
db.session.commit()
        # Serialize and return the newly created platform
        # in the response
data = schema.dump(new_platform)
return data, 201
# Otherwise, it already exists, that's an error
else:
abort(406, "Platform already exists") | 3,202 |
def add_command(name, func):
""" For controls that execute commands, the command must be added to the _COMMAND list so that
it can be added back to the widget during cloning """
logger.debug("Adding to commands: %s - %s", name, func)
_RECREATE_OBJECTS["commands"][str(name)] = func | 3,203 |
def _execute_cell(cell, shell, iopub, timeout=300):
"""
Execute an IPython Notebook Cell and return the cell output.
Parameters
----------
cell : IPython.nbformat.current.NotebookNode
The IPython Notebook cell to execute.
shell : IPython.kernel.blocking.channels.BlockingShellChannel
The shell channel which the cell is submitted to for execution.
iopub : IPython.kernel.blocking.channels.BlockingIOPubChannel
The iopub channel used to retrieve the result of the execution.
timeout : int
The number of seconds to wait for the execution to finish before giving
up.
Returns
-------
cell_outputs : list
The list of NotebookNodes holding the result of the execution.
"""
# Execute input
shell.execute(cell.input)
exe_result = shell.get_shell_msg(timeout=timeout)
if exe_result['content']['status'] == 'error':
raise RuntimeError('Failed to execute cell due to error: {!r}'.format(
str(exe_result['content']['evalue'])))
cell_outputs = list()
# Poll for iopub messages until no more messages are available
while True:
try:
msg = iopub.get_iopub_msg(timeout=0.5)
except Empty:
break
msg_type = msg['msg_type']
if msg_type in ('status', 'pyin', 'execute_input', 'execute_result'):
continue
content = msg['content']
node = NotebookNode(output_type=msg_type)
if msg_type == 'stream':
node.stream = content['name']
if 'text' in content:
# v4 notebook format
node.text = content['text']
else:
# v3 notebook format
node.text = content['data']
bug_text = 'Using Anaconda Cloud api site https://api.anaconda.org'
if bug_text in node.text:
# Ignore conda (spam) messages/warnings
continue
elif msg_type in ('display_data', 'pyout'):
node['metadata'] = content['metadata']
for mime, data in content['data'].items():
attr = mime.split('/')[-1].lower()
attr = attr.replace('+xml', '').replace('plain', 'text')
setattr(node, attr, data)
if msg_type == 'pyout':
node.prompt_number = content['execution_count']
elif msg_type == 'pyerr':
node.ename = content['ename']
node.evalue = content['evalue']
node.traceback = content['traceback']
else:
raise RuntimeError('Unhandled iopub message of type: {}'.format(
msg_type))
cell_outputs.append(node)
return cell_outputs | 3,204 |
def VD_A_DF(data, val_col: str = None, group_col: str = None, sort=True):
"""
:param data: pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
:param val_col: str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains values.
:param group_col: str, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains group names.
    :param sort: bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
:return: stats : pandas DataFrame of effect sizes
Stats summary ::
'A' : Name of first measurement
'B' : Name of second measurement
'estimate' : effect sizes
'magnitude' : magnitude
"""
x = data.copy()
if sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x.sort_values(by=[group_col, val_col], ascending=True, inplace=True)
groups = x[group_col].unique()
# Pairwise combinations
g1, g2 = np.array(list(it.combinations(np.arange(groups.size), 2))).T
# Compute effect size for each combination
ef = np.array([VD_A(list(x[val_col][x[group_col] == groups[i]].values),
list(x[val_col][x[group_col] == groups[j]].values)) for i, j in zip(g1, g2)])
return pd.DataFrame({
'A': np.unique(data[group_col])[g1],
'B': np.unique(data[group_col])[g2],
'estimate': ef[:, 0],
'magnitude': ef[:, 1]
}) | 3,205 |
def making_maintf():
"""creating and writing main.tf-file in examples"""
variables_list = get_namelist("variable \"", "\" {", "variables.tf")
text_addition(
"examples/main.tf",
"\nmodule \"" +
"{{ cookiecutter.example_module_name}}" +
"\" {\n"
)
    biggest_len = 0
    for __var__ in variables_list:
        if biggest_len < len(__var__):
            biggest_len = len(__var__)
    for __var__ in variables_list:
        spaces_str = spaces_gen(biggest_len - len(__var__))
        variable_string = (" " +
                           __var__ +
                           spaces_str +
                           " = \"${var." +
                           __var__ +
                           "}\"\n")
        text_addition("examples/main.tf", variable_string)
    text_addition(
        "examples/main.tf",
        " source" +
        spaces_gen(biggest_len - len("source")) +
" = \"../../\"\n}\n"
) | 3,206 |
def getR2(y, y_fitted, chi=None):
"""
    Calculates the coefficient of determination R^2 for `y_fitted` as a prediction for `y` over a region marked by chi>0, defined by
    R^2 = 1 - S_res/S_tot
    with S_res = int(chi*(y-y_fitted)**2), S_tot = int(chi*(y-m(y))**2), m(y) = int(chi*y)/int(chi)
    If R^2=1 then `y_fitted` predicts `y` exactly. If R^2=0 then `y_fitted` does not make a better prediction than the mean.
:param y: target distribution
:type y: `esys.escript.Scalar`
:param y_fitted: fitted distribution
:type y_fitted: `esys.escript.Scalar`
:param chi: marker/weighting for region of interest
:type chi: `esys.escript.Scalar` or None
:rtype: `float`
"""
if chi is None:
chi=Scalar(1., Function(y_fitted.getFunctionSpace().getDomain()))
ybar=integrate(chi*y)/integrate(chi)
S_res=integrate(chi*(y-y_fitted)**2)
S_tot=integrate(chi*(y-ybar)**2)
if S_tot > 0:
R2=1-S_res/S_tot
else:
if S_res > 0:
R2=0.
else:
R2=1.
return R2 | 3,207 |
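A small numerical illustration of the same R^2 formula using plain numpy arrays instead of esys.escript objects; integrate() is replaced by a sum here, so this shows only the arithmetic, not the escript API:

import numpy as np

y        = np.array([1.0, 2.0, 3.0, 4.0])
y_fitted = np.array([1.1, 1.9, 3.2, 3.8])
chi      = np.ones_like(y)               # weight the whole region equally

ybar  = (chi * y).sum() / chi.sum()
S_res = (chi * (y - y_fitted) ** 2).sum()
S_tot = (chi * (y - ybar) ** 2).sum()
print(1.0 - S_res / S_tot)               # close to 1, i.e. a good fit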
def handle_message(event_data):
"""
Here we'll build a 'message' event handler using the Slack Events Adapter.
"""
# Grab the message from the event payload
message = event_data["event"]
print('TESTING >>>> message = ', message)
# if the user says hello
if "hello" in message.get('text'):
# have our bot respond to the message
mybot.say_hello(message)
else:
# otherwise help us find out what went wrong
print("This isn't the message we expected: \n%r\n" % message) | 3,208 |
def basis(d, point_distribution='uniform', symbolic=True):
"""
Return all local basis function phi as functions of the
local point X in a 1D element with d+1 nodes.
If symbolic=True, return symbolic expressions, else
return Python functions of X.
point_distribution can be 'uniform' or 'Chebyshev'.
"""
X = sym.symbols('X')
if d == 0:
phi_sym = [1]
else:
if point_distribution == 'uniform':
if symbolic:
h = sym.Rational(1, d) # node spacing
nodes = [2*i*h - 1 for i in range(d+1)]
else:
nodes = np.linspace(-1, 1, d+1)
elif point_distribution == 'Chebyshev':
# Just numeric nodes
nodes = Chebyshev_nodes(-1, 1, d)
phi_sym = [Lagrange_polynomial(X, r, nodes)
for r in range(d+1)]
# Transform to Python functions
phi_num = [sym.lambdify([X], phi_sym[r], modules='numpy')
for r in range(d+1)]
return phi_sym if symbolic else phi_num | 3,209 |
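A usage sketch, assuming the module-level imports (import sympy as sym, import numpy as np) and that the Lagrange_polynomial helper this function calls implements the standard Lagrange cardinal polynomials; for d=1 the two hat functions on [-1, 1] come out as expected:

phi_sym = basis(d=1, point_distribution='uniform', symbolic=True)
print(phi_sym)                             # e.g. [1/2 - X/2, X/2 + 1/2]

phi_num = basis(d=1, symbolic=False)
print(phi_num[0](-1.0), phi_num[0](1.0))   # 1.0 0.0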
def provide_batch_fn():
""" The provide_batch function to use. """
return dataset_factory.provide_batch | 3,210 |
def test_tenant_id_validation():
"""The credential should raise ValueError when given an invalid tenant_id"""
valid_ids = {"c878a2ab-8ef4-413b-83a0-199afb84d7fb", "contoso.onmicrosoft.com", "organizations", "common"}
for tenant in valid_ids:
UsernamePasswordCredential("client-id", "username", "password", tenant_id=tenant)
invalid_ids = {"my tenant", "my_tenant", "/", "\\", '"my-tenant"', "'my-tenant'"}
for tenant in invalid_ids:
with pytest.raises(ValueError):
UsernamePasswordCredential("client-id", "username", "password", tenant_id=tenant) | 3,211 |
def mergeSort(li):
"""Sorts a list by splitting it to smaller and smaller pieces (until they
only have one or less elements) and then merges it back using the function
``merge()``.
>>> mergeSort([1, 2, 3, 4, 5])
[1, 2, 3, 4, 5]
>>> mergeSort([5, 4, 3, 2, 1])
[1, 2, 3, 4, 5]
>>> mergeSort([3, 2, 6, 1, 4, 2, 3, 1, 1, 5, 6, -2, 2.3])
[-2, 1, 1, 1, 2, 2, 2.3, 3, 3, 4, 5, 6, 6]
"""
n = len(li)
if n < 2:
return li
return merge(mergeSort(li[:n//2]), mergeSort(li[n//2:])) | 3,212 |
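mergeSort() relies on a merge() helper that is not shown in this snippet; a minimal sketch of a compatible implementation (an assumption, not the original) would be:

def merge(left, right):
    """Merge two already-sorted lists into one sorted list."""
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # One side is exhausted; append whatever remains of the other.
    result.extend(left[i:])
    result.extend(right[j:])
    return result

print(mergeSort([5, 4, 3, 2, 1]))  # [1, 2, 3, 4, 5]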
def mcplayout(pos, amaf_map, disp=False):
""" Start a Monte Carlo playout from a given position,
return score for to-play player at the starting position;
amaf_map is board-sized scratchpad recording who played at a given
position first """
if disp: print('** SIMULATION **', file=sys.stderr)
start_n = pos.n
passes = 0
while passes < 2 and pos.n < MAX_GAME_LEN:
if disp: print_pos(pos)
pos2 = None
# We simply try the moves our heuristics generate, in a particular
# order, but not with 100% probability; this is on the border between
# "rule-based playouts" and "probability distribution playouts".
for c, kind in gen_playout_moves(pos, pos.last_moves_neighbors(), conf['PROB_HEURISTIC']):
if disp and kind != 'random':
print('move suggestion', str_coord(c), kind, file=sys.stderr)
pos2 = pos.move(c)
if pos2 is None:
continue
# check if the suggested move did not turn out to be a self-atari
if random.random() <= (conf['PROB_RSAREJECT'] if kind == 'random' else conf['PROB_SSAREJECT']):
in_atari, ds = fix_atari(pos2, c, singlept_ok=True, twolib_edgeonly=True)
if ds:
if disp: print('rejecting self-atari move', str_coord(c), file=sys.stderr)
pos2 = None
continue
if amaf_map[c] == 0: # Mark the coordinate with 1 for black
amaf_map[c] = 1 if pos.n % 2 == 0 else -1
break
if pos2 is None: # no valid moves, pass
pos = pos.pass_move()
passes += 1
continue
passes = 0
pos = pos2
owner_map = W*W*[0]
score = pos.score(owner_map)
if disp: print('** SCORE B%+.1f **' % (score if pos.n % 2 == 0 else -score), file=sys.stderr)
if start_n % 2 != pos.n % 2:
score = -score
return score, amaf_map, owner_map | 3,213 |
def test_colour_ranges(fake_readme, monkeypatch):
"""
Whatever number we provide as coverage should produce the appropriate colour
"""
readme_file = "README"
def fake_readme_location(*args, **kwargs):
return os.path.join(TESTS_DIR, readme_file)
monkeypatch.setattr(__main__, "readme_location", fake_readme_location)
for total, colour in (
("97", "brightgreen"),
("93", "green"),
("80", "yellowgreen"),
("65", "yellow"),
("45", "orange"),
("15", "red"),
("n/a", "lightgrey"),
):
__main__.get_total = lambda: total
__main__.main([])
assert __main__.get_colour(total) == colour | 3,214 |
def roundedCorner(pc, p1, p2, r):
"""
Based on Stackoverflow C# rounded corner post
https://stackoverflow.com/questions/24771828/algorithm-for-creating-rounded-corners-in-a-polygon
"""
d1 = pc - p1
d2 = pc - p2
# Angle between vector 1 and vector 2 divided by 2
#angle = (atan2(d1.y, d1.x) - atan2(d2.y, d2.x)) / 2.
angle = PVector.angleBetween(d1, d2) / 2.
# The length of segment between angular point and the
# points of intersection with the circle of a given radius
tng = abs(tan(angle))
segment = float(r) / tng if tng != 0 else float(r)
# Check the segment
length1 = d1.mag()
length2 = d2.mag()
min_len = min(length1, length2)
r_max = r
if segment > min_len:
segment = min_len
r_max = min_len * tng
# Points of intersection are calculated by the proportion between
# the coordinates of the vector, length of vector and the length of the
# segment.
p1Cross = GetProportionPoint(pc, segment, length1, d1.x, d1.y)
p2Cross = GetProportionPoint(pc, segment, length2, d2.x, d2.y)
# Calculation of the coordinates of the circle
# center by the addition of angular vectors.
dx = pc.x * 2 - p1Cross.x - p2Cross.x
dy = pc.y * 2 - p1Cross.y - p2Cross.y
L = sqrt(dx * dx + dy * dy)
d = sqrt(segment * segment + r_max * r_max)
circlePoint = GetProportionPoint(pc, d, L, dx, dy)
# StartAngle and EndAngle of arc
startAngle = atan2(p1Cross.y - circlePoint.y, p1Cross.x - circlePoint.x)
endAngle = atan2(p2Cross.y - circlePoint.y, p2Cross.x - circlePoint.x)
# Sweep angle
sweepAngle = endAngle - startAngle
# Some additional checks
if sweepAngle < 0:
startAngle, endAngle = endAngle, startAngle
sweepAngle = -sweepAngle
if sweepAngle > PI:
startAngle, endAngle = endAngle, startAngle
sweepAngle = TWO_PI - sweepAngle
# Draw result using graphics
noFill()
line(p1.x, p1.y, p1Cross.x, p1Cross.y)
line(p2.x, p2.y, p2Cross.x, p2Cross.y)
arc(circlePoint.x, circlePoint.y,
2 * r_max, 2 * r_max,
startAngle, startAngle + sweepAngle)
fill(0, 0, 100)
text(str(int(r)) + " " + str(int(r_max)),
circlePoint.x, circlePoint.y) | 3,215 |
async def test_connect_sync_success(v3_server):
"""Test triggering a synchronous handler upon connection to the websocket."""
async with v3_server:
async with aiohttp.ClientSession() as session:
simplisafe = await API.login_via_credentials(
TEST_EMAIL, TEST_PASSWORD, client_id=TEST_CLIENT_ID, session=session
)
simplisafe.websocket._sio.eio._trigger_event = async_mock()
simplisafe.websocket._sio.eio.connect = async_mock()
on_connect = AsyncMock()
simplisafe.websocket.on_connect(on_connect)
connect_params = {
"ns": f"/v1/user/{TEST_USER_ID}",
"accessToken": TEST_ACCESS_TOKEN,
}
await simplisafe.websocket.async_connect()
simplisafe.websocket._sio.eio.connect.mock.assert_called_once_with(
f"wss://api.simplisafe.com/socket.io?{urlencode(connect_params)}",
engineio_path="socket.io",
headers={},
transports=["websocket"],
)
await simplisafe.websocket._sio._trigger_event("connect", "/")
on_connect.assert_called_once() | 3,216 |
def TestSlipDualReverse(port,N):
""" Drive motor back and forth across marker source for N interations. Assumes marker is primed BEHIND the drive roller"""
for its in range(N):
print "backward iteration " + str(its)
for i in range(10):
IndexS(port,1,-133)
IndexS(port,2,400)
print "forward iteration " + str(its)
for i in range(10):
IndexS(port,1,133)
IndexS(port,2,-400) | 3,217 |
def remove_partitions(
cube, store, conditions=None, ktk_cube_dataset_ids=None, metadata=None
):
"""
Remove given partition range from cube using a transaction.
Remove the partitions selected by ``conditions``. If no ``conditions`` are given,
remove all partitions. For each considered dataset, only the subset of
``conditions`` that refers to the partition columns of the respective dataset
is used. In particular, a dataset that is not partitioned at all is always considered
selected by ``conditions``.
Parameters
----------
cube: kartothek.core.cube.cube.Cube
Cube spec.
store: Union[simplekv.KeyValueStore, Callable[[], simplekv.KeyValueStore]]
Store.
conditions: Union[None, Condition, Iterable[Condition], Conjunction]
Select the partitions to be removed. Must be a condition only on partition columns.
ktk_cube_dataset_ids: Optional[Union[Iterable[Union[Str, Bytes]], Union[Str, Bytes]]]
Ktk_cube dataset IDs to apply the remove action to, optional. Default to "all".
metadata: Optional[Dict[str, Dict[str, Any]]]
        Metadata for each of the datasets, optional. Only given keys are updated/replaced. Deletion of
metadata keys is not possible.
Returns
-------
datasets: Dict[str, kartothek.core.dataset.DatasetMetadata]
Datasets, updated.
"""
if callable(store):
store_instance = store()
store_factory = store
else:
store_instance = store
def store_factory():
return store
existing_datasets = discover_datasets(cube, store)
for (
ktk_cube_dataset_id,
(ds, mp, delete_scope),
) in prepare_metapartitions_for_removal_action(
cube=cube,
store=store_instance,
conditions=conditions,
ktk_cube_dataset_ids=ktk_cube_dataset_ids,
existing_datasets=existing_datasets,
).items():
mp = mp.store_dataframes(
store=store_instance,
dataset_uuid=ds.uuid,
df_serializer=KTK_CUBE_DF_SERIALIZER,
)
ds_factory = metadata_factory_from_dataset(
ds, with_schema=True, store=store_factory
)
existing_datasets[ktk_cube_dataset_id] = update_dataset_from_partitions(
mp,
store_factory=store_factory,
dataset_uuid=ds.uuid,
ds_factory=ds_factory,
metadata=prepare_ktk_metadata(cube, ktk_cube_dataset_id, metadata),
metadata_merger=None,
delete_scope=delete_scope,
)
return existing_datasets | 3,218 |
def update_t_new_docker_image_names(main, file):
""" Updates the names of the docker images from lasote to conanio
"""
docker_mappings = {
"lasote/conangcc49": "conanio/gcc49",
"lasote/conangcc5": "conanio/gcc5",
"lasote/conangcc6": "conanio/gcc6",
"lasote/conangcc7": "conanio/gcc7",
"lasote/conangcc8": "conanio/gcc8",
"lasote/conanclang39": "conanio/clang39",
"lasote/conanclang40": "conanio/clang40",
"lasote/conanclang50": "conanio/clang50",
"lasote/conanclang60": "conanio/clang60",
}
found_old_name = False
for old, new in docker_mappings.items():
if main.file_contains(file, old):
main.replace_in_file(file, old, new)
found_old_name = True
if found_old_name:
main.output_result_update(title="Travis: Update Docker image names from lasote/ to conanio/")
return True
return False | 3,219 |
def fit(kern, audio, file_name, max_par, fs):
"""Fit kernel to data """
# time vector for kernel
n = kern.size
xkern = np.linspace(0., (n - 1.) / fs, n).reshape(-1, 1)
# initialize parameters
if0 = gpitch.find_ideal_f0([file_name])[0]
init_f, init_v = gpitch.init_cparam(y=audio, fs=fs, maxh=max_par, ideal_f0=if0, scaled=False)[0:2]
init_l = np.array([0., 1.])
# optimization
p0 = np.hstack((init_l, init_v, init_f)) # initialize params
pstar = optimize_kern(x=xkern, y=kern, p0=p0)
# compute initial and learned kernel
kern_init = approximate_kernel(p0, xkern)
kern_approx = approximate_kernel(pstar, xkern)
# get kernel hyperparameters
    npartials = (pstar.size - 2) // 2
lengthscale = pstar[1]
variance = pstar[2: npartials + 2]
frequency = pstar[npartials + 2:]
params = [lengthscale, variance, frequency]
return params, kern_init, kern_approx | 3,220 |
def find_next_open_date(location_pid, date):
"""Finds the next day where this location is open."""
location = current_app_ils.location_record_cls.get_record_by_pid(
location_pid
)
_infinite_loop_guard = date + timedelta(days=365)
while date < _infinite_loop_guard:
if _is_open_on(location, date):
return date
date += _ONE_DAY_INCREMENT
# Termination is normally guaranteed if there is at least one weekday open
raise IlsException(
description="Cannot find any date for which the location %s is open after the given date %s."
"Please check opening/closures dates."
% (location_pid, date.isoformat())
) | 3,221 |
def check_dimension(units_in=None, units_out=None):
"""Check dimensions of inputs and ouputs of function.
Will check that all inputs and outputs have the same dimension
than the passed units/quantities. Dimensions for inputs and
outputs expects a tuple.
Parameters
----------
units_in : quantity_like or tuple of quantity_like
        quantity_like means a Quantity object or a
numeric value (that will be treated as dimensionless Quantity).
The inputs dimension will be checked with the units_in.
Defaults to None to skip any check.
units_out : quantity_like or tuple of quantity_like
        quantity_like means a Quantity object or a
numeric value (that will be treated as dimensionless Quantity).
The outputs dimension will be checked with the units_out.
Default to None to skip any check.
Returns
-------
func:
decorated function with dimension-checked inputs and outputs.
See Also
--------
Other decorators (TODO)
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a Greek symbol like :math:`\omega` inline.
Examples (written in doctest format)
--------
>>> def add_meter(x): return x + 1*m
>>> add_meter = check_dimension((m), (m))(add_meter)
>>> add_meter(1*m)
2 m
>>> add_meter(1*s)
raise DimensionError
"""
# reading args and making them iterable
if units_in:
units_in = _iterify(units_in)
if units_out:
units_out = _iterify(units_out)
# define the decorator
def decorator(func):
# create a decorated func
@functools.wraps(func)
def decorated_func(*args, **kwargs):
# Checking dimension of inputs
args = _iterify(args)
if units_in:
for arg, unit_in in zip(args, units_in):
# make everything dimensions
dim_check_in = dimensionify(unit_in)
dim_arg = dimensionify(arg)
# and checking dimensions
if not dim_arg == dim_check_in:
raise DimensionError(dim_arg, dim_check_in)
# Compute outputs and iterify it
ress = _iterify(func(*args, **kwargs))
# Checking dimension of outputs
if units_out:
for res, unit_out in zip(ress, units_out):
                    # make everything dimensions
dim_check_out = dimensionify(unit_out)
dim_res = dimensionify(res)
# and checking dimensions
if not dim_res == dim_check_out:
raise DimensionError(dim_res, dim_check_out)
            # still return function outputs
return tuple(ress) if len(ress) > 1 else ress[0]
return decorated_func
return decorator | 3,222 |
def _rowcorr(a, b):
"""Correlations between corresponding matrix rows"""
cs = np.zeros((a.shape[0]))
for idx in range(a.shape[0]):
cs[idx] = np.corrcoef(a[idx], b[idx])[0, 1]
return cs | 3,223 |
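A quick usage sketch; each entry of the returned vector is the Pearson correlation between the corresponding rows of the two matrices:

import numpy as np

a = np.array([[1.0, 2.0, 3.0], [1.0, 0.0, -1.0]])
b = np.array([[2.0, 4.0, 6.0], [1.0, 2.0, 3.0]])
print(_rowcorr(a, b))   # [ 1. -1.]  (row 0 perfectly correlated, row 1 anti-correlated)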
def detected():
"""
    Wake word detected successfully.
"""
print('唤醒成功')
play('./audio/open.wav')
global interrupted
interrupted = True
detector.terminate() | 3,224 |
def gff_to_dict(f_gff, feat_type, idattr, txattr, attributes, input_type):
"""
    It reads only exonic features because not all GFF files contain gene and transcript features. From the exonic
    features it extracts gene names, biotypes, and start and end positions. If any of these attributes does not exist
    then it is set to NA.
"""
annotation = defaultdict(lambda: defaultdict(lambda: 'NA'))
exon_pos = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
tx_info = defaultdict(lambda: defaultdict(str))
with open(f_gff) as gff_handle:
for rec in GFF.parse(gff_handle, limit_info=dict(gff_type=[feat_type]), target_lines=1):
for sub_feature in rec.features:
start = sub_feature.location.start
end = sub_feature.location.end
strand = strandardize(sub_feature.location.strand)
try:
geneid = sub_feature.qualifiers[idattr][0]
except KeyError:
print("No '" + idattr + "' attribute found for the feature at position "
+ rec.id + ":" + str(start) + ":" + str(end) + ". Please check your GTF/GFF file.")
continue
annotation[geneid]['chr'] = rec.id
annotation[geneid]['strand'] = strand
if annotation[geneid]['start'] == 'NA' or start <= int(annotation[geneid]['start']):
annotation[geneid]['start'] = start
if annotation[geneid]['end'] == 'NA' or end >= int(annotation[geneid]['end']):
annotation[geneid]['end'] = end
for attr in attributes:
if attr in annotation[geneid]:
continue
try:
annotation[geneid][attr] = sub_feature.qualifiers[attr][0]
except KeyError:
annotation[geneid][attr] = 'NA'
# extract exon information only in case of dexseq output
if input_type != "dexseq":
continue
try:
txid = sub_feature.qualifiers[txattr][0]
tx_info[txid]['chr'] = rec.id
tx_info[txid]['strand'] = strand
exon_pos[txid][int(start)][int(end)] = 1
except KeyError:
print("No '" + txattr + "' attribute found for the feature at position " + rec.id + ":" + str(
start) + ":" + str(end) + ". Please check your GTF/GFF file.")
pass
bed_entries = []
    # create BED lines only for dexseq output
if input_type == "dexseq":
for txid in exon_pos.keys():
starts = sorted(exon_pos[txid])
strand = tx_info[txid]['strand']
if strand == '-':
starts = reversed(starts)
for c, start in enumerate(starts, 1):
ends = sorted(exon_pos[txid][start])
if strand == '-':
ends = reversed(ends)
for end in ends:
bed_entries.append('\t'.join([tx_info[txid]['chr'], str(start), str(end),
txid + ':' + str(c), '0', strand]))
return annotation, bed_entries | 3,225 |
def _subtract_the_mean(point_cloud):
"""
Subtract the mean in point cloud and return its zero-mean version.
Args:
point_cloud (numpy.ndarray of size [N,3]): point cloud
Returns:
(numpy.ndarray of size [N,3]): point cloud with zero-mean
"""
point_cloud = point_cloud - np.mean(point_cloud, axis=0)
return point_cloud | 3,226 |
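A short usage sketch showing that the returned cloud has zero mean along each axis:

import numpy as np

cloud = np.array([[0.0, 0.0, 0.0],
                  [2.0, 4.0, 6.0]])
centered = _subtract_the_mean(cloud)
print(centered)               # [[-1. -2. -3.] [ 1.  2.  3.]]
print(centered.mean(axis=0))  # [0. 0. 0.]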
def check_filtering(grating_1d, filtered_grating_1d, normalized_contrast):
"""plot
"""
plt.figure(figsize=(25, 5))
plt.plot(grating_1d)
plt.title('1d grating')
plt.figure(figsize=(25, 5))
plt.plot(filtered_grating_1d)
plt.title('Filtered fundamental')
print("Square-wave contrast: %s" % normalized_contrast) | 3,227 |
def ncnr_load(filelist=None, check_timestamps=True):
"""
Load a list of nexus files from the NCNR data server.
**Inputs**
filelist (fileinfo[]): List of files to open.
check_timestamps (bool): verify that timestamps on file match request
**Returns**
output (refldata[]): All entries of all files in the list.
2016-06-29 Brian Maranville
| 2017-08-21 Brian Maranville Change to refldata, force cache invalidate
| 2018-06-18 Brian Maranville Change to nexusref to ignore areaDetector
| 2018-12-10 Brian Maranville get_plottable routines moved to python data container from js
| 2020-03-03 Paul Kienzle Just load. Don't even compute divergence
"""
# NB: used mainly to set metadata for processing, so keep it minimal
# TODO: make a metadata loader that does not send all data to browser
# NB: Fileinfo is a structure with
# { path: "location/on/server", mtime: timestamp }
from .load import url_load_list
datasets = []
for data in url_load_list(filelist, check_timestamps=check_timestamps):
datasets.append(data)
return datasets | 3,228 |
def load_source_dataframe(method, sourcename, source_dict,
download_FBA_if_missing, fbsconfigpath=None):
"""
Load the source dataframe. Data can be a FlowbyActivity or
FlowBySector parquet stored in flowsa, or a FlowBySector
formatted dataframe from another package.
:param method: dictionary, FBS method
:param sourcename: str, The datasource name
:param source_dict: dictionary, The datasource parameters
:param download_FBA_if_missing: Bool, if True will download FBAs from
Data Commons. Default is False.
    :param fbsconfigpath: str, optional path to an FBS method outside flowsa
repo
:return: df of identified parquet
"""
if source_dict['data_format'] == 'FBA':
# if yaml specifies a geoscale to load, use parameter
# to filter dataframe
if 'source_fba_load_scale' in source_dict:
geo_level = source_dict['source_fba_load_scale']
else:
geo_level = None
vLog.info("Retrieving Flow-By-Activity for datasource %s in year %s",
sourcename, str(source_dict['year']))
flows_df = flowsa.getFlowByActivity(
datasource=sourcename,
year=source_dict['year'],
flowclass=source_dict['class'],
geographic_level=geo_level,
download_FBA_if_missing=download_FBA_if_missing)
elif source_dict['data_format'] == 'FBS':
vLog.info("Retrieving flowbysector for datasource %s", sourcename)
flows_df = flowsa.getFlowBySector(sourcename)
elif source_dict['data_format'] == 'FBS_outside_flowsa':
vLog.info("Retrieving flowbysector for datasource %s", sourcename)
fxn = source_dict.get("FBS_datapull_fxn")
if callable(fxn):
flows_df = fxn(source_dict, method, fbsconfigpath)
elif fxn:
raise flowsa.exceptions.FBSMethodConstructionError(
error_type='fxn_call')
else:
raise flowsa.exceptions.FBSMethodConstructionError(
message="Data format not specified in method "
f"file for {sourcename}")
return flows_df | 3,229 |
def unpack_nwchem_basis_block(data):
"""Unserialize a NWChem basis data block and extract components
@param data: a JSON of basis set data, perhaps containing many types
@type data : str
@return: unpacked data
@rtype : dict
"""
unpacked = json.loads(data)
return unpacked | 3,230 |
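A minimal usage sketch with a made-up (hypothetical) payload, just to show the JSON round-trip; the snippet assumes `import json` at module level:

serialized = '{"6-31G": {"O": ["S  5484.67170  0.00183110"]}}'
unpacked = unpack_nwchem_basis_block(serialized)
print(unpacked["6-31G"]["O"][0])   # S  5484.67170  0.00183110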
def starify(name):
"""
Replace any ints in a dotted key with stars. Used when applying defaults and widgets to fields
"""
newname = []
for key in name.split('.'):
if is_int(key):
newname.append('*')
else:
newname.append(key)
name = '.'.join(newname)
return name | 3,231 |
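starify() depends on an is_int() helper that is not shown in this snippet; a plausible sketch of it (an assumption, not the original) plus a usage example:

def is_int(value):
    """Return True if the string parses as an integer."""
    try:
        int(value)
        return True
    except (TypeError, ValueError):
        return False

print(starify("fields.0.name"))      # fields.*.name
print(starify("fields.0.items.12"))  # fields.*.items.*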
def render(states, actions, instantaneous_reward_log, cumulative_reward_log, critic_distributions, target_critic_distributions, projected_target_distribution, bins, loss_log, episode_number, filename, save_directory, time_log, SPOTNet_sees_target_log):
"""
    TOTAL_STATE = [relative_x, relative_y, relative_vx, relative_vy, relative_angle, relative_angular_velocity, chaser_x, chaser_y, chaser_theta, target_x, target_y, target_theta, chaser_vx, chaser_vy, chaser_omega, target_vx, target_vy, target_omega] *# Relative pose expressed in the chaser's body frame; everything else in Inertial frame #*
"""
# Load in a temporary environment, used to grab the physical parameters
temp_env = Environment()
# Checking if we want the additional reward and value distribution information
extra_information = temp_env.ADDITIONAL_VALUE_INFO
# Unpacking state
chaser_x, chaser_y, chaser_theta = states[:,6], states[:,7], states[:,8]
target_x, target_y, target_theta = states[:,9], states[:,10], states[:,11]
# Extracting physical properties
LENGTH = temp_env.LENGTH
DOCKING_PORT_MOUNT_POSITION = temp_env.DOCKING_PORT_MOUNT_POSITION
DOCKING_PORT_CORNER1_POSITION = temp_env.DOCKING_PORT_CORNER1_POSITION
DOCKING_PORT_CORNER2_POSITION = temp_env.DOCKING_PORT_CORNER2_POSITION
ARM_MOUNT_POSITION = temp_env.ARM_MOUNT_POSITION
SHOULDER_POSITION = temp_env.SHOULDER_POSITION
ELBOW_POSITION = temp_env.ELBOW_POSITION
WRIST_POSITION = temp_env.WRIST_POSITION
END_EFFECTOR_POSITION = temp_env.END_EFFECTOR_POSITION
########################################################
# Calculating spacecraft corner locations through time #
########################################################
# All the points to draw of the chaser (except the front-face)
chaser_points_body = np.array([[ LENGTH/2,-LENGTH/2],
[-LENGTH/2,-LENGTH/2],
[-LENGTH/2, LENGTH/2],
[ LENGTH/2, LENGTH/2],
[ARM_MOUNT_POSITION[0],ARM_MOUNT_POSITION[1]],
[SHOULDER_POSITION[0],SHOULDER_POSITION[1]],
[ELBOW_POSITION[0],ELBOW_POSITION[1]],
[WRIST_POSITION[0],WRIST_POSITION[1]],
[END_EFFECTOR_POSITION[0],END_EFFECTOR_POSITION[1]]]).T
    # The front-face points on the chaser
chaser_front_face_body = np.array([[[ LENGTH/2],[ LENGTH/2]],
[[ LENGTH/2],[-LENGTH/2]]]).squeeze().T
# Rotation matrix (body -> inertial)
C_Ib_chaser = np.moveaxis(np.array([[np.cos(chaser_theta), -np.sin(chaser_theta)],
[np.sin(chaser_theta), np.cos(chaser_theta)]]), source = 2, destination = 0) # [NUM_TIMESTEPS, 2, 2]
# Rotating body frame coordinates to inertial frame
chaser_body_inertial = np.matmul(C_Ib_chaser, chaser_points_body) + np.array([chaser_x, chaser_y]).T.reshape([-1,2,1])
chaser_front_face_inertial = np.matmul(C_Ib_chaser, chaser_front_face_body) + np.array([chaser_x, chaser_y]).T.reshape([-1,2,1])
# All the points to draw of the target (except the front-face)
target_points_body = np.array([[ LENGTH/2,-LENGTH/2],
[-LENGTH/2,-LENGTH/2],
[-LENGTH/2, LENGTH/2],
[ LENGTH/2, LENGTH/2],
[DOCKING_PORT_MOUNT_POSITION[0], LENGTH/2], # artificially adding this to make the docking cone look better
[DOCKING_PORT_MOUNT_POSITION[0],DOCKING_PORT_MOUNT_POSITION[1]],
[DOCKING_PORT_CORNER1_POSITION[0],DOCKING_PORT_CORNER1_POSITION[1]],
[DOCKING_PORT_CORNER2_POSITION[0],DOCKING_PORT_CORNER2_POSITION[1]],
[DOCKING_PORT_MOUNT_POSITION[0],DOCKING_PORT_MOUNT_POSITION[1]]]).T
# The front-face points on the target
target_front_face_body = np.array([[[ LENGTH/2],[ LENGTH/2]],
[[ LENGTH/2],[-LENGTH/2]]]).squeeze().T
# Rotation matrix (body -> inertial)
C_Ib_target = np.moveaxis(np.array([[np.cos(target_theta), -np.sin(target_theta)],
[np.sin(target_theta), np.cos(target_theta)]]), source = 2, destination = 0) # [NUM_TIMESTEPS, 2, 2]
# Rotating body frame coordinates to inertial frame
target_body_inertial = np.matmul(C_Ib_target, target_points_body) + np.array([target_x, target_y]).T.reshape([-1,2,1])
target_front_face_inertial = np.matmul(C_Ib_target, target_front_face_body) + np.array([target_x, target_y]).T.reshape([-1,2,1])
#######################
# Plotting the motion #
#######################
# Generating figure window
figure = plt.figure(constrained_layout = True)
figure.set_size_inches(5, 4, True)
if extra_information:
grid_spec = gridspec.GridSpec(nrows = 2, ncols = 3, figure = figure)
subfig1 = figure.add_subplot(grid_spec[0,0], aspect = 'equal', autoscale_on = False, xlim = (0, 3.5), ylim = (0, 2.4))
#subfig1 = figure.add_subplot(grid_spec[0,0], projection = '3d', aspect = 'equal', autoscale_on = False, xlim3d = (-5, 5), ylim3d = (-5, 5), zlim3d = (0, 10), xlabel = 'X (m)', ylabel = 'Y (m)', zlabel = 'Z (m)')
subfig2 = figure.add_subplot(grid_spec[0,1], xlim = (np.min([np.min(instantaneous_reward_log), 0]) - (np.max(instantaneous_reward_log) - np.min(instantaneous_reward_log))*0.02, np.max([np.max(instantaneous_reward_log), 0]) + (np.max(instantaneous_reward_log) - np.min(instantaneous_reward_log))*0.02), ylim = (-0.5, 0.5))
subfig3 = figure.add_subplot(grid_spec[0,2], xlim = (np.min(loss_log)-0.01, np.max(loss_log)+0.01), ylim = (-0.5, 0.5))
subfig4 = figure.add_subplot(grid_spec[1,0], ylim = (0, 1.02))
subfig5 = figure.add_subplot(grid_spec[1,1], ylim = (0, 1.02))
subfig6 = figure.add_subplot(grid_spec[1,2], ylim = (0, 1.02))
# Setting titles
subfig1.set_xlabel("X (m)", fontdict = {'fontsize': 8})
subfig1.set_ylabel("Y (m)", fontdict = {'fontsize': 8})
subfig2.set_title("Timestep Reward", fontdict = {'fontsize': 8})
subfig3.set_title("Current loss", fontdict = {'fontsize': 8})
subfig4.set_title("Q-dist", fontdict = {'fontsize': 8})
subfig5.set_title("Target Q-dist", fontdict = {'fontsize': 8})
subfig6.set_title("Bellman projection", fontdict = {'fontsize': 8})
# Changing around the axes
subfig1.tick_params(labelsize = 8)
subfig2.tick_params(which = 'both', left = False, labelleft = False, labelsize = 8)
subfig3.tick_params(which = 'both', left = False, labelleft = False, labelsize = 8)
subfig4.tick_params(which = 'both', left = False, labelleft = False, right = True, labelright = False, labelsize = 8)
subfig5.tick_params(which = 'both', left = False, labelleft = False, right = True, labelright = False, labelsize = 8)
subfig6.tick_params(which = 'both', left = False, labelleft = False, right = True, labelright = True, labelsize = 8)
# Adding the grid
subfig4.grid(True)
subfig5.grid(True)
subfig6.grid(True)
# Setting appropriate axes ticks
subfig2.set_xticks([np.min(instantaneous_reward_log), 0, np.max(instantaneous_reward_log)] if np.sign(np.min(instantaneous_reward_log)) != np.sign(np.max(instantaneous_reward_log)) else [np.min(instantaneous_reward_log), np.max(instantaneous_reward_log)])
subfig3.set_xticks([np.min(loss_log), np.max(loss_log)])
subfig4.set_xticks([bins[i*5] for i in range(round(len(bins)/5) + 1)])
subfig4.tick_params(axis = 'x', labelrotation = -90)
subfig4.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])
subfig5.set_xticks([bins[i*5] for i in range(round(len(bins)/5) + 1)])
subfig5.tick_params(axis = 'x', labelrotation = -90)
subfig5.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])
subfig6.set_xticks([bins[i*5] for i in range(round(len(bins)/5) + 1)])
subfig6.tick_params(axis = 'x', labelrotation = -90)
subfig6.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.])
else:
subfig1 = figure.add_subplot(1, 1, 1, aspect = 'equal', autoscale_on = False, xlim = (0, 3.5), ylim = (0, 2.4), xlabel = 'X Position (m)', ylabel = 'Y Position (m)')
# Defining plotting objects that change each frame
chaser_body, = subfig1.plot([], [], color = 'r', linestyle = '-', linewidth = 2) # Note, the comma is needed
chaser_front_face, = subfig1.plot([], [], color = 'k', linestyle = '-', linewidth = 2) # Note, the comma is needed
target_body, = subfig1.plot([], [], color = 'g', linestyle = '-', linewidth = 2)
target_front_face, = subfig1.plot([], [], color = 'k', linestyle = '-', linewidth = 2)
chaser_body_dot = subfig1.scatter(0., 0., color = 'r', s = 0.1)
if extra_information:
reward_bar = subfig2.barh(y = 0, height = 0.2, width = 0)
loss_bar = subfig3.barh(y = 0, height = 0.2, width = 0)
q_dist_bar = subfig4.bar(x = bins, height = np.zeros(shape = len(bins)), width = bins[1]-bins[0])
target_q_dist_bar = subfig5.bar(x = bins, height = np.zeros(shape = len(bins)), width = bins[1]-bins[0])
projected_q_dist_bar = subfig6.bar(x = bins, height = np.zeros(shape = len(bins)), width = bins[1]-bins[0])
time_text = subfig1.text(x = 0.2, y = 0.91, s = '', fontsize = 8, transform=subfig1.transAxes)
reward_text = subfig1.text(x = 0.0, y = 1.02, s = '', fontsize = 8, transform=subfig1.transAxes)
else:
time_text = subfig1.text(x = 0.1, y = 0.9, s = '', fontsize = 8, transform=subfig1.transAxes)
reward_text = subfig1.text(x = 0.62, y = 0.9, s = '', fontsize = 8, transform=subfig1.transAxes)
episode_text = subfig1.text(x = 0.4, y = 0.96, s = '', fontsize = 8, transform=subfig1.transAxes)
episode_text.set_text('Episode ' + str(episode_number))
# Function called repeatedly to draw each frame
def render_one_frame(frame, *fargs):
temp_env = fargs[0] # Extract environment from passed args
# Draw the chaser body
chaser_body.set_data(chaser_body_inertial[frame,0,:], chaser_body_inertial[frame,1,:])
# Draw the front face of the chaser body in a different colour
chaser_front_face.set_data(chaser_front_face_inertial[frame,0,:], chaser_front_face_inertial[frame,1,:])
# Draw the target body
target_body.set_data(target_body_inertial[frame,0,:], target_body_inertial[frame,1,:])
if SPOTNet_sees_target_log[frame]:
target_body.set_color('y')
else:
target_body.set_color('g')
# Draw the front face of the target body in a different colour
target_front_face.set_data(target_front_face_inertial[frame,0,:], target_front_face_inertial[frame,1,:])
# Drawing a dot in the centre of the chaser
chaser_body_dot.set_offsets(np.hstack((chaser_x[frame],chaser_y[frame])))
# Update the time text
time_text.set_text('Time = %.1f s' %(time_log[frame]))
# Update the reward text
reward_text.set_text('Total reward = %.1f' %cumulative_reward_log[frame])
try:
if extra_information:
# Updating the instantaneous reward bar graph
reward_bar[0].set_width(instantaneous_reward_log[frame])
# And colouring it appropriately
if instantaneous_reward_log[frame] < 0:
reward_bar[0].set_color('r')
else:
reward_bar[0].set_color('g')
# Updating the loss bar graph
loss_bar[0].set_width(loss_log[frame])
# Updating the q-distribution plot
for this_bar, new_value in zip(q_dist_bar, critic_distributions[frame,:]):
this_bar.set_height(new_value)
# Updating the target q-distribution plot
for this_bar, new_value in zip(target_q_dist_bar, target_critic_distributions[frame, :]):
this_bar.set_height(new_value)
# Updating the projected target q-distribution plot
for this_bar, new_value in zip(projected_q_dist_bar, projected_target_distribution[frame, :]):
this_bar.set_height(new_value)
except:
pass
#
# Since blit = True, must return everything that has changed at this frame
return chaser_body_dot, time_text, chaser_body, chaser_front_face, target_body, target_front_face
# Generate the animation!
fargs = [temp_env] # bundling additional arguments
animator = animation.FuncAnimation(figure, render_one_frame, frames = np.linspace(0, len(states)-1, len(states)).astype(int),
blit = False, fargs = fargs)
"""
frames = the int that is passed to render_one_frame. I use it to selectively plot certain data
fargs = additional arguments for render_one_frame
interval = delay between frames in ms
"""
# Save the animation!
if temp_env.SKIP_FAILED_ANIMATIONS:
try:
# Save it to the working directory [have to], then move it to the proper folder
animator.save(filename = filename + '_episode_' + str(episode_number) + '.mp4', fps = 30, dpi = 100)
# Make directory if it doesn't already exist
os.makedirs(os.path.dirname(save_directory + filename + '/videos/'), exist_ok=True)
# Move animation to the proper directory
os.rename(filename + '_episode_' + str(episode_number) + '.mp4', save_directory + filename + '/videos/episode_' + str(episode_number) + '.mp4')
except:
("Skipping animation for episode %i due to an error" %episode_number)
# Try to delete the partially completed video file
try:
os.remove(filename + '_episode_' + str(episode_number) + '.mp4')
except:
pass
else:
# Save it to the working directory [have to], then move it to the proper folder
animator.save(filename = filename + '_episode_' + str(episode_number) + '.mp4', fps = 30, dpi = 100)
# Make directory if it doesn't already exist
os.makedirs(os.path.dirname(save_directory + filename + '/videos/'), exist_ok=True)
# Move animation to the proper directory
os.rename(filename + '_episode_' + str(episode_number) + '.mp4', save_directory + filename + '/videos/episode_' + str(episode_number) + '.mp4')
del temp_env
plt.close(figure) | 3,232 |
def load_boundary_conditions(bound_cond, zone_usage, data_class):
"""load use conditions according to DIN 18599 and SIA2024
loads Use conditions specified in the XML, according to DIN 18599,
SIA2024 in addition some AixLib specific use conditions for central AHU
are defined.
Parameters
----------
bound_cond : BoundaryConditions()
Instance of TEASERs
BuildingObjects.BoundaryConditions.BoundaryConditions
zone_usage : str
code list for zone_usage according to 18599
data_class : DataClass()
DataClass containing the bindings for TypeBuildingElement and
Material (typically this is the data class stored in prj.data,
        but the user can individually change that).
"""
conditions_bind = data_class.conditions_bind
for usage in conditions_bind.BoundaryConditions:
if usage.usage == zone_usage:
bound_cond.typical_length = usage.typical_length
bound_cond.typical_width = usage.typical_width
bound_cond.usage = usage.usage
bound_cond.usage_time = usage.UsageOperationTime.usage_time
bound_cond.daily_usage_hours = \
usage.UsageOperationTime.daily_usage_hours
bound_cond.yearly_usage_days = \
usage.UsageOperationTime.yearly_usage_days
bound_cond.yearly_usage_hours_day = \
usage.UsageOperationTime.yearly_usage_hours_day
bound_cond.yearly_usage_hours_night = \
usage.UsageOperationTime.yearly_usage_hours_night
bound_cond.daily_operation_ahu_cooling = \
usage.UsageOperationTime.daily_operation_ahu_cooling
bound_cond.yearly_heating_days = \
usage.UsageOperationTime.yearly_heating_days
bound_cond.yearly_ahu_days = \
usage.UsageOperationTime.yearly_ahu_days
bound_cond.yearly_cooling_days = \
usage.UsageOperationTime.yearly_cooling_days
bound_cond.daily_operation_heating = \
usage.UsageOperationTime.daily_operation_heating
if float(data_class.conditions_bind.version) >= 0.4:
bound_cond.maintained_illuminance = \
usage.Lighting.maintained_illuminance
else:
bound_cond.maintained_illuminance = \
usage.Lighting.maintained_illuminace
bound_cond.usage_level_height = usage.Lighting.usage_level_height
bound_cond.red_factor_visual = usage.Lighting.red_factor_visual
bound_cond.rel_absence = usage.Lighting.rel_absence
bound_cond.room_index = usage.Lighting.room_index
bound_cond.part_load_factor_lighting = \
usage.Lighting.part_load_factor_lighting
bound_cond.ratio_conv_rad_lighting = \
usage.Lighting.ratio_conv_rad_lighting
bound_cond.set_temp_heat = usage.RoomClimate.set_temp_heat
bound_cond.set_temp_cool = usage.RoomClimate.set_temp_cool
bound_cond.temp_set_back = usage.RoomClimate.temp_set_back
bound_cond.min_temp_heat = usage.RoomClimate.min_temp_heat
bound_cond.max_temp_cool = usage.RoomClimate.max_temp_cool
bound_cond.rel_humidity = usage.RoomClimate.rel_humidity
bound_cond.cooling_time = usage.RoomClimate.cooling_time
bound_cond.heating_time = usage.RoomClimate.heating_time
bound_cond.min_air_exchange = usage.RoomClimate.min_air_exchange
bound_cond.rel_absence_ahu = usage.RoomClimate.rel_absence_ahu
bound_cond.part_load_factor_ahu = \
usage.RoomClimate.part_load_factor_ahu
bound_cond.persons = usage.InternalGains.persons
bound_cond.profile_persons = usage.InternalGains.profile_persons
bound_cond.machines = usage.InternalGains.machines
bound_cond.profile_machines = usage.InternalGains.profile_machines
bound_cond.lighting_power = usage.InternalGains.lighting_power
bound_cond.profile_lighting = usage.InternalGains.profile_lighting
bound_cond.min_ahu = usage.AHU.min_ahu
bound_cond.max_ahu = usage.AHU.max_ahu
bound_cond.with_ahu = usage.AHU.with_ahu
bound_cond.use_constant_ach_rate = usage.AHU.use_constant_ach_rate
bound_cond.base_ach = usage.AHU.base_ach
bound_cond.max_user_ach = usage.AHU.max_user_ach
bound_cond.max_overheating_ach = usage.AHU.max_overheating_ach
bound_cond.max_summer_ach = usage.AHU.max_summer_ach
bound_cond.winter_reduction = usage.AHU.winter_reduction | 3,233 |
def hydrogens(atom: Atom) -> int:
"""Total number of hydrogen atoms (int).
"""
return atom.GetTotalNumHs() | 3,234 |
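A usage sketch, assuming Atom is an RDKit atom (GetTotalNumHs is the RDKit API; the Atom import is not shown in this snippet):

from rdkit import Chem

mol = Chem.MolFromSmiles("CCO")   # ethanol
for atom in mol.GetAtoms():
    print(atom.GetSymbol(), hydrogens(atom))
# C 3
# C 2
# O 1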
def update_url_catalog(meraki):
"""Update the URL catalog available to the helper."""
query_urls = {"mr_radio": "/devices/{serial}/wireless/radio/settings"}
update_urls = {"mr_radio": "/devices/{serial}/wireless/radio/settings"}
query_all_urls = {"mr_rf_profile": "/networks/{net_id}/wireless/rfProfiles"}
meraki.url_catalog["get_one"].update(query_urls)
meraki.url_catalog["update"] = update_urls
meraki.url_catalog["get_all"].update(query_all_urls) | 3,235 |
def to_field(field_tuple):
"""Create a dataframe_field from a tuple"""
return dataframe_field(*field_tuple) | 3,236 |
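A sketch of how this might be used, assuming dataframe_field is a namedtuple-like constructor (its actual definition is not shown in this snippet):

from collections import namedtuple

dataframe_field = namedtuple("dataframe_field", ["name", "dtype", "nullable"])

print(to_field(("age", "int64", False)))
# dataframe_field(name='age', dtype='int64', nullable=False)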
def Execute(data):
"""Required Execute function"""
global cooldown_list
return_value = ''
sender_user_id = ""
sender_user_display = ""
if data.IsFromTwitch():
sender_user_id = data.UserName.lower()
sender_user_display = data.UserName
elif data.IsFromYoutube() or data.IsFromDiscord():
sender_user_id = data.User
sender_user_display = data.UserName
# does nothing if the stream isn't live with the "OnlyLive" setting ticked
if MySet.OnlyLive and (Parent.IsLive() is False):
return
# addvoteoption
if Parent.HasPermission(sender_user_id, "Caster", "") and data.GetParam(0).lower() == "!addvoteoption":
# getting game name!
data_input = data.Message
if '"' in data_input:
pattern = '"(.+)"\s*(\d*)'
# respond(data, data_input)
match = re.search(pattern, data_input)
game = match.group(1)
vote_value = match.group(2)
if not vote_value:
vote_value = 0
else:
data_input = data_input.split(" ")
vote_value = data.GetParam(data.GetParamCount()-1)
# decides how to handle the final parameter.
try:
vote_value = int(vote_value)
data_input = data_input[1:-1]
except ValueError as e:
vote_value = 0
data_input = data_input[1:]
data_input = ' '.join(data_input)
game = data_input
add_vote_option(game, vote_value)
vote_data = get_vote_data()
if game in vote_data["Profiles"][get_active_profile()].keys():
respond(data, 'Successfully created the option %s!' % game)
else:
respond(data, "Something went wrong. Let Newt know!")
# deletevoteoption
if Parent.HasPermission(sender_user_id, "Caster", "") and data.GetParam(0).lower() == "!deletevoteoption":
# getting game name
data_input = data.Message
data_input = data_input.split(" ")
data_input = data_input[1:]
data_input = ' '.join(data_input)
game = data_input
vote_data = get_vote_data()
del vote_data["Profiles"][get_active_profile()][game]
update_vote_data(vote_data)
if game in vote_data["Profiles"][get_active_profile()].keys():
respond(data, 'Successfully deleted the option %s!' % game)
else:
respond(data, 'Something went wrong. Let Newt know!')
# setvoteprofile
if Parent.HasPermission(sender_user_id, "Caster", "") and data.GetParam(0).lower() == "!setvoteprofile":
set_active_vote_profile(data.GetParam(1))
return_value += "The campfires shift and blur. A new set of campfires fades into existence."
respond(data, return_value)
# deletevoteprofile
if Parent.HasPermission(sender_user_id, "Caster", "") and data.GetParam(0).lower() == "!deletevoteprofile":
delete_vote_location(data.GetParam(1).lower())
return_value += "The old campfire blurs and disappears in front of you. It is no more."
respond(data, return_value)
# showvoteprofile
if data.GetParam(0).lower() == ("!showvoteprofile" or "!showvoteprofiles" or "!displayvoteprofile"):
vote_data = get_vote_data()
for profile in vote_data["Profiles"].keys():
return_value += profile + ', '
return_value = return_value[:-2]
respond(data, return_value)
# !checkoptions
if data.IsChatMessage() and data.GetParam(0).lower() == MySet.CheckOptionsCommand:
check_options(data)
return
# !whittle
if data.IsChatMessage() and data.GetParam(0).lower() == "!whittle":
return
# vote
if data.IsChatMessage() and data.GetParam(0).lower() == MySet.Command.lower():
if data.GetParamCount() < 2:
return_value += 'Missing the correct number of parameters. Correct usage is !vote <game> <number of %ss>' \
% MySet.PointName
respond(data, return_value)
return
if data.GetParamCount() == 2 and data.GetParam(1).lower() == 'stop':
if sender_user_id not in active_continuous_adds.keys():
return_value = 'There is nothing to stop adding to.'
Parent.SendStreamMessage(return_value)
return
else:
del active_continuous_adds[sender_user_id]
return_value = 'You have been removed from the continuous add list.'
Parent.SendStreamMessage(return_value)
return
# getting game name
data_input = data.Message
data_input = data_input.split(" ")
data_input = data_input[1:-1]
data_input = ' '.join(data_input)
game = data_input
# gets the amounts
data_input = data.Message
data_input = data_input.split()
amount = data_input[len(data_input)-1].lower()
if sender_user_id not in cooldown_list.keys() or \
(amount.lower() == 'stop' or amount.lower() == 'all'):
# security checking for data values
target = security_check(game)
# check if the file exists
if not target:
return_value += 'That %s does not exist yet. Try using woodchips to add it.'%MySet.ResultName
respond(data, return_value)
return
            # check if the sender_user_id is attempting to do a !vote <name> all
if amount.lower() == 'all':
Parent.Log("Vote all", "Adding all logs.")
new_data = (sender_user_id, target, amount.lower())
# only add anything if the sender_user_id isn't on the cooldown list.
if sender_user_id not in cooldown_list.keys():
add_amount = min(Parent.GetPoints(sender_user_id), MySet.voteMaximum)
if sender_user_id not in active_continuous_adds:
active_continuous_adds[sender_user_id] = new_data
return_value += 'You have been added to the continuous add list and are now adding ' + \
MySet.PointName + 's until you run out. '
else:
# if the sender_user_id isn't in the add list, add it and add the data
if sender_user_id not in active_continuous_adds:
active_continuous_adds[sender_user_id] = new_data
response = 'You have been added to the continuous add list and are now adding ' + \
MySet.PointName + 's until you run out. '
Parent.SendStreamMessage(response)
return
else:
active_continuous_adds[sender_user_id] = new_data
                        response = 'You are already in the active list. Type "!vote stop" at any time to stop adding. '
Parent.SendStreamMessage(response)
return
# check if the sender_user_id is attempting to stop adding logs automatically
elif amount == 'stop' and MySet.continuousVoting:
if sender_user_id in active_continuous_adds:
return_value += 'You have been removed from the continuous add list for '+str(target)+' '+str(sender_user_display)
Parent.SendStreamWhisper(sender_user_id, return_value)
del active_continuous_adds[sender_user_id]
return
else:
return_value += 'You aren\'t on the continuous add list.'
respond(data, return_value)
return
# add amount
else:
# verify the amount to add is actually an integer
try:
add_amount = int(amount)
except ValueError as ve:
return_value += 'That isn\'t an integer. Please vote using an integer.'
respond(data, return_value)
return
# check the amount is not higher than the sender_user_id can add.
if add_amount > Parent.GetPoints(sender_user_id):
return_value += 'Your %s pales in comparison to the amount you wish to add, %s. You only have %s. Wait to gather more.'\
%(MySet.ResultName, sender_user_display, str(Parent.GetPoints(sender_user_id)))
respond(data, return_value)
# if they're in the auto add list, remove them from that list
if sender_user_id in active_continuous_adds:
del active_continuous_adds[sender_user_id]
return
# if users can add all the time, then ignore cooldowns and just add it
if not MySet.AntiSnipe and add_amount >= 0:
# get the number of points afterwards
result = add_to_campfire(sender_user_id, target, add_amount)
return_value += "%s added %i to %s's %s. There are now %i %ss in the %s. " % (
sender_user_display, add_amount, target, MySet.ResultName, result, MySet.PointName, MySet.ResultName)
respond(data, return_value)
return
# If the sender_user_id tries to add more than the set maximum, change the amount to add to be that maximum.
if add_amount > int(MySet.voteMaximum):
# get the number of seconds this will take to finish
seconds_to_completion = int(((add_amount-float(MySet.voteMaximum))/float(MySet.voteMaximum))*int(MySet.cooldownTime))
minutes_to_completion = 0
hours_to_completion = 0
if seconds_to_completion > 60:
minutes_to_completion = seconds_to_completion/60
seconds_to_completion = seconds_to_completion%60
if minutes_to_completion > 60:
hours_to_completion = minutes_to_completion/60
minutes_to_completion = minutes_to_completion%60
return_value += 'Currently the maximum number of %ss is %s. Removing this amount from your pool. '\
%(MySet.PointName, MySet.voteMaximum)
add_amount = int(MySet.voteMaximum)
# add users to the continuous add list and create a separate dictionary that keeps track of their cap
if sender_user_id not in active_continuous_adds:
# store the new data as a tuple for another function to deal with.
new_data = (sender_user_id, target, int(amount) - add_amount)
active_continuous_adds[sender_user_id] = new_data
# send users a message to inform them how long logs will add for.
if hours_to_completion != 0:
return_value += ("You have been added to the continuous add list. " +
MySet.PointName.capitalize() + ' will continue to add for ' +
str(hours_to_completion) + ' hours and ' +
str(minutes_to_completion) + ' minutes and ' +
str(seconds_to_completion) +
' seconds. Type "!vote stop" to stop voting on this choice. ')
elif minutes_to_completion != 0:
return_value += ("You have been added to the continuous add list. " +
MySet.PointName.capitalize() + 's will continue to add for ' +
str(minutes_to_completion) + ' minutes and ' +
str(seconds_to_completion) +
' seconds. Type "!vote stop" to stop voting on this choice. ')
else:
return_value += ("You have been added to the continuous add list. " +
MySet.PointName.capitalize() + 's will continue to add for ' +
str(seconds_to_completion) +
' seconds. Type "!vote stop" to stop voting on this choice. ')
# check the amount is above 0.
if add_amount <= 0:
# if they're in the auto add list, remove them from that list
if sender_user_id in active_continuous_adds:
del active_continuous_adds[sender_user_id]
return_value += sender_user_display + ' if you got this message, you ran out of ' + MySet.PointName + \
's and have been removed from auto add.'
else:
return_value = '%s, %i is less than or equal to 0. Please offer at least one %ss.' \
% (sender_user_id, add_amount, MySet.PointName)
respond(data, return_value)
return
# add it to the campfire
result = add_to_campfire(sender_user_id, target, add_amount)
# output the result to the sender_user_id
return_value += "%s added %i to %s's %s. There are now %i %ss in the %s. "\
%(sender_user_display, add_amount, target, MySet.ResultName, result, MySet.PointName, MySet.ResultName)
cooldown = MySet.cooldownTime
# set the cooldown and save it
if MySet.dynamicCooldown:
cooldown = add_amount * (float(MySet.cooldownTime)/float(MySet.voteMaximum))
# add a sender_user_id to a dictionary when they use the command.
cooldown_list[sender_user_id] = time.time(), cooldown
else:
# Output the cooldown message
if sender_user_id in cooldown_list.keys():
seconds_to_wait = get_cooldown(sender_user_id)
return_value += "You have to wait " + str(int(seconds_to_wait)) + ' more seconds before you can add ' + \
MySet.PointName + 's again.'
respond(data, return_value)
return
# sends the final message
if not MySet.SilentAdds:
respond(data, return_value)
# debug section
if data.IsChatMessage() and data.GetParam(0).lower() == '!debug':
        if data.GetParam(1) == 'get_cooldown' and MySet.get_cooldown:
return_value = get_cooldown(data.GetParam(2))
Parent.SendStreamMessage(str(return_value))
if data.GetParam(1) == 'get_live':
return_value = str(get_stream_is_live())
Parent.SendStreamMessage("Stream is live: " + return_value)
Parent.SendStreamMessage("Stream isLive function: " + str(Parent.IsLive()))
return | 3,237 |
def send_fixtures(
patch_client: HTTPClient,
request: Any,
) -> Generator[Tuple[HTTPClient, str], None, None]:
"""Methods that send data to an API: POST, PATCH, PUT"""
yield patch_client, request.param | 3,238 |
def compute_subjobs_for_build(build_id, job_config, project_type):
"""
Calculate subjobs for a build.
:type build_id: int
:type job_config: JobConfig
:param project_type: the project_type that the build is running in
:type project_type: project_type.project_type.ProjectType
:rtype: list[Subjob]
"""
# Users can override the list of atoms to be run in this build. If the atoms_override
# was specified, we can skip the atomization step and use those overridden atoms instead.
if project_type.atoms_override is not None:
atoms_string_list = project_type.atoms_override
atoms_list = [Atom(atom_string_value) for atom_string_value in atoms_string_list]
else:
atoms_list = job_config.atomizer.atomize_in_project(project_type)
# Group the atoms together using some grouping strategy
timing_file_path = project_type.timing_file_path(job_config.name)
grouped_atoms = _grouped_atoms(
atoms_list,
job_config.max_executors,
timing_file_path,
project_type.project_directory
)
# Generate subjobs for each group of atoms
subjobs = []
for subjob_id, subjob_atoms in enumerate(grouped_atoms):
# The atom id isn't calculated until the atom has been grouped into a subjob.
for atom_id, atom in enumerate(subjob_atoms):
atom.id = atom_id
subjobs.append(Subjob(build_id, subjob_id, project_type, job_config, subjob_atoms))
return subjobs | 3,239 |
def pymodbus_mocked(mocker):
"""Patch pymodbus to deliver results."""
class ResponseContent:
"""Fake a response."""
registers = [0]
class WriteStatus:
"""Mock a successful response."""
@staticmethod
def isError():
# pylint: disable=invalid-name,missing-function-docstring
return False
# Patch connection function
mocker.patch("pymodbus.client.sync.ModbusTcpClient.connect")
mocker.patch(
"pymodbus.client.sync.ModbusTcpClient.read_holding_registers",
return_value=ResponseContent,
)
mocker.patch(
"pymodbus.client.sync.ModbusTcpClient.write_registers", return_value=WriteStatus
) | 3,240 |
def user_can_view_assessments(user, **kwargs):
""" Return True iff given user is allowed to view the assessments """
return not appConfig.settings.LOGIN_REQUIRED or user.is_authenticated | 3,241 |
def get_hashes(root_hash: str) -> List[str]:
""" Return a list with the commits since `root_hash` """
cmd = f"git rev-list --ancestry-path {root_hash}..HEAD"
proc = run(cmd)
return proc.stdout.splitlines() | 3,242 |
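The `run` helper used above is defined elsewhere in the codebase; a minimal sketch of one plausible implementation and a call, shown purely for illustration (the root hash is hypothetical):
import subprocess

def run(cmd: str) -> subprocess.CompletedProcess:
    # Assumed behaviour: execute the command, capturing stdout as text.
    return subprocess.run(cmd.split(), capture_output=True, text=True, check=True)

commits = get_hashes("a1b2c3d")  # hypothetical root commit hash
print(len(commits), "commits since the root")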
def unzip_file(zip_src, dst_dir):
"""
解压zip文件
:param zip_src: zip文件的全路径
:param dst_dir: 要解压到的目的文件夹
:return:
"""
r = zipfile.is_zipfile(zip_src)
if r:
fz = zipfile.ZipFile(zip_src, "r")
for file in fz.namelist():
fz.extract(file, dst_dir)
else:
return "请上传zip类型压缩文件" | 3,243 |
def abort_multipart_upload(resource, bucket_name, object_name, upload_id):
"""Abort in-progress multipart upload"""
mpupload = resource.MultipartUpload(bucket_name, object_name, upload_id)
return mpupload.abort() | 3,244 |
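A small usage sketch with boto3; the bucket name, object key and upload id below are placeholders, not values from the source:
import boto3

s3 = boto3.resource("s3")
abort_multipart_upload(s3, "my-bucket", "backups/large-object.bin", "EXAMPLE-UPLOAD-ID")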
def read_data(input_path):
"""Read pre-stored data
"""
train = pd.read_parquet(os.path.join(input_path, 'train.parquet'))
tournament = pd.read_parquet(os.path.join(input_path, 'tournament.parquet'))
return train, tournament | 3,245 |
def double(n):
"""
Takes a number n and doubles it
"""
return n * 2 | 3,246 |
def group(iterable):
"""
Creates a min/max grouping for the inputted list of numbers. This
will shrink a list into the group sets that are available.
:param iterable | <iterable> | (list, tuple, set, etc.)
:return <generator> [(<int> min, <int> max), ..]
"""
numbers = sorted(list(set(iterable)))
for _, grouper in itertools.groupby(numbers, key=lambda i, c=itertools.count(): i - next(c)):
subset = list(grouper)
yield subset[0], subset[-1] | 3,247 |
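A quick usage sketch for group(), assuming `itertools` is imported as the function requires:
import itertools

print(list(group([1, 2, 3, 7, 8, 10])))
# [(1, 3), (7, 8), (10, 10)]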
def Normalize(tensor, mean, std, inplace=False):
"""Normalize a float tensor image with mean and standard deviation.
This transform does not support PIL Image.
.. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation inplace.
Returns:
Tensor: Normalized Tensor image.
"""
if not isinstance(tensor, torch.Tensor):
raise TypeError(
'Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))
if not tensor.is_floating_point():
raise TypeError(
'Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))
if tensor.ndim < 3:
raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
'{}.'.format(tensor.size()))
if not inplace:
tensor = tensor.clone()
dtype = tensor.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
if (std == 0).any():
raise ValueError(
'std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
if mean.ndim == 1:
mean = mean.view(-1, 1, 1)
if std.ndim == 1:
std = std.view(-1, 1, 1)
tensor.sub_(mean).div_(std)
return tensor | 3,248 |
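A minimal usage sketch; the mean/std values are the commonly used ImageNet statistics and are illustrative only:
import torch

img = torch.rand(3, 224, 224)  # fake CHW float image in [0, 1]
out = Normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(out.shape)  # torch.Size([3, 224, 224])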
def main():
"""parse args and perform the automation"""
parser = common.cli_arg_parser()
args = parser.parse_args()
pause = getattr(args, common.PAUSE_ARG)
with common.maybe_pause_at_the_end(pause):
_update(args) | 3,249 |
def test_get_aggregated_tensor_weights(tensor_db):
"""Test that get_aggregated_tensor calculates correctly."""
collaborator_weight_dict = {'col1': 0.1, 'col2': 0.9}
tensor_key = TensorKey('tensor_name', 'agg', 0, False, ())
agg_nparray = tensor_db.get_aggregated_tensor(
tensor_key, collaborator_weight_dict, WeightedAverage())
control_nparray = np.average(
[np.array([0, 1, 2, 3, 4]), np.array([2, 3, 4, 5, 6])],
weights=np.array(list(collaborator_weight_dict.values())),
axis=0
)
assert np.array_equal(agg_nparray, control_nparray) | 3,250 |
def transcribe(transcriber):
"""
"""
directory = transcriber.transcribe_directory
output_directory = transcriber.transcribe_directory
log_directory = os.path.join(output_directory, 'log')
config = transcriber.transcribe_config
mdl_path = os.path.join(directory, 'final.mdl')
corpus = transcriber.corpus
num_jobs = corpus.num_jobs
if config.use_mp and num_jobs > 1:
jobs = [(directory, x, mdl_path, config,
config.feature_config.construct_feature_proc_string(corpus.split_directory(), directory, x),
output_directory)
for x in range(num_jobs)]
else:
jobs = [(directory, x, mdl_path, config,
config.feature_config.construct_feature_proc_string(corpus.split_directory(), directory, x),
output_directory, corpus.original_num_jobs)
for x in range(num_jobs)]
if config.use_mp and num_jobs > 1:
run_mp(decode_func, jobs, log_directory)
else:
run_non_mp(decode_func, jobs, log_directory)
if transcriber.evaluation_mode:
best_wer = 10000
best = None
for lmwt in range(transcriber.min_language_model_weight, transcriber.max_language_model_weight):
for wip in transcriber.word_insertion_penalties:
out_dir = os.path.join(output_directory, 'eval_{}_{}'.format(lmwt, wip))
log_dir = os.path.join(out_dir, 'log')
os.makedirs(log_dir, exist_ok=True)
jobs = [(directory, x, config, out_dir, lmwt, wip)
for x in range(num_jobs)]
if config.use_mp:
run_mp(score_func, jobs, log_dir)
else:
run_non_mp(score_func, jobs, log_dir)
ser, wer = transcriber.evaluate(out_dir, out_dir)
                if wer < best_wer:
                    best_wer = wer
                    best = (lmwt, wip)
transcriber.transcribe_config.language_model_weight = best[0]
transcriber.transcribe_config.word_insertion_penalty = best[1]
else:
jobs = [(directory, x, config, output_directory)
for x in range(num_jobs)]
if config.use_mp:
run_mp(score_func, jobs, log_directory)
else:
run_non_mp(score_func, jobs, log_directory) | 3,251 |
def reproduce_candcollection(cc, data=None, wisdom=None, spec_std=None,
sig_ts=[], kalman_coeffs=[]):
""" Uses candcollection to make new candcollection with required info.
Will look for cluster label and filter only for peak snr, if available.
Location (e.g., integration, dm, dt) of each is used to create
canddata for each candidate, if required.
    Can calculate features not used directly for search (as defined in
state.prefs.calcfeatures).
"""
from rfpipe import candidates, util
# set up output cc
st = cc.state
cc1 = candidates.CandCollection(prefs=st.prefs, metadata=st.metadata)
if len(cc):
if 'cluster' in cc.array.dtype.fields:
clusters = cc.array['cluster'].astype(int)
cl_rank, cl_count = candidates.calc_cluster_rank(cc)
calcinds = np.unique(np.where(cl_rank == 1)[0]).tolist()
logger.debug("Reproducing cands at {0} cluster peaks"
.format(len(calcinds)))
else:
logger.debug("No cluster field found. Reproducing all.")
calcinds = list(range(len(cc)))
        # if there are candidates that need new feature calculations
if not all([f in cc.array.dtype.fields for f in st.features]):
logger.info("Generating canddata for {0} candidates"
.format(len(calcinds)))
candlocs = cc.locs
snrs = cc.snrtot
normprob = candidates.normprob(snrs, st.ntrials)
snrmax = snrs.max()
logger.info('Zscore/SNR for strongest candidate: {0}/{1}'
.format(normprob[np.where(snrs == snrmax)[0]][0], snrmax))
if ('snrk' in st.features and
'snrk' not in cc.array.dtype.fields and
(spec_std is None or not len(sig_ts) or not len(kalman_coeffs))):
# TODO: use same kalman calc for search as reproduce?
spec_std, sig_ts, kalman_coeffs = util.kalman_prep(data)
# reproduce canddata for each
for i in calcinds:
# TODO: check on best way to find max SNR with kalman, etc
snr = snrs[i]
candloc = candlocs[i]
# kwargs passed to canddata object for plotting/saving
kwargs = {}
if 'cluster' in cc.array.dtype.fields:
logger.info("Cluster {0}/{1} has {2} candidates and max detected SNR {3:.1f} at {4}"
.format(calcinds.index(i), len(calcinds)-1, cl_count[i],
snr, candloc))
# add supplementary plotting and cc info
kwargs['cluster'] = clusters[i]
kwargs['clustersize'] = cl_count[i]
else:
logger.info("Candidate {0}/{1} has detected SNR {2:.1f} at {3}"
.format(calcinds.index(i), len(calcinds)-1, snr,
candloc))
# reproduce candidate and get/calc features
data_corr = pipeline_datacorrect(st, candloc, data_prep=data)
for feature in st.features:
if feature in cc.array.dtype.fields: # if already calculated
kwargs[feature] = cc.array[feature][i]
else: # if desired, but not calculated here or from canddata
if feature == 'snrk':
if 'snrk' not in cc.array.dtype.fields:
spec = data_corr.real.mean(axis=3).mean(axis=1)[candloc[1]]
if np.count_nonzero(spec)/len(spec) > 1-st.prefs.max_zerofrac:
significance_kalman = -kalman_significance(spec, spec_std,
sig_ts=sig_ts,
coeffs=kalman_coeffs)
snrk = (2*significance_kalman)**0.5
else:
logger.warning("snrk set to 0, since {0}/{1} are zeroed".format(len(spec)-np.count_nonzero(spec), len(spec)))
snrk = 0.
logger.info("Calculated snrk of {0} after detection. "
"Adding it to CandData.".format(snrk))
kwargs[feature] = snrk
cd = pipeline_canddata(st, candloc, data_corr, spec_std=spec_std,
sig_ts=sig_ts, kalman_coeffs=kalman_coeffs, **kwargs)
if st.prefs.saveplots:
candidates.candplot(cd, snrs=snrs) # snrs before clustering
# regenerate cc with extra features in cd
cc1 += candidates.cd_to_cc(cd)
        # if no candidates need new features, just select the cluster peaks
else:
logger.info("Using clustering info to select {0} candidates"
.format(len(calcinds)))
cc1.array = cc.array.take(calcinds)
return cc1 | 3,252 |
def hilbert(signal, padding='nextpow'):
"""
Apply a Hilbert transform to a `neo.AnalogSignal` object in order to
obtain its (complex) analytic signal.
The time series of the instantaneous angle and amplitude can be obtained
as the angle (`np.angle` function) and absolute value (`np.abs` function)
of the complex analytic signal, respectively.
By default, the function will zero-pad the signal to a length
corresponding to the next higher power of 2. This will provide higher
computational efficiency at the expense of memory. In addition, this
circumvents a situation where, for some specific choices of the length of
the input, `scipy.signal.hilbert` function will not terminate.
Parameters
----------
signal : neo.AnalogSignal
Signal(s) to transform.
padding : int, {'none', 'nextpow'}, or None, optional
Defines whether the signal is zero-padded.
The `padding` argument corresponds to `N` in
`scipy.signal.hilbert(signal, N=padding)` function.
If 'none' or None, no padding.
If 'nextpow', zero-pad to the next length that is a power of 2.
If it is an `int`, directly specify the length to zero-pad to
(indicates the number of Fourier components).
Default: 'nextpow'
Returns
-------
neo.AnalogSignal
Contains the complex analytic signal(s) corresponding to the input
`signal`. The unit of the returned `neo.AnalogSignal` is
dimensionless.
Raises
------
ValueError:
If `padding` is not an integer or neither 'nextpow' nor 'none' (None).
Examples
--------
Create a sine signal at 5 Hz with increasing amplitude and calculate the
instantaneous phases:
>>> import neo
>>> import numpy as np
>>> import quantities as pq
>>> import matplotlib.pyplot as plt
>>> from elephant.signal_processing import hilbert
>>> t = np.arange(0, 5000) * pq.ms
>>> f = 5. * pq.Hz
>>> a = neo.AnalogSignal(
... np.array(
... (1 + t.magnitude / t[-1].magnitude) * np.sin(
... 2. * np.pi * f * t.rescale(pq.s))).reshape(
... (-1,1)) * pq.mV,
... t_start=0*pq.s,
... sampling_rate=1000*pq.Hz)
...
>>> analytic_signal = hilbert(a, padding='nextpow')
>>> angles = np.angle(analytic_signal)
>>> amplitudes = np.abs(analytic_signal)
>>> print(angles)
[[-1.57079633]
[-1.51334228]
[-1.46047675]
...,
[-1.73112977]
[-1.68211683]
[-1.62879501]]
>>> plt.plot(t, angles)
"""
# Length of input signals
n_org = signal.shape[0]
    # Determine the length to which the signal will be zero-padded on the right
if isinstance(padding, int):
# User defined padding
n = padding
elif padding == 'nextpow':
# To speed up calculation of the Hilbert transform, make sure we change
# the signal to be of a length that is a power of two. Failure to do so
# results in computations of certain signal lengths to not finish (or
# finish in absurd time). This might be a bug in scipy (0.16), e.g.,
# the following code will not terminate for this value of k:
#
# import numpy
# import scipy.signal
# k=679346
# t = np.arange(0, k) / 1000.
# a = (1 + t / t[-1]) * np.sin(2 * np.pi * 5 * t)
# analytic_signal = scipy.signal.hilbert(a)
#
# For this reason, nextpow is the default setting for now.
n = 2 ** (int(np.log2(n_org - 1)) + 1)
elif padding == 'none' or padding is None:
# No padding
n = n_org
else:
raise ValueError("Invalid padding '{}'.".format(padding))
output = signal.duplicate_with_new_data(
scipy.signal.hilbert(signal.magnitude, N=n, axis=0)[:n_org])
# todo use flag once is fixed
# https://github.com/NeuralEnsemble/python-neo/issues/752
output.array_annotate(**signal.array_annotations)
return output / output.units | 3,253 |
def _tagged_mosc_id(kubeconfig, version, arch, private) -> str:
"""determine what the most recently tagged machine-os-content is in given imagestream"""
base_name = rgp.default_imagestream_base_name(version)
base_namespace = rgp.default_imagestream_namespace_base_name()
name, namespace = rgp.payload_imagestream_name_and_namespace(base_name, base_namespace, arch, private)
stdout, _ = exectools.cmd_assert(
f"oc --kubeconfig '{kubeconfig}' --namespace '{namespace}' get istag '{name}:machine-os-content'"
" --template '{{.image.dockerImageMetadata.Config.Labels.version}}'",
retries=3,
pollrate=5,
strip=True,
)
return stdout if stdout else None | 3,254 |
def test_namechooser__DontReuseNames__chooseName__9(NameChooserFactory):
"""`chooseName()` omits a name alredy used in the container."""
nc = NameChooserFactory(2)
with mock.patch.object(nc, 'name_in_use', side_effect=[True, False]):
assert u'foo-4' == nc.chooseName('foo', object()) | 3,255 |
def delete_network_config(data):
"""
Delete the network configuration.
Parameters
----------
data : list
The list of network interfaces.
Returns
-------
No return value.
"""
delete_virtual_interfaces(data)
delete_directory_files(__netscripts, data, lambda x: __iface_prefix + x) | 3,256 |
def test_workflow_migration(isolated_runner, old_workflow_project):
"""Check that *.cwl workflows can be migrated."""
result = isolated_runner.invoke(cli, ["migrate"])
assert 0 == result.exit_code
assert "OK" in result.output
result = isolated_runner.invoke(cli, ["log", old_workflow_project["log_path"]])
assert 0 == result.exit_code
for expected in old_workflow_project["expected_strings"]:
assert expected in result.output | 3,257 |
def load_household_size_by_municipality():
"""Return dataframe, index 'Gemeente', column 'HHsize'."""
dfhh = pd.read_csv('data/huishoudens_samenstelling_gemeentes.csv', comment='#')
dfhh.sort_values('Gemeente', inplace=True)
dfhh.set_index('Gemeente', inplace=True)
    # remove rows for nonexistent municipalities
dfhh.drop(index=dfhh.index[dfhh['nHH'].isna()], inplace=True)
# rename municipalities
rename_muns = {
'Beek (L.)': 'Beek',
'Hengelo (O.)': 'Hengelo',
'Laren (NH.)': 'Laren',
'Middelburg (Z.)': 'Middelburg',
'Rijswijk (ZH.)': 'Rijswijk',
'Stein (L.)': 'Stein',
'Groningen (gemeente)': 'Groningen',
'Utrecht (gemeente)': 'Utrecht',
"'s-Gravenhage (gemeente)": "'s-Gravenhage",
}
dfhh.rename(index=rename_muns, inplace=True)
return dfhh | 3,258 |
def gcp_iam_service_account_delete_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
    Delete service accounts.
Args:
client (Client): GCP API client.
args (dict): Command arguments from XSOAR.
Returns:
CommandResults: outputs, readable outputs and raw response for XSOAR.
"""
service_account_name = argToList(args.get('service_account_name'))
command_results_list: List[CommandResults] = []
for account in service_account_name:
try:
client.gcp_iam_service_account_delete_request(account)
command_results_list.append(CommandResults(
readable_output=f'Service account {account} deleted successfully.'
))
except Exception as exception:
error = CommandResults(
readable_output=f'An error occurred while trying to delete {account}.\n {exception}'
)
command_results_list.append(error)
return command_results_list | 3,259 |
def couple_to_string(couple: Union[Span, Tuple[int, int]]) -> str:
"""Return a deduplicated string representation of the given couple or span.
Examples:
>>> couple_to_string((12, 15))
"12-15"
>>> couple_to_string((12, 12))
"12"
>>> couple_to_string(Span(12, 15))
"12-15"
"""
return f"{couple[0]}" + ("" if couple[0] == couple[1] else f"-{couple[1]}") | 3,260 |
def create_study(X, y,
storage=None, # type: Union[None, str, storages.BaseStorage]
sample_method=None,
metrics=None,
study_name=None, # type: Optional[str]
direction='maximize', # type: str
load_cache=False, # type: bool
is_autobin=False,
bin_params=dict(),
sample_params=dict(),
trials_list=list(),
export_model_path=None,
precision=np.float64,
):
# type: (...) -> Study
"""Create a new :class:`~diego.study.Study`.
Args:
storage:
Database URL. If this argument is set to None, in-memory storage is used, and the
:class:`~diego.study.Study` will not be persistent.
        sample_method:
            A sampler object that implements the background algorithm for value suggestion. See also
:class:`~diego.samplers`.
study_name:
Study's name. If this argument is set to None, a unique name is generated
automatically.
        is_autobin: whether to perform automatic binning
        bin_params: parameters for the binning method
precision {[np.dtype]} -- precision:
np.dtypes, float16, float32, float64 for data precision to reduce memory size. (default: {np.float64})
Returns:
A :class:`~diego.study.Study` object.
"""
X, y = check_X_y(X, y, accept_sparse='csr')
storage = get_storage(storage)
try:
study_id = storage.create_new_study_id(study_name)
except basic.DuplicatedStudyError:
        # Study names kept in memory should be unique, and an existing Study can be loaded; the data itself lives in storage.
# if load_if_exists:
# assert study_name is not None
# logger = logging.get_logger(__name__)
# logger.info("Using an existing study with name '{}' instead of "
# "creating a new one.".format(study_name))
# study_id = storage.get_study_id_from_name(study_name)
# else:
raise
study_name = storage.get_study_name_from_id(study_id)
study = Study(
study_name=study_name,
storage=storage,
sample_method=sample_method,
is_autobin=is_autobin,
bin_params=bin_params,
export_model_path=export_model_path,
precision=precision,
metrics=metrics)
if direction == 'minimize':
_direction = basic.StudyDirection.MINIMIZE
elif direction == 'maximize':
_direction = basic.StudyDirection.MAXIMIZE
else:
raise ValueError(
'Please set either \'minimize\' or \'maximize\' to direction.')
if metrics in ['logloss']:
_direction = basic.StudyDirection.MINIMIZE
X = X.astype(dtype=precision, copy=False)
study.storage.direction = _direction
study.storage.set_train_storage(X, y)
return study | 3,261 |
def setup_logging(path='log.config', key=None):
"""Setup logging configuration"""
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(key)
return logger | 3,262 |
def qparams_init(net, conv_name="conv", bn_name="batchnorm"):
"""
Initialize quantized parameters for convolution op
:param net: mxnet.gluon.nn.Block
The net to initialize.
:param conv_name: str
:param bn_name: str
:return: mxnet.gluon.nn.Block
The net that has been initialized.
"""
blocks = net.collect_quantized_blocks()
params = net.collect_params()
for m in blocks:
# If fake bn, recalculate weight and initialize some related params
if isinstance(m, nn.Conv2D) and hasattr(m, "gamma"):
name = m.name
weight = m.weight.data()
# Get params of batchnorm
gamma = params[name.replace(conv_name, bn_name) + "_gamma"].data()
beta = params[name.replace(conv_name, bn_name) + "_beta"].data()
mean = params[name.replace(conv_name, bn_name) + "_running_mean"].data()
var = params[name.replace(conv_name, bn_name) + "_running_var"].data()
# Store params of bn at conv
m.gamma.initialize(Constant(gamma))
m.beta.initialize(Constant(beta))
m.running_mean.initialize(Constant(mean))
m.running_var.initialize(Constant(var))
# Enable bias if need, and recalculate weight and bias with fake bn version
w_shape = weight.shape
cout = w_shape[0]
if m.bias is None:
m._kwargs['no_bias'] = False
m.bias = m.params.get('bias',
shape=(cout,), init="zeros",
allow_deferred_init=True)
m.bias.initialize()
if type(m) in (nn.Conv2D, nn.Dense) and m.quantize_args.quantize_input:
m.input_max.initialize(Constant(0))
if type(m) == nn.Activation and m.quantize_args.quantize_act:
m.act_max.initialize(Constant(0)) | 3,263 |
def test_no_access_to_class_property(db):
"""Ensure the implementation doesn't access class properties or declared
attrs while inspecting the unmapped model.
"""
class class_property:
def __init__(self, f):
self.f = f
def __get__(self, instance, owner):
return self.f(owner)
class Duck(db.Model):
id = db.Column(db.Integer, primary_key=True)
class ns:
is_duck = False
floats = False
class Witch(Duck):
@declared_attr
def is_duck(self):
# declared attrs will be accessed during mapper configuration,
# but make sure they're not accessed before that
info = inspect.getouterframes(inspect.currentframe())[2]
assert info[3] != "_should_set_tablename"
ns.is_duck = True
@class_property
def floats(self):
ns.floats = True
assert ns.is_duck
assert not ns.floats | 3,264 |
def get_gb_version(backbone_top_cmake_path):
"""
Find the game backbone version number by searching the top level CMake file
"""
with open(backbone_top_cmake_path, 'r') as file:
cmake_text = file.read()
regex_result = re.search(gb_version_regex, cmake_text)
return regex_result.group(1) | 3,265 |
async def get_processes(name: Optional[str] = None) -> List[Process]:
"""
Get all processes.
Args:
name (Optional[str], optional): Filter by process name. Defaults to None.
Returns:
List[Process]: A list of processes.
"""
if name:
return get_processes_by_name(name)
return get_all_processes() | 3,266 |
def patch_typing_python351():
"""
Python 3.5.1 doesn't have typing.Type, refs:
https://github.com/crystax/android-vendor-python-3-5/issues/1
"""
# TODO: check Python version and only patch if == 3.5.1
if not hasattr(typing, 'Type'):
typing.Type = Type | 3,267 |
def create_file(root_folder, app_name, file, use_template=False):
"""Create a file in the specified target.
Args:
root_folder (str): project root folder.
app_name (str): project name.
file (str): file to be created.
use_template (bool, optional): whether or not to use the templates
Returns:
Nothing.
>>> create_file('C:/temp', 'test_app', 'README.md')
>>> os.path.exists('C:/temp/test_app/README.md')
True
"""
full_file_path = os.path.join(root_folder, app_name, file)
content = ""
if use_template:
if file in ["README.md", "setup.cfg", "setup.py"]:
template_folder = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__))
)
try:
with open(
os.path.join(template_folder, f"data/sample_{file}"),
"r",
encoding="utf-8",
) as sample:
content = sample.read()
except Exception as e:
print(f"Error reading template sample_{file}")
raise e
try:
with open(full_file_path, "w", encoding="utf-8") as new_file:
new_file.writelines(content)
except Exception as e:
print(f"Could not create file {full_file_path}.")
raise e | 3,268 |
def plot_day_of_activation(df, plotname):
"""
Plots Aggregate of Day of Activation.
"""
# todo sort order in logical day order
dotw = {0: 'Monday',
1: 'Tuesday',
2: 'Wednesday',
3: 'Thursday',
4: 'Friday',
5: 'Saturday',
6: 'Sunday'}
df2 = df[df['adults_first_use'] == 1][['user_id', 'day_of_week']]
df2 = df2.groupby('user_id', as_index=False).mean()['day_of_week'].map(dotw).to_frame()
df2 = df2['day_of_week'].value_counts().to_frame()
# todo fix the X axis labeling so it's not hardcoded!
    trace = go.Bar(x=['Tuesday', 'Wednesday', 'Friday', 'Thursday', 'Saturday', 'Sunday', 'Monday'],
y=df2.day_of_week,
marker=dict(color='#CC171D'))
layout = go.Layout(
title="Day of Firenze Card Activation",
xaxis=dict(
title='Day of the Week',
nticks=7,
ticks='outside',
),
yaxis=dict(
title='Number of Cards Activated',
ticks='outside',
)
)
fig = go.Figure(data=go.Data([trace]), layout=layout)
plot_url = py.iplot(fig, plotname, sharing='private', auto_open=False)
return df2, plot_url | 3,269 |
def pytest_collection_finish(session):
"""Handle the pytest collection finish hook: configure pyannotate.
Explicitly delay importing `collect_types` until all tests have
been collected. This gives gevent a chance to monkey patch the
world before importing pyannotate.
"""
from pyannotate_runtime import collect_types
collect_types.init_types_collection() | 3,270 |
def run_alf_extractors(session_path):
"""
Extract camera timestamps from the sync matrix
:param session_path: path to ap.bin file from
:return: no return command, alf files are created
"""
extractors.ephys_fpga._get_main_probe_sync(session_path) | 3,271 |
def get_rucio_redirect_url(lfn, scope):
"""
get_rucio_redirect_url: assemble Rucio redirect URL
@params: lfn ... one filename
e.g. user.gangarbt.62544955._2108356106.log.tgz
scope ... scope of the file with lfn
e.g. user.gangarbt, or valid1
returns: the Rucio redirect URL
"""
redirectUrl = ''
### compose the redirecURL
redirectUrl = '%(redirecthost)s/redirect/%(scope)s/%(filename)s%(suffix)s' % \
{\
'redirecthost': get_rucio_redirect_host(), \
'scope': scope, \
'filename': lfn, \
'suffix': '' \
}
_logger.info('get_rucio_redirect_url: redirectUrl=(%s)' % redirectUrl)
### return the redirectURL
return redirectUrl | 3,272 |
def test_store_not_normalized(mini_sentry, relay):
"""
Tests that relay does not normalize when processing is disabled
"""
relay = relay(mini_sentry, {"processing": {"enabled": False}})
project_id = 42
mini_sentry.add_basic_project_config(project_id)
relay.send_event(project_id, {"message": "some_message"})
event = mini_sentry.captured_events.get(timeout=1).get_event()
assert event.get("key_id") is None
assert event.get("project") is None
assert event.get("version") is None | 3,273 |
async def _getRequest(websession, url):
"""Send a GET request."""
async with websession.get(url, headers=HEADER) as response:
if response.status == 200:
data = await response.json(content_type=None)
else:
raise Exception('Bad response status code: {}'.format(response.status))
return data | 3,274 |
def interval_seconds():
"""returns the time interval in seconds
Returns:
int
"""
return int(interval_to_milliseconds(interval())/1000) | 3,275 |
def get_current_git_branch():
"""Get current git branch name.
Returns:
str: Branch name
"""
branch_name = "unknown"
try:
branch_name = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('ascii').strip()
except subprocess.CalledProcessError:
pass
return branch_name | 3,276 |
def get_autonomous_db_versions(compartment_id: Optional[str] = None,
db_workload: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetAutonomousDbVersionsFilterArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAutonomousDbVersionsResult:
"""
This data source provides the list of Autonomous Db Versions in Oracle Cloud Infrastructure Database service.
Gets a list of supported Autonomous Database versions.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_autonomous_db_versions = oci.database.get_autonomous_db_versions(compartment_id=var["compartment_id"],
db_workload=var["autonomous_db_version_db_workload"])
```
:param str compartment_id: The compartment [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
:param str db_workload: A filter to return only autonomous database resources that match the specified workload type.
"""
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['dbWorkload'] = db_workload
__args__['filters'] = filters
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getAutonomousDbVersions:getAutonomousDbVersions', __args__, opts=opts, typ=GetAutonomousDbVersionsResult).value
return AwaitableGetAutonomousDbVersionsResult(
autonomous_db_versions=__ret__.autonomous_db_versions,
compartment_id=__ret__.compartment_id,
db_workload=__ret__.db_workload,
filters=__ret__.filters,
id=__ret__.id) | 3,277 |
def azimuthal_average(image, center=None, stddev=True, binsize=0.5, interpnan=False):
"""
Calculate the azimuthally averaged radial profile.
Modified based on https://github.com/keflavich/image_tools/blob/master/image_tools/radialprofile.py
Parameters:
        image (numpy 2-D array): image array.
center (list): [x, y] pixel coordinates. If None, use image center.
Note that x is horizontal and y is vertical, y, x = image.shape.
        stddev (bool): if True, the standard deviation of the profile will also be returned.
binsize (float): size of the averaging bin. Can lead to strange results if
non-binsize factors are used to specify the center and the binsize is
too large.
interpnan (bool): Interpolate over NAN values, i.e. bins where there is no data?
Returns:
        list:
            If `stddev == True`, it will return [radius, profile, profile_std];
            else, it will return [radius, profile].
"""
# Calculate the indices from the image
y, x = np.indices(image.shape)
if center is None:
center = np.array([(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0])
r = np.hypot(x - center[0], y - center[1])
# The 'bins' as initially defined are lower/upper bounds for each bin
# so that values will be in [lower,upper)
nbins = int(np.round(r.max() / binsize) + 1)
maxbin = nbins * binsize
bins = np.linspace(0, maxbin, nbins + 1)
# We're probably more interested in the bin centers than their left or right sides...
bin_centers = (bins[1:] + bins[:-1]) / 2.0
# There are never any in bin 0, because the lowest index returned by digitize is 1
nr = np.histogram(r, bins)[0] # nr is how many pixels are within each bin
# Radial profile itself
nan_flag = np.isnan(image) # get rid of nan
#profile = np.histogram(r, bins, weights=image)[0] / nr
profile = np.histogram(r[~nan_flag], bins, weights=image[~nan_flag])[0] / nr
if interpnan:
profile = np.interp(bin_centers, bin_centers[~np.isnan(profile)],
profile[~np.isnan(profile)])
if stddev:
# Find out which radial bin each point in the map belongs to
# recall that bins are from 1 to nbins
whichbin = np.digitize(r.ravel(), bins)
profile_std = np.array([np.nanstd(image.ravel()[whichbin == b]) for b in range(1, nbins + 1)])
profile_std /= np.sqrt(nr) # Deviation of the mean!
return [bin_centers, profile, profile_std]
else:
return [bin_centers, profile] | 3,278 |
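A small self-contained sketch that feeds the function a radially symmetric test image (illustrative data only):
import numpy as np

yy, xx = np.indices((101, 101))
rr = np.hypot(xx - 50, yy - 50)
img = np.exp(-rr / 20.0)  # brightness decays with distance from the centre
radius, profile, profile_std = azimuthal_average(img, binsize=1.0)
print(radius[:3], profile[:3])  # profile should fall off roughly as exp(-r / 20)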
def get_setindices(header, setnames):
"""From header like ---ID, coverage, set1_q-value set2_q-value---
this returns indices for different sets {'q-value': {'set1': 2, 'set2: 3}}
"""
setindices = OrderedDict()
for index, field in enumerate(header):
for setname in setnames:
if field.startswith('{}_'.format(setname)):
fieldname = field[len(setname) + 1:]
try:
setindices[fieldname][setname] = index
except KeyError:
setindices[fieldname] = {setname: index}
return setindices | 3,279 |
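A usage sketch matching the header layout described in the docstring:
from collections import OrderedDict  # required by get_setindices itself

header = ['ID', 'coverage', 'set1_q-value', 'set2_q-value']
print(get_setindices(header, ['set1', 'set2']))
# OrderedDict([('q-value', {'set1': 2, 'set2': 3})])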
def _base_and_stride(freqstr):
"""
Return base freq and stride info from string representation
Example
-------
_freq_and_stride('5Min') -> 'Min', 5
"""
groups = opattern.match(freqstr)
if groups.lastindex != 2:
raise ValueError("Could not evaluate %s" % freqstr)
stride = groups.group(1)
if len(stride):
stride = int(stride)
else:
stride = 1
base = groups.group(2)
return (base, stride) | 3,280 |
def generate_label(input_x,threshold):
"""
generate label with input
:param input_x: shape of [batch_size, sequence_length]
    :return: y: one-hot label array of shape [batch_size, 2]
"""
batch_size,sequence_length=input_x.shape
y=np.zeros((batch_size,2))
for i in range(batch_size):
input_single=input_x[i]
        total = np.sum(input_single)
        if i == 0:
            print("sum:", total, ";threshold:", threshold)
        y_single = 1 if total > threshold else 0
if y_single==1:
y[i]=[0,1]
else: # y_single=0
y[i]=[1,0]
return y | 3,281 |
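A quick usage sketch with random binary input (illustrative data only):
import numpy as np

x = np.random.randint(0, 2, size=(4, 10))  # 4 sequences of 10 binary tokens
y = generate_label(x, threshold=5)         # label 1 when a row sum exceeds 5
print(y.shape)                             # (4, 2): one-hot rows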
def store_wmarked_pdata(parameters, train_loader, valid_loader, \
netG, watermark, store_paths):
"""
Train: store the w'marked train data
"""
# init. Generator mode
netG.eval()
# data
is_cuda = parameters['system']['cuda']
blend_f = parameters['wmark']['blend-factor']
# init. data holders
wmarked_data = []
wmarked_labels = []
# create the w'marked train data
for bidx, (data, labels) in enumerate( \
tqdm(train_loader, desc='[store:train]')):
# : prepare inputs [cuda]
batches = data.size()[0]
wbatches = watermark.repeat(batches, 1, 1, 1)
if is_cuda:
data, labels, wbatches = \
data.cuda(), labels.cuda(), wbatches.cuda()
# : synthesize new fake data (fake-w'mark)
data_input = torch.cat((data, wbatches), dim=1)
fake_wmark = netG(data_input)
fake_walpha = fake_wmark[:,3:4] * blend_f
fwmark_data = fake_walpha * fake_wmark[:,0:3] + (1 - fake_walpha) * data
# : move to the data-holders
if is_cuda:
cur_wdatas = fwmark_data.data.cpu().numpy() * 255.0
cur_wdatas = cur_wdatas.astype(np.uint8)
cur_labels = labels.data.cpu().numpy().tolist()
else:
cur_wdatas = fwmark_data.data.numpy() * 255.0
cur_wdatas = cur_wdatas.astype(np.uint8)
cur_labels = labels.data.numpy().tolist()
# : store...
wmarked_data += [each_data for each_data in cur_wdatas]
wmarked_labels += cur_labels
# end for...
# sanity checks
assert len(wmarked_data) == len(wmarked_labels), \
('Error: train data/labels [{}/{}]'.format(len(wmarked_data), len(wmarked_labels)))
# store the data, labels
chunk_data = []
chunk_label = []
chunk_size = parameters['wmark']['store-batch']
for idx in range(len(wmarked_data)):
# : add the data
chunk_data.append(wmarked_data[idx])
chunk_label.append(wmarked_labels[idx])
# : save the data (in every i-th chunk)
if (idx+1) % chunk_size == 0:
store_chunk = {
'data' : np.asarray(chunk_data),
'labels': chunk_label,
}
# :: store to the correct location
chunk_filename = 'train_{}.pkl'.format(int((idx+1)/chunk_size))
chunk_savepath = os.path.join(store_paths['data'], store_paths['prefix'])
if not os.path.exists(chunk_savepath): os.makedirs(chunk_savepath)
pickle.dump(store_chunk, open(os.path.join(chunk_savepath, chunk_filename), 'wb'))
# :: clear the holders
chunk_data = []
chunk_label = []
# for idx...
# remainders
if chunk_data and chunk_label:
store_chunk = {
'data' : np.asarray(chunk_data),
'labels': chunk_label,
}
# : store to the correct location
chunk_filename = 'train_{}.pkl'.format(int(len(wmarked_data)/chunk_size) + 1)
chunk_savepath = os.path.join(store_paths['data'], store_paths['prefix'])
if not os.path.exists(chunk_savepath): os.makedirs(chunk_savepath)
pickle.dump(store_chunk, open(os.path.join(chunk_savepath, chunk_filename), 'wb'))
# end if chunk_data...
"""
Test: store the w'marked test data
"""
# init. data holders
wmarked_data = []
wmarked_labels = []
# create the w'marked valid data
for bidx, (data, labels) in enumerate( \
tqdm(valid_loader, desc='[store:valid]')):
# : prepare inputs [cuda]
batches = data.size()[0]
wbatches = watermark.repeat(batches, 1, 1, 1)
if is_cuda:
data, labels, wbatches = \
data.cuda(), labels.cuda(), wbatches.cuda()
# : synthesize new fake data (fake-w'mark)
data_input = torch.cat((data, wbatches), dim=1)
fake_wmark = netG(data_input)
fake_walpha = fake_wmark[:,3:4] * blend_f
fwmark_data = fake_walpha * fake_wmark[:,0:3] + (1 - fake_walpha) * data
# : move to the data-holders
if is_cuda:
cur_wdatas = fwmark_data.data.cpu().numpy() * 255.0
cur_wdatas = cur_wdatas.astype(np.uint8)
cur_labels = labels.data.cpu().numpy().tolist()
else:
cur_wdatas = fwmark_data.data.numpy() * 255.0
cur_wdatas = cur_wdatas.astype(np.uint8)
cur_labels = labels.data.numpy().tolist()
# : store
wmarked_data += [each_data for each_data in cur_wdatas]
wmarked_labels += cur_labels
# end for...
# sanity checks
assert len(wmarked_data) == len(wmarked_labels), \
('Error: test data/labels [{}/{}]'.format(len(wmarked_data), len(wmarked_labels)))
# store the data, labels
chunk_data = []
chunk_label = []
for idx in range(len(wmarked_data)):
# : add the data
chunk_data.append(wmarked_data[idx])
chunk_label.append(wmarked_labels[idx])
# : save the data (in every i-th chunk)
if (idx+1) % chunk_size == 0:
store_chunk = {
'data' : np.asarray(chunk_data),
'labels': chunk_label,
}
# :: store to the correct location
chunk_filename = 'valid_{}.pkl'.format(int((idx+1)/chunk_size))
chunk_savepath = os.path.join(store_paths['data'], store_paths['prefix'])
if not os.path.exists(chunk_savepath): os.makedirs(chunk_savepath)
pickle.dump(store_chunk, open(os.path.join(chunk_savepath, chunk_filename), 'wb'))
# :: clear the holders
chunk_data = []
chunk_label = []
# for idx...
# remainders
if chunk_data and chunk_label:
store_chunk = {
'data' : np.asarray(chunk_data),
'labels': chunk_label,
}
# : store to the correct location
chunk_filename = 'valid_{}.pkl'.format(int(len(wmarked_data)/chunk_size) + 1)
chunk_savepath = os.path.join(store_paths['data'], store_paths['prefix'])
if not os.path.exists(chunk_savepath): os.makedirs(chunk_savepath)
pickle.dump(store_chunk, open(os.path.join(chunk_savepath, chunk_filename), 'wb'))
# end if chunk_data...
# done. | 3,282 |
def remove(path):
"""Deletes a directory or file.
Args:
path: string, a path, filepath or dirpath.
Raises:
        errors.NotFoundError if directory or file doesn't exist.
"""
if exists(path):
if isfile(path):
os.remove(path)
else:
shutil.rmtree(path) | 3,283 |
def erase(input: Any, *args: Any, **kwargs: Any) -> Any:
"""TODO: add docstring"""
... | 3,284 |
def test_delete_a_sharedpublished_volume_whilst_the_nexus_node_is_inaccessible():
"""delete a shared/published volume whilst the nexus node is inaccessible.""" | 3,285 |
def test_sanitize(coresys):
"""Test event sanitation."""
event = {
"tags": [["url", "https://mydomain.com"]],
"request": {
"url": "https://mydomain.com",
"headers": [
["Host", "mydomain.com"],
["Referer", "https://mydomain.com/api/oppio_ingress/xxx-xxx/"],
["X-Forwarded-Host", "mydomain.com"],
["X-Oppio-Key", "xxx"],
],
},
}
coresys.config.diagnostics = True
coresys.core.state = CoreState.RUNNING
with patch("shutil.disk_usage", return_value=(42, 42, 2 * (1024.0 ** 3))):
filtered = filter_data(coresys, event, {})
assert ["url", "https://example.com"] in filtered["tags"]
assert filtered["request"]["url"] == "https://example.com"
assert ["Host", "example.com"] in filtered["request"]["headers"]
assert ["Referer", "https://example.com/api/oppio_ingress/xxx-xxx/"] in filtered[
"request"
]["headers"]
assert ["X-Forwarded-Host", "example.com"] in filtered["request"]["headers"]
assert ["X-Oppio-Key", "XXXXXXXXXXXXXXXXXXX"] in filtered["request"]["headers"] | 3,286 |
def sf_imread(
img_path,
plot=True,
):
"""
Thin wrapper around `skimage.io.imread` that rotates the image if it is
to be used for plotting, but does not if it is to be used for measurements.
Parameters
----------
img_path : str
Path to image
plot : bool
Determines whether or not image will be rotated 90 degrees
Returns
-------
np.array
"""
img_in = io.imread(img_path)
if plot:
img_in = transform.rotate(img_in, -90) # show images going left-right
return img_in | 3,287 |
def init_config():
"""Called at the end of package import to read initial configuration and setup cloud computing.
"""
from . import packages
config.update(CONFIG_DEFAULTS)
path = _get_config_path()
if os.path.exists(path):
try:
with open(path, 'r') as infile:
newconf = yaml.load(infile)
if not isinstance(newconf, dict):
raise TypeError('Cannot read configuration "%s" from %s.' % (newconf, path))
except (IOError, KeyError, TypeError) as e:
print(('WARNING: exception while reading configuration: %s. '
'using built-in default configuration') % e)
else:
config.update(newconf)
_check_override('default_version_tag', DEFAULT_VERSION_TAG, path)
if 'default_version_tag' not in config:
config.default_version_tag = DEFAULT_VERSION_TAG
if future.utils.PY2:
        expected_docker_python_image = compute.get_image_path('moldesign_complete_py2')
    else:
        expected_docker_python_image = compute.get_image_path('moldesign_complete')
    if config.get('default_python_image', None) is None:
        config.default_python_image = expected_docker_python_image
for pkg, do_remote in list(config.run_remote.items()):
if do_remote:
try:
getattr(packages, pkg).force_remote = True
except AttributeError:
print('WARNING: unknown key "%s" in %s' % (pkg, path),
file=sys.stderr)
for pkg, do_local in list(config.run_local.items()):
if do_local:
try:
getattr(packages, pkg).run_local = True
except AttributeError:
print('WARNING: unknown key "%s" in %s' % (pkg, path),
file=sys.stderr) | 3,288 |
def init():
"""
Turns 'jpg' to reality array
Initializes x,y,result_map to some values
"""
global reality, real_coordinates, bot_center
im = Image.open('map.jpg')
reality = array(im)
# TODO Starting Point Issue
real_coordinates.append([reality.shape[1] / 2, reality.shape[0] / 2])
bot_center = (0, 0) | 3,289 |
def test_plugin_ws_url_attributes(spf, path, query, expected_url):
"""Note, this doesn't _really_ test websocket functionality very well."""
app = spf._app
test_plugin = TestPlugin()
async def handler(request):
return text('OK')
test_plugin.websocket(path)(handler)
spf.register_plugin(test_plugin)
test_client = app.test_client
request, response = test_client.get(path + '?{}'.format(query))
try:
# Sanic 20.3.0 and above
p = test_client.port
except AttributeError:
p = testing.PORT or 0
assert request.url == expected_url.format(testing.HOST, str(p))
parsed = urlparse(request.url)
assert parsed.scheme == request.scheme
assert parsed.path == request.path
assert parsed.query == request.query_string
assert parsed.netloc == request.host | 3,290 |
def less_than(x, y, force_cpu=None, cond=None, name=None):
"""
${comment}
Args:
x(Tensor): ${x_comment}.
y(Tensor): ${y_comment}.
force_cpu(${force_cpu_type}): ${force_cpu_comment}.
cond(Tensor, optional): Optional output which can be any created Tensor
that meets the requirements to store the result of *less_than*.
if cond is None, a new Tensor will be created to store the result.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
${out_comment}.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3, 4], dtype='float32')
y = paddle.to_tensor([2, 2, 1, 3], dtype='float32')
result = paddle.less_than(x, y)
print(result) # [True, False, False, False]
"""
check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
"less_than")
check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"],
"less_than")
if cond is not None:
check_type(cond, "cond", Variable, "less_than")
    if force_cpu is not None:
check_type(force_cpu, "force_cpu", bool, "less_than")
helper = LayerHelper("less_than", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
attrs = dict()
if force_cpu is not None:
attrs['force_cpu'] = force_cpu
helper.append_op(
type='less_than',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs)
return cond | 3,291 |
def get_page_namespace(url_response):
"""
    :type url_response: str
:rtype: int
"""
keyword = '"wgNamespaceNumber"'
text = url_response
if keyword in text:
beginning = text[text.find(keyword) + len(keyword):]
ending = beginning[:beginning.find(',')]
        ints = re.findall(r'\d+', ending)
if len(ints) > 0:
return int(ints[0]) | 3,292 |
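A usage sketch with a fabricated fragment of a MediaWiki page response (the function relies on `re` being imported):
import re

sample = 'var cfg = {"wgNamespaceNumber":0,"wgPageName":"Main_Page"};'
print(get_page_namespace(sample))  # 0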
def batch_to_seq(h, nbatch, nsteps, flat=False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
h = x.reshape([-1, *x.shape[2:]]))
"""
if flat:
h = tf.reshape(h, [nsteps, nbatch])
else:
h = tf.reshape(h, [nsteps, nbatch, -1])
return [tf.squeeze(v, [0]) for v in tf.split(axis=0, num_or_size_splits=nsteps, value=h)] | 3,293 |
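A usage sketch with time-major dummy data; shapes are illustrative:
import tensorflow as tf

h = tf.random.normal([5 * 2, 8])           # nsteps=5, nbatch=2, 8 features
seq = batch_to_seq(h, nbatch=2, nsteps=5)
print(len(seq), seq[0].shape)              # 5 tensors, each of shape (2, 8)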
def debug(*args,**kwargs):
"""A super easy way to visualize Klamp't items.
The argument list can be a list of Klamp't items, and can also include
strings or dicts. If a string precedes an item, then it will be labeled
by the string. If a dict follows an item, the dict will specify
attributes for the item. It can also contain the 'animation' key, in
which case it should contain a Trajectory animating the item.
Keyword arguments may include:
- title: the window title
- animation: if only one item is given, sets a looping animation
- centerCamera: the name of the item that the camera should look at, or
True to center on the whole scene.
- followCamera: the name of the item that the camera will follow if
animated, or None (default).
- dialog: True if a dialog should be shown (default), False if a standard
show() should be used.
- anything else: Treated as named klamp't item.
"""
global _backend
_init()
oldWindow = getWindow()
if oldWindow is None:
oldWindow = 0
myWindow = createWindow()
nextName = None
lastName = None
itemcount = 0
if 'world' in kwargs:
add('world',kwargs['world'])
del kwargs['world']
animationDuration = 0
for i,arg in enumerate(args):
if isinstance(arg,str):
nextName = arg
elif isinstance(arg,dict):
if lastName is None:
warnings.warn("vis.debug(): dict of attributes must follow an object")
continue
for (k,v) in arg.items():
if k == 'animation':
animate(lastName,v)
animationDuration = max(animationDuration,v.endTime())
else:
try:
setAttribute(lastName,k,v)
except Exception:
warnings.warn("vis.debug(): Couldn't set attribute {} of item {}".format(k,lastName))
else:
label = None
if nextName is None:
name = None
if hasattr(arg,'getName'):
try:
name = arg.getName()
except Exception:
pass
if hasattr(arg,'name') and isinstance(arg.name,str):
name = arg.name
if name is None:
try:
type = types.objectToTypes(arg)
if isinstance(type,list):
type = type[0]
name = type + '['+str(itemcount)+']'
except ValueError:
name = 'item['+str(itemcount)+']'
else:
name = nextName
label = name
add(name,arg)
itemcount += 1
lastName = name
nextName = None
title = None
doDialog = True
centerCamera = None
followCameraItem = None
animation = None
for k,v in kwargs.items():
if k=='title':
title = v
elif k=='world':
pass
elif k=='dialog':
doDialog = v
elif k=='animation':
animation = v
elif k=='followCamera':
followCameraItem = v
elif k=='centerCamera':
centerCamera = v
else:
add(k,v)
lastName = k
itemcount += 1
if title is not None:
setWindowTitle(title)
else:
setWindowTitle("Klampt debugging: "+','.join(scene().items.keys()))
if animation is not None:
animate(lastName,animation)
if centerCamera is True:
autoFitCamera(rotate=False)
elif centerCamera:
if isinstance(centerCamera,int):
centerCamera = 'item['+str(centerCamera)+']'
elif not isinstance(centerCamera,(str,tuple)):
centerCamera = getItemName(centerCamera)
if centerCamera is None:
warnings.warn("vis.debug(): could not center camera, invalid object name")
else:
vp = getViewport()
try:
autoFitViewport(vp,[scene().getItem(centerCamera)],rotate=False)
setViewport(vp)
except Exception:
warnings.warn("vis.debug(): Centering camera failed")
import traceback
traceback.print_exc()
if followCameraItem is not None:
if followCameraItem is True:
followCamera(lastName,center=True)
else:
if not isinstance(followCameraItem,(str,tuple)):
followCameraItem = getItemName(followCameraItem)
if followCameraItem is None:
warnings.warn("vis.debug(): could not follow camera, invalid object name")
followCamera(followCameraItem,center=True)
if _backend == 'HTML':
#dump out the animation
if animation is not None:
animationDuration = max(animationDuration,animation.endTime())
if animationDuration > 0:
dt = 1.0/30.0
t = 0
while t < animationDuration:
stepAnimation(dt)
t += dt
show()
setWindow(oldWindow)
elif _backend == 'IPython':
#setup a Playback widget from the animation
if animation is not None:
animationDuration = max(animationDuration,animation.endTime())
if animationDuration > 0:
framerate = 30
my_scene = scene()
def advance():
my_scene.stepAnimation(1.0/framerate)
my_scene.update()
def reset():
my_scene.animationTime(0)
from .ipython.widgets import Playback
playback = Playback(nativeWindow(),advance=advance,reset=reset,maxframes=int(animationDuration*framerate),framerate=framerate)
from IPython.display import display
display(playback)
show()
else:
if doDialog:
dialog()
setWindow(oldWindow)
else:
show()
#open ended... | 3,294 |
def cross_validation_visualization_due(params, mse_tr, mse_te, param2, tr2, te2, params_name='', prname2='', title='',
error_name=''):
"""visualization the curves of mse_tr and mse_te."""
plt.semilogx(params, mse_tr, marker=".", color='r', label='train error ' + params_name, linestyle='solid')
plt.semilogx(params, mse_te, marker=".", color='r', label='test error ' + params_name, linestyle='dashed')
plt.semilogx(param2, tr2, marker=".", color='b', label='train error ' + prname2, linestyle='solid')
plt.semilogx(param2, te2, marker=".", color='b', label='test error ' + prname2, linestyle='dashed')
plt.xlabel("Parameters: " + params_name + " " + prname2)
plt.ylabel("Error: " + error_name)
plt.title("cross validation " + title)
plt.legend(loc=2)
plt.grid(True)
plt.savefig(get_plot_path("cross_validation_" + title))
plt.show() | 3,295 |
def get_site_camera_data(site_no):
"""An orchestration method that fetches camera data and returns the site dictionary"""
json_raw = get_json_camera_data()
camera = json_raw_to_dictionary(json_raw)
return find_site_in_cameras(site_no, camera) | 3,296 |
def get_args():
"""
Get arguments to the tool with argparse
:return: The arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("filename", action='store',
help='.xyz file(s) with optimised geometries from which to make .top and .gro files', nargs="+")
parser.add_argument('-id', type=str, default='AAA',
help="Three letter name of the residue/molecule e.g LYS")
parser.add_argument('-c', type=int, default=0,
help="Charge of the molecule Default: %(default)s")
parser.add_argument('-m', type=int, default=1,
help="Multiplicity of the molecule. Default: %(default)s")
parser.add_argument('-notrash', action='store_true', default=False,
help="Don't trash all the output files. Only .gro and .top will be left by default")
return parser.parse_args() | 3,297 |
def test_del_invite_null_email(client):
"""Super admin deletes invite without specifying email."""
response = client.delete(
tests.DDSEndpoint.USER_DELETE,
headers=tests.UserAuth(tests.USER_CREDENTIALS["superadmin"]).token(client),
json={"email": None, "is_invite": True},
)
assert response.status_code == http.HTTPStatus.BAD_REQUEST
assert response.json.get("email").get("message") == "The email cannot be null." | 3,298 |
def find_contam(df, contaminant_prevalence=0.5, use_mad_filter=False):
"""Flag taxa that occur in too many samples."""
taxa_counts = {}
for taxa in df['taxa_name']:
taxa_counts[taxa] = 1 + taxa_counts.get(taxa, 0)
thresh = max(2, contaminant_prevalence * len(set(df['sample_name'])))
contaminants = {taxa for taxa, count in taxa_counts.items() if count >= thresh}
if not use_mad_filter or df.shape[0] <= 2:
return df[~df['taxa_name'].isin(contaminants)]
return median_filter(df, contaminants) | 3,299 |
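A small usage sketch with a toy abundance table; with the default prevalence of 0.5, taxon 'A' (seen in all three samples) is flagged and dropped:
import pandas as pd

df = pd.DataFrame({
    'sample_name': ['s1', 's1', 's2', 's2', 's3'],
    'taxa_name':   ['A',  'B',  'A',  'C',  'A'],
})
print(find_contam(df))  # only the 'B' and 'C' rows remain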