def has_level_or_node(level: int, *auth_nodes: str) -> Rule:
"""
:param level: the group permission level required
:param auth_nodes: the permission nodes required
:return: True if the group's permission level is at least the required level or the permission node is granted; a node tagged as deny is rejected
"""
async def _has_level_or_node(bot: Bot, event: Event, state: T_State) -> bool:
auth_node = '.'.join(auth_nodes)
detail_type = event.dict().get(f'{event.get_type()}_type')
group_id = event.dict().get('group_id')
user_id = event.dict().get('user_id')
# level check
if detail_type != 'group':
level_checker = False
else:
level_res = await DBGroup(group_id=group_id).permission_level()
if level_res.result >= level:
level_checker = True
else:
level_checker = False
# node check
if detail_type == 'private':
user_auth = DBAuth(auth_id=user_id, auth_type='user', auth_node=auth_node)
user_tag_res = await user_auth.tags_info()
allow_tag = user_tag_res.result[0]
deny_tag = user_tag_res.result[1]
elif detail_type == 'group':
group_auth = DBAuth(auth_id=group_id, auth_type='group', auth_node=auth_node)
group_tag_res = await group_auth.tags_info()
allow_tag = group_tag_res.result[0]
deny_tag = group_tag_res.result[1]
else:
allow_tag = 0
deny_tag = 0
if allow_tag == 1 and deny_tag == 0:
return True
elif allow_tag == -2 and deny_tag == -2:
return level_checker
else:
return False
return Rule(_has_level_or_node)
def solve_problem(problem, max_iter_num=MAX_ITER_NUM, max_iter_num_without_adding=MAX_ITER_NUM_WITHOUT_ADDITIONS, iter_num_to_revert_removal=ITER_NUM_TO_REVERT_REMOVAL, remove_prob=ITEM_REMOVAL_PROBABILITY, consec_remove_prob=CONSECUTIVE_ITEM_REMOVAL_PROBABILITY, ignore_removed_item_prob=IGNORE_REMOVED_ITEM_PROBABILITY, modify_prob=PLACEMENT_MODIFICATION_PROBABILITY, calculate_times=False, return_value_evolution=False):
"""Find and return a solution to the passed problem, using a reversible strategy"""
# create an initial solution with no item placed in the container
solution = Solution(problem)
# determine the bounds of the container
min_x, min_y, max_x, max_y = get_bounds(problem.container.shape)
start_time = 0
sort_time = 0
item_discarding_time = 0
item_selection_time = 0
addition_time = 0
removal_time = 0
modification_time = 0
value_evolution_time = 0
if calculate_times:
start_time = time.time()
if return_value_evolution:
value_evolution = list()
else:
value_evolution = None
if calculate_times:
value_evolution_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# sort items by weight, to speed up their discarding (when they would cause the capacity to be exceeded)
items_by_weight = sorted(list(problem.items.items()), key=lambda index_item_tuple: index_item_tuple[1].weight)
if calculate_times:
sort_time += get_time_since(start_time)
iter_count_since_addition = 0
iter_count_since_removal = 0
solution_before_removal = None
if calculate_times:
start_time = time.time()
# discard the items that would make the capacity of the container to be exceeded
items_by_weight = items_by_weight[:get_index_after_weight_limit(items_by_weight, problem.container.max_weight)]
ignored_item_index = -1
if calculate_times:
item_discarding_time += get_time_since(start_time)
# placements can only be possible with capacity and valid items
if problem.container.max_weight and items_by_weight:
# try to add items to the container, for a maximum number of iterations
for i in range(max_iter_num):
if calculate_times:
start_time = time.time()
# perform a random choice of the next item to try to place
list_index, item_index = select_item(items_by_weight)
if calculate_times:
item_selection_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# try to add the item in a random position and with a random rotation; if it is valid, remove the item from the pending list
if solution.add_item(item_index, (random.uniform(min_x, max_x), random.uniform(min_y, max_y)), random.uniform(0, 360)):
if calculate_times:
addition_time += get_time_since(start_time)
# find the weight that can still be added
remaining_weight = problem.container.max_weight - solution.weight
# stop early if the capacity has been exactly reached
if not remaining_weight:
break
# remove the placed item from the list of pending items
items_by_weight.pop(list_index)
if calculate_times:
start_time = time.time()
# discard the items that would make the capacity of the container to be exceeded
items_by_weight = items_by_weight[:get_index_after_weight_limit(items_by_weight, remaining_weight)]
if calculate_times:
item_discarding_time += get_time_since(start_time)
# stop early if it is not possible to place more items, because all have been placed or all the items outside would cause the capacity to be exceeded
if not items_by_weight:
break
# reset the potential convergence counter, since an item has been added
iter_count_since_addition = 0
else:
if calculate_times:
addition_time += get_time_since(start_time)
# register the fact of being unable to place an item this iteration
iter_count_since_addition += 1
# stop early if there have been too many iterations without changes
if iter_count_since_addition == max_iter_num_without_adding:
break
if calculate_times:
start_time = time.time()
# if there are items in the container, try to remove an item with a certain probability (different if there was a recent removal)
if solution.weight > 0 and random.uniform(0., 1.) < (consec_remove_prob if solution_before_removal else remove_prob):
# if there is no solution prior to a removal with pending re-examination
if not solution_before_removal:
# save the current solution before removing, in case it needs to be restored later
solution_before_removal = copy.deepcopy(solution)
# reset the counter of iterations since removal, to avoid reverting earlier than needed
iter_count_since_removal = 0
# get the index of the removed item, which is randomly chosen
removed_index = solution.remove_random_item()
# with a certain probability, only if not ignoring any item yet, ignore placing again the removed item until the operation gets reverted or permanently accepted
if ignored_item_index < 0 and items_by_weight and random.uniform(0., 1.) < ignore_removed_item_prob:
ignored_item_index = removed_index
# otherwise, add the removed item to the weight-sorted list of pending-to-add items
else:
items_by_weight.insert(get_index_after_weight_limit(items_by_weight, problem.items[removed_index].weight), (removed_index, problem.items[removed_index]))
# if there is a recent removal to be confirmed or discarded after some time
if solution_before_removal:
# re-examine a removal after a certain number of iterations
if iter_count_since_removal == iter_num_to_revert_removal:
# if the value in the container has improved since removal, accept the operation in a definitive way
if solution.value > solution_before_removal.value:
# if an item had been ignored, make it available for placement again
if ignored_item_index >= 0:
items_by_weight.insert(get_index_after_weight_limit(items_by_weight, problem.items[ignored_item_index].weight), (ignored_item_index, problem.items[ignored_item_index]))
# otherwise, revert the solution to the pre-removal state
else:
solution = solution_before_removal
# after reverting a removal, have some margin to try to add items
iter_count_since_addition = 0
# reset removal data
solution_before_removal = None
iter_count_since_removal = 0
ignored_item_index = -1
# the check will be done after more iterations
else:
iter_count_since_removal += 1
if calculate_times:
removal_time += get_time_since(start_time)
if calculate_times:
start_time = time.time()
# if there are still items in the container (maybe there was a removal), modify existing placements with a certain probability
if solution.weight > 0 and random.uniform(0., 1.) < modify_prob:
# perform a random choice of the item to try to affect
_, item_index = select_item(items_by_weight)
# move to a random position of the container with a probability of 50%
if random.uniform(0., 1.) < 0.5:
solution.move_item_to(item_index, (random.uniform(min_x, max_x), random.uniform(min_y, max_y)))
# otherwise, perform a random rotation
else:
solution.rotate_item_to(item_index, random.uniform(0, 360))
if calculate_times:
modification_time += get_time_since(start_time)
if return_value_evolution:
if calculate_times:
start_time = time.time()
value_evolution.append(solution.value)
if calculate_times:
value_evolution_time += get_time_since(start_time)
# in the end, revert the last unconfirmed removal if it did not improve the container's value
if solution_before_removal and solution.value < solution_before_removal.value:
solution = solution_before_removal
if return_value_evolution:
if calculate_times:
start_time = time.time()
value_evolution[-1] = solution.value
if calculate_times:
value_evolution_time += get_time_since(start_time)
# encapsulate all times informatively in a dictionary
if calculate_times:
approx_total_time = sort_time + item_selection_time + item_discarding_time + addition_time + removal_time + modification_time + value_evolution_time
time_dict = {"Weight-sort": (sort_time, sort_time / approx_total_time), "Stochastic item selection": (item_selection_time, item_selection_time / approx_total_time), "Item discarding": (item_discarding_time, item_discarding_time / approx_total_time), "Addition (with geometric validation)": (addition_time, addition_time / approx_total_time), "Removal and reverting-removal": (removal_time, removal_time / approx_total_time), "Placement modification (with geometric validation)": (modification_time, modification_time / approx_total_time), "Keeping value of each iteration": (value_evolution_time, value_evolution_time / approx_total_time)}
if return_value_evolution:
return solution, time_dict, value_evolution
return solution, time_dict
if return_value_evolution:
return solution, value_evolution
return solution
def test_task_types() -> None:
"""
mypy should find type errors related to redun task calls.
"""
workflow_file = get_test_file("test_data/typing/workflow_fail.py.txt")
stdout, stderr, ret_code = api.run(
[
"--show-traceback",
workflow_file,
"redun",
]
)
print(stdout)
assert ret_code == 1
# Parse found type check errors.
stdout_lines = stdout.split("\n")[:-2]
found_errors = {line.split(":", 2)[1] for line in stdout_lines}
# Get lines with expected errors.
with open(workflow_file) as infile:
expected_errors = {str(i) for i, line in enumerate(infile, 1) if "ERROR" in line}
assert found_errors == expected_errors
def write_config(config):
"""Writes contents of configparser to properties.conf"""
with open(__file__.replace('core.py', 'properties.conf'), 'w') as configfile:
config.write(configfile)
def _pil_apply_edit_steps_mask(image, mask, edit_steps, inplace=False):
"""
Apply edit steps from unmasking method on a PIL image.
Args:
image (PIL.Image): The input image.
mask (Union[int, tuple[int, int, int], PIL.Image]): The mask to apply on the image, could be a single grey
scale intensity [0, 255], an RGB tuple or a PIL Image.
edit_steps (list[EditStep]): Edit steps to be drawn.
inplace (bool): True to draw on the input image, otherwise draw on a cloned image.
Returns:
PIL.Image, the result image.
"""
if not inplace:
image = image.copy()
if isinstance(mask, PIL.Image.Image):
for step in edit_steps:
box = step.to_coord_box()
cropped = mask.crop(box)
image.paste(cropped, box=box)
else:
if isinstance(mask, int):
mask = (mask, mask, mask)
draw = ImageDraw.Draw(image)
for step in edit_steps:
draw.rectangle(step.to_coord_box(), fill=mask)
return image
def do_let_form(expressions, env):
"""Evaluate a let form."""
check_form(expressions, 2)
let_env = make_let_frame(expressions.first, env)
return eval_all(expressions.second, let_env)
def bubble(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a bubble plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'size':np.random.randint(1,100,n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
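# A minimal usage sketch for `bubble`. The label helper `getName` is not defined in this
# snippet, so a simple stand-in generator is assumed here purely for illustration.
import numpy as np
import pandas as pd

def getName(n, mode=None):
    return ['item{}'.format(i) for i in range(n)]

df_bubble = bubble(n_categories=2, n=3)
print(df_bubble.shape)  # (6, 5): columns x, y, size, text, categories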
def get_coherence(model, token_lists, measure='c_v'):
"""
Get model coherence from gensim.models.coherencemodel
:param model: Topic_Model object
:param token_lists: token lists of docs
:param topics: topics as top words
:param measure: coherence metrics
:return: coherence score
"""
if model.method == 'LDA':
cm = CoherenceModel(model=model.ldamodel, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
else:
topics = get_topic_words(token_lists, model.cluster_model.labels_)
cm = CoherenceModel(topics=topics, texts=token_lists, corpus=model.corpus, dictionary=model.dictionary,
coherence=measure)
return cm.get_coherence()
def run_vscode_command(
command_id: str,
*args: str,
wait_for_finish: bool = False,
return_command_output: bool = False,
):
"""Runs a VSCode command, using command server if available
Args:
command_id (str): The ID of the VSCode command to run
wait_for_finish (bool, optional): Whether to wait for the command to finish before returning. Defaults to False.
return_command_output (bool, optional): Whether to return the output of the command. Defaults to False.
Raises:
Exception: If there is an issue with the file-based communication, or
VSCode raises an exception
Returns:
Object: The response from the command, if requested.
"""
# NB: This is a hack to work around the fact that talon doesn't support
# variable argument lists
args = [x for x in args if x is not NotSet]
communication_dir_path = get_communication_dir_path()
if not communication_dir_path.exists():
if args or return_command_output:
raise Exception("Must use command-server extension for advanced commands")
print("Communication dir not found; falling back to command palette")
run_vscode_command_by_command_palette(command_id)
return
request_path = communication_dir_path / "request.json"
response_path = communication_dir_path / "response.json"
# Generate uuid that will be mirrored back to us by command server for
# sanity checking
uuid = str(uuid4())
request = Request(
command_id=command_id,
args=args,
wait_for_finish=wait_for_finish,
return_command_output=return_command_output,
uuid=uuid,
)
# First, write the request to the request file, which makes us the sole
# owner because all other processes will try to open it with 'x'
write_request(request, request_path)
# We clear the response file if it does exist, though it shouldn't
if response_path.exists():
print("WARNING: Found old response file")
robust_unlink(response_path)
# Then, perform keystroke telling VSCode to execute the command in the
# request file. Because only the active VSCode instance will accept
# keypresses, we can be sure that the active VSCode instance will be the
# one to execute the command.
actions.user.trigger_command_server_command_execution()
try:
decoded_contents = read_json_with_timeout(response_path)
finally:
# NB: We remove response file first because we want to do this while we
# still own the request file
robust_unlink(response_path)
robust_unlink(request_path)
if decoded_contents["uuid"] != uuid:
raise Exception("uuids did not match")
for warning in decoded_contents["warnings"]:
print(f"WARNING: {warning}")
if decoded_contents["error"] is not None:
raise Exception(decoded_contents["error"])
actions.sleep("25ms")
return decoded_contents["returnValue"]
def clc_prepare(reference, outdir, source):
"""
create a CLC subset resampled to a reference image.
Parameters
----------
reference: str
the reference file with the target CRS and extent
outdir: str
the directory to write the new file to;
new files are named clc{index}.tif, e.g. clc1.tif.
source: str
the original product to be subsetted
Returns
-------
str
the name of the file written to `outdir`
"""
with Raster(reference) as ras:
xRes, yRes = ras.res
epsg = ras.epsg
ext = ras.extent
#########################################################################
warp_opts = {'options': ['-q'], 'format': 'GTiff', 'multithread': True,
'dstNodata': -99, 'resampleAlg': 'mode'}
if not os.path.isdir(outdir):
os.makedirs(outdir)
clc_subs = finder(outdir, ['clc[0-9].tif'], regex=True)
match = False
if len(clc_subs) > 0:
for j, sub in enumerate(clc_subs):
with Raster(sub) as ras:
if ras.extent == ext:
clc_sub = sub
match = True
if not match:
clc_sub = os.path.join(outdir, 'clc{}.tif'.format(len(clc_subs)))
print('creating', clc_sub)
warp_opts['dstSRS'] = 'EPSG:{}'.format(epsg)
warp_opts['xRes'] = xRes
warp_opts['yRes'] = yRes
warp_opts['outputBounds'] = (ext['xmin'], ext['ymin'],
ext['xmax'], ext['ymax'])
gdalwarp(src=source, dst=clc_sub, options=warp_opts)
return clc_sub
def find_calibrations_for_sensor(
sensor_id: str,
folder: Optional[path_t] = None,
recursive: bool = True,
filter_cal_type: Optional[str] = None,
custom_validator: Optional[Callable[["CalibrationInfo"], bool]] = None,
ignore_file_not_found: Optional[bool] = False,
) -> List[Path]:
"""Find possible calibration files based on the filename.
As this only checks the filenames, this might return false positives depending on your folder structure and naming.
Parameters
----------
sensor_id :
The 4 letter/digit identifier of a sensor_type, as obtained from
:py:meth:`nilspodlib.header.Header.sensor_id`
folder :
Basepath of the folder to search. If None, tries to find a default calibration
recursive :
If the folder should be searched recursive or not.
filter_cal_type :
Whether only files obtain with a certain calibration type should be found.
This will look for the `CalType` inside the json file and could cause performance issues with many calibration
files.
If None, all found files will be returned.
For possible values, see the `imucal` library.
custom_validator :
A custom function that will be called with the CalibrationInfo object of each potential match.
This needs to load the json file of each match and could cause performance issues with many calibration files.
ignore_file_not_found :
If True this function will not raise an error, but rather return an empty list, if no
calibration files were found for the specific sensor_type.
Returns
-------
list_of_cals
List of paths pointing to available calibration objects.
"""
if not folder:
folder = _check_ref_cal_folder()
from imucal.management import find_calibration_info_for_sensor # noqa: F401
return find_calibration_info_for_sensor(
sensor_id=sensor_id,
folder=folder,
recursive=recursive,
filter_cal_type=filter_cal_type,
custom_validator=custom_validator,
ignore_file_not_found=ignore_file_not_found,
)
def reward_displacement(navenv):
""" Reward = distance to previous position"""
r = dist(navenv.current_pos, navenv.old_pos)
return r
def MakeLinuxFirmware(save=True, **kwargs):
"""Create and return a LinuxFirmware for test."""
defaults = {
'manufacturer': 'Lonovo',
'serial': 'blah',
'password': '123456789',
'machine_uuid': str(uuid.uuid4()).upper(),
'owner': 'someone',
'asset_tags': ['12345'],
'hostname': 'zerocool.example.com',
}
defaults.update(kwargs)
entity = firmware.LinuxFirmwarePassword(**defaults)
if save:
entity.put()
return entity
def get_unity_filesystem_parameters():
"""This method provide parameters required for the ansible filesystem
module on Unity"""
return dict(
filesystem_name=dict(required=False, type='str'),
filesystem_id=dict(required=False, type='str'),
nas_server_name=dict(required=False, type='str'),
nas_server_id=dict(required=False, type='str'),
description=dict(required=False, type='str'),
pool_name=dict(required=False, type='str'),
pool_id=dict(required=False, type='str'),
size=dict(required=False, type='int'),
cap_unit=dict(required=False, type='str', choices=['GB', 'TB']),
is_thin=dict(required=False, type='bool'),
data_reduction=dict(required=False, type='bool'),
supported_protocols=dict(required=False, type='str',
choices=['NFS', 'CIFS', 'MULTIPROTOCOL']),
smb_properties=dict(type='dict', options=dict(
is_smb_sync_writes_enabled=dict(type='bool'),
is_smb_notify_on_access_enabled=dict(type='bool'),
is_smb_op_locks_enabled=dict(type='bool'),
is_smb_notify_on_write_enabled=dict(type='bool'),
smb_notify_on_change_dir_depth=dict(type='int')
)),
access_policy=dict(required=False, type='str',
choices=['NATIVE', 'UNIX', 'WINDOWS']),
locking_policy=dict(required=False, type='str',
choices=['ADVISORY', 'MANDATORY']),
tiering_policy=dict(required=False, type='str', choices=[
'AUTOTIER_HIGH', 'AUTOTIER', 'HIGHEST', 'LOWEST']),
snap_schedule_name=dict(required=False, type='str'),
snap_schedule_id=dict(required=False, type='str'),
quota_config=dict(required=False, type='dict', options=dict(
grace_period=dict(required=False, type='int'),
grace_period_unit=dict(required=False, type='str', choices=['minutes', 'hours', 'days']),
default_hard_limit=dict(required=False, type='int'),
default_soft_limit=dict(required=False, type='int'),
is_user_quota_enabled=dict(required=False, type='bool'),
quota_policy=dict(required=False, type='str', choices=['FILE_SIZE', 'BLOCKS']),
cap_unit=dict(required=False, type='str', choices=['MB', 'GB', 'TB']),
), mutually_exclusive=[['is_user_quota_enabled', 'quota_policy']]),
state=dict(required=True, type='str', choices=['present', 'absent'])
)
def get_time_difference(row, start_col, end_col, start_format, end_format, unit='days'):
"""
Returns a Series object of days
Unit can be 'days' or 'years'
"""
start_date = row[start_col]
end_date = row[end_col]
if pd.isnull(start_date) or pd.isnull(end_date):
return np.nan
else:
time_delta = get_time_delta(start_date, end_date, start_format, end_format)
if unit == 'days':
return time_delta.days
elif unit == 'years':
return float(time_delta.days)/365
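# Usage sketch for `get_time_difference`. The helper `get_time_delta` and the column
# names are not given in this snippet, so a datetime-based stand-in and hypothetical
# columns are assumed here.
from datetime import datetime
import numpy as np
import pandas as pd

def get_time_delta(start_date, end_date, start_format, end_format):
    # stand-in: parse both dates and return a datetime.timedelta
    return datetime.strptime(end_date, end_format) - datetime.strptime(start_date, start_format)

row = pd.Series({"admit_date": "2020-01-01", "discharge_date": "2020-01-11"})
print(get_time_difference(row, "admit_date", "discharge_date", "%Y-%m-%d", "%Y-%m-%d"))  # 10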
def main(config):
"""Initialize model, data loaders and trainer based on config file and run training."""
model = config.init_model()
data_loaders = config.init_data_loaders(model)
trainer = config.init_trainer(model, data_loaders["train"], data_loaders["dev"])
trainer.train()
def get_account(account_name, password):
"""Displays account data from the wallet.
--- Definitions ---
{"name": "account_name", "prompt": "Alias of account", "default": "Myaccount"}
{"name": "password", "prompt": "Password to decrypt private key", "default": "Mypassword"}
"""
db = get_wallet_db()
account = db.execute(
'SELECT * FROM testaccount WHERE name = ?', (account_name,)
).fetchone()
if account is None:
return None
private_key = Account.decrypt(account["privatekey"], password)
acc = Account.from_key(private_key)
return acc
def invert(img):
"""
Function to invert colors of an image
"""
r, g, b, a = colorsys_getRGBA(img) # Get r, g, b, a
r, g, b = 255 - r, 255 - g, 255 - b # Invert all colors
img_arr = np.dstack((r, g, b, a))
return img_arr
def spearman_kendall_test(df, item, alpha=0.05, increasing=True,
rank_in='Rank',
category_in='category',
dataset_in='dataset',
userid_in='userid'
):
"""
Do spearman's and kendall's test for the increasing or decreasing trend.
:param df: dataframe, it should include both column 'item' and column 'ranking'
:param item: string, column of target's label
:param rank_in:string, column of rank's label
:param category_in: string, column of category's label
:param userid_in: string, column of userid's label
:param dataset_in: string, column of dataset's label
:param alpha: significant level
:param increasing: bool, test for increasing trend or decreasing trend
:return: dataframe filled in all test results
"""
category = sorted(list(set(df[category_in].tolist())))
dataset = sorted(list(set(df[dataset_in].tolist())))
test_result = []
for ds in dataset:
for cat in category:
count_sm, count_kd = 0, 0
df_temp = df[(df[dataset_in] == ds) & (df[category_in] == cat)]
ur_ds = df_temp[userid_in].unique().tolist()
for user in ur_ds:
rank = df_temp[df_temp[userid_in] == user][rank_in].tolist()
item_specify = df_temp[df_temp[userid_in] == user][item].tolist()
coef_sm, p_sm = spearmanr(rank, item_specify)
coef_kd, p_kd = kendalltau(rank, item_specify)
if increasing:
if (coef_sm > 0) & (p_sm < alpha):
count_sm += 1
if (coef_kd > 0) & (p_kd < alpha):
count_kd += 1
else:
if (coef_sm < 0) & (p_sm < alpha):
count_sm += 1
if (coef_kd < 0) & (p_kd < alpha):
count_kd += 1
test_result.append([ds, cat,
count_sm, count_sm / len(ur_ds),
count_kd, count_kd / len(ur_ds),
len(ur_ds)]
)
stats_test = pd.DataFrame(test_result, columns=[dataset_in,
category_in,
'SpN', 'SpP', 'Kn', 'Kp',
'total']
).sort_values([dataset_in, category_in])
return stats_test
def test_restore_from_minimized(session):
"""
12. If the visibility state of the top-level browsing context's
active document is hidden, restore the window.
[...]
To restore the window, given an operating system level window with
an associated top-level browsing context, run implementation-specific
steps to restore or unhide the window to the visible screen. Do not
return from this operation until the visibility state of the top-level
browsing context's active document has reached the visible state,
or until the operation times out.
"""
session.window.minimize()
assert session.execute_script("return document.hidden") is True
response = set_window_rect(session, {"width": 450, "height": 450})
value = assert_success(response)
assert value["width"] == 450
assert value["height"] == 450
assert session.execute_script("return document.hidden") is False
def circle(*args, **kwargs):
"""
The circle command creates a circle or partial circle (arc).
Returns: `string[]` Object name and node name
"""
pass
def array2string(array, _depth=0):
"""
Recursively create an initializer-list-style string from an iterable with
multiple dimensions.
Args:
array (iterable): input iterable which is expected to have elements that
can be converted to strings with `str()`.
_depth (int): variable tracking the current recursion depth
"""
if hasattr(array, 'name'):
return array.name
elif not hasattr(array, '__len__'):
return float_nsf(array)
else:
string = ''
array_len = len(array)
for i in range(array_len):
string += array2string(array[i], _depth=_depth + 1) + ', '
if (array_len > 1) or (_depth == 0) :
return '{' + string[0:-2] + '}'
else:
return string[0:-2]
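# Usage sketch for `array2string`. The scalar formatter `float_nsf` is not defined in
# this snippet, so a fixed-precision stand-in is assumed for illustration.
def float_nsf(value):
    return '{:.6g}'.format(float(value))

print(array2string([[1, 2], [3, 4]]))  # {{1, 2}, {3, 4}}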
def do_appl_error(row_id, text):
"""If the line is a APPL-ERROR then add it to the list."""
if text.find('APPL-ERROR') != -1:
add_appl_error = ApplError(row_id)
appl_list.append(add_appl_error)
def rail_help_wrapper(prog):
""" So formatter_class's max_help_position can be changed. """
return RailHelpFormatter(prog, max_help_position=40)
def evaluate(args):
"""Function to predict for a single image or folder of images"""
val_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
path_file=args.val_file,
is_train=False,
config=args)
val_loader = DataLoader(val_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False)
print(f"-> Loading model from {args.pretrained_weights}")
encoder_path = os.path.join(args.pretrained_weights, "encoder.pth")
semantic_decoder_path = os.path.join(args.pretrained_weights, "semantic.pth")
print("=> Loading pretrained encoder")
# --- Init model ---
encoder = ResnetEncoder(num_layers=args.network_layers, pretrained=False).to(args.device)
loaded_dict_enc = torch.load(encoder_path, map_location=args.device)
filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()}
encoder.load_state_dict(filtered_dict_enc)
encoder.eval()
print("=> Loading pretrained decoder")
decoder = SemanticDecoder(encoder.num_ch_enc, n_classes=args.semantic_num_classes).to(args.device)
loaded_dict = torch.load(semantic_decoder_path, map_location=args.device)
decoder.load_state_dict(loaded_dict)
decoder.eval()
metric = utils.IoU(args.semantic_num_classes, args.dataset, ignore_index=None)
acc_meter = AverageMeter()
for inputs in tqdm(val_loader):
for key, ipt in inputs.items():
inputs[key] = ipt.to(args.device)
features = encoder(inputs["color", 0, 0])
outputs = decoder(features)
_, predictions = torch.max(outputs["semantic", 0].data, 1)
metric.add(predictions, inputs["semantic_labels", 0, 0])
acc, pix = AverageMeter.accuracy(predictions, inputs["semantic_labels", 0, 0])
acc_meter.update(acc, pix)
class_iou, mean_iou = metric.value()
print(f"Mean_IoU: {mean_iou}")
for k, v in class_iou.items():
print(f"{k}: {v:.3f}")
print(f"Accuracy: {acc_meter.average() * 100}")
def main(path_config_yaml):
"""
Run the whole data-preparation process for the specified folder.
"""
with open(path_config_yaml, 'r') as fy:
config = yaml.load(fy, Loader=yaml.FullLoader)
out_dir = expanduser(config['out_dir'])
full_align_round_seg_files = natsorted(glob(f'{out_dir}/full_align_round_seg/*.lab'))
full_score_round_seg_files = natsorted(glob(f'{out_dir}/full_score_round_seg/*.lab'))
wav_files = natsorted(glob(f'{out_dir}/wav/*.wav', recursive=True))
# Save the full labels into the folder for the timelag models.
print('Preparing data for time-lag models')
timelag_dir = f'{out_dir}/timelag'
prepare_data_for_timelag_models(full_align_round_seg_files,
full_score_round_seg_files, timelag_dir)
# Correct the full-label offsets and save them into the folder for the duration models.
print('Preparing data for duration models')
duration_dir = f'{out_dir}/duration'
prepare_data_for_duration_models(full_align_round_seg_files, duration_dir)
# Correct the full-label offsets and save them into the folder for the acoustic models.
# Split the wav files according to the lab-file segments.
# How much margin to leave before and after each wav file.
print('Preparing data for acoustic models')
acoustic_dir = f'{out_dir}/acoustic'
prepare_data_for_acoustic_models(full_align_round_seg_files,
full_score_round_seg_files,
wav_files, acoustic_dir)
def project_dashboard(request):
"""
The function calling Project Dashboard page.
:param request:
:return:
"""
global all_vuln, \
total_web, \
all_high, \
total_network, \
all_medium, \
all_low, \
all_web_high, \
all_web_medium, \
all_network_medium, \
all_web_low, \
all_network_low, \
all_network_high
all_project = project_db.objects.all()
return render(request,
'project_dashboard.html',
{'all_project': all_project})
def getSelfRole(store):
"""
Retrieve the Role which corresponds to the user to whom the given store
belongs.
"""
return getAccountRole(store, userbase.getAccountNames(store))
def warnings_to_stdout():
""" Redirect all warnings to stdout.
"""
showwarning_orig = warnings.showwarning
def showwarning(msg, cat, fname, lno, file=None, line=0):
showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout)
warnings.showwarning = showwarning
# warnings.simplefilter('always')
def forwardCOMDQ(robot, m = 0, symbolic = False):
"""
Using Dual Quaternions, this function computes forward kinematics to the m-th center of mass, given joint positions in radians. The robot's kinematic parameters have to be set before using this function.
robot: object (robot.jointsPositions, robot.linksLengths)
m: int
"""
framesDQ, fkDQ = forwardDQ(robot, m = m, symbolic = symbolic)
# Initial conditions
framesCOMDQ = [np.array([[1], [0], [0], [0], [0], [0], [0], [0]]) if not symbolic else Matrix([1, 0, 0, 0, 0, 0, 0, 0])]
# Gets Denavit - Hartenberg Matrix
if not symbolic:
if not robot.dhParametersCOM:
comDH = dh.centersOfMass(robot)
else:
comDH = np.array(robot.dhParameters([float(q) for q in robot.jointsPositions], [float(Lcom) for Lcom in robot.centersOfMass]))
else:
comDH = robot.symbolicDHParametersCOM
i = 1
for frame in comDH[1 : , :]:
if i > m:
break
else:
if not symbolic:
# Center of Mass Homogeneous Transformation Matrix
COM = dq.leftOperator(dq.Rz(frame[0])).dot(dq.rightOperator(dq.Rx(frame[3]))).dot(dq.rightOperator(dq.Tx(frame[2]))).dot(dq.Tz(frame[1]))
# Rigid body's Dual Quaternion
B = dq.leftOperator(dq.conjugate(framesDQ[i - 1])).dot(framesDQ[i])
# Forward kinematics to Center of Mass
fkCOMDQ = dq.leftOperator(framesDQ[i]).dot(dq.rightOperator(COM)).dot(dq.conjugate(B))
else:
# Center of Mass Homogeneous Transformation Matrix
COM = dq.symbolicLeftOperator(dq.symbolicRz(frame[0])) * dq.symbolicRightOperator(dq.symbolicRx(frame[3])) * dq.symbolicRightOperator(dq.symbolicTx(frame[2])) * dq.symbolicTz(frame[1])
# Rigid body's Dual Quaternion
B = dq.symbolicLeftOperator(dq.symbolicConjugate(framesDQ[i - 1])) * framesDQ[i]
# Forward kinematics to Center of Mass
fkCOMDQ = nsimplify(simplify(dq.symbolicLeftOperator(framesDQ[i]) * dq.symbolicRightOperator(COM) * dq.symbolicConjugate(B)), tolerance = 1e-10, rational = False)
framesCOMDQ.append(fkCOMDQ)
i += 1
return framesCOMDQ, fkCOMDQ
def gen_mode():
"""Get the quiz mode the player wants."""
while True:
mode = input("如何考试?\n输入1顺序考试\n输入2乱序考试\n>>")
if mode in ("1", "2"):
return mode
else:
print()
print("非法输入,请输入\"1\"或\"2\"")
print("你不需要输入双引号")
print("--------------------------------")
def find_movers(threshold, timeframe: Timeframe, increasing=True, decreasing=False, max_price=None):
"""
Return a dataframe with row index set to ASX ticker symbols and the only column set to
the sum over all desired dates for percentage change in the stock price. A negative sum
implies a decrease, positive an increase in price over the observation period.
"""
assert threshold >= 0.0
# NB: missing values will be imputed here, for now.
cip = company_prices(all_stocks(), timeframe, fields="change_in_percent", missing_cb=None)
movements = cip.sum(axis=0)
results = movements[movements.abs() >= threshold]
print("Found {} movers before filtering: {} {}".format(len(results), increasing, decreasing))
if not increasing:
results = results.drop(results[results > 0.0].index)
if not decreasing:
results = results.drop(results[results < 0.0].index)
#print(results)
if max_price is not None:
ymd = latest_quotation_date('ANZ')
stocks_lte_max_price = [q.asx_code for q in valid_quotes_only(ymd) if q.last_price <= max_price]
results = results.filter(stocks_lte_max_price)
print("Reporting {} movers after filtering".format(len(results)))
return results
def create_pl_pruning_callback(*args, **kwargs):
"""Create PyTorchLightning Pruning Callback. Optuna Only."""
from bigdl.nano.deps.automl.hpo_api import create_optuna_pl_pruning_callback
return create_optuna_pl_pruning_callback(*args, **kwargs)
def get_all_token_volume_by_direction(chain:str, direction:str):
"""
chain: Allowed: ethereum ┃ avalanche ┃ bsc ┃ polygon ┃ arbitrum ┃ fantom ┃ harmony ┃ boba ┃ optimism ┃ moonriver ┃ aurora
direction: Allowed: in ┃ out
"""
chain = chain.lower()
direction = direction.lower()
chains = ["ethereum", "avalanche", "bsc", "polygon", "arbitrum", "fantom", "harmony", "boba", "optimism", "moonriver", "aurora"]
directions = ["in", "out"]
params_ok = False
if chain in chains and direction in directions:
params_ok = True
if params_ok:
endpoint = f"{server}/api/v1/analytics/volume/{chain}/{direction}"
data = requests.get(endpoint)
if data.status_code == 200:
return data.json()["data"]
else:
print("request failed")
else:
print("wrong parameters")
def _interactively_fix_missing_variables(project, result):
"""Return True if we need to re-prepare."""
if project.problems:
return False
if not console_utils.stdin_is_interactive():
return False
# We don't ask the user to manually enter CONDA_PREFIX
# (CondaEnvRequirement) because it's a bizarre/confusing
# thing to ask.
can_ask_about = [status
for status in result.statuses
if (not status and isinstance(status.requirement, EnvVarRequirement) and not isinstance(
status.requirement, CondaEnvRequirement))]
if can_ask_about:
print("(Use Ctrl+C to quit.)")
start_over = False
values = dict()
for status in can_ask_about:
reply = console_utils.console_input("Value for " + status.requirement.env_var + ": ",
encrypted=status.requirement.encrypted)
if reply is None:
return False # EOF
reply = reply.strip()
if reply == '':
start_over = True
break
values[status.requirement.env_var] = reply
if len(values) > 0:
status = project_ops.set_variables(project, result.env_spec_name, values.items(), result)
if status:
return True
else:
console_utils.print_status_errors(status)
return False
else:
return start_over
def build_node_descr(levels, switch=False):
"""
Produces a node description of the above binary trees
"""
num_parents = sum([2**i for i in range(levels-1)])
parents, children = tee(character_iterator(switch))
next(children)
node_descr = []
for parent_ident in islice(parents, num_parents):
node_descr.append((parent_ident, next(children), "L"))
node_descr.append((parent_ident, next(children), "R"))
return node_descr
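# Usage sketch for `build_node_descr`. `character_iterator` is not defined in this
# snippet, so a stand-in that yields 'A', 'B', 'C', ... is assumed here.
import string
from itertools import count, islice, tee

def character_iterator(switch=False):
    for i in count():
        yield string.ascii_uppercase[i % 26]

print(build_node_descr(2))  # [('A', 'B', 'L'), ('A', 'C', 'R')]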
def save_json_in_s3(json_data: dict, key: str, bucket=DEFAULT_S3_BUCKET):
"""
Saves the given JSON data as bytes using the given key.
"""
s3.put_object(
Bucket=bucket,
Key=key,
# convert JSON dict to a byte string:
Body=json.dumps(json_data).encode()
)
def random_sources(xSize, ySize, zSize, number):
""" returns a list of random positions in the grid where the sources of nutrients (blood vessels) will be """
src = []
for _ in range(number):
x = random.randint(0, xSize-1)
y = random.randint(0, ySize-1)
z = random.randint(0, zSize-1)
if (x, y, z) not in src:
src.append((x,y,z))
return src
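# Usage sketch for `random_sources`: draw up to three distinct source positions in a
# 5x5x5 grid (duplicates are skipped rather than redrawn, so fewer positions may be returned).
import random

print(random_sources(5, 5, 5, number=3))  # e.g. [(2, 4, 1), (0, 3, 3), (4, 0, 2)]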
def sqeuclidean(
x_mat: 'Tensor', y_mat: 'Tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: tensorflow array with ndim=2
:param y_mat: tensorflow array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
device = tf.device('/GPU:0') if device == 'cuda' else tf.device('/CPU:0')
with _get_tf_device(device):
return tf.reduce_sum(
(tf.expand_dims(x_mat, 1) - tf.expand_dims(y_mat, 0)) ** 2, 2
).numpy()
def get(*, db_session, tag_id: int) -> Optional[Tag]:
"""Gets a tag by its id."""
return db_session.query(Tag).filter(Tag.id == tag_id).one_or_none()
def plot_lines(axes, xdata, ydata, yerrors=None, cdata=None, cmap=None, line_spec='-o', *args, **kwargs):
"""
Plot lines on given matplotlib axes subplot
Uses matplotlib.plot or matplotlib.errorbar if yerrors is not None
:param axes: matplotlib figure or subplot axes, None uses current axes
:param xdata: array[n] data on x axis
:param ydata: list[n] of array[n] data on y axis
:param yerrors: list[m] of array[n] errors on y axis (or None)
:param cdata: list[n] of values to define line colour
:param cmap: name of colormap to generate colour variation in lines
:param line_spec: str or list[m] of str matplotlib.plot line_spec
:param args: additional arguments
:param kwargs: additional arguments
:return: output of plt.plot [line], or plt.errorbar [line, xerrors, yerrors]
"""
if axes is None:
axes = plt.gca()
nplots = len(ydata)
if xdata is None:
xdata = [range(len(y)) for y in ydata]
elif len(xdata) != nplots:
xdata = [xdata] * nplots
if yerrors is None:
yerrors = [None] * nplots
elif len(yerrors) != nplots:
yerrors = [yerrors] * nplots
if cmap is None:
cmap = 'viridis'
if cdata is None:
cdata = np.arange(nplots)
else:
cdata = np.asarray(cdata)
cnorm = cdata - cdata.min()
cnorm = cnorm / cnorm.max()
cols = plt.get_cmap(cmap)(cnorm)
line_spec = fn.liststr(line_spec)
if len(line_spec) != nplots:
line_spec = line_spec * nplots
print(axes)
print(len(xdata), xdata)
print(len(ydata), ydata)
print(len(yerrors), yerrors)
print(len(line_spec), line_spec)
print(len(cols), cols)
lines = []
for n in range(nplots):
lines += plot_line(axes, xdata[n], ydata[n], yerrors[n], line_spec[n], c=cols[n], *args, **kwargs)
return lines
def conv_binary_prevent_overflow(array, structure):
"""
Make sure structure array has great enough positive bitdepth
to be convolved with binary primary array.
Parameters
----------
array : ndarray of bool or int, 2D
Primary integer array to convolve.
Must be a binary array of only zero/False and one/True.
structure : ndarray of bool or int, 2D
Secondary, smaller integer array to convolve with `array`.
Must be a binary array of only zero/False and one/True.
Returns
-------
structure : ndarray, possible uint cast of `structure`
Either the same `structure` array or a cast of `structure`
to a uint data type with more positive bitdepth than the
input array.
"""
# Get upper bound on minimum positive bitdepth for convolution.
conv_bitdepth_pos = math.log(np.prod(structure.shape)+1, 2)
dtype_bitdepths_pos = (1, 7, 8, 15, 16, 31, 32, 63, 64)
for b in dtype_bitdepths_pos:
if conv_bitdepth_pos <= b:
conv_bitdepth_pos = b
break
# Parse input array and structure data type for bitdepth.
input_bitdepth_pos = 0
for arr in (array, structure):
arr_dtype = arr.dtype
if arr.dtype == np.bool:
arr_posbits = 1
elif np.issubdtype(arr_dtype, np.int):
arr_posbits = int(str(arr.dtype).replace('int', '')) - 1
elif np.issubdtype(arr_dtype, np.uint):
arr_posbits = int(str(arr.dtype).replace('uint', ''))
elif np.issubdtype(arr_dtype, np.floating):
arr_posbits = np.inf
else:
arr_posbits = 0
input_bitdepth_pos = max(input_bitdepth_pos, arr_posbits)
if input_bitdepth_pos == 0:
# Handle unknown data type by casting structure to
# maximum possible bitdepth.
structure = structure.astype(np.uint64)
else:
# If maximum positive bitdepth from inputs is too low,
# cast structure to minimum positive bitdepth for convolution.
if input_bitdepth_pos < conv_bitdepth_pos:
if (conv_bitdepth_pos % 2) != 0:
conv_bitdepth_pos += 1
structure = structure.astype(eval('np.uint{}'.format(conv_bitdepth_pos)))
return structure
def filter_freq_and_csq(mt: hl.MatrixTable, data_type: str, max_freq: float, least_consequence: str):
"""
Filters MatrixTable to include variants that:
1. Have a global AF <= `max_freq`
2. Have a consequence at least as severe as `least_consequence` (based on ordering from CSQ_ORDER)
:param MatrixTable mt: Input MT
:param str data_type: One of 'exomes' or 'genomes'
:param float max_freq: Max. AF to keep
:param str least_consequence: Least consequence to keep.
:return: Filtered MT
:rtype: MatrixTable
"""
vep_ht = hl.read_table(annotations_ht_path(data_type, 'vep'))
freq = hl.read_table(annotations_ht_path(data_type, 'frequencies'))
mt = mt.select_rows(
vep=vep_genes_expr(vep_ht[mt.row_key].vep, least_consequence),
af=hl.float32(freq[mt.row_key].freq[0].AF)
)
mt = mt.filter_rows(hl.is_defined(mt.vep) & (hl.len(mt.vep) > 0) & (mt.af > 0) & (mt.af <= max_freq))
mt = mt.explode_rows(mt.vep)
mt = mt.rename({'vep': 'gene_id'})
return mt
def get_regular_intervals(
pre_sfes: list,
post_sfes: list,
pre_keep_flag: bool,
post_keep_flag: bool,
) -> list:
"""
Calculates the intervals for the "regular" egg laying epoch. If pre_keep_flag,
the "regular" epoch is the pre-breakpoint region. If post_keep_flag, the
"regular" epoch is the post-breakpoint region. If both flags are True,
the whole egg-laying trajectory is considered "regular".
Args:
pre_sfes (list): list of pre region SFES
post_sfes (list): list of post region SFES
pre_keep_flag (bool): True if the pre region intervals are considered regular
post_keep_flag (bool): True if the post region intervals are considered regular
Returns:
A list of intervals considered regular
"""
reg_intervals = []
if pre_keep_flag:
pre_sfes_sec = [(x * 60 * 60) for x in pre_sfes]
pre_intervals = np.diff(pre_sfes_sec, n=1)
pre_intervals = normalize_tiny_intervals(pre_intervals)
reg_intervals.extend(pre_intervals)
if post_keep_flag:
post_sfes_sec = [(x * 60 * 60) for x in post_sfes]
post_intervals = np.diff(post_sfes_sec, n=1)
post_intervals = normalize_tiny_intervals(post_intervals)
reg_intervals.extend(post_intervals)
return reg_intervals
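# Usage sketch for `get_regular_intervals`. `normalize_tiny_intervals` is not defined in
# this snippet, so a stand-in that simply converts to plain floats is assumed here, and
# the SFES values are hypothetical times in hours.
import numpy as np

def normalize_tiny_intervals(intervals):
    return [float(x) for x in intervals]

pre_sfes = [0.0, 1.0, 2.5]
post_sfes = [3.0, 5.0]
print(get_regular_intervals(pre_sfes, post_sfes, pre_keep_flag=True, post_keep_flag=False))
# [3600.0, 5400.0] -- the pre-breakpoint intervals, in seconds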
def plot(data: np.ndarray):
"""
Convenience function for plotting radar data.
"""
fig = pl.figure(figsize=(10, 8))
wrl.vis.plot_ppi(data, fig=fig, proj="cg")
async def async_setup(hass, config):
"""Platform setup, do nothing."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=dict(config[DOMAIN])
)
)
return True
def get_new_bucket(target=None, name=None, headers=None):
"""
Get a bucket that exists and is empty.
Always recreates a bucket from scratch. This is useful to also
reset ACLs and such.
"""
if target is None:
target = targets.main.default
connection = target.connection
if name is None:
name = get_new_bucket_name()
# the only way for this to fail with a pre-existing bucket is if
# someone raced us between setup nuke_prefixed_buckets and here;
# ignore that as astronomically unlikely
bucket = connection.create_bucket(name, location=target.conf.api_name, headers=headers)
return bucket
def update_markdown(path: Union[str, pathlib.Path]) -> None:
"""Update the given markdown file."""
with open(path) as file:
lines = [line.rstrip("\n") for line in file]
assert lines[0] == "---"
idx = min(i for i, line in enumerate(lines[1:], start=1) if line == "---")
# Load the data like it is YAML
data = yaml.safe_load(StringIO("\n".join(lines[1:idx])))
repository = get_repository(data)
with open(path, "w") as file:
print("---", file=file)
for line in lines[1:idx]:
print(line, file=file)
if repository:
print("repository:", repository, file=file)
print("---", file=file)
for line in lines[idx + 1 :]:
print(line, file=file)
def angle_rms(ang, axis=None, period=2*np.pi):
"""returns the rms of angles, uses the property that rms(x)**2 = mean(x)**2 + std(x)**2"""
#rms(x)**2 = mean(x)**2 + std(x)**2
#E[X**2] = E[X]**2 + E[(X - E[X])**2]
m,s = angle_mean_std(ang,axis,period)
return np.hypot(m, s)
def check_units(*units_by_pos, **units_by_name):
"""Create a decorator to check units of function arguments."""
def dec(func):
# Match the signature of the function to the arguments given to the decorator
sig = signature(func)
bound_units = sig.bind_partial(*units_by_pos, **units_by_name)
# Convert our specified dimensionality (e.g. "[pressure]") to one used by
# pint directly (e.g. "[mass] / [length] / [time]**2). This is for both efficiency
# reasons and to ensure that problems with the decorator are caught at import,
# rather than runtime.
dims = {name: (orig, units.get_dimensionality(orig.replace('dimensionless', '')))
for name, orig in bound_units.arguments.items()}
defaults = {name: sig.parameters[name].default for name in sig.parameters
if sig.parameters[name].default is not Parameter.empty}
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Match all passed in value to their proper arguments so we can check units
bound_args = sig.bind(*args, **kwargs)
bad = list(_check_argument_units(bound_args.arguments, defaults, dims))
# If there are any bad units, emit a proper error message making it clear
# what went wrong.
if bad:
msg = f'`{func.__name__}` given arguments with incorrect units: '
msg += ', '.join(f'`{arg}` requires "{req}" but given "{given}"'
for arg, given, req in bad)
if 'none' in msg:
msg += ('\nAny variable `x` can be assigned a unit as follows:\n'
' from metpy.units import units\n'
' x = x * units.meter / units.second')
raise ValueError(msg)
return func(*args, **kwargs)
return wrapper
return dec
def check_nifti_dim(fname, data, dim=4):
"""
Remove extra dimensions.
Parameters
----------
fname : str
The name of the file representing `data`
data : np.ndarray
The data which dimensionality needs to be checked
dim : int, optional
The amount of dimensions expected/desired in the data.
Returns
-------
np.ndarray
If `len(data.shape)` = `dim`, returns data.
If `len(data.shape)` > `dim`, returns a version of data without the
dimensions above `dim`.
Raises
------
ValueError
If `data` has less dimensions than `dim`
"""
if len(data.shape) < dim:
raise ValueError(f'{fname} does not seem to be a {dim}D file. '
f'Plase provide a {dim}D nifti file.')
if len(data.shape) > dim:
LGR.warning(f'{fname} has more than {dim} dimensions. Removing D > {dim}.')
for ax in range(dim, len(data.shape)):
data = np.delete(data, np.s_[1:], axis=ax)
return np.squeeze(data)
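# Usage sketch for `check_nifti_dim`: a 4D array passes through unchanged, so the
# module-level logger `LGR` (not shown here) is never touched on this path.
import numpy as np

volume = np.zeros((2, 3, 4, 5))
print(check_nifti_dim("example.nii.gz", volume, dim=4).shape)  # (2, 3, 4, 5)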
def bar_chart(x_data=None, y_data=None, title="Chart Title", x_label=None, y_label=None,
color="blue", figsize=(10,5)):
"""
This function requires two Pandas data series for x and y data.
Optionally: the x label, y label, color, title, and size may be set.
This function returns a bar chart with the specified parameters.
"""
if x_data is None or y_data is None:
print("No data passed.")
return None
if x_label is None:
x_label = x_data.name
if y_label is None:
y_label = y_data.name
fig = plt.figure(figsize=figsize) #Sets size of the bar chart.
plt.bar(x_data, y_data, color=color) #Plots x and y and set the color.
plt.title(title) #Sets title of the chart.
plt.xlabel(x_label) #Sets x-axis label.
plt.ylabel(y_label) #Sets y-axis label.
plt.xticks(x_data, rotation='45') #Setting x-tick labels and rotating 45 degrees.
return plt
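# Usage sketch for `bar_chart` with two small pandas Series; matplotlib is assumed to be
# imported as `plt` by the surrounding module and is imported here to keep the sketch
# self-contained.
import matplotlib.pyplot as plt
import pandas as pd

x = pd.Series(["A", "B", "C"], name="category")
y = pd.Series([3, 7, 5], name="count")
chart = bar_chart(x, y, title="Counts per category")
# chart.show()  # uncomment to display the figure interactively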
def get_qtobject_for_uipath(pathstr):
""" Returns the QtObject for a Maya UI path.
Ensure that the path starts from the Maya main window and that there are no
empty elements in it as this will fail.
"""
split_pathstr = pathstr.split("|")
return _find_qobject(get_maya_main_window(), split_pathstr)
def parse_study_with_run(soup):
"""Given a BeautifulSoup object representing a study, parse out relevant
information.
:param soup: a BeautifulSoup object representing a study
:type soup: bs4.BeautifulSoup
:return: a dictionary containing study information and run information
:rtype: dict
"""
accession = soup.find('PRIMARY_ID', text=PROJECT_PARSER).text
title = soup.find('STUDY_TITLE').text
abstract = soup.find('STUDY_ABSTRACT').text
# Returns all of the runs associated with a study
runs = []
run_parsed = soup.find('ID', text=RUN_PARSER)
if run_parsed:
run_ranges = run_parsed.text.split(",")
for run_range in run_ranges:
if '-' in run_range:
runs += parse_run_range(run_range)
else:
runs.append(run_range)
else:
logger.warning(
'Failed to parse run information from ENA XML. Falling back to '
'ENA search...'
)
# Sometimes the SRP does not contain a list of runs (for whatever reason).
# A common trend with such projects is that they use ArrayExpress.
# In the case that no runs could be found from the project XML,
# fallback to ENA SEARCH.
runs = search_ena_study_runs(accession)
return {
'accession': accession,
'title': title,
'abstract': abstract,
'runlist': runs
}
def create_embed(**kwargs) -> Embed:
"""Creates a discord embed object."""
embed_type = kwargs.get('type', Embed.Empty)
title = kwargs.get('title', Embed.Empty)
description = kwargs.get('description', Embed.Empty)
color = kwargs.get('color', get_default_color())
timestamp = kwargs.get('timestamp', Embed.Empty)
url = kwargs.get('url', Embed.Empty)
return Embed(
type=embed_type,
title=title,
description=description,
url=url,
color=color,
timestamp=timestamp
)
def validate_commit(commit: Commit, out_errors: List[str] = None, ignore_validators: List[str] = None) -> bool:
"""Validates a commit against all validators
:param commit: The commit to validate
:param out_errors: if not None, will populate with the list of errors given by the validators
:param ignore_validators: Optional list of CommitValidator classes to ignore, by class name
:return: True if there are no validation errors, and False otherwise
"""
failed_count = 0
passed_count = 0
start_time = time.time()
# Find all the validators in the validators package (recursively)
validator_classes = []
validators_dir = os.path.join(os.path.dirname(__file__), 'validators')
for _, module_name, is_package in pkgutil.iter_modules([validators_dir]):
if not is_package:
module = importlib.import_module('commit_validation.validators.' + module_name)
validator = module.get_validator()
if ignore_validators and validator.__name__ in ignore_validators:
print(f"Disabled validation for '{validator.__name__}'")
else:
validator_classes.append(validator)
error_summary = {}
# Process validators
for validator_class in validator_classes:
validator = validator_class()
validator_name = validator.__class__.__name__
error_list = []
passed = validator.run(commit, errors = error_list)
if passed:
passed_count += 1
print(f'{validator.__class__.__name__} PASSED')
else:
failed_count += 1
print(f'{validator.__class__.__name__} FAILED')
error_summary[validator_name] = error_list
end_time = time.time()
if failed_count:
print("VALIDATION FAILURE SUMMARY")
for val_name in error_summary.keys():
errors = error_summary[val_name]
if errors:
for error_message in errors:
first_line = True
for line in error_message.splitlines():
if first_line:
first_line = False
print(f'VALIDATOR_FAILED: {val_name} {line}')
else:
print(f' {line}') # extra detail lines do not need machine parsing
stats_strs = []
if failed_count > 0:
stats_strs.append(f'{failed_count} failed')
if passed_count > 0:
stats_strs.append(f'{passed_count} passed')
stats_str = ', '.join(stats_strs) + f' in {end_time - start_time:.2f}s'
print()
print(stats_str)
return failed_count == 0
def test_examples(example_file):
"""Loop through all example files, verify that the ast_parser can parse the file.
Examples located at: ``openqasm/examples``.
"""
with open(example_file) as f:
source = f.read()
openqasm3.parse(source)
def _safe_types(
*, test_data: Any, cached_data: Any, key_rules: List[KeyRule],
) -> Dict:
"""Convert data and key_rules to safe data types for diffing.
Args:
test_data: data to compare
cached_data: data to compare
key_rules: list of key rules to apply
Returns:
Dict: safe keyword args for diff_with_rules
"""
wrapped_key_rules = []
for key_rule in key_rules:
if isinstance(cached_data, list):
key_rule.pattern = [_WRAP_KEY] + key_rule.pattern
wrapped_key_rules.append(key_rule)
return {
'old_dict': _wrap_data(cached_data),
'new_dict': _wrap_data(test_data),
'key_rules': wrapped_key_rules,
}
def _flatten_output(attr_dict, skip: list=[]):
"""
flatten an output dict node
node_collection is a list to accumulate the nodes that are not unfolded
:param skip: a list of keys (formatted as parent_key.key) of Dict names that
will not be collected into the json file.
For output nodes not being expanded, write down the uuid and datatype for future query.
"""
# do_not_unfold = ["band_parameters", "scf_parameters", "seekpath_parameters"]
for key, value in attr_dict.items():
if key in skip:
continue
if isinstance(value, AttributeDict):
# keep on unfold if it is a namespace
_flatten_output(value, skip)
elif isinstance(value, orm.Dict):
attr_dict[key] = value.get_dict()
elif isinstance(value, orm.Int):
attr_dict[key] = value.value
else:
# node type not handled attach uuid
attr_dict[key] = {
'uuid': value.uuid,
'datatype': type(value),
}
# print(archive_uuids)
return attr_dict
def validate_threatbus_config(config: Settings):
"""
Validates the given Dynaconf object, potentially adding new entries for the default values.
Throws if the config is invalid.
"""
validators = [
Validator("logging.console", is_type_of=bool, required=True, default=True),
Validator("logging.file", is_type_of=bool, required=True, default=False),
Validator(
"logging.console_verbosity",
is_in=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default="INFO",
),
Validator(
"logging.file_verbosity",
is_in=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
default="INFO",
),
Validator(
"logging.filename",
required=True,
when=Validator("logging.file", eq=True, default="threatbus.log"),
),
Validator("plugins.apps", "plugins.backbones", required=True),
]
config.validators.register(*validators)
config.validators.validate()
def get_tpu_estimator(
working_dir,
model_fn,
iterations_per_loop=320,
keep_checkpoint_max=20,
use_tpu=False,
train_batch_size=64):
"""Obtain an TPU estimator from a directory.
Args:
working_dir: the directory for holding checkpoints.
model_fn: an estimator model function.
iterations_per_loop: number of steps to run on TPU before outfeeding
metrics to the CPU. If the number of iterations in the loop would exceed
the number of train steps, the loop will exit before reaching
--iterations_per_loop. The larger this value is, the higher
the utilization on the TPU. For CPU-only training, this flag is equal to
`num_epochs * num_minibatches`.
keep_checkpoint_max: the maximum number of checkpoints to save in checkpoint
directory.
use_tpu: if True, training happens on TPU.
train_batch_size: minibatch size for training which is equal to total number
of data // number of batches.
Returns:
Returns a TPU estimator.
"""
# If `TPUConfig.per_host_input_for_training` is `True`, `input_fn` is
# invoked per host rather than per core. In this case, a global batch size
# is transformed a per-host batch size in params for `input_fn`,
# but `model_fn` still gets per-core batch size.
run_config = tf.estimator.tpu.RunConfig(
master=FLAGS.master,
evaluation_master=FLAGS.master,
model_dir=working_dir,
save_checkpoints_steps=iterations_per_loop,
save_summary_steps=iterations_per_loop,
keep_checkpoint_max=keep_checkpoint_max,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True),
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=iterations_per_loop,
per_host_input_for_training=True,
tpu_job_name=FLAGS.tpu_job_name))
return tf.estimator.tpu.TPUEstimator(
use_tpu=use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=train_batch_size) | 3,360 |
def rho_err(coeffs, rho, z, density_func):
"""
Returns the difference between the estimated and actual data
"""
soln = density_func(z, coeffs)
return rho - soln | 3,361 |
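# Hedged usage sketch (not from the source): rho_err has the (params, *args)
# signature expected by scipy.optimize.leastsq, so it can serve directly as the
# residual function; the quadratic poly_density model below is an illustrative assumption.
import numpy as np
from scipy.optimize import leastsq

def poly_density(z, coeffs):
    # hypothetical density model: rho(z) = c0 + c1*z + c2*z**2
    return coeffs[0] + coeffs[1] * z + coeffs[2] * z ** 2

z = np.linspace(0.0, 10.0, 50)
rho = 1000.0 - 2.0 * z + 0.05 * z ** 2
coeffs0 = np.array([900.0, 0.0, 0.0])
fit_coeffs, _ = leastsq(rho_err, coeffs0, args=(rho, z, poly_density))
print(fit_coeffs)  # approximately [1000.0, -2.0, 0.05]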
def ui_form_stations():
"""
This function lists all stations
"""
# get _all_ the stations
stations = station_get(0)
# render stations in HTML template
return render_template("stations.html", result=stations) | 3,362 |
def create_digital_object(obj):
"""
    Create a digital object for a cilantro object in AtoM.
    :param Object obj: the cilantro object
:return: str The generated URI for the digital object
"""
url = f"{atom_uri}/api/digitalobjects"
headers = {'REST-API-Key': atom_api_key,
'Content-Type': 'application/json'}
data = _get_digital_object_data(obj)
json_data = json.dumps(data, indent=4)
log.debug(f"Digital object: {json_data}")
response = requests.post(url, data=json_data, headers=headers)
response.raise_for_status()
return f"{atom_uri}/{response.json()['slug']}" | 3,363 |
def fitcreds():
"""
returns the ['credentials'] dictionary
:return: dictionary or None
"""
return fitcfg().get('credentials', None) | 3,364 |
def brace_expand(str):
"""Perform brace expansion, a lá bash."""
match = re.search('{(.+?)(,.*?)?}', str)
if match:
strings = brace_expand(replace_range(str,
match.start(),
match.end(),
match.group(1)))
if match.group(2):
strings.extend(brace_expand(replace_range(str,
match.start(),
match.end(),
match.group(2)[1:])))
return strings
else: # No braces were in the string.
return [str] | 3,365 |
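# Hedged usage sketch (not from the source): brace_expand relies on a
# replace_range helper defined elsewhere in the module; the one-liner below is
# an assumed implementation that splices `replacement` into s[start:end].
def replace_range(s, start, end, replacement):
    return s[:start] + replacement + s[end:]

print(brace_expand("img_{a,b}.{png,jpg}"))
# -> ['img_a.png', 'img_a.jpg', 'img_b.png', 'img_b.jpg']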
def create(number):
"""
create() : Add document to Firestore collection with request body.
Ensure you pass a custom ID as part of json body in post request,
e.g. json={'id': '1', 'title': 'Write a blog post'}
"""
try:
id = request.json['id']
todo_ref = user_ref.document(number).collection("todos")
todo_ref.document(id).set(request.json)
all_todos = [doc.to_dict() for doc in todo_ref.stream()]
return jsonify(all_todos), 200
except Exception as e:
return f"An Error Occured: {e}" | 3,366 |
def turn_on(entity_id):
"""
Turn on a switch
Parameters
----------
entity_id : str
"""
call_service('{"entity_id": "' + entity_id + '" }', "switch/turn_on") | 3,367 |
def sort_terms(node, parent_children, hierarchy):
"""Recursively create a list of nodes grouped by category."""
for c in parent_children.get(node, []):
hierarchy.append(c)
sort_terms(c, parent_children, hierarchy)
return hierarchy | 3,368 |
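# Hedged usage sketch (not from the source): parent_children maps each node to
# its direct children; the call returns the descendants of the start node in
# depth-first order (the start node itself is not included).
parent_children = {
    "root": ["animals", "plants"],
    "animals": ["cats", "dogs"],
    "plants": ["trees"],
}
print(sort_terms("root", parent_children, []))
# -> ['animals', 'cats', 'dogs', 'plants', 'trees']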
def main() -> None:
""" Make a jazz noise here """
args = get_args()
syllables = args.syllable
song = {
'Do': 'A deer, a female deer',
'Re': 'A drop of golden sun',
'Mi': 'A name I call myself',
'Fa': 'A long long way to run',
'Sol': 'A needle pulling thread',
'La': 'A note to follow sol',
'Ti': 'A drink with jam and bread'
}
for syllable in syllables:
if syllable not in song.keys():
print(f'I don\'t know "{syllable}"')
continue
print(f'{syllable}, {song[syllable]}') | 3,369 |
def sumPm(mirror):
"""Returns sum of all mechanical power from active machines"""
sysPm = 0.0
# for each area
for area in mirror.Area:
# reset current sum
area.cv['Pm'] = 0.0
# sum each active machine Pm to area agent
for mach in area.Machines:
if mach.cv['St'] == 1:
area.cv['Pm'] += mach.cv['Pm']
# sum area agent totals to system
sysPm += area.cv['Pm']
return sysPm | 3,370 |
async def create_tables(conn):
"""initialize db"""
await conn.execute("""
CREATE TABLE IF NOT EXISTS embyusers (
discordid BIGINT,
embyuser TEXT,
primary key (discordid, embyuser)
)
""") | 3,371 |
def cmd_publish(sensor,hours):
"""Publish a sensor"""
if sensor == '':
        name = click.prompt('Please enter a name for your sensor', type=str)
        endpoint = click.prompt('Measurement endpoint (e.g. http://10.5.20.6:3001/measurement)', type=str)
        price = click.prompt('Price per measurement in satoshi', type=int)
        mtype = click.prompt('Measurement type (e.g. temperature)', type=str)
        unit = click.prompt('Measurement unit (e.g. Kelvin)', type=str)
        datatype = click.prompt('Data type (e.g. float)', type=str)
sensor = { 'name': name, 'endpoint': endpoint, 'price': price, 'datatype': datatype, 'type': mtype, 'unit':unit}
if isinstance(sensor, str):
sensor = json.loads(sensor)
sensor_keys = list(sensor.keys())
# check if all mandatory keys are set
for key in mandatory_keys:
if key not in sensor_keys:
print(key + " is mandatory.")
raise SystemExit
# check if there are only valid keys
for key in sensor_keys:
if key not in valid_keys:
print(key + " is not allowed.")
raise SystemExit
#sensor['public_key'] = wallet.get_message_signing_public_key()
# Encode to json object
sensor_json = json.dumps(sensor)
url = server_url+'publish?hours={0}'
response = requests.post(url=url.format(hours),data = sensor_json)
click.echo(response.text) | 3,372 |
def chunk(rs, n, column=None):
"""Returns a list of rows in chunks
:param rs: a list of rows
:param n:
        - int => returns n chunks of about the same size
        - list of floats, [0.3, 0.4, 0.3] => returns 3 chunks of 30%, 40%, 30%
        - list of nums, [100, 500, 1000] => returns 4 chunks with break points\
            100, 500, 1000, but you must pass the column name\
            for the break points like
            chunk(rs, [100, 500, 1000], 'col')
:param column: column name for break points
:returns: a list of rows
"""
size = len(rs)
if isinstance(n, int):
start = 0
result = []
for i in range(1, n + 1):
end = int((size * i) / n)
# must yield anyway
result.append(rs[start:end])
start = end
return result
# n is a list of percentiles
elif not column:
# then it is a list of percentiles for each chunk
        assert sum(n) <= 1, "Sum of percentiles for chunks must be <= 1.0"
ns = [int(x * size) for x in accumulate(n)]
result = []
for a, b in zip([0] + ns, ns):
result.append(rs[a:b])
return result
# n is a list of break points
else:
rs.sort(key=lambda r: r[column])
start, end = 0, 0
result = []
for bp in n:
            while end < size and rs[end][column] < bp:
end += 1
result.append(rs[start:end])
start = end
result.append(rs[end:])
return result | 3,373 |
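# Hedged usage sketch (not from the source; assumes the module-level
# itertools.accumulate import used inside chunk): the three supported forms
# of `n`, shown on a small list of dict rows.
rows = [{"col": v} for v in range(10)]
print([len(c) for c in chunk(rows, 3)])                      # -> [3, 3, 4]
print([len(c) for c in chunk(rows, [0.25, 0.5, 0.25])])      # -> [2, 5, 3]
print([len(c) for c in chunk(rows, [2, 5], column="col")])   # -> [2, 3, 5]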
def get_file_detail(traffic_file):
"""
Args:
        traffic_file: name of the traffic file
Returns:
roadnet_file and flow_file name
"""
phase = None
roadnet_file = None
flow_file = None
for category in TRAFFIC_CATEGORY:
if traffic_file in list(TRAFFIC_CATEGORY[category].keys()):
phase = TRAFFIC_CATEGORY[category][traffic_file][0]
roadnet_file = TRAFFIC_CATEGORY[category][traffic_file][1]
flow_file = TRAFFIC_CATEGORY[category][traffic_file][2]
return phase, roadnet_file, flow_file | 3,374 |
def setup_logging(default_path='logging.json',
default_level=logging.INFO,
env_key='LOG_CFG'):
"""Setup logging configuration """
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level) | 3,375 |
async def root():
"""
    :return: welcome page, made by @woosal1337
"""
try:
return {f"Made by @woosal1337"}
except Exception as e:
return {f"{e} has happened!"} | 3,376 |
def test_tah_pocitace_skoro_plne_konec_2():
"""
    Game on an almost full board (2 free cells at the end).
"""
pole = "xooxxooxoxoxoxooxx--"
result = tah_pocitace(pole)
assert len(result) == 20
assert result.count("x") == 9
assert result.count("o") == 10
assert result.count("-") == 1 | 3,377 |
def sequence_accuracy_score(y_true, y_pred):
"""
Return sequence accuracy score. Match is counted only when two sequences
are equal.
"""
total = len(y_true)
if not total:
return 0
matches = sum(1 for yseq_true, yseq_pred in zip(y_true, y_pred)
if yseq_true == yseq_pred)
return matches / total | 3,378 |
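# Hedged usage sketch (not from the source): only the second label sequence
# matches exactly, so the score is 1/3.
y_true = [["B", "I", "O"], ["O", "O"], ["B", "O"]]
y_pred = [["B", "O", "O"], ["O", "O"], ["B", "I"]]
print(sequence_accuracy_score(y_true, y_pred))  # -> 0.3333333333333333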
def scale_axes(fig: Figure,
vertices: List[np.ndarray],
scale: float = 1.3):
"""Scale the axes of the figure to fit the given set of vertices.
Args:
fig (Figure): Figure whose axes will get re-scaled.
vertices (List[np.ndarray]): Set of vertices to be contained.
        scale (float): The factor to multiply the minimum axis lengths by.
"""
x_list = [list(x[:,0]) for x in vertices]
limits = [max(i)*scale for i in list(zip(*x_list))]
fig.set_axis_limits(limits) | 3,379 |
def load_content(sentence_file):
"""Load input file with sentences to build LSH.
Args:
sentence_file (str): Path to input with txt file with sentences to Build LSH.
Returns:
dict: Dict with strings and version of string in lower case and without comma.
"""
sentences = {}
with open(sentence_file) as content:
for line in content:
line = line.strip()
line_clean = line.replace(",", "")
line_clean = line_clean.lower()
sentences[line_clean] = line
return sentences | 3,380 |
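# Hedged usage sketch (not from the source): write a throwaway sentence file and
# load it; keys are the cleaned sentences, values are the original lines.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("Hello, World\n")
    tmp.write("Another Sentence\n")
print(load_content(tmp.name))
# -> {'hello world': 'Hello, World', 'another sentence': 'Another Sentence'}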
def _analyse_graph(graph):
""" Analyses a connected graph to find a set of distinct paths and a
topologically ordered sequence.
"""
# Make a copy without self-cycles to modify
g = graph.clean_copy()
g0 = g.copy()
# Start with the diameter of the graph
diameter = g.diameter()
if not diameter:
# The graph has no edges so return sorted list of isolated vertices
return [], sorted(g.isolated, key=graph.sort)
diameter_ac = diameter.make_acyclic()
# Remove diameter from graph and search the rest
g.remove_path(diameter)
paths, sequence = [diameter], list(diameter_ac)
stack = collections.deque()
# Search paths both forwards and backwards
# All diverging branches are searched backwards and vice versa
stack.extend((v, True) for v in reversed(diameter))
stack.extend((v, False) for v in diameter)
while stack:
vertex, forward = stack.pop()
try:
new_paths = g.search_paths(vertex, forward=forward).values()
except KeyError:
continue
if not any(new_paths):
continue
# Add paths to list
longest = max(sorted(new_paths, key=graph.path_sort), key=len)
g.remove_path(longest)
paths.append(longest)
# Merge paths into sequence
longest_ac = longest.make_acyclic()
index = sequence.index(vertex)
if forward:
_merge_forward(g0, sequence, longest_ac, index)
else:
_merge_backward(g0, sequence, longest_ac, index)
# Add new paths to stack for searching
stack.extendleft((v, True) for v in reversed(longest_ac))
stack.extendleft((v, False) for v in longest_ac)
# Maybe another distinct path here - return vertex to queue
stack.append((vertex, forward))
if g.vertices:
# Expect all vertices and edges to be removed from connected graph.
raise ValueError(
f"Vertices {g.vertices!r} still left over from graph {g0!r}"
)
_rearrange_cycles(g0, sequence)
return paths, sequence | 3,381 |
def rare_last_digit(first):
"""Given a leading digit, first, return all possible last digits of a rare number"""
if first == 2:
return (2,)
elif first == 4:
return (0,)
elif first == 6:
return (0,5)
elif first == 8:
return (2,3,7,8)
else:
raise ValueError(f"Invalid first digit of rare number: {first}") | 3,382 |
def decorrelation_witer(W):
"""
Iterative MDUM decorrelation that avoids matrix inversion.
"""
lim = 1.0
tol = 1.0e-05
W = W/(W**2).sum()
while lim > tol:
W1 = (3.0/2.0)*W - 0.5*dot(dot(W,W.T),W)
lim = npmax(npabs(npabs(diag(dot(W1,W.T))) - 1.0))
W = W1
return W | 3,383 |
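# Hedged usage sketch (not from the source; assumes the module-level numpy
# aliases dot/diag/npmax/npabs used inside the function are imported): for a
# full-rank square matrix the iteration converges to an approximately
# orthogonal matrix, i.e. W @ W.T is close to the identity.
import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((4, 4))
W_dec = decorrelation_witer(W)
print(np.max(np.abs(W_dec @ W_dec.T - np.eye(4))))  # small, near the 1e-05 tolerance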
def run_task(task, request):
""" Run/enqueue the given task for the given request
Note that the request should be validated before
this is called.
@param task: Name of the task. One of package_url,
package_dwc_archive or package_datastore
@param request: Dictionary containing the request
"""
logger = get_task_logger(__name__)
if task == 'package_url':
UrlPackageTask(request, config).run(logger)
elif task == 'package_dwc_archive':
DwcArchivePackageTask(request, config).run(logger)
elif task == 'package_datastore':
DatastorePackageTask(request, config).run(logger) | 3,384 |
def _open_public_images(username):
"""
:param username: username of a given person
    :return: 1 if the browser tab was opened successfully, -1 otherwise
"""
try:
new_url = "https://www.facebook.com/" + username + "/photos_all"
webbrowser.open_new_tab(new_url)
return 1
except Exception as e:
print(e)
return -1 | 3,385 |
def fermat_number(n: int) -> int:
"""
https://en.wikipedia.org/wiki/Fermat_number
https://oeis.org/A000215
>>> [fermat_number(i) for i in range(5)]
[3, 5, 17, 257, 65537]
"""
return 3 if n == 0 else (2 << ((2 << (n - 1)) - 1)) + 1 | 3,386 |
def get_id_ctx(node):
"""Gets the id and attribute of a node, or returns a default."""
nid = getattr(node, "id", None)
if nid is None:
return (None, None)
return (nid, node.ctx) | 3,387 |
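# Hedged usage sketch (not from the source): the helper appears aimed at ast
# nodes, where Name nodes carry `id` and `ctx` (Load/Store) attributes.
import ast

tree = ast.parse("x = y + 1")
target = tree.body[0].targets[0]  # the Name node for `x`
value = tree.body[0].value        # a BinOp node, which has no `id`
print(get_id_ctx(target))         # -> ('x', <ast.Store object at 0x...>)
print(get_id_ctx(value))          # -> (None, None)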
def fitsfile_clumpy(filename,ext=None,header=True,**kwargs):
"""Read a (CLUMPY) fits file.
Parameters:
-----------
filename : str
Path to the CLUMPY .fits file to be read.
ext : {int,str}
The FITS extension to be read. Either as EXTVER, specifying
the HDU by an integer, or as EXTNAME, giving the HDU by name.
For CLUMPY FITS files:
           0 or 'imgdata' is the image cube
           1 or 'clddata' is the projected map of cloud number per line-of-sight.
header : bool
If True, the HDU header will also be read. Not used currently.
"""
if 'hypercubenames' in kwargs:
ext = kwargs['hypercubenames'][0]
assert (isinstance(ext,(int,str))),\
"'ext' must be either integer or a string, specifying the FITS extension by number or by name, respectively."
dataset, header = pyfits.getdata(filename,ext,header=header) # dataset.shape is (Nwave,Nypix,Nxpix) for 3D, and (Nypix,Nxpix) for 2D.
x = N.arange(float(header['NAXIS1']))
y = N.arange(float(header['NAXIS2']))
# x = range(header['NAXIS1'])
# y = range(header['NAXIS2'])
if dataset.ndim == 2:
axes = None
axnames = ['x','y']
axvals = [x,y]
elif dataset.ndim == 3:
axes = (0,2,1)
wave = N.array([v for k,v in header.items() if k.startswith('LAMB')])
axnames = ['wave','x','y']
axvals = [wave,x,y]
dataset = N.transpose(dataset,axes=axes) # now it's (Nwave,Nxpix,Nypix) for 3D, and (Nxpix,Nypix) for 2D.
datasets = [dataset] # has to be a list for function 'convert'
hypercubenames = kwargs['hypercubenames']
return datasets, axnames, axvals, hypercubenames | 3,388 |
class Node(object):
    """
    """
    @property
    def location(self):
        """
        """
        return self._location
    @location.setter
    def location(self, value):
        """
        """
        self._location = value
    @property
    def name(self):
        """
        """
        return self._name
    @name.setter
    def name(self, value):
        """
        """
        self._name = value
    @property
    def comments(self):
        """
        """
        return self._comments
    @comments.setter
    def comments(self, value):
        """
        """
        self._comments = value
    def __init__(self):
        self._name = None
        self._location = None
        self._comments = None
        # This means nodes are directed, anything incoming is previous
        # anything outgoing is next... change names maybe?
        self.previous_nodes = []
        self.next_nodes = []
def is_tkg_plus_enabled(config: Optional[dict] = None) -> bool:
"""
Check if TKG plus is enabled by the provider in the config.
:param dict config: configuration provided by the user.
:return: whether TKG+ is enabled or not.
:rtype: bool
"""
if not config:
try:
config = get_server_runtime_config()
except Exception:
return False
service_section = config.get('service', {})
tkg_plus_enabled = service_section.get('enable_tkg_plus', False)
if isinstance(tkg_plus_enabled, bool):
return tkg_plus_enabled
elif isinstance(tkg_plus_enabled, str):
return utils.str_to_bool(tkg_plus_enabled)
return False | 3,390 |
def da_cma(max_evaluations = 50000, da_max_evals = None, cma_max_evals = None,
popsize=31, stop_fitness = -math.inf):
"""Sequence differential evolution -> CMA-ES."""
daEvals = np.random.uniform(0.1, 0.5)
if da_max_evals is None:
da_max_evals = int(daEvals*max_evaluations)
if cma_max_evals is None:
cma_max_evals = int((1.0-daEvals)*max_evaluations)
opt1 = Da_cpp(max_evaluations = da_max_evals, stop_fitness = stop_fitness)
opt2 = Cma_cpp(popsize=popsize, max_evaluations = cma_max_evals,
stop_fitness = stop_fitness)
return Sequence([opt1, opt2]) | 3,391 |
def j_hashset(s: Set = None) -> jpy.JType:
"""Creates a Java HashSet from a set."""
if s is None:
return None
r = jpy.get_type("java.util.HashSet")()
for v in s:
r.add(v)
return r | 3,392 |
def get_year(h5, songidx=0):
"""
Get release year from a HDF5 song file, by default the first song in it
"""
return h5.root.musicbrainz.songs.cols.year[songidx] | 3,393 |
def _create_simulation_parametrization():
"""Convert named scenarios to parametrization.
Each named scenario is duplicated with different seeds to capture the uncertainty in
the simulation..
"""
named_scenarios = get_named_scenarios()
scenarios = []
for name, specs in named_scenarios.items():
is_resumed = specs.get("is_resumed", "fall")
save_last_states = specs.get("save_last_states", False)
for seed in range(specs["n_seeds"]):
produces = {
"period_outputs": create_path_to_period_outputs_of_simulation(
name, seed
)
}
if specs.get("save_rapid_test_statistics", False):
rapid_test_statistics_path = create_path_to_raw_rapid_test_statistics(
name, seed
)
produces["rapid_test_statistics"] = rapid_test_statistics_path
# since we use "append" mode to build this we need to delete the
# present file with every run
if rapid_test_statistics_path.exists():
rapid_test_statistics_path.unlink()
else:
rapid_test_statistics_path = None
if save_last_states:
produces["last_states"] = create_path_to_last_states_of_simulation(
name, seed
)
depends_on = get_simulation_dependencies(
debug=FAST_FLAG == "debug",
is_resumed=is_resumed,
)
if is_resumed:
depends_on["initial_states"] = create_path_to_last_states_of_simulation(
f"{is_resumed}_baseline", seed
)
spec_tuple = (
depends_on,
specs["sim_input_scenario"],
specs["params_scenario"],
specs["start_date"],
specs["end_date"],
save_last_states,
produces,
500 + 100_000 * seed,
is_resumed,
rapid_test_statistics_path,
)
scenarios.append(spec_tuple)
signature = (
"depends_on, sim_input_scenario, params_scenario, "
+ "start_date, end_date, save_last_states, produces, seed, "
+ "is_resumed, rapid_test_statistics_path"
)
return signature, scenarios | 3,394 |
def calc_buffer(P, T, buffer):
"""
Master function to calc any buffer given a name.
Parameters
----------
P: float
Pressure in GPa
T: float or numpy array
Temperature in degrees K
buffer: str
Name of buffer
Returns
-------
float or numpy array
logfO2
"""
if buffer == 'NNO':
return calc_NNO(P, T)
elif buffer == 'QFM':
return calc_QFM(P, T)
elif buffer == 'IW':
return calc_IW(P, T)
elif buffer == 'CrCr2O3':
return calc_CrCr2O3(P, T)
elif buffer == 'SiSiO2':
return calc_SiSiO2(P, T)
elif buffer == 'HM':
return calc_HM(P, T)
elif buffer == 'CoCoO':
return calc_CoCoO(P, T)
elif buffer == 'ReReO':
return calc_ReReO(P, T)
elif buffer == 'Graphite':
return calc_Graphite(P, T)
elif buffer == 'QIF':
return calc_QIF(P, T)
elif buffer == 'MoMoO2':
return calc_MoMoO2(P,T)
elif buffer == 'CaCaO':
return calc_CaCaO(P,T)
elif buffer == 'AlAl2O3':
return calc_AlAl2O3(P,T)
elif buffer == 'KK2O':
return calc_KK2O(P,T)
elif buffer == 'MgMgO':
return calc_MgMgO(P,T)
elif buffer == 'MnMnO':
return calc_MnMnO(P,T)
elif buffer == 'NaNa2O':
return calc_NaNa2O(P,T)
elif buffer == 'TiTiO2':
return calc_TiTiO2(P,T)
else:
raise InputError('Buffer name not recognized') | 3,395 |
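# Hedged refactoring sketch (not from the source): the same dispatch can be
# written as a lookup table over the calc_* functions already used above, which
# keeps the set of supported buffer names in one introspectable place.
_BUFFER_FUNCS = {
    'NNO': calc_NNO, 'QFM': calc_QFM, 'IW': calc_IW, 'CrCr2O3': calc_CrCr2O3,
    'SiSiO2': calc_SiSiO2, 'HM': calc_HM, 'CoCoO': calc_CoCoO, 'ReReO': calc_ReReO,
    'Graphite': calc_Graphite, 'QIF': calc_QIF, 'MoMoO2': calc_MoMoO2,
    'CaCaO': calc_CaCaO, 'AlAl2O3': calc_AlAl2O3, 'KK2O': calc_KK2O,
    'MgMgO': calc_MgMgO, 'MnMnO': calc_MnMnO, 'NaNa2O': calc_NaNa2O,
    'TiTiO2': calc_TiTiO2,
}

def calc_buffer_dispatch(P, T, buffer):
    """Equivalent dict-based dispatch (illustrative name, not from the source)."""
    try:
        return _BUFFER_FUNCS[buffer](P, T)
    except KeyError:
        raise InputError('Buffer name not recognized')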
def printImproperDihedral(dihedral, alchemical = False):
"""Generate improper dihedral line
Parameters
----------
dihedral : dihedral Object
dihedral Object
Returns
-------
dihedralLine : str
Improper dihedral line data
"""
V2 = dihedral.V2*0.5
V2_B = dihedral.V2_B*0.5
label = 'imptors %7s %5s %5s %5s %8.3f %4.1f %2d\n' % \
(dihedral.atomA.typeA, dihedral.atomB.typeA, dihedral.atomC.typeA, dihedral.atomD.typeA, V2, 180.0, 2)
if alchemical: label = 'imptors %7s %5s %5s %5s %8.3f %4.1f %2d\n' % \
(dihedral.atomA.typeB, dihedral.atomB.typeB, dihedral.atomC.typeB, dihedral.atomD.typeB, V2_B, 180.0, 2)
return label | 3,396 |
def test_cache_permission(mocker, monkeypatch, tmpdir):
"""Emit a warning once that this can't cache the latest PSL."""
warning = mocker.patch.object(logging.getLogger("tldextract.cache"), "warning")
def no_permission_makedirs(*args, **kwargs):
raise PermissionError(
"""[Errno 13] Permission denied:
'/usr/local/lib/python3.7/site-packages/tldextract/.suffix_cache"""
)
monkeypatch.setattr(os, "makedirs", no_permission_makedirs)
for _ in range(0, 2):
my_extract = tldextract.TLDExtract(cache_dir=tmpdir)
assert_extract(
"http://www.google.com",
("www.google.com", "www", "google", "com"),
funs=(my_extract,),
)
assert warning.call_count == 1
assert warning.call_args[0][0].startswith("unable to cache") | 3,397 |
def inv_erf(z):
"""
Inverse error function.
:param z: function input
:type z: float
:return: result as float
"""
if z <= -1 or z >= 1:
return "None"
if z == 0:
return 0
result = ndtri((z + 1) / 2.0) / math.sqrt(2)
return result | 3,398 |
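# Hedged usage sketch (not from the source; assumes the module-level
# scipy.special.ndtri and math imports used inside inv_erf): round-tripping
# through math.erf recovers the input, and out-of-range arguments return the
# string "None".
import math

x = inv_erf(0.5)
print(x, math.erf(x))  # ~0.4769, ~0.5
print(inv_erf(1.5))    # -> 'None'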
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text)) | 3,399 |