def temporal_filter(record_date_time, time_or_period, op):
"""
Helper function to perform temporal filters on feature set
:param record_date_time: datetime field value of a feature
:type record_date_time: :class:`datetime.datetime`
:param time_or_period: the time instant or time span to use as a filter
:type time_or_period: :class:`datetime.datetime` or a tuple of two
datetimes or a tuple of one datetime and one
:class:`datetime.timedelta`
:param op: the comparison operation
:type op: str
:return: a comparison expression result
:rtype: bool
"""
d = datetime.strptime(record_date_time, "%Y-%m-%dT%H:%M:%SZ")
result = None
# perform before and after operations
if op in ['BEFORE', 'AFTER']:
query_date_time = datetime.strptime(
time_or_period.value, "%Y-%m-%dT%H:%M:%SZ")
if op == 'BEFORE':
return d <= query_date_time
elif op == 'AFTER':
return d >= query_date_time
# perform during operation
elif 'DURING' in op:
low, high = time_or_period
low = datetime.strptime(low.value, "%Y-%m-%dT%H:%M:%SZ")
high = datetime.strptime(high.value, "%Y-%m-%dT%H:%M:%SZ")
result = d >= low and d <= high
if 'BEFORE' in op:
result = d <= high
elif 'AFTER' in op:
result = d >= low
return result | 3,700 |
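# Hedged usage sketch for temporal_filter() above. It assumes the module imports
# `datetime` (from datetime import datetime) and that time instants are objects exposing
# a `.value` ISO-8601 string, which is how the function reads `time_or_period.value`;
# the TimeInstant namedtuple below is illustrative only, not part of the original code.
from collections import namedtuple

TimeInstant = namedtuple("TimeInstant", ["value"])

record = "2021-06-15T12:00:00Z"
cutoff = TimeInstant("2021-07-01T00:00:00Z")
assert temporal_filter(record, cutoff, "BEFORE") is True
span = (TimeInstant("2021-06-01T00:00:00Z"), TimeInstant("2021-06-30T00:00:00Z"))
assert temporal_filter(record, span, "DURING") is True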
def fill_replay_buffer(env, replay_buffer: ReplayBuffer, desired_size: int):
""" Fill replay buffer with random transitions until size reaches desired_size. """
assert (
0 < desired_size and desired_size <= replay_buffer._replay_capacity
), f"It's not true that 0 < {desired_size} <= {replay_buffer._replay_capacity}."
assert replay_buffer.size < desired_size, (
f"Replay buffer already has {replay_buffer.size} elements. "
f"(more than desired_size = {desired_size})"
)
logger.info(f"Starting to fill replay buffer to size: {desired_size}.")
random_policy = make_random_policy_for_env(env)
post_step = add_replay_buffer_post_step(replay_buffer, env=env)
agent = Agent.create_for_env(
env, policy=random_policy, post_transition_callback=post_step
)
max_episode_steps = env.max_steps
with tqdm(
total=desired_size - replay_buffer.size,
desc=f"Filling replay buffer from {replay_buffer.size} to size {desired_size}",
) as pbar:
mdp_id = 0
while replay_buffer.size < desired_size:
last_size = replay_buffer.size
max_steps = desired_size - replay_buffer.size - 1
if max_episode_steps is not None:
max_steps = min(max_episode_steps, max_steps)
run_episode(env=env, agent=agent, mdp_id=mdp_id, max_steps=max_steps)
size_delta = replay_buffer.size - last_size
# The assertion below is commented out because it can't
# support input samples which have seq_len > 1. This should be
# treated as a bug and needs to be fixed in the future.
# assert (
# size_delta >= 0
# ), f"size delta is {size_delta} which should be non-negative."
pbar.update(n=size_delta)
mdp_id += 1
if size_delta <= 0:
# replay buffer size isn't increasing... so stop early
break
if replay_buffer.size >= desired_size:
logger.info(f"Successfully filled replay buffer to size: {replay_buffer.size}!")
else:
logger.info(
f"Stopped early and filled replay buffer to size: {replay_buffer.size}."
) | 3,701 |
def configure_logging():
"""
Configure logging for pandoc subprocess.
This is for use when passing Python filter modules to pandoc using
the --filter option.
"""
format_string = ("%s: %%(name)s: [%%(levelname)s] %%(message)s" %
__name__)
logging.basicConfig(format=format_string, level=logging.INFO)
log.debug("Debug logging enabled.") | 3,702 |
def package_conda_env(folder: Union[str, Path]) -> Path:
"""Creates a .rar file of the current conda environment for use in jobs.
An existing archive with the same name is removed and recreated.
Parameters
----------
folder: str/Path
folder where the environment must be dumped
Returns
-------
Path
Path of the created .tar file
"""
# TODO(lowik): could be faster to create tar locally, then copy it
folder = Path(folder).expanduser().absolute()
env_key = "CONDA_DEFAULT_ENV"
if env_key not in os.environ:
raise RuntimeError(
"This executor requires to be executed from a conda environment. Check out README for help."
)
name = os.environ[env_key]
env_path = Path(os.environ["CONDA_PREFIX"])
_check_python_inside(env_path)
tarred_env = (folder / name).with_suffix(".tar")
if tarred_env.exists():
os.remove(str(tarred_env))
output = shutil.make_archive(str(tarred_env.with_suffix("")), "tar", str(env_path))
return Path(output) | 3,703 |
def get_couchbase_superuser_password(manager, plaintext: bool = True) -> str:
"""Get Couchbase superuser's password from file (default to
``/etc/gluu/conf/couchbase_superuser_password``).
To change the location, simply pass ``GLUU_COUCHBASE_SUPERUSER_PASSWORD_FILE`` environment variable.
:params manager: An instance of :class:`~pygluu.containerlib.manager._Manager`.
:params plaintext: Whether to return plaintext or encoded password.
:returns: Plaintext or encoded password.
"""
password_file = os.environ.get(
"GLUU_COUCHBASE_SUPERUSER_PASSWORD_FILE", "/etc/gluu/conf/couchbase_superuser_password"
)
with open(password_file) as f:
password = f.read().strip()
if not plaintext:
password = encode_text(password, manager.secret.get("encoded_salt")).decode()
return password | 3,704 |
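# Hedged usage sketch for get_couchbase_superuser_password() above: it writes a throwaway
# password file, points GLUU_COUCHBASE_SUPERUSER_PASSWORD_FILE at it, and reads it back in
# plaintext mode (manager is only consulted when plaintext=False, so None suffices here).
# The temporary path is illustrative only.
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("s3cret\n")
os.environ["GLUU_COUCHBASE_SUPERUSER_PASSWORD_FILE"] = tmp.name
assert get_couchbase_superuser_password(manager=None, plaintext=True) == "s3cret"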
def GetAvailableDialogs():
"""Returns available dialogs in a list"""
list_path = sys.path
found = 0
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/dialogs"):
found = 1
break
if found == 0:
print("Could not find /dialogs directory!")
raise IOError
list_dlg = os.listdir(list_path[i]+"/dialogs")
remove = []
for i in range(len(list_dlg)):
list_dlg[i] = "/dialogs/"+list_dlg[i]
if not list_dlg[i].endswith(".csv") and not list_dlg[i].endswith(".dlg"):
remove.append(i)
## remove entries that are not .csv or .dlg files
remove.reverse()
for i in remove:
list_dlg.pop(i)
return list_dlg | 3,705 |
def convert_rgb2gray(image, convert_dic):
"""convert rgb image to grayscale
Parameters
----------
image: array
RGB image. Channel order should be RGB.
convert_dic: dict
dictionary key is str(rgb list), value is grayscale value
Returns
-------
image_gray: array
Grayscale image
"""
image_r = image[:, :, 0]
image_g = image[:, :, 1]
image_b = image[:, :, 2]
im_shape = image_r.shape
image_gray = np.zeros(im_shape)
for i in range(im_shape[0]):
for j in range(im_shape[1]):
image_gray[i, j] = convert_dic[str([image_r[i, j], image_g[i, j], image_b[i, j]])]
return image_gray | 3,706 |
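# Hedged usage sketch for convert_rgb2gray() above (assumes numpy is imported as np in the
# original module). The mapping keys must match the exact str([r, g, b]) strings the
# function builds from the image's own scalar types, so convert_dic is derived from the
# image itself; the 0.299/0.587/0.114 luminance weights are only an illustrative choice.
import numpy as np

image = np.array([[[255, 0, 0], [0, 255, 0]],
                  [[0, 0, 255], [255, 255, 255]]], dtype=np.uint8)
convert_dic = {}
for i in range(image.shape[0]):
    for j in range(image.shape[1]):
        r, g, b = image[i, j, 0], image[i, j, 1], image[i, j, 2]
        convert_dic[str([r, g, b])] = int(0.299 * int(r) + 0.587 * int(g) + 0.114 * int(b))
gray = convert_rgb2gray(image, convert_dic)  # 2x2 array of the mapped grayscale values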
def get_external_storage_path():
"""Returns the external storage path for the current app."""
return _external_storage_path | 3,707 |
def organize_stanford_data(path=None, clear_previous_afq=False):
"""
If necessary, downloads the Stanford HARDI dataset into DIPY directory and
creates a BIDS compliant file-system structure in AFQ data directory:
~/AFQ_data/
└── stanford_hardi
├── dataset_description.json
└── derivatives
├── freesurfer
│ ├── dataset_description.json
│ └── sub-01
│ └── ses-01
│ └── anat
│ ├── sub-01_ses-01_T1w.nii.gz
│ └── sub-01_ses-01_seg.nii.gz
└── vistasoft
├── dataset_description.json
└── sub-01
└── ses-01
└── dwi
├── sub-01_ses-01_dwi.bval
├── sub-01_ses-01_dwi.bvec
└── sub-01_ses-01_dwi.nii.gz
If clear_previous_afq is True and there is an afq folder in derivatives,
it will be removed.
"""
logger = logging.getLogger('AFQ.data')
# fetches data for first subject and session
logger.info('fetching Stanford HARDI data')
dpd.fetch_stanford_hardi()
if path is None:
if not op.exists(afq_home):
logger.info(f'creating AFQ home directory: {afq_home}')
os.makedirs(afq_home, exist_ok=True)
path = afq_home
bids_path = op.join(path, 'stanford_hardi',)
derivatives_path = op.join(bids_path, 'derivatives')
dmriprep_folder = op.join(derivatives_path, 'vistasoft')
freesurfer_folder = op.join(derivatives_path, 'freesurfer')
if clear_previous_afq:
afq_folder = op.join(derivatives_path, 'afq')
if op.exists(afq_folder):
shutil.rmtree(afq_folder)
if not op.exists(derivatives_path):
logger.info(f'creating derivatives directory: {derivatives_path}')
# anatomical data
anat_folder = op.join(freesurfer_folder, 'sub-01', 'ses-01', 'anat')
os.makedirs(anat_folder, exist_ok=True)
t1_img = dpd.read_stanford_t1()
nib.save(t1_img, op.join(anat_folder, 'sub-01_ses-01_T1w.nii.gz'))
seg_img = dpd.read_stanford_labels()[-1]
nib.save(seg_img, op.join(anat_folder,
'sub-01_ses-01_seg.nii.gz'))
# diffusion-weighted imaging data
dwi_folder = op.join(dmriprep_folder, 'sub-01', 'ses-01', 'dwi')
os.makedirs(dwi_folder, exist_ok=True)
dwi_img, gtab = dpd.read_stanford_hardi()
nib.save(dwi_img, op.join(dwi_folder, 'sub-01_ses-01_dwi.nii.gz'))
np.savetxt(op.join(dwi_folder, 'sub-01_ses-01_dwi.bvec'), gtab.bvecs)
np.savetxt(op.join(dwi_folder, 'sub-01_ses-01_dwi.bval'), gtab.bvals)
else:
logger.info('Dataset is already in place. If you want to fetch it '
+ 'again please first remove the folder '
+ derivatives_path)
# Dump out the description of the dataset
to_bids_description(bids_path,
**{"Name": "Stanford HARDI", "Subjects": ["sub-01"]})
# And descriptions of the pipelines in the derivatives:
to_bids_description(dmriprep_folder,
**{"Name": "Stanford HARDI",
"PipelineDescription": {"Name": "vistasoft"}})
to_bids_description(freesurfer_folder,
**{"Name": "Stanford HARDI",
"PipelineDescription": {"Name": "freesurfer"}}) | 3,708 |
def make_and_print_table(datas):
"""
Generates a contamination table:
columns are reference genomes, and rows are samples.
"""
sample_names = {}
header = ['sample_id']
with open(datas[0]) as data_file:
for line in data_file:
header.append(line.split()[0])
for data_f in datas:
sample_name = data_f.split('.')[0]
sample_names[sample_name] = []
with open(data_f) as data_file:
for line in data_file:
float_f = line.split()[1]
sample_names[sample_name].append(float_f)
print('\t'.join(header))
for sample, line in sample_names.items():
print(sample+'\t'+'\t'.join(line)) | 3,709 |
def at_server_start():
"""
This is called every time the server starts up, regardless of
how it was shut down.
"""
# load data
from muddery.server.database.worlddata.worlddata import WorldData
WorldData.reload()
# reset settings
from muddery.server.utils.game_settings import GAME_SETTINGS
GAME_SETTINGS.reset()
# reload local strings
from muddery.server.utils.localized_strings_handler import LOCALIZED_STRINGS_HANDLER
LOCALIZED_STRINGS_HANDLER.reload()
# clear dialogues
from muddery.server.utils.dialogue_handler import DIALOGUE_HANDLER
DIALOGUE_HANDLER.clear()
# reload equipment types
from muddery.server.utils.equip_type_handler import EQUIP_TYPE_HANDLER
EQUIP_TYPE_HANDLER.reload()
# localize model fields
from muddery.server.utils.localiztion_handler import localize_model_fields
localize_model_fields()
# load condition descriptions
from muddery.server.utils.desc_handler import DESC_HANDLER
DESC_HANDLER.reload()
# load honours
from muddery.server.database.gamedata.honours_mapper import HONOURS_MAPPER
HONOURS_MAPPER.reload()
# create the world
try:
from muddery.server.server import Server
Server.create_the_world()
print("The world has been created.")
except Exception as e:
traceback.print_exc() | 3,710 |
def test_elem_o004_elem_o004_v(mode, save_output, output_format):
"""
TEST :3.3.2 XML Representation of Element Declaration Schema
Components : Document with element with complexType
"""
assert_bindings(
schema="msData/element/elemO004.xsd",
instance="msData/element/elemO004.xml",
class_name="Root",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 3,711 |
def blur(old_img):
"""
:param old_img: a original image
:return: a blurred image
"""
blur_img = SimpleImage.blank(old_img.width, old_img.height)
for x in range(old_img.width):
for y in range(old_img.height):
if x == 0 and y == 0: # Upper left corner
old_pixel_00 = old_img.get_pixel(x, y) # Reference point
old_pixel_s = old_img.get_pixel(x, y + 1) # South
old_pixel_e = old_img.get_pixel(x + 1, y) # East
old_pixel_se = old_img.get_pixel(x + 1, y + 1) # Southeast
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_00.red + old_pixel_s.red + old_pixel_e.red + old_pixel_se.red) // 4
blur_pixel.green = (old_pixel_00.green + old_pixel_s.green + old_pixel_e.green + old_pixel_se.green) \
// 4
blur_pixel.blue = (old_pixel_00.blue + old_pixel_s.blue + old_pixel_e.blue + old_pixel_se.blue) // 4
elif x == 0 and y == old_img.height - 1: # Bottom left
old_pixel_0h = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1) # North
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1) # Northeast
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_0h.red + old_pixel_n.red + old_pixel_e.red + old_pixel_ne.red) // 4
blur_pixel.green = (old_pixel_0h.green + old_pixel_n.green + old_pixel_e.green + old_pixel_ne.green) \
// 4
blur_pixel.blue = (old_pixel_0h.blue + old_pixel_n.blue + old_pixel_e.blue + old_pixel_ne.blue) // 4
elif x == old_img.width - 1 and y == 0: # Upper right corner
old_pixel_w0 = old_img.get_pixel(x, y)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_w = old_img.get_pixel(x - 1, y) # West
old_pixel_sw = old_img.get_pixel(x - 1, y + 1) # Southwest
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_w0.red + old_pixel_s.red + old_pixel_w.red + old_pixel_sw.red) // 4
blur_pixel.green = (old_pixel_w0.green + old_pixel_s.green + old_pixel_w.green + old_pixel_sw.green) \
// 4
blur_pixel.blue = (old_pixel_w0.blue + old_pixel_s.blue + old_pixel_w.blue + old_pixel_sw.blue) // 4
elif x == old_img.width - 1 and y == old_img.height - 1: # Bottom right corner
old_pixel_wh = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1) # Northwest
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_wh.red + old_pixel_n.red + old_pixel_w.red + old_pixel_nw.red) // 4
blur_pixel.green = (old_pixel_wh.green + old_pixel_n.green + old_pixel_w.green + old_pixel_nw.green) \
// 4
blur_pixel.blue = (old_pixel_wh.blue + old_pixel_n.blue + old_pixel_w.blue + old_pixel_nw.blue) // 4
elif x == 0 and y != 0 and y != old_img.height - 1: # Left side except for head and tail
old_pixel_0y = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_0y.red + old_pixel_n.red + old_pixel_s.red + old_pixel_ne.red +
old_pixel_e.red + old_pixel_se.red) // 6
blur_pixel.green = (old_pixel_0y.green + old_pixel_n.green + old_pixel_s.green + old_pixel_ne.green +
old_pixel_e.green + old_pixel_se.green) // 6
blur_pixel.blue = (old_pixel_0y.blue + old_pixel_n.blue + old_pixel_s.blue + old_pixel_ne.blue +
old_pixel_e.blue + old_pixel_se.blue) // 6
elif y == 0 and x != 0 and x != old_img.width - 1: # Top except for head and tail
old_pixel_x0 = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_x0.red + old_pixel_w.red + old_pixel_s.red + old_pixel_sw.red +
old_pixel_e.red + old_pixel_se.red) // 6
blur_pixel.green = (old_pixel_x0.green + old_pixel_w.green + old_pixel_s.green + old_pixel_sw.green +
old_pixel_e.green + old_pixel_se.green) // 6
blur_pixel.blue = (old_pixel_x0.blue + old_pixel_w.blue + old_pixel_s.blue + old_pixel_sw.blue +
old_pixel_e.blue + old_pixel_se.blue) // 6
elif x == old_img.width - 1 and y != 0 and y != old_img.height - 1: # right side except for head and tail
old_pixel_wy = old_img.get_pixel(x, y)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_wy.red + old_pixel_n.red + old_pixel_s.red + old_pixel_nw.red +
old_pixel_w.red + old_pixel_sw.red) // 6
blur_pixel.green = (old_pixel_wy.green + old_pixel_n.green + old_pixel_s.green + old_pixel_nw.green +
old_pixel_w.green + old_pixel_sw.green) // 6
blur_pixel.blue = (old_pixel_wy.blue + old_pixel_n.blue + old_pixel_s.blue + old_pixel_nw.blue +
old_pixel_w.blue + old_pixel_sw.blue) // 6
elif y == old_img.height - 1 and x != 0 and x != old_img.width - 1: # Bottom except for head and tail
old_pixel_xh = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_xh.red + old_pixel_w.red + old_pixel_nw.red + old_pixel_n.red +
old_pixel_e.red + old_pixel_ne.red) // 6
blur_pixel.green = (old_pixel_xh.green + old_pixel_w.green + old_pixel_nw.green + old_pixel_n.green +
old_pixel_e.green + old_pixel_ne.green) // 6
blur_pixel.blue = (old_pixel_xh.blue + old_pixel_w.blue + old_pixel_nw.blue + old_pixel_n.blue +
old_pixel_e.blue + old_pixel_ne.blue) // 6
else: # middle parts having 8 neighbors
old_pixel_xy = old_img.get_pixel(x, y)
old_pixel_w = old_img.get_pixel(x - 1, y)
old_pixel_nw = old_img.get_pixel(x - 1, y - 1)
old_pixel_n = old_img.get_pixel(x, y - 1)
old_pixel_ne = old_img.get_pixel(x + 1, y - 1)
old_pixel_s = old_img.get_pixel(x, y + 1)
old_pixel_sw = old_img.get_pixel(x - 1, y + 1)
old_pixel_e = old_img.get_pixel(x + 1, y)
old_pixel_se = old_img.get_pixel(x + 1, y + 1)
blur_pixel = blur_img.get_pixel(x, y)
blur_pixel.red = (old_pixel_xy.red + old_pixel_w.red + old_pixel_nw.red + old_pixel_n.red +
old_pixel_e.red + old_pixel_ne.red + old_pixel_s.red + old_pixel_sw.red +
old_pixel_se.red) // 9
blur_pixel.green = (old_pixel_xy.green + old_pixel_w.green + old_pixel_nw.green + old_pixel_n.green +
old_pixel_e.green + old_pixel_ne.green + old_pixel_s.green + old_pixel_sw.green +
old_pixel_se.green) // 9
blur_pixel.blue = (old_pixel_xy.blue + old_pixel_w.blue + old_pixel_nw.blue + old_pixel_n.blue +
old_pixel_e.blue + old_pixel_ne.blue + old_pixel_s.blue + old_pixel_sw.blue +
old_pixel_se.blue) // 9
return blur_img | 3,712 |
def main():
"""Main training program."""
# Disable CuDNN.
torch.backends.cudnn.enabled = False
# Timer.
timers = Timers()
# Arguments.
args = get_args()
# if args.load_huggingface:
# args.make_vocab_size_divisible_by = 1
# Pytorch distributed.
initialize_distributed(args)
if torch.distributed.get_rank() == 0:
print('Pretrain GPT3 model')
print_args(args)
# Random seeds for reproducability.
set_random_seed(args.seed)
# Data stuff.
train_data, val_data, test_data, args.vocab_size, args.eod_token, tokenizer = get_train_val_test_data(args)
# Model, optimizer, and learning rate.
model, optimizer, lr_scheduler = setup_model_and_optimizer(args)
# Resume data loader if necessary.
if args.resume_dataloader:
if train_data is not None:
train_data.batch_sampler.start_iter = args.iteration % len(train_data)
print_rank_0(f"Resume train set from iteration {train_data.batch_sampler.start_iter}")
if val_data is not None:
start_iter_val = (args.train_iters // args.save_interval) * args.eval_interval
val_data.batch_sampler.start_iter = start_iter_val % len(val_data)
if train_data is not None:
train_data_iterator = iter(train_data)
else:
train_data_iterator = None
iteration = 0
if args.train_iters > 0:
if args.do_train:
iteration, skipped = train(model, optimizer,
lr_scheduler,
train_data_iterator,
val_data,
timers,
args,
tokenizer)
if args.do_valid:
prefix = 'the end of training for val data'
# val_loss, val_ppl
_ = evaluate_and_print_results(prefix, iter(val_data) if val_data else None,
model, args, timers, False)
if args.save and iteration != 0:
save_checkpoint(iteration, model, optimizer, lr_scheduler, args, deepspeed=DEEPSPEED_WRAP and args.deepspeed)
if args.do_test:
# Run on test data.
prefix = 'the end of training for test data'
evaluate_and_print_results(prefix, iter(test_data) if test_data else None,
model, args, timers, True) | 3,713 |
def user_mysqrt():
"""Calculate mysqrt() of a number and compare to the math.sqrt()."""
# Show initial message
print("Program calculates square root of a given number using "
"Newton's guess method and compares it to the Python's "
"math.sqrt() function.")
# Take user's input
x = int(input("Enter a number: "))
steps_str = input("Enter the number of steps <10>: ")
# Calculate the root
if steps_str:
mysqrt_val = mysqrt(x, int(steps_str))
else:
mysqrt_val = mysqrt(x)
# Present results
print("The square root by Newton's method is:", mysqrt_val)
print("This result is", mysqrt_val-math.sqrt(x), "bigger than "
"the math.sqrt() one.") | 3,714 |
def _log_validation_error(func_name, **kwargs):
"""
Handles logging of failed validations.
:param func_name: name of the validation function that failed
:param kwargs: optional context such as validation_value, row_number, column, column_value and Exception
:return: None
"""
logged_string = f'{func_name} - failed to meet this value : {kwargs.get("validation_value")}'
if kwargs.get("row_number"):
logged_string += f' - Row#: {kwargs["row_number"]}'
if kwargs.get("column"):
logged_string += f' - Column name: {kwargs["column"]}'
if kwargs.get("column_value"):
logged_string += f' - Column value: {kwargs["column_value"]}'
if kwargs.get("Exception"):
logged_string += f' - Exception: {kwargs["Exception"]}'
LOGGER.error(logged_string) | 3,715 |
def print_sentence(sentence):
"""Displays words in terminal.
Param: sentence(list of str)
"""
for word in sentence:
print(word, end=" ")
# move to next line in terminal
print("") | 3,716 |
def generate_table_row(log_file, ancestry, official_only, code):
""" Takes an imported log and ancestry and converts it into a properly formatted pandas table.
Keyword arguments:
log_file -- output from import_log()
ancestry -- a single ancestry code
official_only -- a boolean indicating if all fields should be imported
into the table, or only the official ones.
Returns:
dict of arguments: new values
"""
# verify that ancestry is correct
matches = [l for l in log_file if re.search('Searching for ancestry: ' + \
ancestry, l)]
if len(matches) == 0:
raise ValueError('ALERT: Incorrect ancestry passed in for code ' + code +
'. Passed in value: ' + ancestry)
dict_of_vals = {'ancestry': ancestry, 'phenotype_code': code}
nrow_orig = num_cols = None
for line in log_file:
nrow_orig = _parse_single_term(nrow_orig, 'Original number of rows: ([0-9]*)',
line, int)
num_cols = _parse_single_term(num_cols, 'Found ([0-9]*) ancestry specific columns:',
line, int)
dict_of_vals.update({'original_nrow': nrow_orig,
'ancestry_specific_ncols': num_cols})
if dict_of_vals['ancestry_specific_ncols'] != 0:
tf_boundary = [idx for idx, l in enumerate(log_file) if re.search('Now running LDSC in (vanilla|stratified) mode.',l)]
log_file_official = log_file[(tf_boundary[0]+1):(len(log_file)+1)]
log_file_unofficial = log_file[0:tf_boundary[0]]
if not official_only:
unofficial_dict = _parse_unofficial_log(log_file_unofficial)
dict_of_vals.update(unofficial_dict)
official_dict, error_str = _parse_official_log(log_file_official)
else:
if not official_only:
unofficial_dict = _parse_unofficial_log(log_file)
dict_of_vals.update(unofficial_dict)
official_dict, _ = _parse_official_log(log_file)
error_str = 'No ' + ancestry + '-specific columns found.'
dict_of_vals.update(official_dict)
if error_str is not None:
dict_of_vals.update({'missing_data_note': error_str})
return pd.DataFrame(dict_of_vals, index=[ancestry + ':' + code]) | 3,717 |
def read_preflib_file(filename, setsize=1, relative_setsize=None, use_weights=False):
"""Reads a single preflib file (soi, toi, soc or toc).
Parameters:
filename: str
Name of the preflib file.
setsize: int
Number of top-ranked candidates that voters approve.
In case of ties, more than `setsize` candidates are approved.
Parameter `setsize` is ignored if `relative_setsize` is used.
relative_setsize: float in (0, 1]
Indicates which proportion of candidates of the ranking
are approved (rounded up). In case of ties, more
candidates are approved.
E.g., if a voter ranks 10 candidates and `relative_setsize` is 0.75,
then the approval set contains the top 8 candidates.
use_weights: bool
If False, treat vote count in preflib file as the number of duplicate ballots,
i.e., the number of voters that have this approval set.
If True, treat vote count as weight and use this weight in class Voter.
Returns:
profile: abcvoting.preferences.Profile
Preference profile extracted from preflib file,
including names of candidates
"""
if setsize <= 0:
raise ValueError("Parameter setsize must be > 0")
if relative_setsize and (relative_setsize <= 0.0 or relative_setsize > 1.0):
raise ValueError("Parameter relative_setsize not in interval (0, 1]")
with open(filename, "r") as f:
line = f.readline()
num_cand = int(line.strip())
candidate_map = {}
for _ in range(num_cand):
parts = f.readline().strip().split(",")
candidate_map[int(parts[0].strip())] = ",".join(parts[1:]).strip()
parts = f.readline().split(",")
try:
voter_count, _, unique_orders = [int(p.strip()) for p in parts]
except ValueError:
raise PreflibException(
f"Number of voters ill specified ({str(parts)}), should be triple of integers"
)
approval_sets = []
lines = [line.strip() for line in f.readlines() if line.strip()]
if len(lines) != unique_orders:
raise PreflibException(
f"Expected {unique_orders} lines that specify voters in the input, "
f"encountered {len(lines)}"
)
for line in lines:
parts = line.split(",")
if len(parts) < 1:
continue
try:
count = int(parts[0])
except ValueError:
raise PreflibException(f"Each ranking must start with count/weight ({line})")
ranking = parts[1:] # ranking starts after count
if len(ranking) == 0:
raise PreflibException("Empty ranking: " + str(line))
if relative_setsize:
num_appr = int(ceil(len(ranking) * relative_setsize))
else:
num_appr = setsize
approval_set = _approval_set_from_preflib_datastructures(num_appr, ranking, candidate_map)
approval_sets.append((count, approval_set))
# normalize candidates to 0, 1, 2, ...
cand_names = []
normalize_map = {}
for cand in candidate_map.keys():
cand_names.append(candidate_map[cand])
normalize_map[cand] = len(cand_names) - 1
profile = Profile(num_cand, cand_names=cand_names)
for count, approval_set in approval_sets:
normalized_approval_set = []
for cand in approval_set:
normalized_approval_set.append(normalize_map[cand])
if use_weights:
profile.add_voter(Voter(normalized_approval_set, weight=count))
else:
profile.add_voters([normalized_approval_set] * count)
if use_weights:
if len(profile) != unique_orders:
raise PreflibException("Number of voters wrongly specified in preflib file.")
else:
if len(profile) != voter_count:
raise PreflibException("Number of voters wrongly specified in preflib file.")
return profile | 3,718 |
def get_row(client, instance, file_=None):
"""Get one row of a family table.
Args:
client (obj):
creopyson Client.
instance (str):
Instance name.
`file_` (str, optional):
File name. Defaults is currently active model.
Returns:
(dict):
colid (str):
Column ID.
value (depends on datatype):
Cell value.
datatype (str):
Data type.
coltype (str):
Column Type; a string corresponding to the Creo column type.
"""
data = {"instance": instance}
if file_ is not None:
data["file"] = file_
else:
active_file = client.file_get_active()
if active_file:
data["file"] = active_file["file"]
return client._creoson_post("familytable", "get_row", data, "columns") | 3,719 |
def callback_red(*args):
""" red slider event handler """
global red_int
col = "red"
str_val = str(r_slide_val.get())
red_int = code_shrtn(str_val, 20, 30, 60, 80, col)
update_display(red_int, green_int, blue_int) | 3,720 |
def expdb_exp(batch=False,batch_size=10):
"""
Extract CVE exploit labels from exploit-db.com to fill in missing data.
Update strategy: incremental overwrite (replace INTO).
"""
create_table(db='exp2',table='expdb',key=['edb_id','cve_id','author','type','platform','date'],primary_key='edb_id')
so,exist_eid=cve_exists(db='exp2',table='expdb',key='edb_id')
all_eid=expdb_exists()
all_eid=[str(i) for i in all_eid]
add_eid=list(set(all_eid)^set(exist_eid))
print(add_eid)
j=0
cve_exp=dict()
for eid in add_eid:
try:
exp_values=expdb_parser(eid)
if exp_values:
cve_exp[eid]=tuple(exp_values.values())
print(cve_exp)
j=j+1
if batch:
if j%batch_size==0: ### todo
sql='replace INTO expdb (edb_id, cve_id, author, type, platform, date) VALUES (?, ?, ?, ?, ?, ?)'
so.executemany(sql,cve_exp.values())
else:
sql='replace INTO expdb (edb_id, cve_id, author, type, platform, date) VALUES (?, ?, ?, ?, ?, ?)'
so.execute(sql,tuple(exp_values.values()))
else:
print("[!] exp-values Not Found")
except Exception as e:
print("[!] DOWNLOAD ERROR %s error:%s" %(eid,repr(e)))
so.close() | 3,721 |
def hospitalization_to_removed(clip_low=2, clip_high=32.6, mean=8.6, std=6.7):
"""
Returns the time for someone to either get removed after being
hospitalized in days within range(clip_low, clip_high),
of a truncated_norm(mean, std).
"""
return sample_truncated_norm(clip_low, clip_high, mean, std) | 3,722 |
def test_inheritance():
"""
test inheritance from different module
"""
# test module
test_data = doc.MatObject.matlabify('test_data')
test_submodule = test_data.getter('test_submodule')
sfdm = test_submodule.getter('super_from_diff_mod')
ok_(isinstance(sfdm, doc.MatClass))
eq_(sfdm.bases,['MyAbstractClass', 'MyHandleClass'])
bases = sfdm.getter('__bases__')
eq_(bases['MyAbstractClass'].module, 'test_data')
eq_(bases['MyHandleClass'].module, 'test_data')
return sfdm | 3,723 |
def test_create():
"""
Test creating a row in the Project table.
"""
project = models.Project.create(DummyLoader())
# Check columns populated by ORM
assert project.id is not None
assert project.createdAt is not None
assert project.finished is None # None until project is done
assert project.frame is not None
assert project.channel is not None
assert project.feature is not None
# Check relationships
assert project.labels is not None
assert project.raw_frames is not None
assert project.label_frames is not None
raw_frames = project.raw_frames
raw_frame = raw_frames[0].frame
label_frames = project.label_frames
label_frame = label_frames[0].frame
assert raw_frame.shape[-1] == project.num_channels
assert label_frame.shape[-1] == project.num_features
assert len(raw_frames) == project.num_frames
assert raw_frame.shape[0] == project.height
assert raw_frame.shape[1] == project.width
# Check we've assigned a token
assert project.token is not None
# Test that an action has been initialized
assert project.action is not None
assert project.num_actions == 1 | 3,724 |
def split_mon_unmon(data, labels):
"""
Splits into monitored and unmonitored data
If a data point only happens once, we also consider it unmonitored
@return monitored_data, monitored_label, unmonitored_data
"""
from collections import Counter
occurence = Counter(labels)
monitored_data, unmonitored_data = [], []
monitored_label = []
for d, l in zip(data, labels):
if l == UNKNOWN_WEBPAGE or occurence[l] == 1:
unmonitored_data.append(d)
else:
monitored_data.append(d)
monitored_label.append(l)
return monitored_data, monitored_label, unmonitored_data | 3,725 |
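# Hedged usage sketch for split_mon_unmon() above. UNKNOWN_WEBPAGE is a module-level
# constant in the original code; -1 is only an assumed placeholder for it here.
UNKNOWN_WEBPAGE = -1

traces = [[1, 2], [3, 4], [5, 6], [7, 8]]
labels = [10, 10, UNKNOWN_WEBPAGE, 42]  # label 42 occurs only once -> treated as unmonitored
mon_x, mon_y, unmon_x = split_mon_unmon(traces, labels)
assert mon_x == [[1, 2], [3, 4]] and mon_y == [10, 10]
assert unmon_x == [[5, 6], [7, 8]]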
def test_template_filedates_now(setlocale):
"""Test {now}"""
autofile.plugins.templates.filedates.TODAY = None
template = FileTemplate(PHOTO_FILE)
rendered = template.render("{now}", options=RenderOptions())
assert rendered == ["2021-10-29T05:39:00.012345-07:00"]
rendered = template.render("{now}", options=RenderOptions())
assert rendered == ["2021-10-29T05:39:00.012345-07:00"] | 3,726 |
def test_profiles_manager_filter_method_empty():
""" Should filter if profile manager is None. """
mocked_api = MagicMock()
mocked_api.get.return_value = [{"a": "b"}, {"a": "c"}]
profiles = Profiles(api=mocked_api)
assert profiles.filter(a="b") == [Profile(mocked_api, {"a": "b"})] | 3,727 |
def supress_stdout(func):
"""Wrapper, makes a function non-verbose.
Args:
func: function to be silenced
"""
import contextlib
import os
def wrapper(*a, **ka):
with open(os.devnull, "w") as devnull:
with contextlib.redirect_stdout(devnull):
return func(*a, **ka)  # forward the wrapped function's return value
return wrapper | 3,728 |
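# Hedged usage sketch for supress_stdout() above: chatty() normally prints, but nothing
# reaches stdout once it is wrapped.
def chatty(n):
    print("computing...")
    return n * 2

quiet = supress_stdout(chatty)
quiet(21)  # runs silently; the print output is redirected to os.devnull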
def sum_and_count(x, y):
"""A function used for calculating the mean of a list from a reduce.
>>> from operator import truediv
>>> l = [15, 18, 2, 36, 12, 78, 5, 6, 9]
>>> truediv(*reduce(sum_and_count, l)) == 20.11111111111111
True
>>> truediv(*fpartial(sum_and_count)(l)) == 20.11111111111111
True
"""
try:
return (x[0] + y, x[1] + 1)
except TypeError:
return ((x or 0) + (y or 0), len([i for i in [x, y] if i is not None])) | 3,729 |
def animTempCustom():
"""
Temporarily play a custom animation for a set amount of time.
API should expect a full `desc` object in json alongside a timelimit, in ms.
"""
colorList = request.form.get('colors').split(',')
colorsString = ""
for colorName in colorList:
c = Color(colorName)
colorsString += "[{},{},{}],".format(
int(255*c.red), int(255*c.green), int(255*c.blue)
)
colorsString = colorsString[0:-1]
print(colorsString)
colors = colorsString
bp.newAnim(
'$bpa.strip.Twinkle',
colors
)
return "Animation animation set to RGB!" | 3,730 |
def datatype(self, dt):
"""Set data type of histogram variable.
Set data type of the variable represented by the histogram.
:param type dt: type of the variable represented by the histogram
:raises RuntimeError: if the datatype has already been set; it will not be overwritten
"""
if hasattr(self, "_datatype"):
raise RuntimeError("datatype already set")
self._datatype = dt | 3,731 |
def mock_stripe_invoice(monkeypatch):
"""Fixture to monkeypatch stripe.Invoice.* methods"""
mock = Mock()
monkeypatch.setattr(stripe, "Invoice", mock)
return mock | 3,732 |
def read_project(output_dir):
"""Read existing project data
"""
try:
yaml = YAML()
with open(project_yaml_file(output_dir), encoding='utf-8') as project:
project_data = yaml.load(project)
for key, value in project_data.items():
if value is None:
project_data[key] = []
except FileNotFoundError:
project_data = {
'name': "Test Project",
'scenario_sets': [],
'narrative_sets': [],
'region_definitions': [],
'interval_definitions': [],
'units': [],
'scenarios': [],
'narratives': []
}
return project_data | 3,733 |
def test_create_as_anonymous(default_client, weekly_business_post_data):
"""An anonymous user should not be able to create weekly_business."""
response = default_client.post(
_get_weekly_business_url(), weekly_business_post_data
)
assert response.status_code == status.HTTP_403_FORBIDDEN | 3,734 |
def process_data():
"""INPUT: chunk
OUTPUT: Dictionary of Tuples
"""
pass | 3,735 |
def test_bool():
"""Check bool(Property)."""
assert bool(Property('Name', '')) is False
assert bool(Property('Name', 'value')) is True
assert bool(Property('Name', [])) is False
assert bool(Property('Name', [
Property('Key', 'Value')
])) is True | 3,736 |
def bytes_to_int(byte_array: bytes) -> int:
""" Bytes to int """
return int.from_bytes(byte_array, byteorder='big') | 3,737 |
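# Quick illustrative checks for bytes_to_int() above: big-endian byte order.
assert bytes_to_int(b"\x01\x00") == 256
assert bytes_to_int(b"\xff") == 255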
def ask(query, default=None):
"""Ask a question."""
if default:
default_q = ' [{0}]'.format(default)
else:
default_q = ''
inp = input("{query}{default_q}: ".format(query=query, default_q=default_q)).strip()
if inp or default is None:
return inp
else:
return default | 3,738 |
def _clean_kwargs(keep_name=False, **kwargs):
"""
Sanitize the arguments for use with shade
"""
if "name" in kwargs and not keep_name:
kwargs["name_or_id"] = kwargs.pop("name")
return __utils__["args.clean_kwargs"](**kwargs) | 3,739 |
def transfer_file(user, source_endpoint, source_path,
dest_endpoint, dest_path, label):
"""
:param user: Must be a Django user with permissions to initiate the
transfer
:param source_endpoint: Source Endpoint UUID
:param source_path: Source path, including the filename
:param dest_endpoint: Destination Endpoint UUID
:param dest_path: Destination path, including the filename
:param label: Label to use for the transfer
:return: A globus SDK task object.
"""
log.debug('transferring {}:{} to {}'.format(source_endpoint, source_path,
dest_endpoint))
tc = load_transfer_client(user)
tdata = globus_sdk.TransferData(tc, source_endpoint, dest_endpoint,
label=label, sync_level="checksum")
tdata.add_item(source_path,
os.path.join(dest_path, os.path.basename(source_path))
)
return tc.submit_transfer(tdata) | 3,740 |
def calculate_pnl_per_equity(df_list):
"""Method that calculate the P&L of the strategy per equity and returns a list of P&L"""
pnl_per_equity = [] # initialize the list of P&L per equity
for df in df_list: # iterates over the dataframes of equities
pnl = df['Strategy Equity'].iloc[-1] - df['Buy and Hold Equity'].iloc[-1] # calculating the difference at the last point
pnl_per_equity.append(pnl)
return pnl_per_equity | 3,741 |
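# Hedged usage sketch for calculate_pnl_per_equity() above, with two toy equity curves
# using the two column names the function expects.
import pandas as pd

df_a = pd.DataFrame({"Strategy Equity": [100, 110, 125], "Buy and Hold Equity": [100, 105, 115]})
df_b = pd.DataFrame({"Strategy Equity": [100, 95, 90], "Buy and Hold Equity": [100, 102, 101]})
assert calculate_pnl_per_equity([df_a, df_b]) == [10, -11]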
def search(q_str: str) -> dict:
"""search in genius
Args:
q_str (str): query string
Returns:
dict: search response
"""
data = {'songs': [], 'lyric': []}
response = http.get(
'https://genius.com/api/search/multi?per_page=5', params={'q': q_str}, headers=headers).json()
sections = response['response']['sections']
if len(sections[1]['hits']) == 0 and len(sections[2]['hits']) == 0:
return False
for section in response['response']['sections'][1:3]:
if section['type'] == 'song':
for song in section['hits']:
music = song['result']
# print(music)
if len(data['songs']) == 0:
data['songs'].append(dict_builder(music))
if data['songs'][-1]['api_path'] != music['api_path']:
data['songs'].append(dict_builder(music))
elif section['type'] == 'lyric':
for lyric in section['hits']:
music = lyric['result']
if len(data['lyric']) == 0:
data['lyric'].append(dict_builder(music))
if data['lyric'][-1]['api_path'] != music['api_path']:
data['lyric'].append(dict_builder(music))
return data | 3,742 |
def build_shed_app(simple_kwargs):
"""Build a Galaxy app object from a simple keyword arguments.
Construct paste style complex dictionary. Also setup "global" reference
to sqlalchemy database context for tool shed database.
"""
log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
# TODO: Simplify global_conf to match Galaxy above...
simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample'
simple_kwargs['global_conf'] = get_webapp_global_conf()
app = ToolshedUniverseApplication(**simple_kwargs)
log.info("Embedded Toolshed application started")
global tool_shed_context
tool_shed_context = app.model.context
return app | 3,743 |
def geocode(level=None, names=None, countries=None, states=None, counties=None, scope=None) -> NamesGeocoder:
"""
Create a `Geocoder`. Allows to refine ambiguous request with `where()` method,
scope that limits area of geocoding or with parents.
Parameters
----------
level : {'country', 'state', 'county', 'city'}
The level of administrative division. Autodetection by default.
names : list or str
Names of objects to be geocoded.
For 'state' level: 'US-48' returns continental part of United States (48 states)
in a compact form.
countries : list
Parent countries. Should have same size as names. Can contain strings or `Geocoder` objects.
states : list
Parent states. Should have same size as names. Can contain strings or `Geocoder` objects.
counties : list
Parent counties. Should have same size as names. Can contain strings or `Geocoder` objects.
scope : str or `Geocoder`
Limits area of geocoding. If parent country is set then error will be generated.
If type is a string - geoobject should have geocoded scope in parents.
If type is a `Geocoder` - geoobject should have geocoded scope in parents.
Scope should contain only one entry.
Returns
-------
`NamesGeocoder`
Geocoder object specification.
Examples
--------
.. jupyter-execute::
:linenos:
:emphasize-lines: 5
from IPython.display import display
from lets_plot import *
from lets_plot.geo_data import *
LetsPlot.setup_html()
states = geocode('state').scope('Italy').get_boundaries(6)
display(states.head())
ggplot() + geom_map(data=states)
|
.. jupyter-execute::
:linenos:
:emphasize-lines: 5, 8
from IPython.display import display
from lets_plot import *
from lets_plot.geo_data import *
LetsPlot.setup_html()
states = geocode(level='state', scope='US').get_geocodes()
display(states.head())
names = ['York'] * len(states.state)
cities = geocode(names=names, states=states.state).ignore_not_found().get_centroids()
display(cities.head())
ggplot() + \\
geom_livemap() + \\
geom_point(data=cities, tooltips=layer_tooltips().line('@{found name}'))
"""
return NamesGeocoder(level, names) \
.scope(scope) \
.countries(countries) \
.states(states) \
.counties(counties) | 3,744 |
def fpAbs(x):
"""
Returns the absolute value of the floating point `x`. So:
a = FPV(-3.2, FSORT_DOUBLE)
b = fpAbs(a)
b is FPV(3.2, FSORT_DOUBLE)
"""
return abs(x) | 3,745 |
def volumes(container:str) -> list:
"""
Return list of 'container' volumes (host,cont)
"""
buf = StringIO()
_exec(
docker, 'inspect', '-f', "'{{json .Mounts}}'", container, _out=buf
)
res = buf.getvalue().strip()
vols_list = json.loads(res[1:-1])
# vols = {d['Source']:d['Destination'] for d in vols_list}
vols = [(d['Source'],d['Destination']) for d in vols_list]
return vols | 3,746 |
def test_launch_lsf_orc(fileutils, wlmutils):
"""test single node orchestrator"""
exp_name = "test-launch-lsf-orc-batch"
exp = Experiment(exp_name, launcher="lsf")
test_dir = fileutils.make_test_dir(exp_name)
# batch = False to launch on existing allocation
network_interface = wlmutils.get_test_interface()
orc = LSFOrchestrator(
6780,
batch=True,
project=wlmutils.get_test_account(),
interface=network_interface,
time="00:05",
smts=1,
)
orc.set_path(test_dir)
exp.start(orc, block=True)
status = exp.get_status(orc)
# don't use assert so that we don't leave an orphan process
if constants.STATUS_FAILED in status:
exp.stop(orc)
assert False
exp.stop(orc)
status = exp.get_status(orc)
assert all([stat == constants.STATUS_CANCELLED for stat in status]) | 3,747 |
def names():
"""Return stock summary information"""
helper = SQLHelper()
conn = helper.getConnection()
repo = robinhoodRepository(conn)
stockInfo = repo.getAllStocks()
return json_response(stockInfo, 200) | 3,748 |
def test_delpay_payment_split(node_factory, bitcoind):
"""
Test behavior of delpay with an MPP
"""
MPP_TARGET_SIZE = 10**7  # Taken from libplugin-pay.c
amt = 5 * MPP_TARGET_SIZE
l1, l2, l3 = node_factory.line_graph(3, fundamount=10**5,
wait_for_announce=True)
inv = l3.rpc.invoice(amt, 'lbl', 'desc')
l1.rpc.pay(inv['bolt11'])
assert len(l1.rpc.listpays()['pays']) == 1
delpay_result = l1.rpc.delpay(inv['payment_hash'], 'complete')['payments']
assert len(delpay_result) >= 5
assert len(l1.rpc.listpays()['pays']) == 0 | 3,749 |
def get_bot_group_config(bot_id):
"""Returns BotGroupConfig for a bot with given ID.
Returns:
BotGroupConfig or None if not found.
Raises:
BadConfigError if there's no cached config and the current config at HEAD is
not passing validation.
"""
cfg = _fetch_bot_groups()
gr = cfg.direct_matches.get(bot_id)
if gr is not None:
return gr
for prefix, gr in cfg.prefix_matches:
if bot_id.startswith(prefix):
return gr
return cfg.default_group | 3,750 |
def add_utm(url_, campaign, source='notification', medium='email'):
"""Add the utm_* tracking parameters to a URL."""
return urlparams(
url_, utm_campaign=campaign, utm_source=source, utm_medium=medium) | 3,751 |
def check_job_token_attributes(token):
"""Check that the given JOB token contains all required attributes."""
attribs = ["limit", "remaining", "reset"]
for attr in attribs:
assert attr in token
assert int(token[attr]) >= 0 | 3,752 |
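# Illustrative token shape for check_job_token_attributes() above; the field values are
# arbitrary but must be non-negative integers (or strings parseable as such).
check_job_token_attributes({"limit": 100, "remaining": "42", "reset": 1700000000})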
def is_forest(G):
"""Return True if the input graph is a forest
Parameters
----------
G : NetworkX Graph
An undirected graph.
Returns
-------
True if the input graph is a forest
Notes
-----
For undirected graphs only.
"""
for graph in nx.connected_component_subgraphs(G):
if not nx.is_tree(graph):
return False
return True | 3,753 |
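# Note: nx.connected_component_subgraphs() used by is_forest() above was removed in
# NetworkX 2.4, so that helper only runs on older releases. A minimal equivalent sketch
# for current NetworkX versions:
import networkx as nx

def is_forest_modern(G):
    """Return True if every connected component of the undirected graph G is a tree."""
    return all(nx.is_tree(G.subgraph(c)) for c in nx.connected_components(G))

assert is_forest_modern(nx.path_graph(4)) is True
assert is_forest_modern(nx.cycle_graph(3)) is False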
def package_to_pretty_string(package):
""" Given a PackageMetadata instance, returns a pretty string."""
template = "{0.name} {0.version}"
constraint_kinds = (
(ConstraintKinds.install_requires, package.install_requires),
(ConstraintKinds.conflicts, package.conflicts),
(ConstraintKinds.provides, package.provides),
)
for constraint_kind, constraints in constraint_kinds:
# FIXME: perhaps 'provides' just shouldn't include the package name
if constraint_kind == ConstraintKinds.provides:
constraints = tuple((dist, disjunction)
for dist, disjunction in constraints
if dist != package.name)
if len(constraints) > 0:
string = ', '.join(constraints_to_pretty_strings(constraints))
template += "; {} ({})".format(constraint_kind.value, string)
return template.format(package) | 3,754 |
def parse_uri(uri):
""" This implies that we are passed a uri that looks something like:
proto://username:password@hostname:port/database
In most cases, you can omit the port and database from the string:
proto://username:password@hostname
Also, in cases with no username, you can omit that:
proto://:password@hostname:port/database
Also supports additional arguments:
proto://hostname:port/database?arg1=val&arg2=vals
:param str uri: URI to parse
:rtype: dict
:returns: Dictionary with parsed URL components
.. note::
This function may move, as the currently location may not
be optimal. Location will be finalized by 1.0.0 stable release.
"""
proto = uri.split('://')[0]
uri = uri.split('://')[1]
_host = uri.split('@')[-1]
_host = _host.split(':')
if len(_host) == 2:
host = _host[0]
if '/' in _host[1]:
port = int(_host[1].split('/')[0])
else:
port = int(_host[1])
else:
host = _host[0]
if '/' in host:
host = host.split('/')[0]
port = None
if "@" in uri:
_cred = uri[0:uri.rfind(':'.join(_host)) - 1]
_cred = _cred.split(':')
if len(_cred) == 2:
_user = _cred[0]
_pass = _cred[1]
else:
_user = _cred[0]
_pass = None
else:
_user = None
_pass = None
database = uri.split('/')
if len(database) >= 2:
database = database[1]
if '?' in database:
_db = database.split('?')
database = _db[0]
args = parse_qs(_db[1], keep_blank_values = True)
else:
args = None
else:
database = None
args = None
return {
"protocol": proto,
"resource": uri,
"host": host,
"port": port,
"username": _user,
"password": _pass,
"database": database,
"args": args,
"uri": "{}://{}".format(proto, uri),
} | 3,755 |
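# Hedged usage sketch for parse_uri() above (assumes parse_qs is imported from
# urllib.parse in the original module); the connection string is made up.
conn = parse_uri("mysql://user:secret@db.example.com:3306/orders?charset=utf8")
assert conn["host"] == "db.example.com" and conn["port"] == 3306
assert conn["username"] == "user" and conn["password"] == "secret"
assert conn["database"] == "orders" and conn["args"] == {"charset": ["utf8"]}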
def ITAparamsheet_itaparammatchinfo_data():
"""
Create ITA parameter extraction condition data (for normal-case tests).
"""
module = import_module('web_app.models.ITA_models')
ItaParameterMatchInfo = getattr(module, 'ItaParameterMatchInfo')
ItaParameterMatchInfo(
match_id = 999,
ita_driver_id = 999,
menu_id = 999,
parameter_name = 'パラメーター名',
order = 0,
conditional_name = '条件名',
extraction_method1 = '',
extraction_method2 = '',
last_update_timestamp = datetime.datetime.now(pytz.timezone('UTC')),
last_update_user = 'pytest'
).save(force_insert=True)
yield
ItaParameterMatchInfo.objects.filter(match_id=999).delete() | 3,756 |
def pwgen(pw_len=16):
"""Generate a random password with the given length.
Allowed chars does not have "I" or "O" or letters and
digits that look similar -- just to avoid confusion.
"""
return get_random_string(
pw_len, 'abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789'
) | 3,757 |
def absolute_path(secured_filename: str, curr_file: str = __file__) -> str:
"""
Prepend `secured_filename` with the current path.
Args:
secured_filename (str): Safe file name. Can be a sub path without the first '/'.
curr_file (str): File name of the module.
Returns:
str: String which contains the full path to ``secured_filename``.
"""
return os.path.join(os.path.dirname(os.path.realpath(curr_file)), secured_filename) | 3,758 |
def test_non_fourier_params_are_consistent():
"""
Check that StandardParams, StandardWithBiasParams and
ExtendedParams give the same rotation angles, given the same data"""
p1 = StandardParams.linear_ramp_from_hamiltonian(hamiltonian, 2, time=2)
p2 = ExtendedParams.linear_ramp_from_hamiltonian(hamiltonian, 2, time=2)
p3 = StandardWithBiasParams.linear_ramp_from_hamiltonian(hamiltonian,
2, time=2)
assert np.allclose(p1.x_rotation_angles, p2.x_rotation_angles)
assert np.allclose(p2.x_rotation_angles, p3.x_rotation_angles)
assert np.allclose(p1.z_rotation_angles, p2.z_rotation_angles)
assert np.allclose(p2.z_rotation_angles, p3.z_rotation_angles)
assert np.allclose(p1.zz_rotation_angles, p2.zz_rotation_angles)
assert np.allclose(p2.zz_rotation_angles, p3.zz_rotation_angles) | 3,759 |
def compute_tree_distances(tree):
"""
Computes the matrix of pairwise distances between leaves of the tree
"""
num_leaves = len(get_leaves(tree)) - 1
distances = np.zeros([num_leaves, num_leaves])
for leaf in range(num_leaves):
distance_dictionary, tmp = nx.multi_source_dijkstra(tree.to_undirected(), [leaf], weight = 'time')
for target_leaf in range(num_leaves):
distances[leaf, target_leaf] = distance_dictionary[target_leaf]
return distances | 3,760 |
def create_rndm_backgr_selections(annotations, files, length, num, no_overlap=False, trim_table=False):
""" Create background selections of uniform length, randomly distributed across the
data set and not overlapping with any annotations, including those labelled 0.
The random sampling is performed without regard to already created background
selections. Therefore, it is in principle possible that some of the created
selections will overlap, although in practice this will only occur with very
small probability, unless the number of requested selections (num) is very
large and/or the (annotation-free part of) the data set is small in size.
To avoid any overlap, set the 'no_overlap' to True, but note that this can
lead to longer execution times.
Args:
annotations: pandas DataFrame
Annotation table.
files: pandas DataFrame
Table with file durations in seconds.
Should contain columns named 'filename' and 'duration'.
length: float
Selection length in seconds.
num: int
Number of selections to be created.
no_overlap: bool
If True, randomly selected segments will have no overlap.
trim_table: bool
Keep only the columns prescribed by the Ketos annotation format.
Returns:
table_backgr: pandas DataFrame
Output selection table.
Example:
>>> import pandas as pd
>>> import numpy as np
>>> from ketos.data_handling.selection_table import select
>>>
>>> #Ensure reproducible results by fixing the random number generator seed.
>>> np.random.seed(3)
>>>
>>> #Load and inspect the annotations.
>>> df = pd.read_csv("ketos/tests/assets/annot_001.csv")
>>> print(df)
filename start end label
0 file1.wav 7.0 8.1 1
1 file1.wav 8.5 12.5 0
2 file1.wav 13.1 14.0 1
3 file2.wav 2.2 3.1 1
4 file2.wav 5.8 6.8 1
5 file2.wav 9.0 13.0 0
>>>
>>> #Standardize annotation table format
>>> df, label_dict = standardize(df, return_label_dict=True)
>>> print(df)
start end label
filename annot_id
file1.wav 0 7.0 8.1 2
1 8.5 12.5 1
2 13.1 14.0 2
file2.wav 0 2.2 3.1 2
1 5.8 6.8 2
2 9.0 13.0 1
>>>
>>> #Enter file durations into a pandas DataFrame
>>> file_dur = pd.DataFrame({'filename':['file1.wav','file2.wav','file3.wav',], 'duration':[18.,20.,15.]})
>>>
>>> #Create randomly sampled background selection with fixed 3.0-s length.
>>> df_bgr = create_rndm_backgr_selections(df, files=file_dur, length=3.0, num=12, trim_table=True)
>>> print(df_bgr.round(2))
start end label
filename sel_id
file1.wav 0 1.06 4.06 0
1 1.31 4.31 0
2 2.26 5.26 0
file2.wav 0 13.56 16.56 0
1 14.76 17.76 0
2 15.50 18.50 0
3 16.16 19.16 0
file3.wav 0 2.33 5.33 0
1 7.29 10.29 0
2 7.44 10.44 0
3 9.20 12.20 0
4 10.94 13.94 0
"""
# compute lengths, and discard segments shorter than requested length
c = files[['filename','duration']]
if 'offset' in files.columns: c['offset'] = files['offset']
else: c['offset'] = 0
c.reset_index(drop=True, inplace=True)
c['length'] = c['duration'] - length
c = c[c['length'] >= 0]
# cumulative length
cs = c['length'].cumsum().values.astype(float)
cs = np.concatenate(([0],cs))
# output
filename, start, end = [], [], []
# randomply sample
df = pd.DataFrame()
while (len(df) < num):
times = np.random.random_sample(num) * cs[-1]
for t in times:
idx = np.argmax(t < cs) - 1
row = c.iloc[idx]
fname = row['filename']
start = t - cs[idx] + row['offset']
end = start + length
q = query(annotations, filename=fname, start=start, end=end)
if len(q) > 0: continue
if no_overlap and len(df) > 0:
q = query(df.set_index(df.filename), filename=fname, start=start, end=end)
if len(q) > 0: continue
x = {'start':start, 'end':end}
y = files[files['filename']==fname].iloc[0].to_dict()
z = {**x, **y}
df = df.append(z, ignore_index=True)
if len(df) == num: break
# sort by filename and offset
df = df.sort_values(by=['filename','start'], axis=0, ascending=[True,True]).reset_index(drop=True)
# re-order columns
col_names = ['filename','start','end']
if not trim_table:
names = df.columns.values.tolist()
for name in col_names: names.remove(name)
col_names += names
df = df[col_names]
df['label'] = 0 #add label
# transform to multi-indexing
df = use_multi_indexing(df, 'sel_id')
return df | 3,761 |
def to_BED(stats, manifest_or_array_type, save=True, filename='', genome_build=None, columns=None):
"""Converts & exports manifest and probe p-value dataframe to BED format.
- https://en.wikipedia.org/wiki/BED_(file_format)
- BED format: [ chromosome number | start position | end position | p-values]
Where p-values are the output from diff_meth_pos() comparing probes across two or more
groups of samples for genomic differences in methylation.
This output is required for combined-pvalues library to read and annotate manhattan plots
with the nearest Gene(s) for each significant CpG cluster.
manifest_or_array_type:
either pass in a Manifest instance from methylprep, or a string that defines which
manifest to load. One of {'27k', '450k', 'epic', 'epic+', 'mouse'}.
genome_build:
pass in 'OLD' to use the older genome build for each respective manifest array type.
note: if manifest has probes that aren't mapped to genome, they are omitted in BED file.
TODO: incorporate STRAND and OLD_STRAND in calculations.
returns a BED formatted dataframe if save is False, or the saved filename if save is True.
"""
array_types = {'27k', '450k', 'epic', 'epic+', 'mouse'}
manifest = None
if isinstance(manifest_or_array_type, str) and manifest_or_array_type not in array_types:
raise ValueError(f"Specify array type as one of: {array_types}")
if isinstance(manifest_or_array_type, str) and manifest_or_array_type in array_types:
import methylprep
manifest = methylprep.Manifest(methylprep.ArrayType(manifest_or_array_type))
if not manifest and hasattr(manifest_or_array_type, 'data_frame'):
manifest = manifest_or_array_type
if not manifest:
raise ValueError("Either provide a manifest or specify array_type.")
if not isinstance(stats, pd.DataFrame):
raise TypeError("stats should be a dataframe with either a PValue or a FDR_QValue column")
if not isinstance(manifest.data_frame, pd.DataFrame):
raise AttributeError("Expected manifest_or_array_type to be a methylprep manifest with a data_frame attribute but this does not have one.")
if "FDR_QValue" in stats:
pval = stats['FDR_QValue']
elif "PValue" in stats:
pval = stats['PValue']
else:
raise IndexError("stats did not contain either a PValue or a FDR_QValue column.")
# an unfinished, internal, undocumented way to rename the output columns: pass a list of exactly 5 names in the same order.
if columns is None:
columns = ['chrom','chromStart','chromEnd','pvalue','name']
renamer = {}
else:
renamer = dict(zip(['chrom','chromStart','chromEnd','pvalue','name'],columns))
pval = pval.rename("pvalue")
genes = manifest_gene_map(manifest, genome_build=genome_build)
# finally, inner join and save/return the combined BED data frame.
BED = pd.merge(genes[['chrom','chromStart','chromEnd']], pval, left_index=True, right_index=True, how='inner')
BED = BED.sort_values(['chrom','chromStart'], ascending=True)
BED = BED.reset_index().rename(columns={'index':'name'})
BED = BED[['chrom','chromStart','chromEnd','pvalue','name']] # order matters, so be explicit
# omit unmapped CpGs
unmapped = len(BED[ BED['chromStart'].isna() ])
BED = BED[ ~BED['chromStart'].isna() ]
if renamer != {}:
BED = BED.rename(columns=renamer)
# cpv / combined-pvalues needs a tab-separated .bed file
timestamp = int(time.time())
if save:
if isinstance(filename, type(None)):
BED.to_csv(f"{timestamp}.bed", index=False, sep='\t')
return f"{timestamp}.bed"
if not isinstance(filename, Path):
filename = f"{filename}.bed"
# otherwise, use as is, assuming it is a complete path/filename
BED.to_csv(filename, index=False, sep='\t')
return filename
return BED | 3,762 |
def test_show_module(database_connection: mysql.connector.connect):
"""Run tests against show module"""
print("Testing wwdtm.show module")
# Start Time
start_time = time.perf_counter()
# Testing show.utility.id_exists
test_show.test_id_exists(1083, database_connection)
test_show.test_id_not_exists(-1083, database_connection)
# Testing show.utility.date_exists
test_show.test_date_exists(2006, 8, 19, database_connection)
test_show.test_date_not_exists(2006, 8, 20, database_connection)
# Testing retrieve basic show info
test_show.test_retrieve_by_id(47, database_connection)
test_show.test_retrieve_by_invalid_id(-47, database_connection)
test_show.test_retrieve_by_date(2018, 10, 27, database_connection)
test_show.test_retrieve_by_invalid_date(2018, 10, 28, database_connection)
test_show.test_retrieve_by_date_string("2007-03-24", database_connection)
test_show.test_retrieve_by_invalid_date_string("2007-03-",
database_connection)
# Testing retrieve multiple basic show info
test_show.test_retrieve_by_year(2006, database_connection)
test_show.test_retrieve_by_year_month(2006, 8, database_connection)
test_show.test_retrieve_all(database_connection)
test_show.test_retrieve_all_dates(database_connection)
test_show.test_retrieve_all_dates_tuple(database_connection)
test_show.test_retrieve_all_ids(database_connection)
test_show.test_retrieve_all_years_months(database_connection)
test_show.test_retrieve_all_years_months_tuple(database_connection)
# Testing retrieve recent basic show info
test_show.test_retrieve_recent(database_connection)
# Testing retrieve show months by year and show years
test_show.test_retrieve_months_by_year(2006, database_connection)
test_show.test_retrieve_years(database_connection)
# Testing retrieve show details
test_show.test_retrieve_details_by_id(1083, database_connection)
test_show.test_retrieve_details_by_invalid_id(-1083, database_connection)
test_show.test_retrieve_details_by_date(2018, 10, 27, database_connection)
test_show.test_retrieve_details_by_invalid_date(2018,
10,
2,
database_connection)
test_show.test_retrieve_details_by_date_string("2007-03-24",
database_connection)
test_show.test_retrieve_details_by_invalid_date_string("2007-03-02",
database_connection)
# Testing retrieve multiple show details
test_show.test_retrieve_details_by_year(2006, database_connection)
test_show.test_retrieve_details_by_year_month(2006, 12, database_connection)
test_show.test_retrieve_all_details(database_connection)
# Testing retrieve recent show details
test_show.test_retrieve_recent_details(database_connection)
# Testing retrieve show scores
test_show.test_retrieve_scores_by_year(2018, database_connection)
# Calculate time elapsed
end_time = time.perf_counter()
elapsed_time = end_time - start_time
print("Time Elapsed: {}s\n".format(round(elapsed_time, 5))) | 3,763 |
def GetPartition(partition_name, target_os):
"""Return the partition to install to.
Args:
partition_name: partition name from command-line
{'primary', 'secondary', 'other'}
target_os: 'fiberos' or 'android'
Returns:
0 or 1
Raises:
Fatal: if no partition could be determined
"""
if partition_name == 'other':
if target_os == GetOs():
boot = GetBootedPartition()
else:
boot = GetActivePartitionFromHNVRAM(target_os)
assert boot in [None, 0, 1]
if boot is None:
# Policy decision: if we're booted from NFS, install to secondary
return 1
else:
return boot ^ 1
elif partition_name in ['primary', 0]:
return 0
elif partition_name in ['secondary', 1]:
return 1
else:
raise Fatal('--partition must be one of: primary, secondary, other') | 3,764 |
def sync_code_to_masters(cluster: Cluster, dcos_checkout_dir: Path) -> None:
"""
Sync files from a DC/OS checkout to master nodes.
This syncs integration test files and bootstrap files.
This is not covered by automated tests, and it is non-trivial.
In the following instructions, running a test might look like:
`dcos-docker run pytest <test_filename>`
The manual test cases we want to work are:
* Sync a DC/OS Enterprise checkout and run a test - it should work.
* Delete a test file, sync, try to run this test file - it should fail
with "file not found".
* Add a test file, sync, try to run this test file - it should work.
* Add `assert False`, sync, to a test file and run this test file - it
should fail.
* Test bootstrap sync with no changes (a partial test that nothing
breaks):
- Sync
- `dcos-docker run systemctl restart dcos-mesos-master`
- `dcos-docker run journalctl -f -u dcos-mesos-master`
- We expect to see no assertion error.
* Test bootstrap sync with some changes
- Add `assert False` to
`packages/bootstrap/extra/dcos_internal_utils/bootstrap.py`
- `dcos-docker run systemctl restart dcos-mesos-master`
- `dcos-docker run journalctl -f -u dcos-mesos-master`
- We expect to see the assertion error.
Args:
cluster: The cluster to sync code to.
dcos_checkout_dir: The path to a DC/OS (Enterprise) checkout to sync
code from.
"""
local_packages = dcos_checkout_dir / 'packages'
local_test_dir = local_packages / 'dcos-integration-test' / 'extra'
if not Path(local_test_dir).exists():
message = (
'DCOS_CHECKOUT_DIR must be set to the checkout of a DC/OS '
'repository.\n'
'"{local_test_dir}" does not exist.'
).format(local_test_dir=local_test_dir)
raise click.BadArgumentUsage(message=message)
node_active_dir = Path('/opt/mesosphere/active')
node_test_dir = node_active_dir / 'dcos-integration-test'
node_lib_dir = node_active_dir / 'bootstrap' / 'lib'
# Different versions of DC/OS have different versions of Python.
master = next(iter(cluster.masters))
ls_result = master.run(args=['ls', str(node_lib_dir)])
python_version = ls_result.stdout.decode().strip()
node_python_dir = node_lib_dir / python_version
node_bootstrap_dir = (
node_python_dir / 'site-packages' / 'dcos_internal_utils'
)
local_bootstrap_dir = (
local_packages / 'bootstrap' / 'extra' / 'dcos_internal_utils'
)
test_tarstream = _tar_with_filter(
path=local_test_dir,
tar_filter=_cache_filter,
)
bootstrap_tarstream = _tar_with_filter(
path=local_bootstrap_dir,
tar_filter=_cache_filter,
)
node_test_py_pattern = node_test_dir / '*.py'
tar_path = '/tmp/dcos_e2e_tmp.tar'
for master in cluster.masters:
master.run(
args=['rm', '-rf', str(node_test_py_pattern)],
# We use a wildcard character, `*`, so we need shell expansion.
shell=True,
)
for tarstream, node_destination in (
(test_tarstream, node_test_dir),
(bootstrap_tarstream, node_bootstrap_dir),
):
with tempfile.NamedTemporaryFile() as tmp_file:
tmp_file.write(tarstream.getvalue())
tmp_file.flush()
master.send_file(
local_path=Path(tmp_file.name),
remote_path=Path(tar_path),
)
tar_args = ['tar', '-C', str(node_destination), '-xvf', tar_path]
master.run(args=tar_args)
master.run(args=['rm', tar_path]) | 3,765 |
def extendHeader(files):
""" Add new draft keywords. """
for filename in files:
try:
print(filename)
#open file in update mode
f = pyf.open(filename, memmap=True, ignore_missing_end=True, mode='update')
#grab primary header
hdr = f[0].header
update_version(hdr)
except:
print('Failed to open file: ', filename)
traceback.print_exc(file=sys.stdout)
continue
# Assuming this is an env var
if 'CCD_MANU' not in list(hdr.keys()):
try:
vendor = os.environ['CCD_MANU']
hdr['CCD_MANU'] = vendor
except:
print('Failed to update CCD_MANU for file: ', filename)
try:
sensor_id, imgType, testType, seqNum = get_id_and_type(filename)
if 'LSST_NUM' not in list(hdr.keys()):
hdr['LSST_NUM'] = sensor_id
print("setting LSST_NUM ", sensor_id)
if 'IMGTYPE' not in list(hdr.keys()):
hdr['IMGTYPE'] = imgType
print("setting IMGTYPE: ", imgType)
if 'TESTTYPE' not in list(hdr.keys()):
hdr['TESTTYPE'] = testType
print("setting TESTTYPE: ", testType)
if 'SEQNUM' not in list(hdr.keys()):
hdr['SEQNUM'] = seqNum
print("setting SEQNUM: ", seqNum)
except:
traceback.print_exc(file=sys.stdout)
continue
f.close()
print(filename + " done.") | 3,766 |
async def call_dialogflow(message, config, lang=DEFAULT_LANGUAGE):
"""Call the Dialogflow api and return the response."""
async with aiohttp.ClientSession() as session:
payload = {
"v": DIALOGFLOW_API_VERSION,
"lang": lang,
"sessionId": message.connector.name,
"query": message.text,
}
headers = {
"Authorization": "Bearer " + config["access-token"],
"Content-Type": "application/json",
}
resp = await session.post(
DIALOGFLOW_API_ENDPOINT, data=json.dumps(payload), headers=headers
)
result = await resp.json()
_LOGGER.info(_("Dialogflow response - %s"), json.dumps(result))
return result | 3,767 |
def gettgd(sat, eph, type=0):
""" get tgd: 0=E5a, 1=E5b """
sys = gn.sat2prn(sat)[0]
if sys == uGNSS.GLO:
return eph.dtaun * rCST.CLIGHT
else:
return eph.tgd[type] * rCST.CLIGHT | 3,768 |
def random_sign_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) random entries from a "sign Uniform".
Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
samples from this `Op` are distributed like
```
Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_sign_uniform"):
unsigned_samples = random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
if seed is not None:
seed += 12
signs = math_ops.sign(
random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype) | 3,769 |
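# --- Usage sketch (added for illustration; not part of the original source) ---
# Draws a 2x3 float32 tensor whose entries are uniform in [1, 2) with a random sign,
# so roughly half of them land in (-2, -1].
#
#   samples = random_sign_uniform(shape=[2, 3], minval=1.0, maxval=2.0, seed=42)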
def ones_v(n):
"""
Return the column vector of ones of length n.
"""
return matrix(1, (n,1), 'd') | 3,770 |
def test_encrypt_and_decrypt_two(benchmark: BenchmarkFixture) -> None:
"""Benchmark encryption and decryption run together."""
primitives.decrypt = pysodium.crypto_aead_xchacha20poly1305_ietf_decrypt
primitives.encrypt = pysodium.crypto_aead_xchacha20poly1305_ietf_encrypt
def encrypt_and_decrypt() -> bytes:
token = version2.encrypt(MESSAGE, KEY, FOOTER)
return version2.decrypt(token, KEY, FOOTER)
plain_text = benchmark(encrypt_and_decrypt)
assert plain_text == MESSAGE | 3,771 |
def truncate(fh, length):
"""Implementation of perl $fh->truncate method"""
global OS_ERROR, TRACEBACK, AUTODIE
try:
if hasattr(fh, 'truncate'):
fh.truncate(length)
else:
os.truncate(fh, length)
return True
except Exception as _e:
OS_ERROR = str(_e)
if TRACEBACK:
if isinstance(fh, str):
cluck(f"truncate({fh}, {length}) failed: {OS_ERROR}",skip=2)
else:
cluck(f"truncate to {length} failed: {OS_ERROR}",skip=2)
if AUTODIE:
raise
return None | 3,772 |
def get_fy_parent_nucl(fy_lib):
"""Gets the list of fission parents from a fission yield dictionnary.
Parameters
----------
fy_lib: dict
A fission yield dictionary
"""
fy_nucl = get_fy_nucl(fy_lib)
fy_parent = []
sample_zamid = fy_nucl[0]
sample = fy_lib[sample_zamid]
for fission_parent in sample:
fy_parent.append(fission_parent)
return fy_parent | 3,773 |
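# --- Illustration (added; not part of the original source) ---
# A minimal, hypothetical example of the fission-yield dictionary layout this helper
# assumes: outer keys are product-nuclide zamids, inner keys are the fission parents.
#
#   example_fy_lib = {
#       551370: {'U235': 0.0619, 'PU239': 0.0660},   # illustrative yields for Cs-137
#       380900: {'U235': 0.0578, 'PU239': 0.0210},   # illustrative yields for Sr-90
#   }
#   # get_fy_parent_nucl(example_fy_lib) -> ['U235', 'PU239']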
def block_input():
"""Block all user input unconditionally.
:command: `BlockInput
<https://www.autohotkey.com/docs/commands/BlockInput.htm>`_
"""
ahk_call("BlockInput", "On")
yield
ahk_call("BlockInput", "Off") | 3,774 |
def initialize ( is_test, no_cam ) :
"""job machine Tableをもとに個体、世代の初期設定"""
from functools import partial
jmTable = getJmTable ( is_test )
MAX_JOBS = jmTable.getJobsCount()
MAX_MACHINES = jmTable.getMachinesCount()
# Minimize makespan
creator.create ( "FitnessMin", base.Fitness, weights=(-1.0,) )
# An individual is a list of job numbers
#creator.create ( "Individual", list, fitness=creator.FitnessMin )
creator.create ( "Individual", array.array, typecode='b', fitness=creator.FitnessMin ) # 'b' is signed char
toolbox = base.Toolbox()
# Individuals are random sequences of values from 0 to MAX_MACHINES-1, repeated MAX_JOBS times
gen_ind = partial ( initIndividual, MAX_JOBS, MAX_MACHINES )
toolbox.register ( "individual", tools.initIterate, creator.Individual, gen_ind )
# Register the function that generates the initial population; the initial population is a list of Individuals
toolbox.register ( "population", tools.initRepeat, list, toolbox.individual )
# Register the evaluation function
toolbox.register ( "evaluate", schedule.eval, jmTable )
# Register the crossover function
toolbox.register ( "mate", schedule.crossover )
# Register the mutation operator
toolbox.register ( "mutate", schedule.mutation )
# Register roulette-wheel selection
toolbox.register ( "select", tools.selRoulette )
# Register the replacement operation
if no_cam :
# Standard replacement operation
toolbox.register ( "getArgWorst", schedule.getArgWorst )
else :
# Replacement operation using the cluster averaging method (CAM)
toolbox.register ( "getArgWorst", schedule.getArgWorstCAM )
return toolbox, jmTable | 3,775 |
def run():
"""Unleash payments in USA."""
(AddonExcludedRegion.objects
.exclude(addon__premium_type=amo.ADDON_FREE)
.filter(region=mkt.regions.US.id).delete()) | 3,776 |
def perform_variants_query(job, **kwargs):
"""Query for variants.
:param job: API to interact with the owner of the variants.
:type job: :class:`cibyl.sources.zuul.transactions.JobResponse`
:param kwargs: See :func:`handle_query`.
:return: List of retrieved variants.
:rtype: list[:class:`cibyl.sources.zuul.transactions.VariantResponse`]
"""
return job.variants().get() | 3,777 |
def get_model(share_weights=False, upsample=False): # pylint: disable=too-many-statements
""" Return a network dict for the model """
block0 = [{'conv1_1': [3, 64, 3, 1, 1]},
{'conv1_2': [64, 64, 3, 1, 1]}, {'pool1_stage1': [2, 2, 0]},
{'conv2_1': [64, 128, 3, 1, 1]},
{'conv2_2': [128, 128, 3, 1, 1]}, {'pool2_stage1': [2, 2, 0]},
{'conv3_1': [128, 256, 3, 1, 1]},
{'conv3_2': [256, 256, 3, 1, 1]},
{'conv3_3': [256, 256, 3, 1, 1]},
{'conv3_4': [256, 256, 3, 1, 1]}, {'pool3_stage1': [2, 2, 0]},
{'conv4_1': [256, 512, 3, 1, 1]},
{'conv4_2': [512, 512, 3, 1, 1]}]
if share_weights:
print("defining network with shared weights")
network_dict = get_shared_network_dict()
else:
network_dict = get_network_dict()
def define_base_layers(block, layer_size):
layers = []
for i in range(layer_size):
one_ = block[i]
for k, v in zip(one_.keys(), one_.values()):
if 'pool' in k:
layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])]
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
layers += [conv2d, nn.ReLU(inplace=True)]
return layers
def define_stage_layers(cfg_dict):
layers = define_base_layers(cfg_dict, len(cfg_dict) - 1)
one_ = cfg_dict[-1].keys()
k = list(one_)[0]
v = cfg_dict[-1][k]
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
layers += [conv2d]
return nn.Sequential(*layers)
# create all the layers of the model
base_layers = define_base_layers(block0, len(block0))
pre_stage_layers = define_base_layers(network_dict['block_pre_stage'], len(network_dict['block_pre_stage']))
blocks = {'block0': nn.Sequential(*base_layers),
'block_pre_stage': nn.Sequential(*pre_stage_layers)}
if share_weights:
shared_layers_s1 = define_base_layers(network_dict['block1_shared'], len(network_dict['block1_shared']))
shared_layers_s2 = define_base_layers(network_dict['block2_shared'], len(network_dict['block2_shared']))
blocks['block1_shared'] = nn.Sequential(*shared_layers_s1)
blocks['block2_shared'] = nn.Sequential(*shared_layers_s2)
for k, v in zip(network_dict.keys(), network_dict.values()):
if 'shared' not in k and 'pre_stage' not in k:
blocks[k] = define_stage_layers(v)
class PoseModel(nn.Module):
""" Pose Model class """
def __init__(self, model_dict, upsample=False):
super(PoseModel, self).__init__()
self.upsample = upsample
self.basemodel = model_dict['block0']
self.pre_stage = model_dict['block_pre_stage']
if share_weights:
self.stage1_shared = model_dict['block1_shared']
self.stage1_1 = model_dict['block1_1']
self.stage2_1 = model_dict['block2_1']
# self.stage3_1 = model_dict['block3_1']
# self.stage4_1 = model_dict['block4_1']
# self.stage5_1 = model_dict['block5_1']
# self.stage6_1 = model_dict['block6_1']
if share_weights:
self.stage2_shared = model_dict['block2_shared']
self.stage1_2 = model_dict['block1_2']
self.stage2_2 = model_dict['block2_2']
# self.stage3_2 = model_dict['block3_2']
# self.stage4_2 = model_dict['block4_2']
# self.stage5_2 = model_dict['block5_2']
# self.stage6_2 = model_dict['block6_2']
def forward(self, *inputs):
out1_vgg = self.basemodel(inputs[0])
out1 = self.pre_stage(out1_vgg)
if share_weights:
out1_shared = self.stage1_shared(out1)
else:
out1_shared = out1
out1_1 = self.stage1_1(out1_shared)
out1_2 = self.stage1_2(out1_shared)
out2 = torch.cat([out1_1, out1_2, out1], 1)
if share_weights:
out2_shared = self.stage2_shared(out2)
else:
out2_shared = out2
out2_1 = self.stage2_1(out2_shared)
out2_2 = self.stage2_2(out2_shared)
# out3 = torch.cat([out2_1, out2_2, out1], 1)
# out3_1 = self.stage3_1(out3)
# out3_2 = self.stage3_2(out3)
# out4 = torch.cat([out3_1, out3_2, out1], 1)
#
# out4_1 = self.stage4_1(out4)
# out4_2 = self.stage4_2(out4)
# out5 = torch.cat([out4_1, out4_2, out1], 1)
#
# out5_1 = self.stage5_1(out5)
# out5_2 = self.stage5_2(out5)
# out6 = torch.cat([out5_1, out5_2, out1], 1)
#
# out6_1 = self.stage6_1(out6)
# out6_2 = self.stage6_2(out6)
if self.upsample:
# parameters to check for up-sampling: align_corners = True, mode='nearest'
upsampler = nn.Upsample(scale_factor=2, mode='bilinear')
out2_1_up = upsampler(out2_1)
out2_2_up = upsampler(out2_2)
return out1_1, out1_2, out2_1, out2_2, out2_1_up, out2_2_up
return out1_1, out1_2, out2_1, out2_2
model = PoseModel(blocks, upsample=upsample)
return model | 3,778 |
def bootstrap(config_uri, request=None, options=None):
""" Load a WSGI application from the PasteDeploy config file specified
by ``config_uri``. The environment will be configured as if it is
currently serving ``request``, leaving a natural environment in place
to write scripts that can generate URLs and utilize renderers.
This function returns a dictionary with ``app``, ``root``, ``closer``,
``request``, and ``registry`` keys. ``app`` is the WSGI app loaded
(based on the ``config_uri``), ``root`` is the traversal root resource
of the Pyramid application, and ``closer`` is a parameterless callback
that may be called when your script is complete (it pops a threadlocal
stack).
.. note::
Most operations within :app:`Pyramid` expect to be invoked within the
context of a WSGI request, thus it's important when loading your
application to anchor it when executing scripts and other code that is
not normally invoked during active WSGI requests.
.. note::
For a complex config file containing multiple :app:`Pyramid`
applications, this function will setup the environment under the context
of the last-loaded :app:`Pyramid` application. You may load a specific
application yourself by using the lower-level functions
:meth:`pyramid.paster.get_app` and :meth:`pyramid.scripting.prepare` in
conjunction with :attr:`pyramid.config.global_registries`.
``config_uri`` -- specifies the PasteDeploy config file to use for the
interactive shell. The format is ``inifile#name``. If the name is left
off, ``main`` will be assumed.
``request`` -- specified to anchor the script to a given set of WSGI
parameters. For example, most people would want to specify the host,
scheme and port such that their script will generate URLs in relation
to those parameters. A request with default parameters is constructed
for you if none is provided. You can mutate the request's ``environ``
later to setup a specific host/port/scheme/etc.
``options`` Is passed to get_app for use as variable assignments like
{'http_port': 8080} and then use %(http_port)s in the
config file.
See :ref:`writing_a_script` for more information about how to use this
function.
"""
app = get_app(config_uri, options=options)
env = prepare(request)
env['app'] = app
return env | 3,779 |
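# --- Usage sketch (added for illustration; not part of the original source) ---
# A typical maintenance script built on bootstrap(); 'development.ini' is a hypothetical
# PasteDeploy config file name.
#
#   env = bootstrap('development.ini#main')
#   try:
#       registry = env['registry']
#       request = env['request']
#       # ... generate URLs, query models, render templates, etc. ...
#   finally:
#       env['closer']()   # pop the threadlocal stack when the script is done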
def add_subparser(
subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Add all rasa x parsers.
Args:
subparsers: subparser we are going to attach to
parents: Parent parsers, needed to ensure tree structure in argparse
"""
x_parser_args = {
"parents": parents,
"conflict_handler": "resolve",
"formatter_class": argparse.ArgumentDefaultsHelpFormatter,
}
x_parser_args["help"] = (
"Run a Rasa server in a mode that enables connecting "
"to Rasa Enterprise as the config endpoint."
)
shell_parser = subparsers.add_parser("x", **x_parser_args)
shell_parser.set_defaults(func=rasa_x)
arguments.set_x_arguments(shell_parser) | 3,780 |
def readSegy(filename) :
"""
Data, SegyHeader, SegyTraceHeaders = readSegy(filename)
"""
printverbose("readSegy : Trying to read "+filename,0)
data = open(filename).read()
filesize=len(data)
SH=getSegyHeader(filename)
bps=getBytePerSample(SH)
ntraces = (filesize-3600)/(SH['ns']*bps+240)
# ntraces = 100
printverbose("readSegy : Length of data : " + str(filesize),2)
SH["ntraces"]=ntraces;
ndummy_samples=240/bps
printverbose("readSegy : ndummy_samples="+str(ndummy_samples),6)
printverbose("readSegy : ntraces=" + str(ntraces) + " nsamples="+str(SH['ns']),2)
# GET TRACE
index=3600;
nd=(filesize-3600)/bps
# READ ALL SEGY TRACE HEADRES
SegyTraceHeaders = getAllSegyTraceHeaders(SH,data)
printverbose("readSegy : reading segy data",2)
# READ ALL DATA EXCEPT FOR SEGY HEADER
#Data = zeros((SH['ns'],ntraces))
revision=SH["SegyFormatRevisionNumber"]
if (revision==100):
revision=1
dsf=SH["DataSampleFormat"]
DataDescr=SH_def["DataSampleFormat"]["descr"][revision][dsf]
printverbose("readSegy : SEG-Y revision = "+str(revision),1)
printverbose("readSegy : DataSampleFormat="+str(dsf)+"("+DataDescr+")",1)
if (SH["DataSampleFormat"]==1):
printverbose("readSegy : Assuming DSF=1, IBM FLOATS",2)
Data1 = getValue(data,index,'ibm',endian,nd)
elif (SH["DataSampleFormat"]==2):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 32bit INT",2)
Data1 = getValue(data,index,'l',endian,nd)
elif (SH["DataSampleFormat"]==3):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 16bit INT",2)
Data1 = getValue(data,index,'h',endian,nd)
elif (SH["DataSampleFormat"]==5):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", IEEE",2)
Data1 = getValue(data,index,'float',endian,nd)
elif (SH["DataSampleFormat"]==8):
printverbose("readSegy : Assuming DSF=" + str(SH["DataSampleFormat"]) + ", 8bit CHAR",2)
Data1 = getValue(data,index,'B',endian,nd)
else:
printverbose("readSegy : DSF=" + str(SH["DataSampleFormat"]) + ", NOT SUPORTED",2)
Data = Data1[0]
printverbose("readSegy : - reshaping",2)
Data=reshape(Data,(ntraces,SH['ns']+ndummy_samples))
printverbose("readSegy : - stripping header dummy data",2)
Data=Data[:,ndummy_samples:(SH['ns']+ndummy_samples)]
printverbose("readSegy : - transposing",2)
Data=transpose(Data)
# SOMEONE NEEDS TO IMPLEMENT A NICER WAY TO DEAL WITH DSF=8
if (SH["DataSampleFormat"]==8):
for i in arange(ntraces):
for j in arange(SH['ns']):
if Data[i][j]>128:
Data[i][j]=Data[i][j]-256
printverbose("readSegy : read data",2)
return Data,SH,SegyTraceHeaders | 3,781 |
def chdir(request):
"""Reset CWD directory:
1. At start of test the CWD will be in the directory that
contains the test file.
2. After test completes the CWD will be reset to the CWD
before the test started.
"""
oldcwd = pathlib.Path.cwd()
request.fspath.dirpath().chdir()
def reset():
os.chdir(oldcwd)
request.addfinalizer(reset) | 3,782 |
def planar_transform(imgs, masks, pixel_coords_trg, k_s, k_t, rot, t, n_hat, a):
"""transforms imgs, masks and computes dmaps according to planar transform.
Args:
imgs: are L X [...] X C, typically RGB images per layer
masks: L X [...] X 1, indicating which layer pixels are valid
pixel_coords_trg: [...] X H_t X W_t X 3;
pixel (u,v,1) coordinates of target image pixels.
k_s: intrinsics for source cameras, are [...] X 3 X 3 matrices
k_t: intrinsics for target cameras, are [...] X 3 X 3 matrices
rot: relative rotation, are [...] X 3 X 3 matrices
t: [...] X 3 X 1, translations from source to target camera
n_hat: L X [...] X 1 X 3, plane normal w.r.t source camera frame
a: L X [...] X 1 X 1, plane equation displacement
Returns:
imgs_transformed: L X [...] X C images in trg frame
masks_transformed: L X [...] X 1 masks in trg frame
dmaps_trg: L X [...] X 1, indicating per pixel inverse depth
Assumes the first dimension corresponds to layers.
"""
with tf.name_scope('planar_transform'):
n_layers = imgs.get_shape().as_list()[0]
rot_rep_dims = [n_layers]
rot_rep_dims += [1 for _ in range(len(k_s.get_shape()))]
cds_rep_dims = [n_layers]
cds_rep_dims += [1 for _ in range(len(pixel_coords_trg.get_shape()))]
k_s = tf.tile(tf.expand_dims(k_s, axis=0), rot_rep_dims)
k_t = tf.tile(tf.expand_dims(k_t, axis=0), rot_rep_dims)
t = tf.tile(tf.expand_dims(t, axis=0), rot_rep_dims)
rot = tf.tile(tf.expand_dims(rot, axis=0), rot_rep_dims)
pixel_coords_trg = tf.tile(
tf.expand_dims(pixel_coords_trg, axis=0), cds_rep_dims)
ndims_img = len(imgs.get_shape())
imgs_masks = tf.concat([imgs, masks], axis=ndims_img - 1)
imgs_masks_trg = homography.transform_plane_imgs(
imgs_masks, pixel_coords_trg, k_s, k_t, rot, t, n_hat, a)
imgs_trg, masks_trg = tf.split(imgs_masks_trg, [3, 1], axis=ndims_img - 1)
dmaps_trg = homography.trg_disp_maps(pixel_coords_trg, k_t, rot, t, n_hat,
a)
return imgs_trg, masks_trg, dmaps_trg | 3,783 |
def writeRLE(grid, rule):
"""
Writes grid out to a text file in RLE format
"""
if not os.path.exists("saved-RLEs"): os.mkdir("saved-RLEs")
filename = unique_file("saved-RLEs/RLEfile", "rle")
f = open(filename, "w")
top, bot, minCol, maxCol = findBoundaries(grid)
#write x,y header
f.write('x = {}, y = {}, rule = {}\n'.format(str(maxCol - minCol + 1), str(bot - top + 1), rule))
RLEgroups = encodeGrid(grid, top, bot, minCol, maxCol)
finishedWriting = False
allLines = []
individualLine = ''
pos = 0
#write grid with 70 character lines
while finishedWriting == False:
if (RLEgroups[pos][1] == 1):
#single cell
if (1 + len(individualLine) > 70):
#new line
individualLine += '\n'
f.write(individualLine)
individualLine = RLEgroups[pos][0]
else:
#same line
individualLine += RLEgroups[pos][0]
else:
if (len(str(RLEgroups[pos][1])) + len(individualLine) + 1 > 70):
#new line
individualLine += '\n'
f.write(individualLine)
individualLine = str(RLEgroups[pos][1]) + RLEgroups[pos][0]
else:
#same line
individualLine += str(RLEgroups[pos][1]) + RLEgroups[pos][0]
if (pos == len(RLEgroups) - 1):
f.write(individualLine)
finishedWriting = True
else:
pos += 1
f.close()
print('Done! RLE info saved to {}'.format(filename)) | 3,784 |
def convertDynamicRenderStates(data, builder):
"""
Converts dynamic render states. The data map is expected to contain the following elements:
- lineWidth: float width for the line. Defaults to 1.
- depthBiasConstantFactor: float value for the depth bias constant factor. Defaults to 0.
- depthBiasClamp: float value for the depth bias clamp. Defaults to 0.
- depthBiasSlopeFactor: float value for the depth bias slope factor. Defaults to 0.
- blendConstants: array of 4 floats for the blend color. Defaults to [0, 0, 0, 0].
- depthBounds: array of 2 floats for the min and max depth value. Defaults to [0, 1].
- stencilCompareMask: int compare mask for both the front and back stencil. Defaults to
0xFFFFFFFF.
- frontStencilCompareMask: int compare mask for just the front stencil.
- backStencilCompareMask: int compare mask for just the back stencil.
- stencilWriteMask: int write mask for both the front and back stencil. Defaults to 0.
- frontStencilWriteMask: int write mask for just the front stencil.
- backStencilWriteMask: int write mask for just the back stencil.
- stencilReference: int reference for both the front and back stencil. Defaults to 0.
- frontStencilReference: int reference for just the front stencil.
- backStencilReference: int reference for just the back stencil.
"""
def readFloat(value, name):
try:
return float(value)
except:
raise Exception('Invalid ' + name + ' float value "' + str(value) + '".')
def readUInt(value, name):
try:
intVal = int(value)
if intVal < 0:
raise Exception()
return intVal
except:
raise Exception('Invalid ' + name + ' unsigned int value "' + str(value) + '".')
lineWidth = readFloat(data.get('lineWidth', 1.0), 'line width')
depthBiasConstantFactor = readFloat(data.get('depthBiasConstantFactor', 0.0),
'depth bias constant factor')
depthBiasClamp = readFloat(data.get('depthBiasClamp', 0.0), 'depth bias clamp')
depthBiasSlopeFactor = readFloat(data.get('depthBiasSlopeFactor', 0.0),
'depth bias slope factor')
colorValue = data.get('blendConstants', [0.0, 0.0, 0.0, 0.0])
try:
if len(colorValue) != 4:
raise Exception()
except:
raise Exception('Blend constants value must be an array of 4 floats.')
blendConstants = []
for c in colorValue:
blendConstants.append(readFloat(c, 'blend constant'))
depthBoundsValue = data.get('depthBounds', [0.0, 1.0])
try:
if len(depthBoundsValue) != 2:
raise Exception()
except:
raise Exception('Depth bounds value must be an array of 2 floats.')
depthBounds = []
for b in depthBoundsValue:
depthBounds.append(readFloat(b, 'depth bounds'))
stencilCompareMask = data.get('stencilCompareMask', 0xFFFFFFFF)
frontStencilCompareMask = readUInt(data.get('frontStencilCompareMask', stencilCompareMask),
'stencil compare mask')
backStencilCompareMask = readUInt(data.get('backStencilCompareMask', stencilCompareMask),
'stencil compare mask')
stencilWriteMask = data.get('stencilWriteMask', 0)
frontStencilWriteMask = readUInt(data.get('frontStencilWriteMask', stencilWriteMask),
'stencil write mask')
backStencilWriteMask = readUInt(data.get('backStencilWriteMask', stencilWriteMask),
'stencil write mask')
stencilReference = data.get('stencilReference', 0)
frontStencilReference = readUInt(data.get('frontStencilReference', stencilReference),
'stencil reference')
backStencilReference = readUInt(data.get('backStencilReference', stencilReference),
'stencil reference')
DynamicRenderStates.Start(builder)
DynamicRenderStates.AddLineWidth(builder, lineWidth)
DynamicRenderStates.AddDepthBiasConstantFactor(builder, depthBiasConstantFactor)
DynamicRenderStates.AddDepthBiasClamp(builder, depthBiasClamp)
DynamicRenderStates.AddDepthBiasSlopeFactor(builder, depthBiasSlopeFactor)
DynamicRenderStates.AddBlendConstants(builder, CreateColor4f(builder, *blendConstants))
DynamicRenderStates.AddDepthBounds(builder, CreateVector2f(builder, *depthBounds))
DynamicRenderStates.AddFrontStencilCompareMask(builder, frontStencilCompareMask)
DynamicRenderStates.AddBackStencilCompareMask(builder, backStencilCompareMask)
DynamicRenderStates.AddFrontStencilWriteMask(builder, frontStencilWriteMask)
DynamicRenderStates.AddBackStencilWriteMask(builder, backStencilWriteMask)
DynamicRenderStates.AddFrontStencilReference(builder, frontStencilReference)
DynamicRenderStates.AddBackStencilReference(builder, backStencilReference)
return DynamicRenderStates.End(builder) | 3,785 |
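# --- Illustration (added; not part of the original source) ---
# A minimal input map matching the layout documented above; every key is optional and
# the values shown are arbitrary examples, with `builder` assumed to be a flatbuffers.Builder.
#
#   exampleStates = {
#       'lineWidth': 2.0,
#       'depthBiasConstantFactor': 1.25,
#       'blendConstants': [0.0, 0.0, 0.0, 1.0],
#       'depthBounds': [0.0, 1.0],
#       'stencilCompareMask': 0xFF,
#       'stencilReference': 1,
#   }
#   # offset = convertDynamicRenderStates(exampleStates, builder)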
def binary_search(a, search_value):
"""
@name binary_search
@param a sorted array to search
@param search_value value to look for
@return index of search_value in a, or -1 if not found
"""
N = len(a)
l = 0
r = len(a) - 1
while(True):
try:
result = binary_search_iteration(a, l, r, search_value)
l, r = result
except TypeError:
return -1 if not result else result | 3,786 |
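# --- Sketch of the helper assumed above (added; not part of the original source) ---
# binary_search_iteration is expected to return either an (l, r) tuple to narrow the
# search range, or a non-tuple value (the found index, or False when absent) whose failed
# unpacking triggers the TypeError branch in binary_search. Note that under this contract
# a match at index 0 is reported as -1, because `not 0` is True in the caller.
def binary_search_iteration(a, l, r, search_value):
    if l > r:
        return False                              # range exhausted -> caller returns -1
    mid = (l + r) // 2
    if a[mid] == search_value:
        return mid                                # found -> caller returns mid
    # otherwise narrow the range and let the caller loop again
    return (l, mid - 1) if a[mid] > search_value else (mid + 1, r)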
def touch(filepath):
"""
Creates an empty file at filepath, if it does not already exist
"""
with open(filepath, 'a'):
pass | 3,787 |
def _key_match(d1: Dict[str, Any], d2: Dict[str, Any], key: str) -> bool:
"""
>>> _key_match({"a": 1}, {"a": 2}, "a")
False
>>> _key_match({"a": 1}, {"a": 2}, "b")
True
>>> _key_match({"a": 2}, {"a": 1}, "a")
False
>>> _key_match({"a": 1}, {"a": 1}, "a")
True
>>> _key_match({"a": 2}, {"b": 1}, "a")
False
>>> _key_match({"b": 2}, {"a": 1}, "a")
False
"""
try:
return (key not in d1 and key not in d2) or cast(bool, d1[key] == d2[key])
except KeyError:
return False | 3,788 |
def calculate(over):
"""Returns the value of the first triangle number to have
over the specified number of divisors"""
triangle = 0
count = sum(range(triangle))
while True:
if num_divisors(count) > over:
answer = count
return answer
triangle += 1
count = sum(range(triangle)) | 3,789 |
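# --- Sketch of the num_divisors helper assumed above (added; not part of the original source) ---
# Counts divisors by trial division up to sqrt(n). num_divisors(0) returns 0 here, so the
# two initial zero values produced by sum(range(triangle)) are skipped harmlessly; the sums
# 0, 0, 1, 3, 6, 10, ... enumerate the triangle numbers.
def num_divisors(n):
    if n < 1:
        return 0
    divisors = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            divisors += 1 if i * i == n else 2   # count both i and n // i
        i += 1
    return divisors
# With this helper, calculate(5) returns 28, the first triangle number with more than 5 divisors.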
def convert_image_to_kernel(im: Image, oversampling, kernelwidth):
""" Convert an image to a griddata kernel
:param im: Image to be converted
:param oversampling: Oversampling of Image spatially
:param kernelwidth: Kernel width to be extracted
:return: numpy.ndarray[nchan, npol, oversampling, oversampling, kernelwidth, kernelwidth]
"""
naxis = len(im.shape)
assert numpy.max(numpy.abs(im.data)) > 0.0, "Image is empty"
nchan, npol, ny, nx = im.shape
assert nx % oversampling == 0, "Oversampling must be even"
assert ny % oversampling == 0, "Oversampling must be even"
assert kernelwidth < nx and kernelwidth < ny, "Specified kernel width %d too large" % kernelwidth
assert im.wcs.wcs.ctype[0] == 'UU', 'Axis type %s inappropriate for construction of kernel' % im.wcs.wcs.ctype[0]
assert im.wcs.wcs.ctype[1] == 'VV', 'Axis type %s inappropriate for construction of kernel' % im.wcs.wcs.ctype[1]
newwcs = WCS(naxis=naxis + 2)
for axis in range(2):
newwcs.wcs.ctype[axis] = im.wcs.wcs.ctype[axis]
newwcs.wcs.crpix[axis] = kernelwidth // 2
newwcs.wcs.crval[axis] = 0.0
newwcs.wcs.cdelt[axis] = im.wcs.wcs.cdelt[axis] * oversampling
newwcs.wcs.ctype[axis + 2] = im.wcs.wcs.ctype[axis]
newwcs.wcs.crpix[axis + 2] = oversampling // 2
newwcs.wcs.crval[axis + 2] = 0.0
newwcs.wcs.cdelt[axis + 2] = im.wcs.wcs.cdelt[axis]
# Now do Stokes and Frequency
newwcs.wcs.ctype[axis + 4] = im.wcs.wcs.ctype[axis + 2]
newwcs.wcs.crpix[axis + 4] = im.wcs.wcs.crpix[axis + 2]
newwcs.wcs.crval[axis + 4] = im.wcs.wcs.crval[axis + 2]
newwcs.wcs.cdelt[axis + 4] = im.wcs.wcs.cdelt[axis + 2]
newdata_shape = [nchan, npol, oversampling, oversampling, kernelwidth, kernelwidth]
newdata = numpy.zeros(newdata_shape, dtype=im.data.dtype)
assert oversampling * kernelwidth < ny
assert oversampling * kernelwidth < nx
ystart = ny // 2 - oversampling * kernelwidth // 2
xstart = nx // 2 - oversampling * kernelwidth // 2
yend = ny // 2 + oversampling * kernelwidth // 2
xend = nx // 2 + oversampling * kernelwidth // 2
for chan in range(nchan):
for pol in range(npol):
for y in range(oversampling):
slicey = slice(yend + y, ystart + y, -oversampling)
for x in range(oversampling):
slicex = slice(xend + x, xstart + x, -oversampling)
newdata[chan, pol, y, x, ...] = im.data[chan, pol, slicey, slicex]
return create_image_from_array(newdata, newwcs, polarisation_frame=im.polarisation_frame) | 3,790 |
def groupby(keys: Iterable, *arrays) -> Iterator[tuple]:
"""Generate unique keys with associated groups.
Args:
keys: iterable of group keys, one per element.
*arrays (Iterable): arrays to take groups from, aligned with keys.
"""
arrays = tuple(map(np.asarray, arrays))
try:
items = _arggroupby(asiarray(keys))
except TypeError: # fallback to sorting
items = arggroupby(keys)
for key, values in items:
yield (key,) + tuple(arr[values] for arr in arrays) | 3,791 |
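# --- Usage sketch (added for illustration; not part of the original source) ---
# Groups two parallel arrays by key; each yielded tuple is (key, values_from_arr1, values_from_arr2).
#
#   for key, xs, ys in groupby(np.array([0, 1, 0, 1]),
#                              np.array([1, 2, 3, 4]),
#                              np.array([10, 20, 30, 40])):
#       print(key, xs, ys)   # e.g. 0 [1 3] [10 30], then 1 [2 4] [20 40]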
def prob(X, w):
"""
X: Nxd
w: dx1
---
prob: N x num_classes(2)"""
y = tf.constant(np.array([0.0, 1.0]), dtype=tf.float32)
prob = tf.exp(tf.matmul(X, w) * y) / (1 + tf.exp(tf.matmul(X, w)))
return prob | 3,792 |
def test_warped_vrt_dimensions(path_rgb_byte_tif):
"""
A WarpedVRT with target dimensions has the expected dataset
properties.
"""
with rasterio.open(path_rgb_byte_tif) as src:
extent = (-20037508.34, 20037508.34)
size = (2 ** 16) * 256
resolution = (extent[1] - extent[0]) / size
dst_transform = affine.Affine(
resolution, 0.0, extent[0], 0.0, -resolution, extent[1]
)
vrt = WarpedVRT(
src, crs=DST_CRS, width=size, height=size, transform=dst_transform
)
assert vrt.dst_crs == CRS.from_string(DST_CRS)
assert vrt.src_nodata == 0.0
assert vrt.dst_nodata == 0.0
assert vrt.resampling == Resampling.nearest
assert vrt.width == size
assert vrt.height == size
assert vrt.transform == dst_transform
assert vrt.warp_extras == {"init_dest": "NO_DATA"} | 3,793 |
def get_similar_genes_Quantiles(
gene_expr: np.array,
n_genes: int,
candidate_quants: np.ndarray,
candidate_genes: np.array,
quantiles=(0.5, 0.75, 0.85, 0.9, 0.95, 0.97, 0.98, 0.99, 1),
):
"""Gets genes with a similar expression distribution as the inputted gene,
by measuring distance between the gene expression quantiles.
Parameters
----------
gene_expr: np.array Expression of the gene of interest, or, if the same length as quantiles, then assumes is the pre-calculated quantiles.
n_genes: int Number of equivalent genes to select.
candidate_quants: np.ndarray Expression quantiles of gene candidates (quantiles*genes).
candidate_genes: np.array Same as candidate_expr.shape[1], indicating gene names.
quantiles: tuple The quantile to use
Returns
-------
similar_genes: np.array Array of strings for gene names.
"""
if type(quantiles) == float:
quantiles = np.array([quantiles])
else:
quantiles = np.array(quantiles)
# Getting the quantiles for the gene #
if len(gene_expr) != len(quantiles):
# ref_quants = np.quantile(gene_expr, q=quantiles, interpolation='nearest')
ref_quants = nonzero_quantile(gene_expr, q=quantiles, interpolation="nearest")
else:
ref_quants = gene_expr
# Measuring distances from the desired gene #
dists = np.apply_along_axis(canberra, 0, candidate_quants, ref_quants)
order = np.argsort(dists)
""" During debugging, plotting distribution of distances & selected genes.
import matplotlib.pyplot as plt
cutoff = dists[order[n_genes]]
fig, ax = plt.subplots()
ax.hist(dists[order[0:28000]], bins=1000)
y_max = ax.get_ylim()[1]
ax.vlines(cutoff, 0, y_max/2, color='r')
plt.show()
print(candidate_quants[:,order[0:3]]) # Showing the quantiles of selected
print(candidate_quants[:,order[n_genes-3:n_genes]])
print(ref_quants)
"""
# Retrieving desired number of genes #
similar_genes = candidate_genes[order[0:n_genes]]
return similar_genes | 3,794 |
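# --- Usage sketch (added for illustration; not part of the original source) ---
# Variable names below are hypothetical: `expr_vector` is the expression of the gene of
# interest across spots/cells, `candidate_quants` has shape (len(quantiles), n_candidates),
# and `candidate_names` labels its columns.
#
#   similar = get_similar_genes_Quantiles(
#       gene_expr=expr_vector,
#       n_genes=10,
#       candidate_quants=candidate_quants,
#       candidate_genes=candidate_names,
#   )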
def get_date_strings():
"""
Get date strings for last month and this month in "%Y%m" format, e.g. "202201"
"""
today = date.today()
first = today.replace(day=1)
last_month = first - timedelta(days=1)
this_month_string = today.strftime("%Y%m")
last_month_string = last_month.strftime("%Y%m")
return this_month_string, last_month_string | 3,795 |
def get_gpa(cookie, sno, year='', term=''):
"""
Get the total grade points earned so far: major compulsory, general compulsory, general elective, and major elective courses
"""
logging.debug('Getting gpa: %s %s %s %s', sno, year, term, cookie)
url = 'http://uems.sysu.edu.cn/jwxt/xscjcxAction/xscjcxAction.action?method=getAllJd'
query_json = """
{
header: {
"code": -100,
"message": {
"title": "",
"detail": ""
}
},
body: {
dataStores: {
jdStore: {
rowSet: {
"primary": [],
"filter": [],
"delete": []
},
name: "jdStore",
pageNumber: 1,
pageSize: 2147483647,
recordCount: 0,
rowSetName: "pojo_com.neusoft.education.sysu.djks.ksgl.model.TwoColumnModel"
}
},
parameters: {
"args": [
"%s",
"%s",
"%s",
""
]
}
}
}
""" %(sno, year, term)
return retrive_data(url, cookie, query_json) | 3,796 |
def get_status(conf):
"""
Find out what state we are in, i.e. what steps have been done, etc.
- are there unjudged terms? update with judgements from the file (if it exists)
- if all terms are judged, we can proceed to the next step, so we set the new seed terms
"""
conn, domain, model = conf['db'], conf['domain'], conf['model']
dbc = conn.cursor()
# 1. which is the last step
dbc.execute("SELECT MAX(step) FROM data WHERE domain='%s' AND model='%s'" % (domain, model))
max_step = dbc.fetchone()[0]
conf['step'] = max_step
print "current max_step", max_step
# see if there are unjudged terms in DB?
dbc.execute("SELECT COUNT(*) FROM data WHERE domain='%s' AND model='%s' AND relevant IS NULL" % (domain, model))
c = dbc.fetchone()[0]
if c>0:
file_terms = fs_helpers.load_judgements_from_fs(conf)
# step 1 # construct lists of relevant and not relevant-terms and update data table
pos_terms, neg_terms, num_missing = update_data_table(conf, file_terms)
# step 2 # insert into checked_terms table
save_checked_terms(conf, pos_terms, neg_terms)
# are there still unjudged terms? # TODO ? check in DB?
fn = "../data/to_check/" + conf['model'] + "step_" + str(conf['step']) + ".CHECK_ME"
if (num_missing>0):
print "\nTHERE ARE TERMS IN THE TABLE WITHOUT JUDGEMENT -- set relevance\n"
print "See file:", fn, "\n"
sys.exit()
# everything done for this step
if max_step == 3:
print "\n\nstep 3 and everything judged -- we are finished"
print "\n\nlet's try to create a taxonomy:"
generate_taxonomy(conf)
sys.exit()
# get current terms
dbc.execute("SELECT term FROM data WHERE domain='%s' AND model='%s' AND relevant=1" % (domain, model))
rows = dbc.fetchall()
current_terms = [row[0] for row in rows]
print "current_terms", current_terms
### set current seed terms -- for the next iteration!
conf['seeds'] = current_terms | 3,797 |
def hosts_disable_all():
"""
host status 0 = enabled
host status 1 = disabled
"""
logger.info('Disabling all hosts, in blocks of 1000')
hosts = zapi.host.get(output=[ 'hostid' ], search={ 'status': 0 })
maxval = int(ceil(len(hosts) / 1000.0))
bar = ProgressBar(maxval=maxval,widgets=[Percentage(), ReverseBar(), ETA(), RotatingMarker(), Timer()]).start()
i = 0
for i in xrange(maxval):
block = hosts[:1000]
del hosts[:1000]
result = zapi.host.massupdate(hosts=[ x for x in block ], status=1)
i += 1
bar.update(i)
bar.finish()
logger.info('Done')
return | 3,798 |
def run_vep_annotator(vep_data: str, vcf_path: str, out_path: str, fasta: str, vep_custom: Union[str,list]=None, overwrite: bool=False, vep_n_fork: int=4):
"""
Run variant ensembl predictor alone with custom options. See options details at
https://www.ensembl.org/info/docs/tools/vep/script/vep_options.html#opt_af
Parameters
---------
vep_data: str
path to the .vep data where the reference genome is located
vcf_path: str
path to the vcf file
out_path: str
path where output should be saved
fasta: str
relative path to fasta file from vep_folder
vep_custom: str or list
additional options to add to the vep cmd. For instance
'~/.vep/custom/ClinVar/clinvar.vcf.gz,ClinVar,vcf,exact,0,CLNSIG,CLNREVSTAT,CLNDN'
overwrite: bool, optional.
if the output file already exists (from previous run), should it be overwritten?
vep_n_fork: int, optional.
number of forks to be used when running VEP.
"""
vep_path = os.path.normpath(os.path.join(__file__, "../../tools/ensembl-vep/vep"))
need_run = True
if os.path.exists(out_path) and not overwrite:
need_run = False
if need_run:
print("STATUS: RUNNING VEP")
if os.path.exists(out_path):
os.remove(out_path)
print("removed existing file: %s" % out_path)
cmd = """%s \
--dir %s \
--af \
--af_gnomad \
--af_esp \
--clin_sig_allele 0 \
--max_af \
--af_1k \
--no_progress \
--no_stats \
--appris \
--biotype \
--buffer_size 500 \
--canonical \
--ccds \
--check_existing \
--distance 5000 \
--hgvs \
--fork %s \
--numbers \
--mane \
--pick \
--polyphen b \
--protein \
--pubmed \
--regulatory \
--sift b \
--species homo_sapiens \
--symbol \
--transcript_version \
--tsl \
--uniprot \
--input_file %s \
--output_file %s \
--fasta %s \
--cache \
--offline """ % (vep_path, vep_data, vep_n_fork, vcf_path, out_path, fasta)
if vep_custom is not None:
if type(vep_custom) == list:
for v_custom in vep_custom:
cmd += "--custom %s " % v_custom
elif type(vep_custom) == str:
cmd += "--custom %s " % vep_custom
else:
raise ValueError("vep_custom should be of type list or str")
os.system(cmd)
else:
print("output file %s already exists and overwrite is set to False" % out_path) | 3,799 |