import pygame
FLOOR_IMG = pygame.image.load("./asset/image/floor.png").convert_alpha()
class Floor:
def __init__(self, app, position):
self.app = app
self.image = FLOOR_IMG
self.rect = self.image.get_rect(topleft=position)
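# Usage sketch (illustrative; the window size and module name below are assumptions,
# not taken from this file):
#   pygame.init()
#   screen = pygame.display.set_mode((800, 600))  # convert_alpha() above needs an
#                                                 # active display, so set the mode
#                                                 # before importing this module
#   from floor import Floor                       # hypothetical module name
#   f = Floor(app=None, position=(0, 560))
#   screen.blit(f.image, f.rect)
#   pygame.display.flip()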
|
from pathlib import Path
import os
import webutils
import shutil
import json
import difflib
###
# Emojis are generally stored in (emoji:filename) and (emoji:url) pairs
###
class EmojiCache:
_emoji_pngs = []
_init = False
_fresh = False
_guilds = None
# JSON Constants
_EMOJIFILES = 'emoji_files'
_EMOJIURLS = 'emoji_urls'
_GUILDNAME = 'guild_name'
_GUILDID = 'guild_id'
    # Cache layout:
    # {
    #   <guild_id> : {
    #     'guild_name' : <guild_name>,
    #     'guild_id' : <guild_id>,
    #     'emoji_files' : {<emoji_name> : <emoji_filename>, ...},
    #     'emoji_urls' : {<emoji_name> : <emoji_url>, ...},
    #   }
    # }
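    # Example entry (illustrative values only):
    # _cache['720000000000000000'] = {
    #     'guild_name': 'My Guild',
    #     'guild_id': '720000000000000000',
    #     'emoji_files': {'troll': 'troll.png'},
    #     'emoji_urls': {'troll': 'https://cdn.discordapp.com/emojis/720440736251641957.png'},
    # }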
_cache = {}
def __init__(self):
# Try reading in persistent registries
customPath = Path('emojis/custom/')
if not customPath.exists():
os.makedirs(customPath)
subDirs = next(os.walk(customPath))[1]
for sd in subDirs:
# Check for an emojis.json file in each subdir
self._refreshGuildReg(customPath / sd)
# Update the emoji registry
# We always start over from scratch
def _writeGuildReg(self, guild, path, files, urls):
data = {}
data[self._GUILDNAME] = guild.name
data[self._GUILDID] = guild.id
data[self._EMOJIFILES] = {}
data[self._EMOJIURLS] = {}
        for (name, filename), url in zip(files, urls):
            data[self._EMOJIFILES][name] = filename # {'troll':'troll.jpg'}
            data[self._EMOJIURLS][name] = url # {'troll':'https://cdn.discordapp.com//emojis/720440736251641957.png'}
# serialize
outFile = Path(path) / 'emojis.json'
with open(outFile, 'w') as json_file:
json.dump(data, json_file)
def _writeGuildCache(self, guild, files, urls):
data = {}
data[self._GUILDNAME] = guild.name
data[self._GUILDID] = guild.id
data[self._EMOJIFILES] = {}
data[self._EMOJIURLS] = {}
        for (name, filename), url in zip(files, urls):
            data[self._EMOJIFILES][name] = filename
            data[self._EMOJIURLS][name] = url
# cache
self._cache[str(guild.id)] = data
def _refreshGuildReg(self, guildFolder):
entries = []
sPath = guildFolder / "emojis.json"
if sPath.exists():
try:
with open(sPath, 'r') as json_file:
data = json.load(json_file)
id = str(data[self._GUILDID])
name = data[self._GUILDNAME]
files = data[self._EMOJIFILES]
urls = data[self._EMOJIURLS]
self._cache[id] = {}
self._cache[id][self._GUILDID] = id
self._cache[id][self._GUILDNAME] = name
self._cache[id][self._EMOJIFILES] = files
self._cache[id][self._EMOJIURLS] = urls
        except Exception as err:
# Something is wrong with the JSON - delete it so next try refreshes
print('EmojiCache exception. ' + f"Unexpected {err}, {type(err)}")
shutil.rmtree(guildFolder)
async def getFuzzyEmojiFilePath(self, guild, emoji_name):
try:
cached_guild = self._cache[str(guild.id)]
except KeyError:
# Try fetching the guild now
(entries, _) = self.fetchEmojis(guild)
if len(entries) == 0:
return None
cached_guild = self._cache[str(guild.id)]
try:
name = cached_guild[self._GUILDNAME]
guildName = name.replace(' ', '_')
entries = cached_guild[self._EMOJIFILES]
eKeys = entries.keys()
# difflib is overkill and doesn't match some normal substring use cases
#eMatches = difflib.get_close_matches(emoji_name, eKeys)
eMatches = [i for i in eKeys if emoji_name in i]
if len(eMatches) > 0:
return (Path(f'emojis/custom/{guildName}/'), eMatches)
except KeyError:
return None
return (None, None)
def getEmojiFilePath(self, guild, emoji_name):
try:
cached_guild = self._cache[str(guild.id)]
except KeyError:
# Try fetching the guild now
entries, _ = self.fetchEmojis(guild)
if len(entries) == 0:
return None
cached_guild = self._cache[str(guild.id)]
try:
name = cached_guild[self._GUILDNAME]
guildName = name.replace(' ', '_')
entries = cached_guild[self._EMOJIFILES]
emojiPath = entries[emoji_name]
filePath = Path(f'emojis/custom/{guildName}/{emojiPath}')
except KeyError:
return None
return filePath
def getEmojiList(self, guild):
try:
cached_guild = self._cache[str(guild.id)]
except KeyError:
# Try fetching the guild now
entries, _ = self.fetchEmojis(guild)
if len(entries) == 0:
return None
cached_guild = self._cache[str(guild.id)]
        return sorted(cached_guild[self._EMOJIFILES].keys())
async def getEmojiURLs(self, guild, emoji_name):
try:
cached_guild = self._cache[str(guild.id)]
except KeyError:
# Try fetching the guild now
(entries, _) = self.fetchEmojis(guild)
if len(entries) == 0:
return None
cached_guild = self._cache[str(guild.id)]
try:
name = cached_guild[self._GUILDNAME]
guildName = name.replace(' ', '_')
entries = cached_guild[self._EMOJIURLS]
eKeys = entries.keys()
# difflib is overkill and doesn't match some normal substring use cases
#eMatches = difflib.get_close_matches(emoji_name, eKeys)
keyMatches = [i for i in eKeys if emoji_name in i]
if len(keyMatches) > 0:
urlMatches = [entries[i] for i in keyMatches]
urlPairs = tuple(zip(keyMatches, urlMatches))
return urlPairs
except KeyError:
return None
return None
# Takes a Discord guild object
def fetchEmojis(self, guild):
# sanitize the guild name
guildName = guild.name.replace(' ', '_')
emojiPath = Path('emojis')
customPath = emojiPath / Path(f'custom/{guildName}')
if not emojiPath.exists():
os.mkdir(emojiPath)
if not customPath.exists():
os.mkdir(customPath)
emojiFiles = []
emojiURLs = []
# Get all the custom emojis
for e in guild.emojis:
emojiURLs.append(webutils.unpackUrl(e.url))
filename = f"{e.name}.png"
emojiFiles.append((e.name, filename))
webutils.download(e.url, customPath, filename)
# Write the registry for this guild
self._writeGuildReg(guild, customPath, emojiFiles, emojiURLs)
self._writeGuildCache(guild, emojiFiles, emojiURLs)
return (emojiFiles, emojiURLs)
def clearEmojis(self, guild):
self._cache.pop(str(guild.id), None)
guildName = guild.name.replace(' ', '_')
emojiPath = Path(f'emojis/custom/{guildName}')
cnt = 0
if emojiPath.exists():
cnt = len([name for name in os.listdir(emojiPath) if os.path.isfile(os.path.join(emojiPath, name))])
shutil.rmtree(emojiPath)
return cnt
else:
print(f"path: {emojiPath.resolve()}")
            return 0
|
from django.db import models
from iioy.core.fields import SlugField
from iioy.core.models import BaseTmdbModel
class Movie(BaseTmdbModel):
title = models.TextField()
original_title = models.TextField()
slug = SlugField(slug_field='title')
tagline = models.TextField(null=True)
budget = models.BigIntegerField(null=True)
revenue = models.BigIntegerField(null=True)
homepage = models.URLField(null=True)
imdb_id = models.TextField()
synopsis = models.TextField(null=True)
runtime = models.IntegerField(null=True) # in minutes
mpaa_rating = models.TextField(null=True)
release_date = models.DateField(null=True)
backdrop_url = models.URLField(null=True)
mobile_backdrop_url = models.URLField(null=True)
poster_url = models.URLField(null=True)
mobile_poster_url = models.URLField(null=True)
trailer_url = models.URLField(null=True)
genres = models.ManyToManyField(
to='movies.Genre',
related_name='movies',
)
similar_movies = models.ManyToManyField(to='self')
class Meta:
indexes = [
models.Index(fields=['tmdb_id']),
models.Index(fields=['imdb_id']),
]
def __str__(self):
return self.title
def is_missing_data(self):
return (
self.synopsis is None
or self.backdrop_url is None
or not self.genres.exists()
)
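    # Usage sketch (illustrative; assumes the usual Django ORM setup and that the
    # referenced movies.Genre model exists):
    #   incomplete = [m for m in Movie.objects.all() if m.is_missing_data()]
    #   Movie.objects.filter(imdb_id='tt0133093')   # imdb_id value is hypothetical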
|
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain VIT"""
import torch
import torch.nn.functional as F
from functools import partial
from megatron import get_args, get_timers, mpu, print_rank_0, print_rank_last
from megatron.data.vit_dataset import build_train_valid_datasets
from megatron.model.vision.inpainting import VitInpaintingModel, MitInpaintingModel
from megatron.training import pretrain
from megatron.utils import average_losses_across_data_parallel_group
from tasks.vision.metrics import SSIM, PSNR
from megatron.model import ModelType
def model_provider(pre_process=True, post_process=True):
"""Build the model."""
args = get_args()
if args.vision_backbone_type == 'vit':
model = VitInpaintingModel(pre_process=pre_process,
post_process=post_process)
elif args.vision_backbone_type == 'mit':
model = MitInpaintingModel(pre_process=pre_process,
post_process=post_process)
else:
raise Exception('{} vision backbone is not supported.'.format(
args.vision_backbone_type))
return model
def get_batch(data_iterator):
"""Build the batch."""
data = next(data_iterator)
# only data parallelism; no need for broadcast
images = data[0][0].cuda()
masks = data[0][1].cuda()
return images, masks
def loss_func(images, masks, masked_images, outputs, collect_data=False):
outputs = outputs.contiguous().float()
masks_flip = 1-masks
flip_masked_outputs = outputs.masked_fill(masks_flip.bool(), 0)
flip_masked_images = images.masked_fill(masks_flip.bool(), 0)
ssim_fun = SSIM()
psnr_fun = PSNR()
if not collect_data:
mask_count = torch.count_nonzero(masks)
loss = F.mse_loss(
flip_masked_outputs,
flip_masked_images.float(),
reduction="sum"
)
loss = loss/mask_count
ssim = ssim_fun(flip_masked_outputs, flip_masked_images.float())
psnr = psnr_fun(flip_masked_outputs, flip_masked_images.float())
averaged_loss = average_losses_across_data_parallel_group(
[loss, psnr, ssim]
)
return loss, {"loss": averaged_loss[0],
"psnr": averaged_loss[1],
'ssim': averaged_loss[2]}
else:
synth_images = masked_images.float() + flip_masked_outputs
ssim = ssim_fun(synth_images, images.float())
psnr = psnr_fun(synth_images, images.float())
return torch.cat((images, masked_images, synth_images), dim=2), ssim, psnr
def forward_step(data_iterator, model):
"""Forward step."""
timers = get_timers()
# Get the batch.
timers("batch-generator").start()
(
images,
masks,
) = get_batch(data_iterator)
timers("batch-generator").stop()
masked_images = images.masked_fill(masks.bool(), 0)
outputs = model(masked_images)
# Forward mode
return outputs, partial(loss_func, images, masks, masked_images)
def process_non_loss_data(data, iteration, writer):
psnr_sum = 0
ssim_sum = 0
for (output_tb, ssim, psnr) in data:
output_tb[output_tb < 0] = 0
output_tb[output_tb > 1] = 1
writer.add_images("gt-input-output-vald", output_tb,
global_step=iteration, walltime=None,
dataformats='NCHW')
psnr_sum = psnr_sum + psnr.item()
ssim_sum = ssim_sum + ssim.item()
psnr = psnr_sum/len(data)
ssim = ssim_sum/len(data)
writer.add_scalar('PSNR generate value-validation', psnr, iteration)
writer.add_scalar('SSIM generate value-validation', ssim, iteration)
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0(
"> building train, validation, and test datasets " "for VIT ..."
)
train_ds, valid_ds = build_train_valid_datasets(
data_path=args.data_path,
image_size=(args.img_h, args.img_w)
)
print_rank_0("> finished creating VIT datasets ...")
return train_ds, valid_ds, None
if __name__ == "__main__":
pretrain(
train_valid_test_datasets_provider,
model_provider,
ModelType.encoder_or_decoder,
forward_step,
process_non_loss_data,
args_defaults={'dataloader_type': 'cyclic', 'vision_pretraining': True}
)
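# The loss above is mean-squared error restricted to the masked (inpainted) region:
# predictions and targets are zeroed outside the mask, the squared error is summed,
# and the sum is divided by the number of masked elements. Stand-alone sketch in
# plain PyTorch (illustrative shapes, not part of the Megatron pipeline):
#   images = torch.rand(2, 3, 32, 32)
#   masks = (torch.rand(2, 3, 32, 32) > 0.5).float()
#   outputs = torch.rand(2, 3, 32, 32)
#   outside = (1 - masks).bool()
#   loss = F.mse_loss(outputs.masked_fill(outside, 0),
#                     images.masked_fill(outside, 0),
#                     reduction="sum") / torch.count_nonzero(masks)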
|
#!/usr/bin/env python
# time_frames.py
# A helper script to quickly and efficiently time animated gifs from sources with erratic key frame rates.
# Layer names must contain original frame numbers (e.g. shot0001.png, frame0001.png, etc.).
# Simply delete duplicate frames (but leave the last one) and run the script to get proper timings added.
# Also don't forget that the first frame will likely be renamed "Background". Change it back before
# deleting anything or you'll lose the timing for it!
# If you want to keep the last frame, time it manually and it will be ignored.
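# Worked example: with a manually timed top layer "frame0004 (40ms)" and a layer
# "frame0001" below it, frame0001 is tagged "(120ms)", since (4 - 1) * 40 = 120
# (the script assumes 40 ms per source frame, i.e. a 25 fps source).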
from gimpfu import *
import re
def time_frames(image, drawable):
image = gimp.image_list()[0]
keep_last = (image.layers[0].name.find('ms') != -1)
for layer in image.layers:
if not 'prev' in locals():
prev = int(re.search(r'\d+', layer.name).group())
if not keep_last:
prev += 1
else:
gn = prev
prev = ln = int(re.search(r'\d+', layer.name).group())
delay = (gn - ln) * 40
layer.name += '(' + str(delay) + 'ms)'
if not keep_last:
image.remove_layer(image.layers[0])
register(
"time_frames",
"Add timing to frames based on difference in layer names",
"Add timing to frames based on difference in layer names",
"Cory Schmunsler",
"WTFPL",
"2013",
"<Image>/Filters/Animation/Time Frames",
"*",
[],
[],
time_frames)
main()
|
from konlpy.tag import Kkma
kkma = Kkma()
malist = kkma.pos("아버지 가방에 들어가신다.")
print(malist)
|
import sys
import os
import json
# py2/3 imports fix
from .gmapi import GraymetaClient
from .cli import CLI
from .constants import *
COMMAND="gm"
def usageAndDie():
print("gm is a tool for querying a graymeta.com installation over https")
print("https://github.com/simonski/gm-api-python")
print("")
print("Usage: ")
print("")
print(" gm command [arguments]")
print("")
server_url = os.environ.get("GRAYMETA_SERVER_URL") or None
server_key = os.environ.get("GRAYMETA_API_KEY") or None
ljust_value = 60
if server_url:
print(" GRAYMETA_SERVER_URL".ljust(ljust_value) + ": " + server_url)
else:
print(" GRAYMETA_SERVER_URL".ljust(ljust_value) + ": unset - please `export GRAYMETA_SERVER_URL=https://your-graymeta-server`")
if server_key:
print(" GRAYMETA_API_KEY".ljust(ljust_value) + ": xxxxxxxx")
else:
print(" GRAYMETA_API_KEY".ljust(ljust_value) + ": unset - please `export GRAYMETA_API_KEY=xxxxxxx`")
print("")
print("The commands are:")
print("")
print(" list_locations".ljust(ljust_value) + "- displays all locations")
print(" list_location {location_id}".ljust(ljust_value) + "- gets information on a specific location")
print("")
print(" list_containers {location_id}".ljust(ljust_value) + "- displays enabled containers")
print(" list_all_containers {location_id}".ljust(ljust_value) + "- displays all containers")
print("")
print(" search -json".ljust(ljust_value) + "- displays all items (-json prints json)")
print(" -last_modified_from|-last_modified_to")
print(" -last_harvested_from|-last_harvested_to")
print("")
print(" get_gm_item_id {location_id} {container_id} {item_id}".ljust(ljust_value) + "- gets the gm_item_id for ")
print(" get_gm_item {gm_item_id}".ljust(ljust_value) + "- gets metadata for an item using the gm_item_id")
print(" get_gm_item_v2 {gm_item_id}".ljust(ljust_value) + "- gets metadata v2 for an item using the gm_item_id")
print("")
print(" create_gm_item_id_from_s3_key {s3_key}".ljust(ljust_value) + "- create gm_item_id from an s3_key")
print(" get_gm_item_id_from_s3_key {s3_key}".ljust(ljust_value) + "- gets gm_item_id from an s3_key")
print(" get_gm_item_from_s3_key {s3_key}".ljust(ljust_value) + "- gets metadata for an item using the s3_key")
print("")
print(" delete_gm_item {gm_item_id}".ljust(ljust_value) + "- deletes the metadata from graymeta")
print("")
print(" get_captions {gm_item_id}".ljust(ljust_value) + "- returns the captions in json")
print(" upload_captions {gm_item_id} {stl_filename} ".ljust(ljust_value) + "- uploads and associates an STL file with content")
print(" delete_captions {gm_item_id} {captions_id}".ljust(ljust_value) + "- deletes the captions from the item")
print(" upload_captions_content {gm_item_id} {stl_filename} ".ljust(ljust_value) + "- uploads and associates an STL file with content")
print("")
print(" harvest_item_from_s3_key {s3_key} {extractors,,}".ljust(ljust_value) + "- forces a harvest for an item via its S3 key")
print(" harvest_container {location_id} {container_id}".ljust(ljust_value) + "- forces a harvest for an entire container.")
print(" harvest_item {gm_item_id} {extractors,,}".ljust(ljust_value) + "- forces a harvest for an item via its gm_item_id")
print("")
print(" comment".ljust(ljust_value) + "- uses the Graymeta Comments API")
print("")
print(" keyword".ljust(ljust_value) + "- uses the Graymeta Keywords API")
print("")
print(" extract_all".ljust(ljust_value) + "- extracts all metadata")
print(" extract (-q term)".ljust(ljust_value) + "- extracts all metadata where 'term' is present in the stow_url")
print("")
print(" stats".ljust(ljust_value) + "- print current /api/control/system/stats data.")
print(" health".ljust(ljust_value) + "- print current /api/data/healthz data.")
print(" activity".ljust(ljust_value) + "- print current /api/data/activity data.")
print(" version".ljust(ljust_value) + "- print current gmapi version number.")
print(" summary_platform".ljust(ljust_value) + "- print summary information about the platform.")
print(" summary_data".ljust(ljust_value) + "- print summary information about the data.")
print(" get {URL}".ljust(ljust_value) + "- returns response from a GET.")
print("")
sys.exit(0)
def version():
print(COMMAND + " client " + VERSION)
def main():
if len(sys.argv) == 1:
usageAndDie()
command = sys.argv[1]
if command == "version":
version()
sys.exit(1)
cli = CLI(sys.argv)
server_url = os.environ.get("GRAYMETA_SERVER_URL") or None
server_key = os.environ.get("GRAYMETA_API_KEY") or None
if server_url is None or server_url.strip() == "":
print("Error, GRAYMETA_SERVER_URL is required.")
sys.exit(1)
if server_key is None or server_key.strip() == "":
print("Error, GRAYMETA_API_KEY is required.")
sys.exit(1)
gm = GraymetaClient(server_url, server_key)
if cli.containsKey("-nossl"):
gm.SSL_VERIFY = False
if cli.containsKey("-v") or cli.containsKey("--verbose") or cli.containsKey("-verbose"):
gm.verbose = True
if command == "upload_captions":
gm_item_id = sys.argv[2]
stl_filename = sys.argv[3]
nicePrint(gm.upload_captions(gm_item_id, stl_filename))
elif command == "get_captions":
gm_item_id = sys.argv[2]
nicePrint(gm.get_captions(gm_item_id))
elif command == "delete_captions":
gm_item_id = sys.argv[2]
captions_id = sys.argv[3]
nicePrint(gm.delete_captions(gm_item_id, captions_id))
elif command == "disable_live_harvesting":
gm.disable_live_harvesting()
elif command == "extract_all":
gm.extract_all(cli)
elif command == "extract":
gm.extract(cli)
elif command == "scroll":
nicePrint(gm.scroll())
elif command == "features":
nicePrint(gm.features())
elif command == "summary_platform":
nicePrint(gm.summary_platform())
elif command == "summary_data":
nicePrint(gm.summary_data())
elif command == "comment":
cli = CLI(sys.argv)
command = cli.getOrDefault("comment", "list")
gm_item_id = cli.getOrDie("-gm_item_id")
if command == "add":
comment = cli.getOrDie("-m")
nicePrint(gm.add_comment(gm_item_id, comment))
elif command == "list":
nicePrint(gm.list_comments(gm_item_id))
elif command == "delete":
comment_id = cli.getOrDie("-comment_id")
nicePrint(gm.delete_comment(gm_item_id, comment_id))
nicePrint(gm.list_comments(gm_item_id))
else:
print("invalid comment command - try 'add, list, delete'")
elif command == "keyword":
cli = CLI(sys.argv)
command = cli.getOrDie("keyword")
if command == "list":
nicePrint(gm.keyword_list_groups())
elif command == "get":
group_id = cli.getOrDie("-group_id")
nicePrint(gm.keyword_get_group(group_id))
elif command == "create_group":
name = cli.getOrDie("-name")
color = "#" + cli.getOrDie("-color")
nicePrint(gm.keyword_create_group(name, color))
elif command == "delete_group":
group_id = cli.getOrDie("-group_id")
nicePrint(gm.keyword_delete_group(group_id))
elif command == "add_to_group":
group_id = cli.getOrDie("-group_id")
word = cli.getOrDie("-word")
nicePrint(gm.keyword_add_to_group(group_id, word))
elif command == "remove_from_group":
group_id = cli.getOrDie("-group_id")
word = cli.getOrDie("-word")
nicePrint(gm.keyword_remove_from_group(group_id, word))
else:
print("gm keyword (list | get | create_group | delete_group | add_to_group | remove_from_group)")
elif command == "list_locations":
nicePrint(gm.list_locations())
elif command == "list_location":
location_id = sys.argv[2]
nicePrint(gm.list_location(location_id))
elif command == "list_all_containers":
location_id = sys.argv[2]
nicePrint(gm.list_containers(location_id))
elif command == "list_containers":
nicePrint(gm.list_enabled_containers())
elif command == "harvest_container":
location_id = sys.argv[2]
container_id = sys.argv[3]
nicePrint(gm.harvest_container(location_id, container_id))
elif command == "harvest_item_from_s3_key":
s3_key = sys.argv[2]
extractors = sys.argv[3].split(",")
response = gm.create_gm_item_id_from_s3_key(s3_key)
nicePrint(response)
#gm_item_id, location_id = gm.get_gm_item_id_from_s3_key(s3_key)
#nicePrint(gm.harvest_item(location_id, gm_item_id))
"""
elif command == "harvest_item":
location_id = sys.argv[2]
gm_item_id = sys.argv[3]
nicePrint(gm.harvest_item(location_id, gm_item_id))
"""
elif command == "create_gm_item_id_from_s3_key":
s3_key = sys.argv[2]
nicePrint(gm.create_gm_item_id_from_s3_key(s3_key))
elif command == "get_gm_item_id_from_s3_key":
s3_key = sys.argv[2]
gm_item_id = gm.get_gm_item_id_from_s3_key(s3_key)
print(gm_item_id)
elif command == "get_gm_item_id":
location_id = sys.argv[2]
container_id = sys.argv[3]
item_id = sys.argv[4]
nicePrint(gm.get_gm_item_id(location_id, container_id, item_id))
elif command == "get_gm_item_from_s3_key":
s3_key = sys.argv[2]
nicePrint(gm.get_gm_item_from_s3_key(s3_key))
elif command == "get_gm_item":
gm_item_id = sys.argv[2]
nicePrint(gm.get_gm_item(gm_item_id))
elif command == "get_gm_item_v2":
gm_item_id = sys.argv[2]
nicePrint(gm.get_gm_item_v2(gm_item_id))
elif command == "delete_gm_item":
gm_item_id = sys.argv[2]
nicePrint(gm.delete_gm_item(gm_item_id))
elif command == "health":
nicePrint(gm.health())
elif command == "stats":
nicePrint(gm.stats())
elif command == "activity":
nicePrint(gm.activity())
elif command == "user":
nicePrint(gm.user())
elif command == "platform":
nicePrint(gm.platform())
elif command == "compilations":
nicePrint(gm.compilations())
elif command == "search_quick":
results = gm.search_quick()
nicePrint(results)
elif command == "search_extracted":
results = gm.search_extracted()
elif command == "search_not_extracted":
results = gm.search_not_extracted()
elif command == "idle":
results = gm.isIdle()
print(results)
elif command == "search":
results = None
if cli.containsKey("-last_modified_from") or cli.containsKey("-last_modified_to"):
last_modified_from = cli.getOrDie("-last_modified_from")
last_modified_to = cli.getOrDie("-last_modified_to")
results = gm.search_last_modified(last_modified_from, last_modified_to)
elif cli.containsKey("-last_harvested_from") or cli.containsKey("-last_harvested_to"):
last_harvested_from = cli.getOrDie("-last_harvested_from")
last_harvested_to = cli.getOrDie("-last_harvested_to")
results = gm.search_last_harvested(last_harvested_from, last_harvested_to)
else:
results = gm.search()
if cli.containsKey("-json"):
nicePrint(results)
else:
if not "results" in results:
print("No results found.")
print(results)
else:
print("ItemID".ljust(35)+"Last Harvested".ljust(27) + "Last Modified".ljust(27) + "Name".ljust(20))
for entry in results["results"]:
result = entry["result"]
gm_item_id = result["_id"]
container = result.get("stow_container_id") or "stow_container_id"
name = result.get("name") or None
last_modified = result.get("last_modified") or "no last modified."
last_harvested = result.get("last_harvested") or "no last harvested."
if name is not None:
full_name = container + "/" + name
else:
full_name = "<not harvested> ( " + result.get("stow_url") + " )"
print(gm_item_id.ljust(35) + last_harvested.ljust(27) + last_modified.ljust(27) + full_name.ljust(20))
elif command == "get":
"""
performs an authenticated GET to the API
"""
partial_url = sys.argv[2]
nicePrint(gm.http_get(partial_url))
else:
print("I don't know how to '" + command + "'")
sys.exit(1)
def nicePrint(data):
if data:
print(json.dumps(data, indent=4))
else:
print("No data found.")
|
# -*- coding: utf-8 -*-
import multiprocessing
def is_divisible(n):
if n % 3 == 0 or n % 5 == 0:
return True
else:
return False
# def sum_divisibles(ns):
# return sum(filter(is_divisible, ns))
def sum_divisibles(ns):
"""return paras.
no good
"""
print 'group'
return sum(filter(is_divisible, ns))
pool = multiprocessing.Pool() # Create a pool of worker processes (one per CPU core by default)
partial_sums = pool.map(sum_divisibles, [range(1, 5001), range(5001, 10001)])
print sum(partial_sums)
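# Note (sketch, not part of the original script): on platforms that spawn workers
# instead of forking (e.g. Windows), creating the Pool at import time re-executes
# this module in every worker; guarding the parallel section avoids that:
#   if __name__ == '__main__':
#       pool = multiprocessing.Pool()
#       partial_sums = pool.map(sum_divisibles, [range(1, 5001), range(5001, 10001)])
#       print sum(partial_sums)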
|
# Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module contains a collection of functions related to
geographical data.
"""
from .utils import sorted_by_key # noqa
from haversine import haversine, Unit
'''import plotly.express as px'''
def stations_by_distance(stations, p): #1B
    distance = []
    names = []
    towns = []
    for item in stations:
        distance.append(haversine(item.coord, p))
        names.append(item.name)
        towns.append(item.town)
    finaldistancelist = list(zip(names, towns, distance))
    finaldistancelist.sort(key=lambda x: x[2])
    return finaldistancelist
def stations_within_radius(stations, centre, r): #1C
'''Returns a list (unsorted) of all stations (type MonitoringStation) within a radius
r from a centre coordinate'''
within_radius = []
for station in stations:
distance = haversine(centre, station.coord) #calculates distance using haversine
if distance <= r:
within_radius.append(station)
return within_radius
def rivers_with_station(stations): #1D
'''returns a sorted set of rivers with stations
eg {'Amazon', 'Euphrates', 'Ganges', 'Nile', 'Zambezi'}'''
rivers = set() #empty set to put rivers with stations into
for station in stations:
rivers.add(station.river)
return sorted(rivers)
def stations_by_river(stations): #1D
"""returns a dictionary that maps river names (the 'key') to a list of station objects on a given river
{'River Cam':[Station1, Station2]} where StationX is the MonitoringStation class"""
dictionary = {}
rivers = rivers_with_station(stations) #gets a list of rivers to find stations on
for i in range (len(rivers)): #iterating across all rivers
stations_on_river = [] #empties the list after the ith river iteration
for station in stations: #iterates across all stations and looks if the river matches
if rivers[i] == station.river:
stations_on_river.append(station)
dictionary[str(rivers[i])] = stations_on_river #adds the station list
return dictionary
def rivers_by_station_number(stations, N): #1E
    """Determines the N rivers with the greatest number of monitoring stations.
    Returns a list of (river name, number of stations) tuples, sorted by the
    number of stations. If more rivers have the same number of stations as the
    Nth entry, these rivers are included in the list as well."""
rivers = rivers_with_station(stations)
riverslist = [] #empty river list
listrivernumber = [] #create final empty list for river name and number of stations
for x in stations:
riverslist.append(x.river) #get river list with repetition
for y in rivers:
occurence = riverslist.count(y) #count number of times each river comes up
tuples = (y, occurence)
listrivernumber.append(tuples) #create list
    listrivernumber.sort(key=lambda x: x[1]) #sort by second element of tuple
    endlist = listrivernumber[-N:]
    i = 1
    while N + i <= len(listrivernumber) and listrivernumber[-N - i][1] == listrivernumber[-N][1]:
        endlist.insert(0, listrivernumber[-N - i]) #include rivers tied with the Nth entry
        i += 1
    return endlist
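# Worked example (illustrative data): for station counts
#   [('Aire', 1), ('Cam', 3), ('Thames', 3), ('Severn', 3)]
# and N = 2, the tie with the 2nd entry is kept, giving
#   [('Cam', 3), ('Thames', 3), ('Severn', 3)]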
'''def plot_stations(stations):
px.set_mapbox_access_token(open(".mapbox_token").read())
fig = px.scatter_mapbox(stations,
lat=stations.coord[0],
lon=stations.coord[1],
hover_name=stations.name,
zoom=1)
fig.show()'''
|
from pyramid.response import Response
import os
HERE = os.path.dirname(__file__)
def list_(request):
    with open(os.path.join(HERE, 'templates/home.html')) as f:
        return Response(f.read())
def my_view2(request):
    with open(os.path.join(HERE, 'templates/sample2.html')) as f:
        return Response(f.read())
def create(request):
    with open(os.path.join(HERE, 'templates/new-entry.html')) as f:
        return Response(f.read())
def detail(request):
    with open(os.path.join(HERE, 'templates/single-entry.html')) as f:
        return Response(f.read())
def update(request):
    with open(os.path.join(HERE, 'templates/edit-entry.html')) as f:
        return Response(f.read())
def bootstrap(request):
    with open(os.path.join(HERE, 'navbar-static-top/index.html')) as f:
        return Response(f.read())
def single_entry():
pass
def edit_entry():
pass
def includeme(config):
config.add_view(list_, route_name='list')
config.add_view(detail, route_name='detail')
config.add_view(create, route_name='create')
config.add_view(update, route_name='update')
config.add_view(bootstrap, route_name='bootstrap')
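# The view registrations above assume matching routes are added elsewhere in the
# application setup; a minimal sketch (URL patterns are illustrative guesses, only
# the route names come from this file):
#   config.add_route('list', '/')
#   config.add_route('create', '/new-entry')
#   config.add_route('detail', '/entry/{id}')
#   config.add_route('update', '/entry/{id}/edit')
#   config.add_route('bootstrap', '/bootstrap')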
|
"""
Copyright, the CVXPY authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.atom import Atom
from typing import Tuple
import numpy as np
class length(Atom):
"""Length of a vector (index of last nonzero, ones-based).
"""
def __init__(self, x) -> None:
super(length, self).__init__(x)
if not self.args[0].is_vector():
raise ValueError(
"`length` can only be applied to vectors.")
@Atom.numpy_numeric
def numeric(self, values) -> int:
"""Returns the length of x.
"""
return np.max(np.nonzero(values[0])) + 1
def shape_from_args(self):
"""Returns the (row, col) shape of the expression.
"""
return tuple()
def sign_from_args(self) -> Tuple[bool, bool]:
"""Returns sign (is positive, is negative) of the expression.
"""
# Always nonnegative.
return (True, False)
def is_atom_convex(self) -> bool:
"""Is the atom convex?
"""
return False
def is_atom_concave(self) -> bool:
"""Is the atom concave?
"""
return False
def is_atom_quasiconvex(self) -> bool:
"""Is the atom quasiconvex?
"""
return True
def is_atom_quasiconcave(self) -> bool:
"""Is the atom quasiconvex?
"""
return False
def is_incr(self, idx) -> bool:
"""Is the composition non-decreasing in argument idx?
"""
return False
def is_decr(self, idx) -> bool:
"""Is the composition non-increasing in argument idx?
"""
return False
def _grad(self, values) -> None:
return None
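# Numeric rule illustration (sketch, separate from the atom above):
#   x = np.array([0, 2, 0, 3, 0])
#   np.max(np.nonzero(x)) + 1   # -> 4, the one-based index of the last nonzero entry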
|
import sys
import keras
import tensorflow as tf
from keras import backend as K
from keras.models import Sequential
from keras.layers import Input, Conv3D, MaxPooling3D, Conv3DTranspose, BatchNormalization, GlobalAvgPool3D, GlobalMaxPooling3D
from keras.layers import concatenate, Reshape, Activation, Permute, Softmax, Lambda, Dense, Flatten, Dropout
from keras.models import Model
from keras.optimizers import Adam
from keras.backend.tensorflow_backend import set_session
sys.path.append('/home/toosyou/projects/keras-retinanet')
from keras_retinanet import initializers
def focal_loss(gamma=2, alpha=0.75):
def focal_loss_fixed(y_true, y_pred):#with tensorflow
eps = 1e-12
y_pred=K.clip(y_pred,eps,1.-eps)#improve the stability of the focal loss and see issues 1 for more information
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
# pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1))# -K.sum((1-alpha) * K.pow( pt_0, gamma) * K.log(1. - pt_0))
return focal_loss_fixed
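# focal_loss above returns FL = -sum(alpha * (1 - p_t)**gamma * log(p_t)) over the
# positive class only (positions with y_true == 1); the commented-out term would add
# the negative-class part. Note the loss is summed, not averaged, over the batch.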
def get_unet_model(number_filter_base = 16):
def downsample_block(input, concated, filters):
net = Conv3D(filters, 3, activation='relu', padding='same')(input)
net = Conv3D(filters, 3, activation='relu', padding='same')(net)
net = BatchNormalization()(net)
net = MaxPooling3D(2, padding='same')(net)
if concated is not None:
net = concatenate([net, concated])
return net
def upsample_block(input, concated, filters):
net = concatenate([Conv3DTranspose(filters, 3, strides=2, padding='same')(input), concated])
net = Conv3D(filters, 3, activation='relu', padding='same')(net)
net = Conv3D(filters, 3, activation='relu', padding='same')(net)
net = BatchNormalization()(net)
return net
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
# building unet-like model
downsample_outputs = [0] * 4
upsample_outputs = [0] * 4
inputs = Input((64, 64, 16, 1))
net = inputs
for i in range(4):
net = downsample_block(net, None, number_filter_base*(2**i))
downsample_outputs[i] = net
upsample_outputs[0] = net
for i in range(3):
net = upsample_block(net, downsample_outputs[2-i], number_filter_base*(2**(2-i)))
upsample_outputs[i+1] = net
for i in range(3):
net = downsample_block(net, upsample_outputs[2-i], number_filter_base*(2**(i+1)))
net = Conv3D(number_filter_base*8, 3, activation='relu', padding='same')(net)
net = BatchNormalization()(net)
net = Conv3D(number_filter_base*8, 3, activation='relu', padding='same')(net)
# net = GlobalMaxPooling3D()(net)
net = GlobalAvgPool3D()(net)
net = BatchNormalization()(net)
net = Dense(2, activation='softmax')(net)
model = Model(inputs=inputs, outputs=net)
training_model = keras.utils.multi_gpu_model(model, gpus=2)
training_model.compile(optimizer=Adam(amsgrad=True), loss=focal_loss(alpha=0.9, gamma=2.), metrics=['accuracy'])
return model, training_model
def get_model():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
number_filter_base = 32
model = Sequential([
Conv3D(number_filter_base, 3, padding='same', activation='relu', input_shape=(64, 64, 16, 1)),
BatchNormalization(),
Conv3D(number_filter_base, 3, padding='same', activation='relu'),
BatchNormalization(),
MaxPooling3D(2, padding='same'), # 32, 32, 8, ?
Conv3D(number_filter_base*2, 3, padding='same', activation='relu'),
BatchNormalization(),
Conv3D(number_filter_base*2, 3, padding='same', activation='relu'),
BatchNormalization(),
MaxPooling3D(2, padding='same'), # 16, 16, 4, ?
Conv3D(number_filter_base*4, 3, padding='same', activation='relu'),
BatchNormalization(),
# Conv3D(number_filter_base*4, 3, padding='same', activation='relu'),
# BatchNormalization(),
MaxPooling3D(2, padding='same'), # 8, 8, 2, ?
Conv3D(number_filter_base*8, 3, padding='same', activation='relu'),
BatchNormalization(),
Conv3D(number_filter_base*8, 3, padding='same', activation='relu'),
BatchNormalization(),
MaxPooling3D(2, padding='same'), # 4, 4, 1, ?
# Conv3D(number_filter_base*16, 3, padding='same', activation='relu'),
# BatchNormalization(),
# Conv3D(number_filter_base*16, 3, padding='same', activation='relu'),
# BatchNormalization(),
# MaxPooling3D((2, 2, 1), padding='same'), # 2, 2, 1, ?
# GlobalMaxPooling3D(), # number_filter_base*16
GlobalAvgPool3D(),
# Flatten(),
# Dense(512, activation='relu'),
# Dropout(rate=0.2),
# BatchNormalization(),
# Dense(256, activation='relu'),
# Dropout(rate=0.2),
# BatchNormalization(),
# Dense(128, activation='relu'),
# Dropout(rate=0.2),
BatchNormalization(),
Dense(2, activation='softmax')
])
training_model = keras.utils.multi_gpu_model(model, gpus=2)
training_model.compile(optimizer=Adam(amsgrad=True), loss='binary_crossentropy', metrics=['accuracy'])
return model, training_model
if __name__ == '__main__':
model, training_model = get_unet_model()
model.summary()
|
#! /usr/bin/env python3
# script to append all the csv data files into 1
import csv
import os
import sys
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
fileNames = []
dataPath = 'CleanedTrafficData' # use your path
for f in os.listdir(dataPath):
# print(f)
if f.endswith('_TrafficForML_CICFlowMeter.csv'):
fileNames.append(f.split('/')[-1])
# print(fileNames)
# fileNames = ['02-14-2018.csv', '02-15-2018.csv', '02-16-2018.csv',
# '02-22-2018.csv', '02-23-2018.csv', '03-01-2018.csv', '03-02-2018.csv']
df = pd.read_csv(os.path.join(dataPath, fileNames[0]))
print(df.shape)
for name in fileNames[1:]:
fname = os.path.join(dataPath, name)
print('appending:', fname)
df1 = pd.read_csv(fname)
df = df.append(df1, ignore_index=True)
df = shuffle(df)
print(df.shape)
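# Note (sketch): DataFrame.append is deprecated and removed in pandas 2.0; the same
# concatenation can be written with pd.concat, e.g.:
#   frames = [pd.read_csv(os.path.join(dataPath, name)) for name in fileNames]
#   df = shuffle(pd.concat(frames, ignore_index=True))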
print('creating multi-class file')
outFile = os.path.join(dataPath, 'IDS-2018-multiclass')
df.to_csv(outFile + '.csv', index=False)
df.to_pickle(outFile + '.pickle')
print('creating binary-class file')
df['Label'] = df['Label'].map(
{'Benign': 0, 'FTP-BruteForce': 1, 'SSH-Bruteforce': 1, 'DoS attacks-GoldenEye': 1, 'DoS attacks-Slowloris': 1,
'DoS attacks-SlowHTTPTest': 1, 'DoS attacks-Hulk': 1, 'Brute Force -Web': 1, 'Brute Force -XSS': 1,
'SQL Injection': 1, 'Infilteration': 1, 'Bot': 1})
print(df['Label'][1:20])
outFile = os.path.join(dataPath, 'IDS-2018-binaryclass')
df.to_csv(outFile + '.csv', index=False)
df.to_pickle(outFile + '.pickle')
print('all done...')
|
import albow
from albow.dialogs import Dialog
from config import config
import pygame
from albow.translate import _, buildTemplate
import sys
import os
import logging
import traceback
import directories
old_lang = None
old_fprop = None
class OptionsPanel(Dialog):
anchor = 'wh'
def __init__(self, mcedit):
Dialog.__init__(self)
self.mcedit = mcedit
self.langs = {}
self.sgnal = {}
self.portableVar = albow.AttrRef(self, 'portableLabelText')
self.saveOldPortable = self.portableVar.get()
self.saveOldConfig = {
config.controls.autobrake: config.controls.autobrake.get(),
config.controls.swapAxes: config.controls.swapAxes.get(),
config.controls.cameraAccel: config.controls.cameraAccel.get(),
config.controls.cameraDrag: config.controls.cameraDrag.get(),
config.controls.cameraMaxSpeed: config.controls.cameraMaxSpeed.get(),
config.controls.cameraBrakingSpeed: config.controls.cameraBrakingSpeed.get(),
config.controls.mouseSpeed: config.controls.mouseSpeed.get(),
config.settings.undoLimit: config.settings.undoLimit.get(),
config.settings.maxCopies: config.settings.maxCopies.get(),
config.controls.invertMousePitch: config.controls.invertMousePitch.get(),
config.settings.spaceHeight: config.settings.spaceHeight.get(),
albow.AttrRef(self, 'blockBuffer'): albow.AttrRef(self, 'blockBuffer').get(),
config.settings.setWindowPlacement: config.settings.setWindowPlacement.get(),
config.settings.rotateBlockBrush: config.settings.rotateBlockBrush.get(),
config.settings.shouldResizeAlert: config.settings.shouldResizeAlert.get(),
config.settings.superSecretSettings: config.settings.superSecretSettings.get(),
config.settings.longDistanceMode: config.settings.longDistanceMode.get(),
config.settings.flyMode: config.settings.flyMode.get(),
config.settings.langCode: config.settings.langCode.get(),
config.settings.compassToggle: config.settings.compassToggle.get(),
config.settings.compassSize: config.settings.compassSize.get(),
config.settings.fontProportion: config.settings.fontProportion.get(),
config.settings.fogIntensity: config.settings.fogIntensity.get(),
config.schematicCopying.cancelCommandBlockOffset: config.schematicCopying.cancelCommandBlockOffset.get()
}
        global old_lang
        if old_lang is None:
            old_lang = config.settings.langCode.get()
        global old_fprop
        if old_fprop is None:
            old_fprop = config.settings.fontProportion.get()
def initComponents(self):
"""Initilize the window components. Call this after translation hs been loaded."""
autoBrakeRow = albow.CheckBoxLabel("Autobrake",
ref=config.controls.autobrake,
tooltipText="Apply brake when not pressing movement keys")
swapAxesRow = albow.CheckBoxLabel("Swap Axes Looking Down",
ref=config.controls.swapAxes,
tooltipText="Change the direction of the Forward and Backward keys when looking down")
cameraAccelRow = albow.FloatInputRow("Camera Acceleration: ",
ref=config.controls.cameraAccel, width=100, min=5.0)
cameraDragRow = albow.FloatInputRow("Camera Drag: ",
ref=config.controls.cameraDrag, width=100, min=1.0)
cameraMaxSpeedRow = albow.FloatInputRow("Camera Max Speed: ",
ref=config.controls.cameraMaxSpeed, width=100, min=1.0)
cameraBrakeSpeedRow = albow.FloatInputRow("Camera Braking Speed: ",
ref=config.controls.cameraBrakingSpeed, width=100,
min=1.0)
mouseSpeedRow = albow.FloatInputRow("Mouse Speed: ",
ref=config.controls.mouseSpeed, width=100, min=0.1,
max=20.0)
undoLimitRow = albow.IntInputRow("Undo Limit: ",
ref=config.settings.undoLimit, width=100, min=0)
maxCopiesRow = albow.IntInputRow("Copy Stack Size: ",
ref=config.settings.maxCopies, width=100, min=0,
tooltipText="Maximum number of copied objects.")
compassSizeRow = albow.IntInputRow("Compass Size (%): ",
ref=config.settings.compassSize, width=100, min=0, max=100)
fontProportion = albow.IntInputRow("Fonts Proportion (%): ",
ref=config.settings.fontProportion, width=100, min=0,
tooltipText="Fonts sizing proportion. The number is a percentage.\nRestart needed!")
albow.resource.font_proportion = config.settings.fontProportion.get()
fogIntensityRow = albow.IntInputRow("Fog Intensity (%): ",
ref=config.settings.fogIntensity, width=100, min=0, max=100)
invertRow = albow.CheckBoxLabel("Invert Mouse",
ref=config.controls.invertMousePitch,
tooltipText="Reverse the up and down motion of the mouse.")
spaceHeightRow = albow.IntInputRow("Low Detail Height",
ref=config.settings.spaceHeight,
tooltipText="When you are this far above the top of the world, move fast and use low-detail mode.")
blockBufferRow = albow.IntInputRow("Block Buffer (MB):",
ref=albow.AttrRef(self, 'blockBuffer'), min=1,
tooltipText="Amount of memory used for temporary storage. When more than this is needed, the disk is used instead.")
setWindowPlacementRow = albow.CheckBoxLabel("Set Window Placement",
ref=config.settings.setWindowPlacement,
tooltipText="Try to save and restore the window position.")
rotateBlockBrushRow = albow.CheckBoxLabel("Rotate block with brush",
ref=config.settings.rotateBlockBrush,
tooltipText="When rotating your brush, also rotate the orientation of the block your brushing with")
        compassToggleRow = albow.CheckBoxLabel("Toggle compass",
ref=config.settings.compassToggle)
windowSizeRow = albow.CheckBoxLabel("Window Resize Alert",
ref=config.settings.shouldResizeAlert,
tooltipText="Reminds you that the cursor won't work correctly after resizing the window.")
superSecretSettingsRow = albow.CheckBoxLabel("Super Secret Settings",
ref=config.settings.superSecretSettings,
tooltipText="Weird stuff happen!")
longDistanceRow = albow.CheckBoxLabel("Long-Distance Mode",
ref=config.settings.longDistanceMode,
tooltipText="Always target the farthest block under the cursor, even in mouselook mode.")
flyModeRow = albow.CheckBoxLabel("Fly Mode",
ref=config.settings.flyMode,
tooltipText="Moving forward and Backward will not change your altitude in Fly Mode.")
showCommandsRow = albow.CheckBoxLabel("Show Commands",
ref=config.settings.showCommands,
tooltipText="Show the command in a Command Block when hovering over it.")
cancelCommandBlockOffset = albow.CheckBoxLabel("Cancel Command Block Offset",
ref=config.schematicCopying.cancelCommandBlockOffset,
tooltipText="Cancels the command blocks coords changed when copied.")
lng = config.settings.langCode.get()
langs = sorted(self.getLanguageChoices().items())
langNames = [k for k, v in langs]
self.languageButton = albow.ChoiceButton(langNames, choose=self.changeLanguage, doNotTranslate=True)
if self.sgnal[lng] in self.languageButton.choices:
self.languageButton.selectedChoice = self.sgnal[lng]
langButtonRow = albow.Row((albow.Label("Language", tooltipText="Choose your language."), self.languageButton))
portableList = ["Portable", "Fixed"]
self.goPortableButton = goPortableButton = albow.ChoiceButton(portableList, choose=self.togglePortable)
goPortableButton.selectedChoice = self.saveOldPortable
goPortableButton.tooltipText = self.portableButtonTooltip()
goPortableRow = albow.Row((albow.Label("Install Mode"), goPortableButton))
# Disabled Crash Reporting Option
# reportRow = albow.CheckBoxLabel("Report Errors",
# ref=config.settings.reportCrashes,
# tooltipText="Automatically report errors to the developer.")
self.inputs = (
spaceHeightRow,
cameraAccelRow,
cameraDragRow,
cameraMaxSpeedRow,
cameraBrakeSpeedRow,
blockBufferRow,
mouseSpeedRow,
undoLimitRow,
maxCopiesRow,
compassSizeRow,
fontProportion,
fogIntensityRow,
)
options = (
longDistanceRow,
flyModeRow,
autoBrakeRow,
swapAxesRow,
invertRow,
superSecretSettingsRow,
rotateBlockBrushRow,
compassToggleRow,
showCommandsRow,
cancelCommandBlockOffset,
langButtonRow,
) + (
((sys.platform == "win32" and pygame.version.vernum == (1, 9, 1)) and (windowSizeRow,) or ())
) + (
(sys.platform == "win32") and (setWindowPlacementRow,) or ()
) + (
(not sys.platform == "darwin") and (goPortableRow,) or ()
)
rightcol = albow.Column(options, align='r')
leftcol = albow.Column(self.inputs, align='r')
optionsColumn = albow.Column((albow.Label("Options"),
albow.Row((leftcol, rightcol), align="t")))
settingsRow = albow.Row((optionsColumn,))
buttonsRow = albow.Row((albow.Button("OK", action=self.dismiss), albow.Button("Cancel", action=self.cancel)))
resetToDefaultRow = albow.Row((albow.Button("Reset to default", action=self.resetDefault),))
optionsColumn = albow.Column((settingsRow, buttonsRow, resetToDefaultRow))
optionsColumn.key_down = self.key_down
self.add(optionsColumn)
self.shrink_wrap()
@property
def blockBuffer(self):
return config.settings.blockBuffer.get() / 1048576
@blockBuffer.setter
def blockBuffer(self, val):
config.settings.blockBuffer.set(int(val * 1048576))
def getLanguageChoices(self, current=None):
files = os.listdir(albow.translate.langPath)
langs = {}
sgnal = {}
for file in files:
name, ext = os.path.splitext(file)
if ext == ".trn" and len(name) == 5 and name[2] == "_":
langName = albow.translate.getLangName(file)
langs[langName] = name
sgnal[name] = langName
if "English (US)" not in langs.keys():
langs[u"English (US)"] = "en_US"
sgnal["en_US"] = u"English (US)"
self.langs = langs
self.sgnal = sgnal
logging.debug("Detected languages: %s"%self.langs)
return langs
def changeLanguage(self):
if albow.translate.buildTemplate:
self.languageButton.selectedChoice = 'English (US)'
return
langName = self.languageButton.selectedChoice
if langName not in self.langs:
lng = "en_US"
else:
lng = self.langs[langName]
config.settings.langCode.set(lng)
#-# Translation live update preparation
logging.debug('*** Language change detected.')
logging.debug(' Former language: %s.'%albow.translate.getLang())
logging.debug(' New language: %s.'%lng)
albow.translate.langPath = os.sep.join((directories.getDataDir(), "lang"))
update = albow.translate.setLang(lng)[2]
logging.debug(' Update done? %s (Magic %s)'%(update, update or lng == 'en_US'))
self.mcedit.root.set_update_ui(update or lng == 'en_US')
self.mcedit.root.set_update_ui(False)
self.mcedit.editor.set_update_ui(update or lng == 'en_US')
self.mcedit.editor.set_update_ui(False)
#-#
@staticmethod
def portableButtonTooltip():
return (
"Click to make your MCEdit install self-contained by moving the settings and schematics into the program folder",
"Click to make your MCEdit install persistent by moving the settings and schematics into your Documents folder")[
directories.portable]
@property
def portableLabelText(self):
return ("Portable", "Fixed")[1 - directories.portable]
@portableLabelText.setter
def portableLabelText(self, *args, **kwargs):
pass
def togglePortable(self):
if sys.platform == "darwin":
return False
textChoices = [
_("This will make your MCEdit \"portable\" by moving your settings and schematics into the same folder as {0}. Continue?").format(
(sys.platform == "darwin" and _("the MCEdit application") or _("MCEditData"))),
_("This will move your settings and schematics to your Documents folder. Continue?"),
]
useExisting = False
alertText = textChoices[directories.portable]
if albow.ask(alertText) == "OK":
if [directories.hasPreviousPortableInstallation, directories.hasPreviousFixedInstallation][directories.portable]():
asked = albow.ask("Found a previous %s installation"%["portable", "fixed"][directories.portable], responses=["Use", "Overwrite", "Cancel"])
if asked == "Use":
useExisting = True
elif asked == "Overwrite":
useExisting = False
elif asked == "Cancel":
return False
try:
[directories.goPortable, directories.goFixed][directories.portable](useExisting)
except Exception, e:
traceback.print_exc()
albow.alert(_(u"Error while moving files: {0}").format(repr(e)))
else:
self.goPortableButton.selectedChoice = self.saveOldPortable
self.goPortableButton.tooltipText = self.portableButtonTooltip()
return True
def dismiss(self, *args, **kwargs):
"""Used to change the font proportion."""
# If font proportion setting has changed, update the UI.
if config.settings.fontProportion.get() != self.saveOldConfig[config.settings.fontProportion]:
albow.resource.reload_fonts(proportion=config.settings.fontProportion.get())
self.mcedit.root.set_update_ui(True)
self.mcedit.root.set_update_ui(False)
self.mcedit.editor.set_update_ui(True)
self.mcedit.editor.set_update_ui(False)
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
self.saveOldConfig[key] = key.get()
config.save()
Dialog.dismiss(self, *args, **kwargs)
def cancel(self, *args, **kwargs):
Changes = False
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
if key.get() != self.saveOldConfig[key]:
Changes = True
oldLanguage = self.saveOldConfig[config.settings.langCode]
if config.settings.langCode.get() != oldLanguage:
Changes = True
newPortable = self.portableVar.get()
if newPortable != self.saveOldPortable:
Changes = True
if not Changes:
Dialog.dismiss(self, *args, **kwargs)
return
result = albow.ask("Do you want to save your changes?", ["Save", "Don't Save", "Cancel"])
if result == "Cancel":
return
if result == "Save":
self.dismiss(*args, **kwargs)
return
if config.settings.langCode.get() != oldLanguage:
self.languageButton.selectedChoice = self.sgnal[oldLanguage]
self.changeLanguage()
if _(newPortable) != _(self.saveOldPortable):
self.portableVar.set(newPortable)
self.togglePortable()
for key in self.saveOldConfig.keys():
key.set(self.saveOldConfig[key])
config.save()
Dialog.dismiss(self, *args, **kwargs)
def resetDefault(self):
self.reshowNumberFields()
for key in self.saveOldConfig.keys():
if "AttrRef" in str(key):
key.set(config.settings.blockBuffer.default / 1048576)
elif "lang" not in str(key):
key.set(key.default)
if config.settings.langCode.get() != "en_US":
config.settings.langCode.set("en_US")
self.changeLanguage()
if "Fixed" != self.portableVar.get():
self.portableVar.set("Fixed")
self.togglePortable()
config.save()
def reshowNumberFields(self):
for key in self.inputs:
key.subwidgets[1].editing = False
def dispatch_key(self, name, evt):
super(OptionsPanel, self).dispatch_key(name, evt)
if name == "key_down":
keyname = self.get_root().getKey(evt)
if keyname == 'Escape':
self.cancel()
|
from .parser import *
from .containers import *
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import io
import torch
from bigdl.dllib.nn.layer import Layer
from bigdl.dllib.utils.common import JTensor
from bigdl.dllib.utils.file_utils import callZooFunc
from bigdl.orca.torch.utils import trainable_param
from bigdl.orca.torch import zoo_pickle_module
from importlib.util import find_spec
if sys.version_info < (3, 7):
print("WARN: detect python < 3.7, if you meet zlib not available " +
"exception on yarn, please update your python to 3.7")
if find_spec('jep') is None:
raise Exception("jep not found, please install jep first.")
class TorchModel(Layer):
"""
TorchModel wraps a PyTorch model as a single layer, thus the PyTorch model can be used for
distributed inference or training.
"""
def __init__(self, jvalue, module_bytes, bigdl_type="float"):
self.value = jvalue
self.module_bytes = module_bytes
self.bigdl_type = bigdl_type
@staticmethod
def from_value(model_value):
model_bytes = callZooFunc("float", "getTorchModelBytes", model_value)
net = TorchModel(model_value, model_bytes)
return net
@staticmethod
def from_pytorch(model):
"""
Create a TorchModel directly from PyTorch model, e.g. model in torchvision.models.
:param model: a PyTorch model, or a function to create PyTorch model
"""
weights = []
import types
if isinstance(model, types.FunctionType) or isinstance(model, type):
for param in trainable_param(model()):
weights.append(param.view(-1))
else:
for param in trainable_param(model):
weights.append(param.view(-1))
flatten_weight = torch.nn.utils.parameters_to_vector(weights).data.numpy()
bys = io.BytesIO()
torch.save(model, bys, pickle_module=zoo_pickle_module)
weights = JTensor.from_ndarray(flatten_weight)
jvalue = callZooFunc(
"float", "createTorchModel", bys.getvalue(), weights)
net = TorchModel(jvalue, bys.getvalue())
return net
def to_pytorch(self):
"""
Convert to pytorch model
:return: a pytorch model
"""
new_weight = self.get_weights()
        assert len(new_weight) == 1, "TorchModel's weights should be one tensor"
# set weights
m = torch.load(io.BytesIO(self.module_bytes), pickle_module=zoo_pickle_module)
import types
if isinstance(m, types.FunctionType) or isinstance(m, type):
m = m()
w = torch.Tensor(new_weight[0])
torch.nn.utils.vector_to_parameters(w, trainable_param(m))
# set named buffers
new_extra_params = callZooFunc(self.bigdl_type, "getModuleExtraParameters", self.value)
if len(new_extra_params) != 0:
idx = 0
for named_buffer in m.named_buffers():
named_buffer[1].copy_(torch.reshape(
torch.Tensor(new_extra_params[idx].to_ndarray()), named_buffer[1].size()))
idx += 1
return m
def saveModel(self, path, over_write=False):
from bigdl.dllib.utils.common import callBigDlFunc
callBigDlFunc(self.bigdl_type, "modelSave", self.value, path,
over_write)
@staticmethod
def loadModel(path, bigdl_type="float"):
from bigdl.dllib.utils.common import callBigDlFunc
jmodel = callBigDlFunc(bigdl_type, "loadBigDL", path)
return Layer.of(jmodel)
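# Sketch of the flatten/restore round trip that from_pytorch()/to_pytorch() rely on,
# in plain PyTorch (illustrative model, run outside this module):
#   m = torch.nn.Linear(4, 2)
#   flat = torch.nn.utils.parameters_to_vector(m.parameters())
#   torch.nn.utils.vector_to_parameters(torch.zeros_like(flat), m.parameters())
#   # every trainable parameter of m is now zero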
|
from napalm_base import get_network_driver
from pprint import pprint
from json import dumps
junos_driver = get_network_driver('junos')
device = junos_driver(hostname='172.30.179.107', username='pytraining', password='Poclab123', optional_args={'port': 830})
device.open()
print('-'*60)
pprint(device.get_interfaces())
print('-'*60)
print dumps(device.get_interfaces(), indent=4)
print('-'*60)
print device.get_interfaces()['me0']['mac_address']
device.close()
'''
# python junos_get_interfaces.py
------------------------------------------------------------
{'.local.': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': -1},
'bme0': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'00:0B:CA:FE:00:00',
u'speed': -1},
'dsc': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': -1},
'ge-0/0/0': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': 4520394.0,
u'mac_address': u'00:19:E2:53:EE:C3',
u'speed': -1},
'ge-0/0/1': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': 4520380.0,
u'mac_address': u'00:19:E2:53:EE:C4',
u'speed': -1},
'ge-0/0/10': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:CD',
u'speed': -1},
'ge-0/0/11': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:CE',
u'speed': -1},
'ge-0/0/12': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:CF',
u'speed': -1},
'ge-0/0/13': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D0',
u'speed': -1},
'ge-0/0/14': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D1',
u'speed': -1},
'ge-0/0/15': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D2',
u'speed': -1},
'ge-0/0/16': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D3',
u'speed': -1},
'ge-0/0/17': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D4',
u'speed': -1},
'ge-0/0/18': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D5',
u'speed': -1},
'ge-0/0/19': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D6',
u'speed': -1},
'ge-0/0/2': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:C5',
u'speed': -1},
'ge-0/0/20': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D7',
u'speed': -1},
'ge-0/0/21': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D8',
u'speed': -1},
'ge-0/0/22': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:D9',
u'speed': -1},
'ge-0/0/23': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:DA',
u'speed': -1},
'ge-0/0/24': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:DB',
u'speed': -1},
'ge-0/0/25': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:DC',
u'speed': -1},
'ge-0/0/26': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:DD',
u'speed': -1},
'ge-0/0/27': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:DE',
u'speed': -1},
'ge-0/0/28': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:DF',
u'speed': -1},
'ge-0/0/29': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E0',
u'speed': -1},
'ge-0/0/3': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:C6',
u'speed': -1},
'ge-0/0/30': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E1',
u'speed': -1},
'ge-0/0/31': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E2',
u'speed': -1},
'ge-0/0/32': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E3',
u'speed': -1},
'ge-0/0/33': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E4',
u'speed': -1},
'ge-0/0/34': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E5',
u'speed': -1},
'ge-0/0/35': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E6',
u'speed': -1},
'ge-0/0/36': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E7',
u'speed': -1},
'ge-0/0/37': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E8',
u'speed': -1},
'ge-0/0/38': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:E9',
u'speed': -1},
'ge-0/0/39': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:EA',
u'speed': -1},
'ge-0/0/4': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:C7',
u'speed': -1},
'ge-0/0/40': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:EB',
u'speed': -1},
'ge-0/0/41': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:EC',
u'speed': -1},
'ge-0/0/42': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:ED',
u'speed': -1},
'ge-0/0/43': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:EE',
u'speed': -1},
'ge-0/0/44': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:EF',
u'speed': -1},
'ge-0/0/45': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:F0',
u'speed': -1},
'ge-0/0/46': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:F1',
u'speed': -1},
'ge-0/0/47': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:F2',
u'speed': -1},
'ge-0/0/5': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:C8',
u'speed': -1},
'ge-0/0/6': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:C9',
u'speed': -1},
'ge-0/0/7': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:CA',
u'speed': -1},
'ge-0/0/8': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:CB',
u'speed': -1},
'ge-0/0/9': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:CC',
u'speed': -1},
'gre': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': -1},
'ipip': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'None',
u'speed': -1},
'lo0': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': -1},
'lsi': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': -1},
'me0': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': 4540459.0,
u'mac_address': u'00:19:E2:53:EF:3F',
u'speed': 1000},
'mtun': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': -1},
'pimd': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'None',
u'speed': -1},
'pime': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'None',
u'speed': -1},
'tap': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': -1},
'vcp-0': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': 32000},
'vcp-1': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'Unspecified',
u'speed': 32000},
'vlan': {u'description': u'',
u'is_enabled': True,
u'is_up': True,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:C1',
u'speed': 1000},
'vme': {u'description': u'',
u'is_enabled': True,
u'is_up': False,
u'last_flapped': -1.0,
u'mac_address': u'00:19:E2:53:EE:C2',
u'speed': 1000}}
------------------------------------------------------------
{
"ge-0/0/7": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:CA",
"speed": -1
},
"ge-0/0/6": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:C9",
"speed": -1
},
"ge-0/0/5": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:C8",
"speed": -1
},
"ge-0/0/4": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:C7",
"speed": -1
},
"ge-0/0/3": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:C6",
"speed": -1
},
"ge-0/0/2": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:C5",
"speed": -1
},
"ge-0/0/1": {
"is_enabled": true,
"description": "",
"last_flapped": 4520383.0,
"is_up": true,
"mac_address": "00:19:E2:53:EE:C4",
"speed": -1
},
"ge-0/0/0": {
"is_enabled": true,
"description": "",
"last_flapped": 4520397.0,
"is_up": true,
"mac_address": "00:19:E2:53:EE:C3",
"speed": -1
},
"lsi": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "Unspecified",
"speed": -1
},
"ge-0/0/9": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:CC",
"speed": -1
},
"ge-0/0/8": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:CB",
"speed": -1
},
"ge-0/0/13": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D0",
"speed": -1
},
"ge-0/0/12": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:CF",
"speed": -1
},
"ge-0/0/11": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:CE",
"speed": -1
},
"ge-0/0/10": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:CD",
"speed": -1
},
"ge-0/0/17": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D4",
"speed": -1
},
"ge-0/0/16": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D3",
"speed": -1
},
"ge-0/0/15": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D2",
"speed": -1
},
"ge-0/0/14": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D1",
"speed": -1
},
"ge-0/0/35": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E6",
"speed": -1
},
"ge-0/0/34": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E5",
"speed": -1
},
"ge-0/0/19": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D6",
"speed": -1
},
"ge-0/0/18": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D5",
"speed": -1
},
"ge-0/0/31": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E2",
"speed": -1
},
"ge-0/0/30": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E1",
"speed": -1
},
"ge-0/0/33": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E4",
"speed": -1
},
"ge-0/0/32": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E3",
"speed": -1
},
"ge-0/0/43": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:EE",
"speed": -1
},
"ge-0/0/36": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E7",
"speed": -1
},
"ge-0/0/44": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:EF",
"speed": -1
},
"mtun": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "Unspecified",
"speed": -1
},
"pimd": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "None",
"speed": -1
},
"me0": {
"is_enabled": true,
"description": "",
"last_flapped": 4540462.0,
"is_up": true,
"mac_address": "00:19:E2:53:EF:3F",
"speed": 1000
},
"ge-0/0/45": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:F0",
"speed": -1
},
"ge-0/0/40": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:EB",
"speed": -1
},
"gre": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "Unspecified",
"speed": -1
},
"ge-0/0/46": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:F1",
"speed": -1
},
"ge-0/0/47": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:F2",
"speed": -1
},
"ge-0/0/42": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:ED",
"speed": -1
},
"dsc": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "Unspecified",
"speed": -1
},
"vme": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:C2",
"speed": 1000
},
"tap": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "Unspecified",
"speed": -1
},
"vlan": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "00:19:E2:53:EE:C1",
"speed": 1000
},
"ge-0/0/37": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E8",
"speed": -1
},
"bme0": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "00:0B:CA:FE:00:00",
"speed": -1
},
"ge-0/0/28": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:DF",
"speed": -1
},
"ge-0/0/29": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E0",
"speed": -1
},
"ge-0/0/26": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:DD",
"speed": -1
},
"ge-0/0/27": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:DE",
"speed": -1
},
"ge-0/0/24": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:DB",
"speed": -1
},
"ge-0/0/25": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:DC",
"speed": -1
},
"ge-0/0/22": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D9",
"speed": -1
},
"ge-0/0/23": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:DA",
"speed": -1
},
"ge-0/0/20": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D7",
"speed": -1
},
"ge-0/0/21": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:D8",
"speed": -1
},
"vcp-1": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "Unspecified",
"speed": 32000
},
"vcp-0": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "Unspecified",
"speed": 32000
},
"ipip": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "None",
"speed": -1
},
"lo0": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "Unspecified",
"speed": -1
},
"pime": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "None",
"speed": -1
},
"ge-0/0/41": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:EC",
"speed": -1
},
".local.": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": true,
"mac_address": "Unspecified",
"speed": -1
},
"ge-0/0/39": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:EA",
"speed": -1
},
"ge-0/0/38": {
"is_enabled": true,
"description": "",
"last_flapped": -1.0,
"is_up": false,
"mac_address": "00:19:E2:53:EE:E9",
"speed": -1
}
}
------------------------------------------------------------
00:19:E2:53:EF:3F
'''
|
import os
import nester
import pickle
""" A test program to test exception"""
man = []
other = []
try:
data=open('/home/lgt/Program/pythonlearn/excep/sketch.txt')
for each_line in data:
try:
            if ':' in each_line:
(role,line_spoken) = each_line.split(':',1)
line_spoken = line_spoken.strip()
if role == 'Man':
man.append(line_spoken)
elif role == 'Other Man':
other.append(line_spoken)
except ValueError:
pass
data.close()
except IOError:
print("The datafile is missing")
try:
with open('man_data.txt', 'wb') as man_data,open('other_data.txt', 'wb') as other_data:
pickle.dump(man, man_data)
pickle.dump(other,other_data)
except IOError as err:
    print("Cannot write to file: " + str(err))
except pickle.PickleError as perr:
    print('Pickling Error: ' + str(perr))
# The with statement closes both files automatically, so no explicit close()
# is needed.
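# Reading the pickled lists back later (an illustrative sketch, not part of
# the original script):
#
#     with open('man_data.txt', 'rb') as man_file:
#         man = pickle.load(man_file)
#     with open('other_data.txt', 'rb') as other_file:
#         other = pickle.load(other_file)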
|
from django.core.management.base import BaseCommand
from core.datatools.fail_repeat import FailRepeater
class Command(BaseCommand):
def handle(self, *args, **options):
try:
repeater = FailRepeater()
repeater.run()
except KeyboardInterrupt:
pass
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
__author__ = "pmeier82"
class DjSpikevalFrankeAppConfig(AppConfig):
label = "djspikeval_franke"
name = "djspikeval_franke"
verbose_name = _("Django Spikeval - Metric by F. Franke")
def ready(self):
# import all parts of the application that need to be exposed
pass
if __name__ == "__main__":
pass
|
import io
import os
import re
from setuptools import find_packages
from setuptools import setup
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
with open("requirements.txt", "r") as file:
requirements = [x.strip() for x in file.readlines()]
setup(
name="tfserving-manager",
version="0.1.0",
url="www.github.com/ismailuddin/tfserving-manager",
license="MIT",
author="Ismail Uddin",
description="Simple interface for interacting with TensorFlow Serving",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(exclude=("tests",)),
install_requires=requirements,
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
)
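# Illustrative local installation of this package (not part of the original
# file):
#
#     pip install .        # regular install
#     pip install -e .     # editable install for development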
|
import fixtures
from oslo.config import cfg
from bricks.common import config
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'bricks.netconf')
CONF.import_opt('host', 'bricks.common.service')
class ConfFixture(fixtures.Fixture):
"""Fixture to manage global conf settings."""
def __init__(self, conf):
self.conf = conf
def setUp(self):
super(ConfFixture, self).setUp()
self.conf.set_default('host', 'bwaaaaaaah')
self.conf.set_default('rpc_backend',
'bricks.openstack.common.rpc.impl_fake')
self.conf.set_default('rpc_cast_timeout', 5)
self.conf.set_default('rpc_response_timeout', 5)
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('sqlite_synchronous', False)
self.conf.set_default('use_ipv6', True)
self.conf.set_default('verbose', True)
config.parse_args([], default_config_files=[])
self.addCleanup(self.conf.reset)
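# Typical use in a test case (an illustrative sketch; the testtools base class
# is an assumption, not part of this module):
#
#     class MyTestCase(testtools.TestCase):
#         def setUp(self):
#             super(MyTestCase, self).setUp()
#             self.useFixture(ConfFixture(cfg.CONF))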
|
"""
Author: Joseph Min ([email protected])
This script validates the given csv file for correct formatting.
"""
|
# Generated by Django 3.0.5 on 2022-02-17 07:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crawling', '0029_auto_20220216_1408'),
]
operations = [
migrations.AlterField(
model_name='financialstatementline',
name='statement_type',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.DO_NOTHING, to='crawling.StatementTypeLocalDefinition'),
),
]
|
from __future__ import print_function
import os
import unittest
from dropbox import create_session
from fs.test import FSTestCases
import fs.subfs
from dropboxfs.dropboxfs import DropboxFS
def join(a, b):
return a + b
TEST_PATH = 'dropboxfs'
class TestDropboxFS(FSTestCases, unittest.TestCase):
def make_fs(self):
# Return an instance of your FS object here
self.access_token = "olshmk5XitgAAAAAAAAJnRpQ3mrGJq5kmsI6QvycvC2kT8p18SsajOlhWV511oAd"
if "DEV" in os.environ:
proxies = {
"http": "http://127.0.0.1:1087",
"https": "http://127.0.0.1:1087",
}
sess = create_session(8, proxies=proxies)
else:
sess = None
dfs = DropboxFS(self.access_token, session=sess)
if dfs.exists(TEST_PATH):
dfs.removetree(TEST_PATH)
dfs.makedir(TEST_PATH)
fs2 = fs.subfs.SubFS(dfs, TEST_PATH)
return fs2
def test_case_sensitive(self):
        # Dropbox paths are case-insensitive
pass
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.minigame.CogGuardGlobals
from panda3d.core import *
GuardDialog = {'arrest': [
'Get your hands over your head!',
'Put your hands over your head!',
'Put your hands over your head and drop the weapon.',
"Put your hands over your head and don't move!",
'Hands behind your back!',
'Get your hands behind your back!',
"You're under arrest!",
"You're arrested!",
'Put your hands behind your back!',
'Where did you think you were going?',
"Don't move!",
'Place your hands over the top of your head.'],
'disregard': [
'Hmm, I guess it was nothing.',
'I swear I saw something.',
'I swear I heard something.',
'My mind must be playing tricks on me.',
'Hmm, nothing there.'],
'somethingInSight': [
'Who goes there?!',
'Whoa, what was that?',
'I think I just saw something.',
'Did I just see someone?',
'Did I just see a Toon?'],
'spot': [
"It's a Toon! Hey, stop right there!",
'Stop right there, Toon!',
'Trespasser!',
'Get back here!',
"I've got a 10-76 in sight!",
'10-43 of a 10-76!',
'In pursuit of a prowler!'],
'chase': [
"Like my face? It's the last thing you'll see.",
"You can run, but you can't hide.",
'Stop running!',
'Running will just make it last longer!',
'Catching you will just be my uprising.',
"Let's make this fast, I'm having lunch with Mr. Hollywood."],
'shot': [
"I'm hit!",
'Ow!',
'Ouch!',
'Ouch! 10-52!',
'Ouch! 10-33!',
'Oof!',
'Oof! 11-99!'],
'heard': [
'What was that noise?',
'Am I the only one who hears that?',
'Did you hear that too?',
'Whoa, what was that?',
'Whoa, I think I just heard something...'],
'suspect': [
"OK, there's gotta be someone around here.",
"I'm definitely hearing somebody.",
"This is getting ridiculous. I'm searching for this guy.",
'Okay come out now!',
"Okay I know you're there!",
'Come on out!',
'Come out wherever you are!',
'Okay stop hiding and get your hands on top of your head!']}
FactoryWalkPoints = {'1': Point3(7.02, 102.03, 3.73),
'2': Point3(36.15, 102.0, 3.73),
'3': Point3(21.0, 91.48, 3.73),
'4': Point3(21.0, 34.75, 3.73),
'5': Point3(36.6, 21.79, 3.73),
'6': Point3(6.84, 21.79, 3.73),
'7': Point3(57.42, 113.02, 3.73),
'8': Point3(57.42, 145.53, 3.73),
'9': Point3(39.48, 126.06, 3.73),
'10': Point3(39.48, 158.71, 3.73),
'11': Point3(21.46, 154.05, 3.73),
'12': Point3(20.8298492432, 127.261932373, 3.72601008415),
'13': Point3(-15.8184213638, 107.70401001, 3.72601008415),
'14': Point3(-12.6347436905, 150.498260498, 3.72601008415),
'15': Point3(1.862185359, 127.62688446, 3.72601008415),
'16': Point3(20.7491359711, 181.617355347, 3.72601008415),
'17': Point3(15.9736003876, 195.81703186, 3.72601008415),
'18': Point3(28.177230835, 190.976760864, 3.72601008415),
'19': Point3(-11.136932373, 178.399993896, 3.72601008415),
'20': Point3(20.5021152496, 220.11050415, 3.72601008415),
'21': Point3(20.5021152496, 250.118209839, 3.72601008415),
'22': Point3(20.5021152496, 266.976531982, 3.73070979118),
'23': Point3(-1.70821225643, 266.976531982, 3.72704768181),
'24': Point3(-9.31725215912, 311.727172852, 23.6061973572),
'25': Point3(6.00064468384, 311.727172852, 23.6010684967),
'26': Point3(-2.11948752403, 393.333221436, 58.6142539978),
'27': Point3(45.626335144, 250.790618896, 3.74456477165),
'28': Point3(72.1958084106, 250.790618896, 8.7260093689),
'29': Point3(102.783546448, 236.582550049, 8.68389701843),
'30': Point3(102.783546448, 254.469970703, 8.68389701843),
'31': Point3(182.737991333, 250.998352051, 8.68877983093),
'32': Point3(170.940628052, 274.417785645, 8.71758842468),
'33': Point3(170.940628052, 311.51348877, 18.7258872986),
'34': Point3(155.780151367, 311.51348877, 18.7258872986),
'35': Point3(157.472915649, 338.74230957, 18.7259998322),
'36': Point3(188.975006104, 341.060272217, 18.7259998322),
'37': Point3(155.905532837, 373.534423828, 18.7259998322),
'38': Point3(155.905532837, 466.330718994, 23.7259998322),
'39': Point3(145.229248047, 472.678375244, 23.7259998322),
'40': Point3(145.229248047, 484.490905762, 23.7259998322),
'41': Point3(85.9906768799, 493.035339355, 23.7259998322),
'42': Point3(64.7825775146, 495.028625488, 23.7259998322),
'43': Point3(64.7825775146, 437.651000977, 24.8016834259),
'44': Point3(59.5449943542, 421.77545166, 28.720872879),
'45': Point3(45.8543891907, 420.722930908, 28.7196521759),
'46': Point3(38.069896698, 431.21975708, 28.7245349884),
'47': Point3(38.069896698, 461.540252686, 28.7245349884),
'48': Point3(5.61381816864, 471.174255371, 28.7245349884),
'49': Point3(22.9643363953, 482.653747559, 28.7245349884),
'50': Point3(-12.0776023865, 485.552703857, 28.7245349884),
'51': Point3(5.63659906387, 451.67791748, 28.7245349884),
'52': Point3(5.63659906387, 432.762542725, 28.7245349884),
'53': Point3(19.1862716675, 424.348571777, 28.7235584259),
'54': Point3(22.0231189728, 404.197906494, 33.7259979248),
'55': Point3(-9.55003833771, 405.499603271, 43.7059783936),
'56': Point3(-5.86112260818, 420.435211182, 43.7108612061),
'57': Point3(-34.9273223877, 456.744995117, 28.7250232697),
'58': Point3(-16.2481575012, 460.112060547, 28.7250232697),
'59': Point3(-39.9931030273, 419.621917725, 28.720872879),
'60': Point3(-54.2984924316, 423.636260986, 28.7245349884),
'61': Point3(-54.2984924316, 494.452941895, 23.7259998322),
'62': Point3(-108.340660095, 494.452941895, 23.7259998322),
'63': Point3(-129.677352905, 485.094451904, 23.7259998322),
'64': Point3(-313.463043213, 495.315826416, 23.7259998322),
'65': Point3(-314.154663086, 595.736450195, 23.7376232147),
'66': Point3(-403.777557373, 595.736450195, 23.7376232147),
'67': Point3(-481.709381104, 595.736450195, 8.72601032257),
'68': Point3(-494.487335205, 578.331970215, 8.72601032257),
'69': Point3(-494.144317627, 386.693328857, 8.72601032257),
'70': Point3(-494.144317627, 370.74432373, 8.72601032257),
'71': Point3(-494.144317627, 355.754882812, 8.72601032257),
'72': Point3(-459.184967041, 355.756225586, 18.7259998322),
'73': Point3(-459.184967041, 385.447113037, 18.7259998322),
'74': Point3(-459.184967041, 371.280548096, 18.7259998322),
'75': Point3(-436.206726074, 371.280548096, 18.7259998322),
'76': Point3(-392.863433838, 362.806182861, 18.7259998322),
'77': Point3(-383.311065674, 383.347747803, 18.7259998322),
'78': Point3(-316.622070312, 413.971374512, 18.7259998322),
'79': Point3(-316.622070312, 325.665863037, 18.7259998322),
'80': Point3(-256.322418213, 354.900543213, 18.7259998322),
'81': Point3(-256.322418213, 390.568115234, 18.7259998322),
'82': Point3(-214.715209961, 372.348297119, 18.7259998322),
'83': Point3(-129.384460449, 371.602508545, 18.7259998322),
'84': Point3(-129.384460449, 354.145294189, 18.7259998322),
'85': Point3(-114.271522522, 340.94833374, 18.7259998322),
'86': Point3(-114.271522522, 310.575469971, 18.7259998322),
'87': Point3(-152.82208252, 300.420196533, 18.7259998322),
'88': Point3(-168.71031189, 294.892425537, 18.7259998322),
'89': Point3(-173.580856323, 289.509796143, 18.7259998322),
'90': Point3(-173.580856323, 250.033050537, 8.72601032257),
'91': Point3(-164.228210449, 240.328811646, 8.72601032257),
'92': Point3(-143.334457397, 240.701599121, 8.72601032257),
'93': Point3(-133.640853882, 243.669052124, 8.72601032257),
'94': Point3(-117.203361511, 242.005691528, 8.72601032257),
'95': Point3(-80.3400650024, 244.324478149, 8.72601032257),
'96': Point3(-55.8061103821, 250.527297974, 8.72601032257),
'97': Point3(-2.45391345024, 250.527297974, 3.72601008415),
'98': Point3(4.77302837372, 541.511352539, 38.7259979248),
'99': Point3(-49.9130744934, 540.511962891, 43.7259979248),
'100': Point3(65.2844085693, 541.129455566, 43.7259979248)}
FactoryGuardPoints = {'1': [
Point3(50.8605651855, 493.650238037, 28.7250232697),
Vec3(313.025054932, 0.0, 0.0)],
'2': [
Point3(50.5030555725, 479.770721436, 28.7250232697),
Vec3(270.0, 0.0, 0.0)],
'3': [
Point3(-24.8482875824, 498.838745117, 28.7250232697),
Vec3(0.0, 0.0, 0.0)],
'4': [
Point3(-24.8482875824, 446.4090271, 28.7252674103),
Vec3(180.0, 0.0, 0.0)],
'5': [
Point3(-41.4686851501, 446.4090271, 28.7252674103),
Vec3(90.0, 0.0, 0.0)],
'6': [
Point3(-7.23602104187, 434.330535889, 28.7252674103),
Vec3(90.0, 0.0, 0.0)],
'7': [
Point3(17.745639801, 434.330535889, 28.7252674103),
Vec3(270.0, 0.0, 0.0)],
'8': [
Point3(-6.85039520264, 434.566162109, 43.7142791748),
Vec3(180.0, 0.0, 0.0)],
'9': [
Point3(1.79736101627, 434.566162109, 43.7142791748),
Vec3(180.0, 0.0, 0.0)],
'10': [
Point3(10.3381643295, 434.566162109, 43.7142791748),
Vec3(180.0, 0.0, 0.0)],
'11': [
Point3(19.1219005585, 434.566162109, 43.7154998779),
Vec3(180.0, 0.0, 0.0)],
'12': [
Point3(43.9076843262, 449.104980469, 28.724779129),
Vec3(270.0, 0.0, 0.0)],
'13': [
Point3(31.5120067596, 445.849884033, 28.724779129),
Vec3(90.0, 0.0, 0.0)],
'14': [
Point3(76.6376647949, 539.907409668, 43.7259979248),
Vec3(270.0, 0.0, 0.0)],
'15': [
Point3(-64.5058364868, 539.907409668, 43.7259979248),
Vec3(90.0, 0.0, 0.0)],
'16': [
Point3(-19.6063995361, 131.088729858, 3.7260093689),
Vec3(90.0, 0.0, 0.0)],
'17': [
Point3(63.3582801819, 131.088729858, 3.7260093689),
Vec3(270.0, 0.0, 0.0)],
'18': [
Point3(127.58114624, 268.98614502, 8.68414115906),
Vec3(308.659820557, 0.0, 0.0)],
'19': [
Point3(92.5095977783, 232.670257568, 8.68414115906),
Vec3(152.916442871, 0.0, 0.0)],
'20': [
Point3(189.383132935, 233.027069092, 8.68487358093),
Vec3(180.0, 0.0, 0.0)],
'21': [
Point3(205.738693237, 340.856567383, 18.7260017395),
Vec3(270.0, 0.0, 0.0)],
'22': [
Point3(144.820892334, 465.931915283, 23.7259998322),
Vec3(138.037338257, 0.0, 0.0)],
'23': [
Point3(-65.8107452393, 480.288360596, 23.7259998322),
Vec3(270.0, 0.0, 0.0)],
'24': [
Point3(-313.754058838, 528.80090332, 23.7295665741),
Vec3(0.0, 0.0, 0.0)],
'25': [
Point3(-343.322021484, 596.249816895, 23.7386016846),
Vec3(90.0, 0.0, 0.0)],
'26': [
Point3(-404.657501221, 552.570800781, 23.7461681366),
Vec3(180.0, 0.0, 0.0)],
'27': [
Point3(-538.390075684, 595.941772461, 8.72601032257),
Vec3(90.0, 0.0, 0.0)],
'28': [
Point3(-515.727783203, 370.884094238, 8.72601032257),
Vec3(90.0, 0.0, 0.0)],
'29': [
Point3(-184.767532349, 370.884094238, 18.7259998322),
Vec3(90.0, 0.0, 0.0)],
'30': [
Point3(-99.2514801025, 341.364715576, 18.7259998322),
Vec3(270.0, 0.0, 0.0)],
'31': [
Point3(-158.564758301, 295.215332031, 18.7259998322),
Vec3(270.0, 0.0, 0.0)]}
FactoryWayPointData = {'24': ['25', '26', '21', '23'], '25': ['24', '26', '23'], '26': ['25', '23'], '27': ['21', '28', '93', '95', '94', '97', '96', '31', '30'], '20': ['21', '22', '11', '12', '17', '16', '18', '9'], '21': ['24', '27', '20', '22', '28', '93', '95', '94', '97', '96', '11', '12', '17', '16', '31', '30'], '22': ['20', '21', '23', '11', '17', '16'], '23': ['24', '25', '26', '22', '98', '48'], '28': ['27', '21', '93', '96', '31', '30'], '29': ['30'], '4': ['6', '5', '3'], '8': ['2', '11', '10', '7', '9', '14'], '59': ['60', '57'], '58': ['57', '51', '50', '48', '49', '47'], '55': ['54', '56'], '54': ['55', '51', '50', '53'], '57': ['59', '58', '50', '48', '49', '47'], '56': ['55'], '51': ['98', '58', '54', '57', '50', '52', '48', '49', '47'], '50': ['58', '54', '57', '51', '52', '48', '49', '47'], '53': ['54', '52'], '52': ['98', '51', '50', '53', '48', '49'], '88': ['90', '89', '86', '87'], '89': ['90', '88'], '82': ['83', '80', '81', '79', '78'], '83': ['82', '84', '85'], '80': ['82', '79'], '81': ['82', '78'], '86': ['88', '87', '85'], '87': ['88', '86'], '84': ['83', '85'], '85': ['83', '86', '84'], '3': ['2', '4', '1'], '7': ['2', '8', '10', '13', '17', '16', '1', '3', '9'], '100': ['99'], '39': ['42', '40', '41', '38'], '38': ['40', '41', '39', '37', '35', '34'], '33': ['32', '34'], '32': ['33', '31'], '31': ['27', '21', '28', '93', '97', '96', '32', '30'], '30': ['27', '21', '28', '29', '93', '95', '94', '97', '96', '31'], '37': ['38', '35', '34'], '36': ['35'], '35': ['38', '37', '36', '34'], '34': ['38', '33', '37', '35'], '60': ['61', '59'], '61': ['60', '62', '63', '64'], '62': ['61', '63', '64'], '63': ['61', '62'], '64': ['61', '62', '65'], '65': ['64', '66', '67'], '66': ['67'], '67': ['66', '68'], '68': ['67', '69', '71', '70'], '69': ['68', '76', '75', '74', '73', '72', '71', '70'], '2': ['8', '10', '13', '1', '3', '7', '9'], '6': ['4', '5', '3'], '99': ['100'], '98': ['23', '51', '52', '48'], '91': ['90', '92', '97'], '90': ['91', '92', '88', '89'], '93': ['27', '21', '28', '92', '95', '94', '97', '96', '31', '30'], '92': ['91', '90', '93'], '95': ['27', '21', '28', '93', '94', '97', '96'], '94': ['27', '21', '93', '95', '97', '96'], '97': ['27', '21', '28', '91', '93', '95', '94', '96', '31', '30'], '96': ['27', '21', '28', '93', '95', '94', '97', '31', '30'], '11': ['20', '21', '22', '8', '10', '12', '17', '16', '18', '14'], '10': ['2', '8', '11', '7', '9', '14'], '13': ['2', '16', '19', '18', '15', '1', '7'], '12': ['20', '21', '22', '11', '16', '18'], '15': ['13', '16', '19', '1', '14'], '14': ['8', '11', '10', '13', '19', '15', '1'], '17': ['20', '21', '22', '11', '16', '18', '7'], '16': ['20', '21', '22', '11', '13', '12', '17', '19', '18', '15', '7'], '19': ['13', '16', '15', '1', '14'], '18': ['13', '12', '16', '20'], '48': ['23', '98', '58', '57', '51', '50', '52', '49', '47'], '49': ['58', '57', '51', '50', '52', '48', '46', '47'], '46': ['23', '49', '47', '44', '45'], '47': ['58', '57', '51', '50', '48', '49', '46'], '44': ['46', '45', '42', '43'], '45': ['46', '44'], '42': ['44', '43', '40', '41', '39'], '43': ['44'], '40': ['42', '41', '39', '38'], '41': ['42', '40', '39', '38'], '1': ['2', '13', '19', '15', '3', '7', '14'], '5': ['4', '6', '3'], '9': ['20', '2', '8', '10', '7'], '77': ['76', '75', '74', '71', '70', '78'], '76': ['69', '77', '75', '74', '70', '79', '78'], '75': ['69', '77', '76', '74', '71', '70', '79'], '74': ['69', '77', '76', '75', '73', '72', '71', '70'], '73': ['69', '74', '72', '71', '70'], '72': ['69', '74', '73', '71', 
'70'], '71': ['68', '69', '77', '75', '74', '73', '72', '70'], '70': ['68', '69', '77', '76', '75', '74', '73', '72', '71'], '79': ['82', '80', '76', '75'], '78': ['82', '81', '77', '76']}
GuardPointData = {'1': '49', '2': '49', '3': '50', '4': '57', '5': '57', '6': '52', '7': '52',
'8': '56', '9': '56', '10': '56', '11': '56', '12': '47', '13': '47',
'14': '100', '15': '99', '16': '15', '17': '9', '18': '30', '19': '30',
'20': '31', '21': '36', '22': '39', '23': '62', '24': '64', '25': '65',
'26': '66', '27': '67', '28': '70', '29': '82', '30': '85', '31': '88'}
JellybeanBarrelPoints = [
[
Point3(-39.7513237, 492.480865479, 28.7250213623), Vec3(45.0, 0.0, 0.0)],
[
Point3(33.1251411438, 499.403594971, 28.7250213623), Vec3(0.0, 0.0, 0.0)],
[
Point3(20.7891941071, 451.0, 28.7250213623), Vec3(0.0, 0.0, 0.0)],
[
Point3(-22.3909473419, 416.06060791, 28.7184333801), Vec3(270.0, 0.0, 0.0)],
[
Point3(-10.0228147507, 451.252868652, 28.7252674103), Vec3(180.0, 0.0, 0.0)],
[
Point3(5.74519491196, 416.444671631, 28.7176990509), Vec3(180.0, 0.0, 0.0)],
[
Point3(19.3000221252, 416.705780029, 43.7093963623), Vec3(180.0, 0.0, 0.0)],
[
Point3(10.1045351028, 416.705780029, 43.7093963623), Vec3(180.0, 0.0, 0.0)],
[
Point3(1.59424114227, 416.705780029, 43.7093963623), Vec3(180.0, 0.0, 0.0)],
[
Point3(158.375442505, 498.157196045, 23.7259998322), Vec3(315.0, 0.0, 0.0)],
[
Point3(212.277908325, 341.0, 18.7259998322), Vec3(270.0, 0.0, 0.0)],
[
Point3(152.493438721, 280.12286377, 18.7259998322), Vec3(180.0, 0.0, 0.0)],
[
Point3(213.100830078, 268.131378174, 8.68511772156), Vec3(0.0, 0.0, 0.0)],
[
Point3(22.4859085083, 339.311187744, 58.6203575134), Vec3(180.0, 0.0, 0.0)],
[
Point3(22.4859085083, 348.359558105, 58.6203575134), Vec3(270.0, 0.0, 0.0)],
[
Point3(22.4859085083, 360.231384277, 58.6215820312), Vec3(270.0, 0.0, 0.0)],
[
Point3(-128.960494995, 328.47164917, 18.7260093689), Vec3(0.0, 0.0, 0.0)],
[
Point3(-263.571563721, 374.413665771, 18.7260093689), Vec3(90.0, 0.0, 0.0)],
[
Point3(-372.620025635, 367.780334473, 18.7260093689), Vec3(270.0, 0.0, 0.0)],
[
Point3(-528.821838379, 370.591156006, 8.72601032257), Vec3(90.0, 0.0, 0.0)],
[
Point3(-477.526855469, 577.470153809, 8.72601032257), Vec3(225.0, 0.0, 0.0)],
[
Point3(-403.55960083, 533.912475586, 23.7383556366), Vec3(180.0, 0.0, 0.0)],
[
Point3(-299.539581299, 490.111419678, 23.7259044647), Vec3(180.0, 0.0, 0.0)]]
GuardBitmask = BitMask32.bit(6) |
from TestHelperSuperClass import testHelperSuperClass
class local_helpers(testHelperSuperClass):
pass
class test_kong_test_delete_service(local_helpers):
def test_noArgs(self):
cmdToExecute = "./scripts/kong_delete_service"
expectedOutput = ""
expectedOutput += "Start of ./scripts/kong_delete_service\n"
expectedOutput += "Wrong number of arguments expected 2 - got 0\n"
expectedOutput += "Recieved args:\n"
expectedOutput += "['./scripts/kong_delete_service']\n"
expectedOutput += "-\n"
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [1], 1, False)
def test_delete_a_Service_with_routes(self):
serviceName = "TestServiceName"
route = {
"protocol": "http",
"host": "route.host.com",
"path": "/ppp"
}
#call install service and route
cmdToExecute = "./scripts/kong_install_service_and_route"
cmdToExecute += " " + self.kong_server
cmdToExecute += " " + serviceName
cmdToExecute += " http"
cmdToExecute += " www.host.com"
cmdToExecute += " 80"
cmdToExecute += " /"
cmdToExecute += " " + route["protocol"]
cmdToExecute += " " + route["host"]
cmdToExecute += " " + route["path"]
cmdToExecute += " GET"
cmdToExecute += " null"
cmdToExecute += " null"
expectedOutput = ""
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
#not checking output, just checking function
#check service is there
resp, respCode = self.callKongService("/services/" + serviceName, {}, "get", None, [200])
self.assertEqual(resp["name"],serviceName)
#delete service
cmdToExecute = "./scripts/kong_delete_service " + self.kong_server + " " + serviceName
expectedOutput = ""
expectedErrorOutput = None
a = self.executeCommand(cmdToExecute, expectedOutput, expectedErrorOutput, [0], 1, True)
#check service is not there
resp, respCode = self.callKongService("/services/" + serviceName, {}, "get", None, [404])
|
#Today it is all about control flow
print("Welcome to the rollercoaster!")
height = int(input("What is your height in cm? "))
if height > 120:
print("You can ride the rollercoaster!")
else:
print("Sorry, you can't ride the rollercoaster!") |
# coding: utf-8
"""
Functions for working with pitch data
This file depends on the praat script get_pitch_and_intensity.praat
(which depends on praat) to extract pitch and intensity values from
audio data. Once the data is extracted, there are functions for
data normalization and calculating various measures from the time
stamped output of the praat script (ie **generatePIMeasures()**)
For brevity, 'pitch_and_intensity' is referred to as 'PI'
see **examples/get_pitch_and_formants.py**
"""
import os
from os.path import join
import io
import math
from typing import List, Tuple, Optional, cast
from praatio import data_points
from praatio import praatio_scripts
from praatio import textgrid
from praatio.utilities import errors
from praatio.utilities import my_math
from praatio.utilities import utils
from praatio.utilities.constants import Point
HERTZ = "Hertz"
UNSPECIFIED = "unspecified"
_PITCH_ERROR_TIER_NAME = "pitch errors"
def _extractPIPiecewise(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
tgFN: str,
tierName: str,
tmpOutputPath: str,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and int from each labeled interval in a textgrid
This has the benefit of being faster than using _extractPIFile if only
labeled regions need to have their pitch values sampled, particularly
for longer files.
Returns the result as a list. Will load the serialized result
if this has already been called on the appropriate files before
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
windowSize = medianFilterWindowSize
if not os.path.exists(inputFN):
raise errors.ArgumentError(f"Required folder does not exist: f{inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
utils.makeDir(tmpOutputPath)
splitAudioList = praatio_scripts.splitAudioOnTier(
inputFN, tgFN, tierName, tmpOutputPath, False
)
allPIList: List[Tuple[str, str, str]] = []
for start, _, fn in splitAudioList:
tmpTrackName = os.path.splitext(fn)[0] + ".txt"
piList = _extractPIFile(
join(tmpOutputPath, fn),
join(tmpOutputPath, tmpTrackName),
praatEXE,
minPitch,
maxPitch,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate=True,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
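            # Times in each split file are relative to the start of its
            # interval, so shift them by `start` to recover absolute times.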
convertedPiList = [
("%0.3f" % (float(time) + start), str(pV), str(iV))
for time, pV, iV in piList
]
allPIList.extend(convertedPiList)
outputData = [",".join(row) for row in allPIList]
with open(outputFN, "w") as fd:
fd.write("\n".join(outputData) + "\n")
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def _extractPIFile(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and intensity values from an audio file
Returns the result as a list. Will load the serialized result
if this has already been called on the appropriate files before
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if not os.path.exists(inputFN):
raise errors.ArgumentError(f"Required folder does not exist: f{inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
# The praat script uses append mode, so we need to clear any prior
# result
if os.path.exists(outputFN):
os.remove(outputFN)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
argList = [
inputFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
pitchUnit,
-1,
-1,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitch_and_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractIntensity(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
sampleStep: float = 0.01,
forceRegenerate: bool = True,
undefinedValue: float = None,
) -> List[Tuple[float, ...]]:
"""
Extract the intensity for an audio file
Calculates intensity using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if not os.path.exists(inputFN):
raise errors.ArgumentError(f"Required folder does not exist: f{inputFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
# The praat script uses append mode, so we need to clear any prior
# result
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [inputFN, outputFN, sampleStep, minPitch, -1, -1]
scriptName = "get_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPitchTier(
wavFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
forceRegenerate: bool = True,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> data_points.PointObject2D:
"""
Extract pitch at regular intervals from the input wav file
Data is output to a text file and then returned in a list in the form
[(timeV1, pitchV1), (timeV2, pitchV2), ...]
sampleStep - the frequency to sample pitch at
silenceThreshold - segments with lower intensity won't be analyzed
for pitch
forceRegenerate - if running this function for the same file, if False
just read in the existing pitch file
pitchQuadInterp - if True, quadratically interpolate pitch
Calculates pitch using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
if not os.path.exists(wavFN):
raise errors.ArgumentError(f"Required file does not exist: f{wavFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [
wavFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitchtier.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return data_points.open2DPointObject(outputFN)
def extractPitch(
wavFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
forceRegenerate: bool = True,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extract pitch at regular intervals from the input wav file
Data is output to a text file and then returned in a list in the form
[(timeV1, pitchV1), (timeV2, pitchV2), ...]
sampleStep - the frequency to sample pitch at
silenceThreshold - segments with lower intensity won't be analyzed
for pitch
forceRegenerate - if running this function for the same file, if False
just read in the existing pitch file
undefinedValue - if None remove from the dataset, otherset set to
undefinedValue
pitchQuadInterp - if True, quadratically interpolate pitch
Calculates pitch using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
"""
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
if not os.path.exists(wavFN):
raise errors.ArgumentError(f"Required file does not exist: f{wavFN}")
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [
wavFN,
outputFN,
sampleStep,
minPitch,
maxPitch,
silenceThreshold,
-1,
-1,
medianFilterWindowSize,
doInterpolation,
]
scriptName = "get_pitch.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPI(
inputFN: str,
outputFN: str,
praatEXE: str,
minPitch: float,
maxPitch: float,
sampleStep: float = 0.01,
silenceThreshold: float = 0.03,
pitchUnit: str = HERTZ,
forceRegenerate: bool = True,
tgFN: str = None,
tierName: str = None,
tmpOutputPath: str = None,
undefinedValue: float = None,
medianFilterWindowSize: int = 0,
pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
"""
Extracts pitch and intensity from a file wholesale or piecewise
If the parameters for a tg are passed in, this will only extract labeled
segments in a tier of the tg. Otherwise, pitch will be extracted from
the entire file.
male: minPitch=50; maxPitch=350
female: minPitch=75; maxPitch=450
pitchUnit: "Hertz", "semitones re 100 Hz", etc
Calculates pitch and intensity using the following praat command:
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
"""
outputPath = os.path.split(outputFN)[0]
windowSize = medianFilterWindowSize
if tgFN is None or tierName is None:
piList = _extractPIFile(
inputFN,
outputFN,
praatEXE,
minPitch,
maxPitch,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate,
undefinedValue=undefinedValue,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
else:
if tmpOutputPath is None:
tmpOutputPath = join(outputPath, "piecewise_output")
piList = _extractPIPiecewise(
inputFN,
outputFN,
praatEXE,
minPitch,
maxPitch,
tgFN,
tierName,
tmpOutputPath,
sampleStep,
silenceThreshold,
pitchUnit,
forceRegenerate,
undefinedValue=undefinedValue,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp,
)
return piList
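# Example call of extractPI above (illustrative only; the file paths and the
# location of the praat executable are assumptions):
#
#     piList = extractPI(
#         "audio/speaker1.wav",
#         "output/speaker1_pi.txt",
#         "/usr/bin/praat",
#         minPitch=75,
#         maxPitch=450,
#     )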
def loadTimeSeriesData(
fn: str, undefinedValue: float = None
) -> List[Tuple[float, ...]]:
"""
For reading the output of get_pitch_and_intensity or get_intensity
Data should be of the form
[(time1, value1a, value1b, ...),
(time2, value2a, value2b, ...), ]
"""
name = os.path.splitext(os.path.split(fn)[1])[0]
try:
with io.open(fn, "r", encoding="utf-8") as fd:
data = fd.read()
except IOError:
print(f"No pitch track for: {name}")
raise
dataList = [row.split(",") for row in data.splitlines() if row != ""]
# The new praat script includes a header
if dataList[0][0] == "time":
dataList = dataList[1:]
newDataList = []
for row in dataList:
time = float(row.pop(0))
entry = [
time,
]
doSkip = False
for value in row:
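            # Praat writes "--undefined--" for frames where pitch or intensity
            # could not be measured; such values are either replaced with
            # undefinedValue or the whole row is skipped.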
if "--" in value:
if undefinedValue is not None:
appendValue = undefinedValue
else:
doSkip = True
break
else:
appendValue = float(value)
entry.append(appendValue)
if doSkip is True:
continue
newDataList.append(tuple(entry))
return newDataList
def generatePIMeasures(
dataList: List[Tuple[float, float, float]],
tgFN: str,
tierName: str,
doPitch: bool,
medianFilterWindowSize: int = None,
globalZNormalization: bool = False,
localZNormalizationWindowSize: int = 0,
) -> List[Tuple[float, ...]]:
"""
Generates processed values for the labeled intervals in a textgrid
nullLabelList - labels to ignore in the textgrid. Defaults to ["",]
    if doPitch is True, pitch measures are generated; if False, RMS intensity is generated
medianFilterWindowSize: if none, no filtering is done
globalZNormalization: if True, values are normalized with the mean
and stdDev of the data in dataList
localZNormalization: if greater than 1, values are normalized with the mean
and stdDev of the local context (for a window of 5, it
would consider the current value, 2 values before and 2
values after)
"""
# Warn user that normalizing a second time nullifies the first normalization
if globalZNormalization is True and localZNormalizationWindowSize > 0:
raise errors.NormalizationException()
castDataList = cast(List[Tuple[float, ...]], dataList)
if globalZNormalization is True:
if doPitch:
castDataList = my_math.znormalizeSpeakerData(castDataList, 1, True)
else:
castDataList = my_math.znormalizeSpeakerData(castDataList, 2, True)
# Raw values should have 0 filtered; normalized values are centered around 0, so don't filter
filterZeroFlag = not globalZNormalization
tg = textgrid.openTextgrid(tgFN, False)
if not isinstance(tg.tierDict[tierName], textgrid.IntervalTier):
raise errors.IncompatibleTierError(tg.tierDict[tierName])
tier = cast(textgrid.IntervalTier, tg.tierDict[tierName])
piData = tier.getValuesInIntervals(castDataList)
outputList: List[List[float]] = []
for interval, entryList in piData:
label = interval[0]
if doPitch:
tmpValList = [f0Val for _, f0Val, _ in entryList]
f0Measures = getPitchMeasures(
tmpValList, tgFN, label, medianFilterWindowSize, filterZeroFlag
)
outputList.append(list(f0Measures))
else:
tmpValList = [intensityVal for _, _, intensityVal in entryList]
if filterZeroFlag:
tmpValList = [
intensityVal for intensityVal in tmpValList if intensityVal != 0.0
]
rmsIntensity = 0.0
if len(tmpValList) != 0:
rmsIntensity = my_math.rms(tmpValList)
outputList.append(
[
rmsIntensity,
]
)
# Locally normalize the output
if localZNormalizationWindowSize > 0 and len(outputList) > 0:
for colI in range(len(outputList[0])):
featValList = [row[colI] for row in outputList]
featValList = my_math.znormWindowFilter(
featValList, localZNormalizationWindowSize, True, True
)
if len(featValList) != len(outputList): # This should hopefully not happen
raise errors.UnexpectedError(
"Lists must be of the same length but are not: "
f"({len(featValList)}), ({len(outputList)})"
)
for i, val in enumerate(featValList):
outputList[i][colI] = val
return [tuple(row) for row in outputList]
def getPitchMeasures(
f0Values: List[float],
name: str = None,
label: str = None,
medianFilterWindowSize: int = None,
filterZeroFlag: bool = False,
) -> Tuple[float, float, float, float, float, float]:
"""
Get various measures (min, max, etc) for the passed in list of pitch values
name is the name of the file. Label is the label of the current interval.
    Both of these labels are only used for debugging and can be ignored if desired.
medianFilterWindowSize: None -> no median filtering
filterZeroFlag:True -> zero values are removed
"""
if name is None:
name = UNSPECIFIED
if label is None:
label = UNSPECIFIED
if medianFilterWindowSize is not None:
f0Values = my_math.medianFilter(
f0Values, medianFilterWindowSize, useEdgePadding=True
)
if filterZeroFlag:
f0Values = [f0Val for f0Val in f0Values if int(f0Val) != 0]
if len(f0Values) == 0:
myStr = f"No pitch data for file: {name}, label: {label}"
print(myStr.encode("ascii", "replace"))
counts = 0.0
meanF0 = 0.0
maxF0 = 0.0
minF0 = 0.0
rangeF0 = 0.0
variance = 0.0
std = 0.0
else:
counts = float(len(f0Values))
meanF0 = sum(f0Values) / counts
maxF0 = max(f0Values)
minF0 = min(f0Values)
rangeF0 = maxF0 - minF0
variance = sum([(val - meanF0) ** 2 for val in f0Values]) / counts
std = math.sqrt(variance)
return (meanF0, maxF0, minF0, rangeF0, variance, std)
def detectPitchErrors(
pitchList: List[Tuple[float, float]],
maxJumpThreshold: float = 0.70,
tgToMark: Optional[textgrid.Textgrid] = None,
) -> Tuple[List[Point], Optional[textgrid.Textgrid]]:
"""
Detect pitch halving and doubling errors.
If a textgrid is passed in, it adds the markings to the textgrid
"""
if maxJumpThreshold < 0 or maxJumpThreshold > 1:
raise errors.ArgumentError(
f"'maxJumpThreshold' must be between 0 and 1. Was given ({maxJumpThreshold})"
)
tierName = _PITCH_ERROR_TIER_NAME
if tgToMark is not None and tierName in tgToMark.tierNameList:
raise errors.ArgumentError(
f"Tier name '{tierName}' is already in provided textgrid"
)
errorList = []
for i in range(1, len(pitchList)):
lastPitch = pitchList[i - 1][1]
currentPitch = pitchList[i][1]
ceilingCutoff = currentPitch / maxJumpThreshold
floorCutoff = currentPitch * maxJumpThreshold
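        # With the default maxJumpThreshold of 0.70, a change of more than
        # roughly 1.43x (or below 0.70x) between adjacent samples is treated
        # as a likely halving/doubling error.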
if (lastPitch <= floorCutoff) or (lastPitch >= ceilingCutoff):
currentTime = pitchList[i][0]
errorList.append(Point(currentTime, str(currentPitch / lastPitch)))
if tgToMark is not None:
pointTier = textgrid.PointTier(
tierName, errorList, tgToMark.minTimestamp, tgToMark.maxTimestamp
)
tgToMark.addTier(pointTier)
return errorList, tgToMark
|
"""
A collection of utility functions:
.. autosummary::
start_delayed
"""
from typing import Generator
from simpy.core import Environment, SimTime
from simpy.events import Event, Process, ProcessGenerator
def start_delayed(
env: Environment, generator: ProcessGenerator, delay: SimTime
) -> Process:
"""Return a helper process that starts another process for *generator*
after a certain *delay*.
:meth:`~simpy.core.Environment.process()` starts a process at the current
simulation time. This helper allows you to start a process after a delay of
*delay* simulation time units::
>>> from simpy import Environment
>>> from simpy.util import start_delayed
>>> def my_process(env, x):
... print(f'{env.now}, {x}')
... yield env.timeout(1)
...
>>> env = Environment()
>>> proc = start_delayed(env, my_process(env, 3), 5)
>>> env.run()
5, 3
Raise a :exc:`ValueError` if ``delay <= 0``.
"""
if delay <= 0:
raise ValueError(f'delay(={delay}) must be > 0.')
def starter() -> Generator[Event, None, Process]:
yield env.timeout(delay)
proc = env.process(generator)
return proc
return env.process(starter())
def subscribe_at(event: Event) -> None:
"""Register at the *event* to receive an interrupt when it occurs.
The most common use case for this is to pass
a :class:`~simpy.events.Process` to get notified when it terminates.
Raise a :exc:`RuntimeError` if ``event`` has already occurred.
"""
env = event.env
assert env.active_process is not None
subscriber = env.active_process
def signaller(signaller: Event, receiver: Process) -> ProcessGenerator:
result = yield signaller
if receiver.is_alive:
receiver.interrupt((signaller, result))
if event.callbacks is not None:
env.process(signaller(event, subscriber))
else:
raise RuntimeError(f'{event} has already terminated.')
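if __name__ == '__main__':
    # Hedged usage sketch (not part of the library): a watcher process
    # subscribes to a worker process and reacts when it terminates.
    import simpy

    def worker(env):
        yield env.timeout(3)

    def watcher(env, proc):
        subscribe_at(proc)
        try:
            yield env.timeout(10)
        except simpy.Interrupt as interrupt:
            finished_proc, value = interrupt.cause
            print(f'{env.now}: {finished_proc} finished with value {value}')

    env = Environment()
    proc = env.process(worker(env))
    env.process(watcher(env, proc))
    env.run()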
|
import numpy as np
import matplotlib.pyplot as plt
"""initialize the input vestor X and output vector Y"""
def get_data():
temp_x = []
temp_y = []
for i in range(1, 51):
x_i = i
temp_x.append(x_i)
temp_point = np.random.uniform(-1, 1, 1)
y_i = float(i + temp_point)
temp_y.append(y_i)
return temp_x, temp_y
def gradient_cost_function(gcf, x1, y1):
#gcf = np.asarray(gcf)
gradient_y = 0.0
gradient_x = 0.0
x1 = np.asarray(x1)
y1 = np.asarray(y1)
gcf_x = gcf[0][0]
gcf_y = gcf[0][1]
temp_x = 0.0
temp_y = 0.0
    for j in range(50):
        temp_x = temp_x + (y1[j] - (gcf_x + (gcf_y * x1[j])))
        temp_y = temp_y + x1[j] * (y1[j] - (gcf_x + (gcf_y * x1[j])))
gradient_x = 2 * temp_x
gradient_y = 2 * temp_y
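    # temp_x and temp_y accumulate the residuals, so gradient_x and gradient_y
    # are the negatives of the partial derivatives of the squared-error cost
    # sum_j (y_j - (w0 + w1 * x_j))**2 with respect to w0 and w1; adding
    # learning_rate * new_gcf to the weights therefore performs gradient
    # descent on that cost.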
new_gcf = np.asarray((gradient_x, gradient_y))
return new_gcf
def y_cal(w):
w0 = w[0][0]
w1 = w[0][1]
y_out = []
for i in range(1, 51):
y_temp = w0 + (w1 * i)
y_out.append(y_temp)
return y_out
"""check output"""
x = []
y = []
x, y = get_data()
#x1 , y1 = x, y
# print "x values"
# print x
# print "y values"
# print y
"""initialize weights used by the algorithm"""
weights = np.random.rand(1, 2)
learning_rate = 0.000001
epoch = 0
threshold = 0.000002
total_error = 0
# print "weights"
# print weights
cal_output = np.dot(weights, x[0])
# print "calculated output"
# print cal_output
check_weights = weights
while True:
"""calculate the total error"""
cal_output = gradient_cost_function(weights, x, y)
weights = weights + (learning_rate * cal_output)
# for j in range(1, 51):
# cal_output = np.dot(weights, x[j - 1])
# # temp_error = (y[j-1] - cal_output) * (y[j-1] - cal_output)
# temp_error = (pow((y[j - 1] - cal_output), 2))
# total_error = total_error + temp_error
epoch = epoch + 1
diff_w0 = -(check_weights[0][0] - weights[0][0])
diff_w1 = -(check_weights[0][1] - weights[0][1])
print "epoch::", epoch, "weights difference", diff_w0, " ", diff_w1
    if abs(diff_w0) <= threshold and abs(diff_w1) <= threshold:
break
else:
check_weights = 0
check_weights = weights
# total_error = 0
print "final weights:"
print weights
y_cal_plot = y_cal(weights)
plt.figure(1)
plt.scatter(x, y, color='blue', s=4)
plt.scatter(x, y_cal_plot, color='red', s=4)
plt.savefig('question3Gradient.png')
|
# Import the socket and sys modules
import socket
import sys
import string
import numpy as np
import matplotlib.pyplot as plt
#bolin Zhao. [email protected]
# Create a socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Get the local host name
#host = socket.gethostname()
host = '10.75.81.226'
# Set the port number
port = 27015
# Connect to the service, specifying the host and port
s.connect((host, port))
sendmsg='$FMWSM,1,-25,-310,0,0,0,&*'
# Receive data from the server in a loop (up to 50000 bytes per recv)
plt.ion() # interactive mode on
for i in range(500):
msg2 = []
msg = s.recv(50000)
msg2=msg.decode('utf-8')
#### Process Data ####
msg_line = msg2.split('\n')
x = []
y = []
v = []
    for j in range(len(msg_line) - 1):
        msg_single_line = msg_line[j]
block = msg_single_line.split(',')
x.append(block[0])
y.append(block[1])
v.append(block[5])
yy = list(map(float, y))
xx = list(map(float, x))
vv = list(map(float, v))
plt.figure(1)
plt.clf()
plt.axis('off')
plt.plot(xx, yy)
plt.figure(2)
plt.clf()
plt.axis('off')
    n = len(vv)
    plt.plot(range(n), vv)
######################
s.send(sendmsg.encode())
plt.draw()
plt.pause(1e-10)
s.close() |
import json
import os
import yaml
import argparse
import make
from collections import OrderedDict
from datetime import datetime
from shutil import copy as copy_file
'''
12/4/18
Suppressing writing term ref's in make.py
Preserving pre-existing term ref's in modify,
but suppressing functionality to add new term ref's in modify.
Reason:
Still currently maintaining the "old syntax"
At some point we need to convert all the existing PlanX dictionaries to the new syntax
This involves converting old term syntax to new term syntax which supports multiple ref's
This script can perform the conversion
Uncomment the commented sections in this script and make.py
To write terms with the new syntax
We will need to add a small bit of code
To actually convert pre-existing term refs to the new syntax
But that should only take a few lines
Search 'terms syntax' to find the place in this script to extend
Search 'new syntax' to find the place in make.py to uncomment
TSV Template Update:
The old <term> column is now <terms>,
and the column should be populated with a csv list of keys in _terms.yaml
E.g., "A, B, C"
Where A, B, and C are entry titles (keys) in _terms.yaml
And this would get translated to:
terms:
- $ref: "_terms.yaml#/A"
- $ref: "_terms.yaml#/B"
- $ref: "_terms.yaml#/C"
'''
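# Hedged illustration (not used by this script, helper name is hypothetical):
# translating a <terms> cell such as "A, B, C" into the new-syntax list of
# refs could look roughly like this:
#
# def terms_cell_to_refs(cell):
#     return [{'$ref': '_terms.yaml#/%s' % key.strip()} for key in cell.split(',')]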
# first function called in main script - before modify_dictionary()
def setup():
'''Performs all the necessary preparation work so that we can run modify_dictionary().'''
parse_options()
create_output_path()
load_make_config()
get_all_changes_map()
# first function called in setup()
def parse_options():
'''Obtain path_to_schemas, namespace value, name of directory containing target nodes and variables TSV files, and name of output dictionary.'''
global args
parser = argparse.ArgumentParser(description="Obtain path_to_schemas, namespace value, name of directory containing target nodes and variables TSV files, and name of output dictionary.")
parser.add_argument("-p", "--path_to_schemas", dest="path_to_schemas", required=False, help="Path to input schemas, relative to directory dictionary_tools.")
parser.add_argument("-n", "--namespace", dest="namespace", required=True, help="Desired namespace for the output dictionary - e.g., niaid.bionimbus.org")
parser.add_argument("-i", "--input_tsv", dest="input_tsv", required=True, help="Name of directory containing target nodes and variables TSV files.")
parser.add_argument("-o", "--out_dict_name", dest="out_dict_name", required=False, help="Name of output dictionary.")
args = parser.parse_args()
return args
# second function called in setup()
def create_output_path():
'''Create path to the output dictionary: dictionary_tools/output/modify/<out_dict_name>'''
global out_path
if args.out_dict_name:
out_dict_name = args.out_dict_name
else:
out_dict_name = datetime.strftime(datetime.now(), 'out_dict_%m.%d_%H.%M')
out_path = '../../output/modify/' + out_dict_name + '/'
make.mkdir(out_path)
# third function called in setup()
def load_make_config():
'''Load templates used for creating new nodes and constructing links section text-blocks.'''
global content_template, link_template, group_template, link_props
content_template, link_template, group_template, link_props = make.load_config()
# fourth function called in setup()
def get_all_changes_map():
'''
Aggregates information from nodes and variables input TSV sheets into one data structure.
Result is a dictionary with node names (e.g., 'sample', 'case') as keys, and the values are dictionaries:
{
'action': 'add'/'update'/'delete',
'link': [{<row>}],
'variable': [{<row_1>}, {<row_2>}, ..., {<row_k>}]
}
Where 'link' maps to the list of rows corresponding to the node in the nodes.tsv file,
and 'variable' maps to the list of rows corresponding to the node in the variables.tsv file.
The dictionaries which correspond to rows in the TSV sheets have the headers from the sheet as keys,
and the values are the row's values for that header/column.
'''
global all_changes_map
nodes, variables = make.get_data(args.input_tsv)
nodes_to_modify = set(list(nodes.keys()) + list(variables.keys()))
all_changes_map = {}
for node in nodes_to_modify:
action = 'update' # action is always update, unless field <node_action> is specified as 'add' or 'delete' in nodes.tsv
# determine if the node_action is add or delete
if nodes.get(node):
for node_action in ['add', 'delete']:
if nodes[node][0]['<node_action>'] == node_action:
action = node_action
all_changes_map[node] = {'action': action,
'link': nodes.get(node),
'variable': variables.get(node)}
# return all_changes_map
# not returning, since using it as a global name
# second function called in main script - after setup()
def modify_dictionary():
'''Iterates through the node list and passes each node into the handle_node() processing pipeline.'''
node_list = get_node_list() # this is a list of all the nodes we need to process
for node in node_list:
# node is 'sample', 'case', etc.
handle_node(node)
# called in modify_dictionary()
def handle_node(node):
'''Main pipeline for processing nodes.
Each node is handled first on whether or not we need to process it at all,
and then if we need to process it, if we are adding, updating, or deleting this node.
The first check is made by seeing if the node appears in all_changes_map,
and if it does, then we read the 'action' value associated with that node
to determine what action to take: add/update/delete.
If the node does not appear in all_changes_map, then we keep that schema
with no changes except to populate the namespace with the value from args.
'''
print '\n~~~~~ ' + node + ' ~~~~~'
if node in all_changes_map:
if all_changes_map[node]['action'] == 'delete':
print 'Removing node - ' + node + '\n'
return None
elif all_changes_map[node]['action'] == 'add':
print 'Creating node - ' + node + '\n'
make_schema(node)
elif all_changes_map[node]['action'] == 'update':
print 'Modifying node - ' + node + '\n'
modify_schema(node)
else: # for debugging purposes
print '\nHey here is a problem: ' + all_changes_map[node]['action']
else:
print 'Keeping node - ' + node + '\n'
keep_schema(node)
# called in handle_node()
def make_schema(node):
'''This is a new node which needs to be created, so we refer to the make.py module,
which contains the functionality from the old dict_creator.py script.
We pass this node into the make.create_node_schema() pipeline which
results in the creation of the YAML file for the new node.
See the make.py module for details.
'''
make.create_node_schema(node, args, all_changes_map, out_path)
# called in handle_node()
def keep_schema(node):
'''No changes are specified for this node, but we always update the namespace.
Here we load the schema, make the namespace change, and then write the file.
'''
schema_text, schema_dict = get_schema(node)
schema_text = modify_namespace(schema_text, schema_dict)
write_file(schema_text, schema_dict)
# called in handle_node()
def modify_schema(node):
'''Pipeline for the 'update' node action.
Loads in existing schema as a string and a dictionary.
Updates the namespace in schema_text,
then updates the links in both the text and the dictionary,
next updates the properties in the dictionary,
and finally passes the fully updated (text, dictionary)
pair to write_file() to create the new YAML file.
'''
schema_text, schema_dict = get_schema(node)
schema_text = modify_namespace(schema_text, schema_dict)
# if no changes, schema_text and schema_dict are returned untouched
schema_text, schema_dict = modify_links(schema_text, schema_dict)
# if no changes, schema_dict is returned untouched
schema_dict = modify_properties(schema_dict)
write_file(schema_text, schema_dict)
# called in keep_schema() and modify_schema()
def get_schema(node):
'''Load and return contents of node YAML file, as a (string, dictionary) pair.'''
path = path_to_schemas + node + '.yaml'
# 'input/dictionaries/gdcdictionary/gdcdictionary/schemas/' + 'sample' + '.yaml'
schema_text = open(path).read()
schema_dict = yaml.load(open(path))
return schema_text, schema_dict
# called in keep_schema() and modify_schema()
def modify_namespace(schema_text, schema_dict):
'''Update namespace in schema_text.'''
if 'namespace' in schema_dict:
schema_text = schema_text.replace(schema_dict['namespace'], args.namespace)
else: # no namespace listed!
print '\nWARNING: No namespace listed in file - ' + schema_dict['id'] + '.yaml\n'
return schema_text
# called in modify_schema()
def modify_properties(schema_dict):
'''Takes the schema dictionary as input,
makes all the property changes for the given node (to the property list and required list)
as indicated in all_changes_map (which is the information from variables.tsv),
and returns the appropriately modified schema dictionary.
'''
node = schema_dict['id']
if all_changes_map[node]['variable']:
print ' - Updating Property List -\n'
for row in all_changes_map[node]['variable']:
print '\t' + row['<field_action>'] + ' - ' + row['<field>'] + '\n'
if row['<field_action>'] in ['add', 'update']:
prop_entry = make.build_prop_entry(schema_dict, row)
schema_dict['properties'][row['<field>']] = prop_entry
elif row['<field_action>'] == 'delete':
schema_dict['properties'].pop(row['<field>'])
if row['<field>'] in schema_dict['required']:
schema_dict['required'].remove(row['<field>'])
if str(row['<required>']).lower().strip() == 'true' and row['<field>'] not in schema_dict['required']:
schema_dict['required'].append(row['<field>'])
elif str(row['<required>']).lower().strip() == 'false' and row['<field>'] in schema_dict['required']:
schema_dict['required'].remove(row['<field>'])
return schema_dict
# called in modify_schema()
def modify_links(schema_text, schema_dict):
'''For the given node, reads the link update information in all_changes_map (if any),
creates the new link section (as a text block) as specified in the nodes.tsv file,
updates the schema dictionary with this new link section,
replaces the old links section in the schema text with the new links section,
puts the links in the property list in the schema dictionary,
and finally returns the updated (text, dictionary) pair.
'''
node = schema_dict['id']
# if there are changes to be made
# None (False) if no changes, else it is a list containing a single row from nodes.tsv
if all_changes_map[node]['link']:
print ' - Updating Links -\n'
# remove old links from property list and required list if there
schema_dict = remove_old_links(schema_dict)
# put new links in property list and required if specified as such
schema_dict = put_links_in_prop_list(node, schema_dict)
# create new links section text block
updated_link_block = make.return_link_block(node, all_changes_map)
# update the 'links' entry in schema_dict
schema_dict['links'] = yaml.load(updated_link_block)
# update 'links' block in schema_text
prev_link_block = get_links_text(schema_text)
schema_text = schema_text.replace(prev_link_block, updated_link_block)
return schema_text, schema_dict
# called in modify_dictionary()
def get_node_list():
'''Returns the list of all nodes that we need to consider:
1) Nodes from the input dictionary, and
2) Nodes listed in the input TSV sheets
'''
if args.path_to_schemas:
input_dict = set(get_input_dict()) # files from input dictionary
ignore_files = set(['projects', 'README.md', '_definitions.yaml', '_settings.yaml', '_terms.yaml', '.DS_Store'])
handle_ignore_files(ignore_files)
# get just the node names, without file extensions
input_nodes = [k[:-5] for k in input_dict.difference(ignore_files)]
else:
input_nodes = []
# pool these node names with those in the input_tsv sheets (in all_changes_map), remove duplicates, convert back to a list
node_list = list(set(input_nodes + list(all_changes_map.keys())))
return sorted(node_list)
# called in get_node_list()
def handle_ignore_files(ignore_files):
'''Copies the ignore files (if they exist in the input dictionary) over from input dictionary to the output dictionary.'''
for file_name in ignore_files:
in_file = path_to_schemas + file_name
# this filters out non-file entities, e.g., directories - see directory 'projects'
if os.path.isfile(in_file):
out_file = out_path + file_name
copy_file(in_file, out_file)
# called in get_node_list()
def get_input_dict():
'''Returns a list containing all the filenames from the input dictionary.'''
global path_to_schemas
# path from args, relative to dictionary_tools/
# e.g., input/dictionaries/gdcdictionary/gdcdictionary/schemas/
path_to_schemas = '../../' + args.path_to_schemas
if path_to_schemas[-1] != '/':
path_to_schemas += '/'
input_dict = os.listdir(path_to_schemas)
return input_dict
# called in modify_links()
def get_links_text(schema_text):
'''Returns the link section text from the yaml file. As in '<link>' in the config yaml_template.'''
temp = schema_text.split('links:\n')
temp = temp[1].split('\n\nuniqueKeys:')
return temp[0]
# called in modify_links()
def remove_old_links(schema_dict):
'''Remove old links from property list and required list if there.'''
old_link_names = get_link_names(schema_dict)
for old_link in old_link_names:
schema_dict['properties'].pop(old_link)
if old_link in schema_dict['required']:
schema_dict['required'].remove(old_link)
return schema_dict
# called in modify_links()
def put_links_in_prop_list(node, schema_dict):
'''Updates the property list and required list of the given node
with the new links as specified in all_changes_map (which is the information from nodes.tsv)'''
link_map = make.build_link_map(node, all_changes_map)
for i in range(len(link_map['<link_name>'])):
if type(link_map['<link_name>'][i]) is str:
schema_dict = make_link_property(link_map['<link_name>'][i], link_map['<multiplicity>'][i], schema_dict)
if link_map['<link_required>'][i].lower() == 'true':
schema_dict['required'].append(link_map['<link_name>'][i])
else:
link_group = link_map['<link_name>'][i]
link_mult_group = link_map['<multiplicity>'][i]
for k in range(len(link_group)):
schema_dict = make_link_property(link_group[k], link_mult_group[k], schema_dict)
if link_map['<link_required>'][i][k].lower() == 'true':
schema_dict['required'].append(link_group[k])
return schema_dict
# called in put_links_in_prop_list()
def make_link_property(link_name, link_mult, schema_dict):
'''Updates the schema_dict property list with an entry for the given link.'''
if 'to_one' in link_mult:
schema_dict['properties'][link_name] = {'$ref': "_definitions.yaml#/to_one"}
else:
schema_dict['properties'][link_name] = {'$ref': "_definitions.yaml#/to_many"}
return schema_dict
# called in modify_links()
def get_link_names(schema_dict):
'''Return a list containing all the link names from the links section of the given schema dictionary.'''
link_names = []
try:
links = schema_dict['links']
for link in links:
if 'subgroup' in link:
group = link['subgroup']
for item in group:
link_names.append(item['name'])
else:
link_names.append(link['name'])
except KeyError:
print('no links for - ' + schema_dict['id'])
return link_names
# called in modify_schema() and keep_schema()
def write_file(schema_text, schema_dict):
'''Pipeline for creating a YAML file from the given schema text and dictionary.'''
node = schema_dict['id']
with open(out_path + node + '.yaml', 'w') as out_file:
schema_content = schema_text.split('\nrequired:')[0]
# write everything through 'uniqueKeys' here
out_file.write(schema_content)
'''
.
.
.
links:
- name: subjects
backref: samples
label: derived_from
target_type: subject
multiplicity: many_to_one
required: TRUE
uniqueKeys:
- [id]
- [project_id, submitter_id]
^ this much is written - start next with writing '\nrequired:\n'
'''
# here we write the list of required properties/links
out_file.write('\nrequired:\n')
for req in sorted(schema_dict['required']):
out_file.write(' - %s\n' % req)
# finally we write ordered property list
out_file.write('\nproperties:\n')
ordered_properties = OrderedDict(sorted(schema_dict['properties'].items(), key=lambda t: t[0]))
if '$ref' in ordered_properties:
ref_list = ordered_properties.popitem(last=False)[1]
out_file.write(' $ref: "%s"\n' % ref_list)
'''
else:
print 'No $ref property list!'
'''
link_names = get_link_names(schema_dict)
links = []
for pair in ordered_properties.items():
if pair[0] in link_names:
links.append(pair) # collect the links to write at the bottom of the list
else:
write_property(pair, out_file)
# first we write all the properties, then lastly the links as properties
for pair in links:
write_property(pair, out_file)
# called in write_file()
def write_property(pair, out_file):
'''Writes the property entry for the given property pair, where
pair[0] is the property name, and
pair[1] is the property entry in dictionary form.'''
# clean up, break into smaller bits
# pair[0] is the property name
# pair[1] is the property block
out_file.write('\n')
out_file.write(' %s:\n' % pair[0].strip().encode("utf-8"))
if '$ref' in pair[1]:
out_file.write(' $ref: "%s"\n' % pair[1]['$ref'].strip().encode("utf-8"))
pair[1].pop('$ref')
# write systemAlias (for 'id' on, e.g., keyword.yaml)
if 'systemAlias' in pair[1]:
out_file.write(' systemAlias: %s\n' % pair[1]['systemAlias'].strip().encode("utf-8"))
pair[1].pop('systemAlias')
# eventually we need to convert all the old terms syntax to new terms syntax
# that conversion can happen right HERE
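    # Hedged sketch (kept inactive; assumes the old entry is a single
    # {'$ref': "_terms.yaml#/<key>"} mapping) of what that in-place
    # conversion could look like:
    #
    # if 'term' in pair[1] and '$ref' in pair[1]['term']:
    #     pair[1]['terms'] = [{'$ref': pair[1].pop('term')['$ref']}]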
# write term - OLD SYNTAX
if 'term' in pair[1] and '$ref' in pair[1]['term']:
out_file.write(' term:\n')
out_file.write(' $ref: "%s"\n' % pair[1]['term']['$ref'].strip().encode("utf-8"))
pair[1].pop('term')
'''
# NEW SYNTAX
# write terms
if 'terms' in pair[1]:
out_file.write(' terms:\n')
for term in pair[1]['terms']:
out_file.write(' - $ref: "_terms.yaml#/%s"\n' % term.strip().encode("utf-8"))
pair[1].pop('terms')
'''
# write description
if 'description' in pair[1]:
if isinstance(pair[1]['description'], unicode):
desc = pair[1]['description'].strip().encode("utf-8")
else:
desc = unicode(pair[1]['description'], 'utf-8').strip().encode("utf-8")
desc = desc.replace('\n', '\n ')
out_file.write(' description: >\n')
out_file.write(' %s\n' % desc)
pair[1].pop('description')
# see 'in_review' on project
if 'default' in pair[1]:
out_file.write(' default: %s\n' % pair[1]['default'])
pair[1].pop('default')
# write type
# presently NOT handling the case where type is a list
if 'type' in pair[1]:
'''
if type(pair[1]['type']) is list:
print 'Type is a list, not a string - ' + pair[0]
'''
out_file.write(' type: %s\n' % pair[1]['type']) #.strip().encode("utf-8") if string, not on list
pair[1].pop('type')
for item in ['format', 'minimum', 'maximum']:
if item in pair[1]:
out_file.write(' %s: %s\n' % (item, pair[1][item]))
pair[1].pop(item)
# write enum
if 'enum' in pair[1]:
out_file.write(' enum:\n')
for option in sorted(pair[1]['enum']):
out_file.write(' - "%s"\n' % option.strip().encode("utf-8"))
pair[1].pop('enum')
if len(pair[1]) > 0:
print 'WARNING: unaddressed items for this property!! - ' + pair[0]
print json.dumps(pair[1], indent=2)
if __name__ == "__main__":
setup()
modify_dictionary()
# call to COMPARE module
|
from .manager import GeneralMomentAccountant, PrivacyManager # noqa: F401
|
import logging
from collections import defaultdict
from typing import Dict, List, Tuple, Union
import numpy
from openforcefield.topology import Molecule
from openforcefield.typing.engines.smirnoff import ForceField, ParameterHandler
from simtk import unit
from simtk.openmm import copy, openmm
from inspector.library.forcefield import label_molecule
from inspector.library.models.energy import DecomposedEnergy
from inspector.library.models.molecule import RESTMolecule
from inspector.library.models.smirnoff import SMIRNOFFParameterType
logger = logging.getLogger(__name__)
_SUPPORTED_VALENCE_TAGS = [
"Bonds",
"Angles",
"ProperTorsions",
"ImproperTorsions",
]
def _get_openmm_parameters(
force: openmm.Force,
) -> Dict[Tuple[int, ...], List[Tuple[unit.Quantity, ...]]]:
"""Returns the parameters stored in a given force.
Args:
force: The force to retrieve the parameters from.
Returns:
A dictionary of the retrieved parameters where each key is a tuple of atom
indices, and each value is a list of the parameter sets associated with those
atom indices.
"""
omm_parameters = defaultdict(list)
if isinstance(force, openmm.HarmonicBondForce):
for i in range(force.getNumBonds()):
index_a, index_b, *parameters = force.getBondParameters(i)
omm_parameters[(index_a, index_b)].append(parameters)
assert sum(len(x) for x in omm_parameters.values()) == force.getNumBonds()
elif isinstance(force, openmm.HarmonicAngleForce):
for i in range(force.getNumAngles()):
index_a, index_b, index_c, *parameters = force.getAngleParameters(i)
omm_parameters[(index_a, index_b, index_c)].append(parameters)
assert sum(len(x) for x in omm_parameters.values()) == force.getNumAngles()
elif isinstance(force, openmm.PeriodicTorsionForce):
for i in range(force.getNumTorsions()):
(
index_a,
index_b,
index_c,
index_d,
*parameters,
) = force.getTorsionParameters(i)
omm_parameters[(index_a, index_b, index_c, index_d)].append(parameters)
assert sum(len(x) for x in omm_parameters.values()) == force.getNumTorsions()
else:
raise NotImplementedError
return omm_parameters
def _add_openmm_parameter(
force: openmm.Force,
atom_indices: Tuple[int, ...],
parameters: Tuple[unit.Quantity, ...],
):
"""A convenience method to add a set of parameters to a force.
Args:
force: The force to add the parameters to.
atom_indices: The atom indices the parameters apply to.
parameters: The parameters to add.
"""
if isinstance(force, openmm.HarmonicBondForce):
force.addBond(*atom_indices, *parameters)
elif isinstance(force, openmm.HarmonicAngleForce):
force.addAngle(*atom_indices, *parameters)
elif isinstance(force, openmm.PeriodicTorsionForce):
force.addTorsion(*atom_indices, *parameters)
else:
raise NotImplementedError
def group_force_by_parameter_id(
handler_force: openmm.Force,
parameters: List[SMIRNOFFParameterType],
parameter_map: Dict[str, List[Tuple[int, ...]]],
) -> Dict[str, openmm.Force]:
"""Partitions the parameters in a force into a separate force for each parameter
id.
Args:
handler_force: The force containing the parameters to partition.
parameters: The original SMIRNOFF parameters used to populate the
``handler_force``.
parameter_map: A mapping between each parameter id and the atom indices the
parameter was applied to.
Returns:
A dictionary of parameter ids and the associated force.
"""
grouped_forces = {}
grouped_counter = 0
# Store the force parameters in a dictionary partitioned by atom indices
omm_parameters = _get_openmm_parameters(handler_force)
# Copy each term in the force over to the correct force group
for parameter in parameters:
grouped_force = handler_force.__class__()
grouped_forces[parameter.id] = grouped_force
for mapped_atom_indices in parameter_map[parameter.id]:
# Improper torsions are a special case due to the trefoil enumeration.
if parameter.type == "ImproperTorsionType":
others = [
mapped_atom_indices[0],
mapped_atom_indices[2],
mapped_atom_indices[3],
]
enumerated_atom_indices = [
(mapped_atom_indices[1], p[0], p[1], p[2])
for p in [
(others[i], others[j], others[k])
for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]
]
]
else:
enumerated_atom_indices = [mapped_atom_indices]
for atom_indices in enumerated_atom_indices:
for omm_parameter in omm_parameters[atom_indices]:
_add_openmm_parameter(grouped_force, atom_indices, omm_parameter)
grouped_counter += 1
return grouped_forces
def group_forces_by_parameter_id(
molecule: Molecule, force_field: ForceField
) -> Tuple[openmm.System, Dict[str, Dict[str, int]]]:
"""Applies a particular force field to a specified molecule creating an OpenMM
    system object where each valence parameter (as identified by its unique id)
is separated into a different force group.
Notes:
* All nonbonded forces will be assigned to force group 0.
Args:
        molecule: The molecule to apply the force field to.
force_field: The force field to apply.
Returns:
A tuple of the created OpenMM system, and a dictionary of the form
``force_groups[HANDLER_TAG][PARAMETER_ID] = FORCE_GROUP_INDEX``.
"""
# Label the molecule with the parameters which will be assigned so we can access
# which 'slot' is filled by which parameter. This allows us to carefully split the
# potential energy terms into different force groups.
applied_parameters = label_molecule(molecule, force_field)
# Create an OpenMM system which will not have force groups yet.
omm_system: openmm.System = force_field.create_openmm_system(molecule.to_topology())
# Create a new OpenMM system to store the grouped forces in and copy over the
# nonbonded forces.
grouped_omm_system = openmm.System()
matched_forces = set()
for i in range(omm_system.getNumParticles()):
grouped_omm_system.addParticle(omm_system.getParticleMass(i))
nonbonded_forces = [
(i, force)
for i, force in enumerate(omm_system.getForces())
if isinstance(force, openmm.NonbondedForce)
]
assert (
len(nonbonded_forces) == 1
), "expected only one instance of a NonbondedForce force."
grouped_omm_system.addForce(copy.deepcopy(nonbonded_forces[0][1]))
matched_forces.add(nonbonded_forces[0][0])
# Split the potential energy terms into per-parameter-type force groups.
force_group_indices = defaultdict(dict) # force_groups[HANDLER][PARAM_ID] = INDEX
force_group_counter = 1
for handler_type in applied_parameters.parameters:
if handler_type not in _SUPPORTED_VALENCE_TAGS:
continue
handler: ParameterHandler = force_field.get_parameter_handler(handler_type)
omm_force_type = handler._OPENMMTYPE
# Find the force associated with this handler.
handler_forces = [
(i, force)
for i, force in enumerate(omm_system.getForces())
if isinstance(force, omm_force_type)
]
assert (
len(handler_forces) == 1
), f"expected only one instance of a {omm_force_type.__name__} force."
force_index, handler_force = handler_forces[0]
matched_forces.add(force_index)
# Group the force into force per parameter id.
grouped_forces = group_force_by_parameter_id(
handler_force,
applied_parameters.parameters[handler_type],
applied_parameters.parameter_map,
)
for parameter_id, grouped_force in grouped_forces.items():
force_group = force_group_counter
grouped_force.setForceGroup(force_group)
grouped_omm_system.addForce(grouped_force)
force_group_indices[handler_type][parameter_id] = force_group
force_group_counter += 1
return grouped_omm_system, force_group_indices
def evaluate_energy(
omm_system: openmm.System, conformer: unit.Quantity
) -> Tuple[unit.Quantity, Dict[int, unit.Quantity]]:
"""Computes both the total potential energy, and potential energy per force group,
of a given conformer.
Args:
omm_system: The system encoding the potential energy function.
conformer: The conformer to compute the energy of.
    Returns:
A tuple of the total potential energy, and a dictionary of the potential energy
per force group.
"""
integrator = openmm.VerletIntegrator(0.001 * unit.femtoseconds)
platform = openmm.Platform.getPlatformByName("Reference")
openmm_context = openmm.Context(omm_system, integrator, platform)
openmm_context.setPositions(conformer.value_in_unit(unit.nanometers))
energy_per_force_id = {}
for force in omm_system.getForces():
state = openmm_context.getState(
getEnergy=True, groups=1 << force.getForceGroup()
)
energy_per_force_id[force.getForceGroup()] = state.getPotentialEnergy()
state = openmm_context.getState(getEnergy=True)
total_energy = state.getPotentialEnergy()
return total_energy, energy_per_force_id
def evaluate_per_term_energies(
molecule: Union[Molecule, RESTMolecule],
conformer: unit.Quantity,
force_field: ForceField,
) -> DecomposedEnergy:
molecule = copy.deepcopy(molecule)
force_field = copy.deepcopy(force_field)
if isinstance(molecule, RESTMolecule):
molecule = molecule.to_openff()
# Remove constraints so we can access the bond energies.
if len(force_field.get_parameter_handler("Constraints").parameters) > 0:
logger.warning(
"Constraints will be removed when evaluating the per term energy."
)
force_field.deregister_parameter_handler("Constraints")
# Apply the force field to the molecule, making sure to add each parameter type into
# a separate force group.
omm_system, id_to_force_group = group_forces_by_parameter_id(molecule, force_field)
# Evaluate the energy.
total_energy, energy_per_force_id = evaluate_energy(omm_system, conformer)
summed_energy = sum(
x.value_in_unit(unit.kilojoules_per_mole) for x in energy_per_force_id.values()
)
assert numpy.isclose(
total_energy.value_in_unit(unit.kilojoules_per_mole), summed_energy
), "the ungrouped and grouped energies do not match."
# Decompose the contributions of the vdW and electrostatic interactions.
nonbonded_energy = energy_per_force_id[0].value_in_unit(unit.kilojoules_per_mole)
nonbonded_force = [
force
for force in omm_system.getForces()
if isinstance(force, openmm.NonbondedForce)
][0]
for i in range(nonbonded_force.getNumParticles()):
_, sigma, epsilon = nonbonded_force.getParticleParameters(i)
nonbonded_force.setParticleParameters(i, 0.0, sigma, epsilon)
for i in range(nonbonded_force.getNumExceptions()):
index_a, index_b, _, sigma, epsilon = nonbonded_force.getExceptionParameters(i)
nonbonded_force.setExceptionParameters(i, index_a, index_b, 0.0, sigma, epsilon)
_, no_charge_energies = evaluate_energy(omm_system, conformer)
vdw_energy = no_charge_energies[0].value_in_unit(unit.kilojoules_per_mole)
electrostatic_energy = nonbonded_energy - vdw_energy
return DecomposedEnergy(
valence_energies={
handler_name: {
parameter_id: energy_per_force_id[
id_to_force_group[handler_name][parameter_id]
].value_in_unit(unit.kilojoules_per_mole)
for parameter_id in id_to_force_group[handler_name]
}
for handler_name in id_to_force_group
},
vdw_energy=vdw_energy,
electrostatic_energy=electrostatic_energy,
)
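if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): decompose the energy of a
    # single ethanol conformer. The force field file name is an assumption and
    # requires the openff force fields package to be installed.
    molecule = Molecule.from_smiles("CCO")
    molecule.generate_conformers(n_conformers=1)
    force_field = ForceField("openff-1.0.0.offxml")
    decomposed = evaluate_per_term_energies(molecule, molecule.conformers[0], force_field)
    print(decomposed)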
|
# --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
import warnings
warnings.filterwarnings('ignore')
# Reading the file ('path' is assumed to be provided by the execution environment)
bank_data = pd.read_csv(path)
#Code starts here
bank=pd.DataFrame(bank_data)
categorical_var=bank.select_dtypes(include='object')
print(categorical_var.shape)
numerical_var=bank.select_dtypes(include='number')
print(numerical_var.shape)
bank.drop('Loan_ID',inplace=True,axis=1)
banks=pd.DataFrame(bank)
print(banks.shape)
null_val=banks.isnull().sum()
print(null_val)
bank_mode=banks.mode(axis=0,numeric_only=False)
print(bank_mode)
banks.fillna(bank_mode.iloc[0],inplace=True)
print(banks.isnull().sum().values.sum())
avg_loan_amount=pd.pivot_table(banks,index=['Gender','Married','Self_Employed'],values='LoanAmount',aggfunc=np.mean)
print(avg_loan_amount)
se=banks['Self_Employed']=='Yes'
ls=banks['Loan_Status']=='Y'
loan_approved_se=len(banks[(se) & (ls)])
print(loan_approved_se)
nse=banks['Self_Employed']=='No'
loan_approved_nse=len(banks[(nse) & (ls)])
print(loan_approved_nse)
ls_len=len(banks['Loan_Status'])
print(ls_len)
percentage_se=(loan_approved_se/ls_len)*100
print(percentage_se)
percentage_nse=(loan_approved_nse/ls_len)*100
print(percentage_nse)
def mon_to_year(num):
return num/12
loan_term=banks['Loan_Amount_Term'].apply(lambda x:mon_to_year(x))
print(loan_term)
big_loan_term=0
for i in range(len(loan_term)):
if(loan_term[i]>=25):
big_loan_term+=1
print(big_loan_term)
mean_values=banks.groupby('Loan_Status')[['ApplicantIncome','Credit_History']].agg(np.mean)
print(mean_values.iloc[1])
|
class Binary:
@classmethod
def decToBin(cls, dec):
if(dec == -128):
return '10000000'
b = ''
sign = '0'
if(dec < 0):
sign = '1'
dec = abs(dec)
for i in range(7):
b = str(dec % 2) + b
dec = dec // 2
b = sign + b
return b
@classmethod
def binToDec(cls, bin):
d = 0
for i in range(1,8):
d += int(bin[i]) * (2**(7-i))
if(bin[0]=='1'):
d = -d
return d
def __init__(self, dec):
self.dec = dec
def __str__(self):
return self.bin
@property
def dec(self):
return self.__dec
@property
def bin(self):
return self.__bin
@dec.setter
def dec(self, dec):
if(dec < -128):
dec = -128
elif(dec > 127):
dec = 127
self.__dec = dec
self.__bin = Binary.decToBin(dec)
@bin.setter
def bin(self, bin):
if(len(bin)==8):
self.__bin = bin
self.__dec = Binary.binToDec(bin)
def __add__(self, other):
return Binary(self.dec + other.dec)
def __sub__(self, other):
return Binary(self.dec - other.dec)
def __and__(self, other):
r = ''
for i in range(8):
if(self.bin[i]=='1' and other.bin[i]=='1'):
r += '1'
else:
r += '0'
b = Binary(0)
b.bin = r
return b
def __or__(self, other):
r = ''
for i in range(8):
if(self.bin[i]=='1' or other.bin[i]=='1'):
r += '1'
else:
r += '0'
b = Binary(0)
b.bin = r
return b
class BinMat:
def __init__(self, mat):
self.__mat = mat
def __str__(self):
r = ''
for row in self.__mat:
for element in row:
r += str(element) + ' '
r += '\n'
return r
def __repr__(self):
return f'Binary Matrix({self.__mat})'
def __add__(self, other):
r = []
for i in range(len(self.__mat)):
rr = []
for j in range(len(self.__mat[i])):
rr.append(self.__mat[i][j] + other.__mat[i][j])
r.append(rr)
return BinMat(r)
def __sub__(self, other):
r = []
for i in range(len(self.__mat)):
rr = []
for j in range(len(self.__mat[i])):
rr.append(self.__mat[i][j] - other.__mat[i][j])
r.append(rr)
return BinMat(r)
def __and__(self, other):
r = []
for i in range(len(self.__mat)):
rr = []
for j in range(len(self.__mat[i])):
rr.append(self.__mat[i][j] & other.__mat[i][j])
r.append(rr)
return BinMat(r)
def __or__(self, other):
r = []
for i in range(len(self.__mat)):
rr = []
for j in range(len(self.__mat[i])):
rr.append(self.__mat[i][j] | other.__mat[i][j])
r.append(rr)
return BinMat(r)
def main():
print('Testing Binary')
print('Basics')
for i in range(-129, 138, 10):
print(f'{i}: {Binary(i)}')
print('\nSetters & Getters')
b1 = Binary(10)
print(f'dec: {b1.dec}, bin: {b1.bin}')
b1.dec = 100
print(f'dec: {b1.dec}, bin: {b1.bin}')
b1.bin = '11100100'
print(f'dec: {b1.dec}, bin: {b1.bin}')
print('\nOperators')
b2 = Binary(20)
print(f'b1 + b2 = {b1 + b2}')
print(f'b1 - b2 = {b1 - b2}')
print(f'b1 & b2 = {b1 & b2}')
print(f'b1 | b2 = {b1 | b2}')
print('\nTesting BinMat')
m1 = []
m2 = []
for i in range(1,4):
mm1 = []
mm2 = []
for j in range(1,4):
mm1.append(Binary(i*j))
mm2.append(Binary(i+j*2))
m1.append(mm1)
m2.append(mm2)
mb1 = BinMat(m1)
mb2 = BinMat(m2)
print('MB1:')
print(mb1)
print('MB2:')
print(mb2)
print(f'MB1 + MB2 = \n{mb1 + mb2}')
print(f'MB1 - MB2 = \n{mb1 - mb2}')
print(f'MB1 & MB2 = \n{mb1 & mb2}')
print(f'MB1 | MB2 = \n{mb1 | mb2}')
if __name__ == '__main__':
    main()
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
from arch.api.utils import file_utils
from fate_flow.settings import CHECK_NODES_IDENTITY, FATE_MANAGER_NODE_CHECK_ENDPOINT, \
SERVER_CONF_PATH, SERVERS
from fate_flow.utils.service_utils import ServiceUtils
def nodes_check(src_party_id, src_role, appKey, appSecret, dst_party_id):
if CHECK_NODES_IDENTITY:
body = {
'srcPartyId': int(src_party_id),
'role': src_role,
'appKey': appKey,
'appSecret': appSecret,
'dstPartyId': int(dst_party_id),
'federatedId': file_utils.load_json_conf_real_time(SERVER_CONF_PATH).get(SERVERS).get('fatemanager', {}).get('federatedId')
}
try:
response = requests.post(url="http://{}:{}{}".format(
ServiceUtils.get_item("fatemanager", "host"),
ServiceUtils.get_item("fatemanager", "port"),
FATE_MANAGER_NODE_CHECK_ENDPOINT), json=body).json()
if response['code'] != 0:
raise Exception(str(response['msg']))
except Exception as e:
raise Exception('role {} party id {} authentication failed: {}'.format(src_role, src_party_id, str(e)))
|
# checking in dict
print("This script will check if monument is present in dictionary")
print("It will add if not present and exit if monument is found.")
dictionary = {
"Taj": "Agra",
"Qutub-Minar": "Delhi",
"Sun-Temple": "Puri"
}
print(f"This is the dictionary: {dictionary}")
name = input("Enter the monument: ")
if name in dictionary:
    print(dictionary)
    exit()
dictionary[name] = ""
print(dictionary)
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from tkdet.layers import Conv2d
from tkdet.layers import SEModule
from tkdet.layers import make_divisible
from .base import Backbone
from .build import BACKBONE_REGISTRY
__all__ = ["GhostNet", "ghostnet_1_0"]
class GhostModule(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
ratio=2,
dw_size=3,
stride=1,
norm="BN",
relu=True
):
super().__init__()
self.out_channels = out_channels
init_channels = math.ceil(out_channels / ratio)
new_channels = init_channels * (ratio - 1)
self.primary_conv = Conv2d(
in_channels,
init_channels,
kernel_size,
stride,
(kernel_size - 1) // 2,
bias=False,
norm=norm,
activation="ReLU" if relu else ""
)
self.cheap_operation = Conv2d(
init_channels,
new_channels,
dw_size,
1,
(dw_size - 1) // 2,
groups=init_channels,
bias=False,
norm=norm,
activation="ReLU" if relu else ""
)
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
        return out[:, :self.out_channels, :, :]
class GhostBottleneck(nn.Module):
def __init__(
self,
in_channels,
mid_channels,
out_channels,
dw_kernel_size=3,
stride=1,
se_ratio=0,
norm="BN"
):
super().__init__()
self.use_se = se_ratio is not None and se_ratio > 0
self.stride = stride
self.ghost1 = GhostModule(in_channels, mid_channels, relu=True)
if stride > 1:
self.dw_conv = Conv2d(
mid_channels,
mid_channels,
dw_kernel_size,
stride,
(dw_kernel_size - 1) // 2,
groups=mid_channels,
bias=False,
norm=norm
)
if self.use_se:
self.se = SEModule(
mid_channels,
se_ratio,
activation=("ReLU", "HardSigmoid"),
divisor=4
)
self.ghost2 = GhostModule(mid_channels, out_channels, relu=False)
if (in_channels == out_channels and stride == 1):
self.shortcut = nn.Sequential()
else:
self.shortcut = nn.Sequential(
Conv2d(
in_channels,
in_channels,
dw_kernel_size,
stride,
(dw_kernel_size - 1) // 2,
groups=in_channels,
bias=False,
norm=norm
),
Conv2d(in_channels, out_channels, 1, bias=False, norm=norm)
)
def forward(self, x):
residual = x
x = self.ghost1(x)
if self.stride > 1:
x = self.dw_conv(x)
if self.use_se:
x = self.se(x)
x = self.ghost2(x)
x += self.shortcut(residual)
return x
class GhostNet(Backbone):
def __init__(
self,
ghostnet_cfg=None,
multiplier=1.0,
dropout=0.2,
norm="BN",
num_classes=1000,
out_features=None
):
super().__init__()
if ghostnet_cfg is None:
ghostnet_cfg = [
[3, 16, 16, 0, 1],
[3, 48, 24, 0, 2],
[3, 72, 24, 0, 1],
[5, 72, 40, 0.25, 2],
[5, 120, 40, 0.25, 1],
[3, 240, 80, 0, 2],
[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 0.25, 1],
[3, 672, 112, 0.25, 1],
[5, 672, 160, 0.25, 2],
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 0.25, 1]
]
output_channel = make_divisible(16 * multiplier, 4)
layers = []
layers.append(Conv2d(3, output_channel, 3, 2, 1, bias=False, norm=norm, activation="ReLU"))
self._out_feature_channels = {"0": output_channel}
stride = 2
self._out_feature_strides = {"0": stride}
input_channel = output_channel
block = GhostBottleneck
index = 1
for k, exp_size, c, se_ratio, s in ghostnet_cfg:
output_channel = make_divisible(c * multiplier, 4)
hidden_channel = make_divisible(exp_size * multiplier, 4)
layers.append(
block(input_channel, hidden_channel, output_channel, k, s, se_ratio)
)
input_channel = output_channel
stride *= s
self._out_feature_channels[str(index)] = output_channel
self._out_feature_strides[str(index)] = stride
index += 1
output_channel = make_divisible(exp_size * multiplier, 4)
layers.append(Conv2d(input_channel, output_channel, 1, norm=norm, activation="ReLU"))
self._out_feature_channels[str(index)] = output_channel
self._out_feature_strides[str(index)] = stride
self.features = nn.Sequential(*layers)
if not out_features:
out_features = ["linear"]
if "linear" in out_features and num_classes is not None:
            self.conv_head = Conv2d(output_channel, 1280, 1, activation="ReLU")
            self.classifier = nn.Sequential(
                nn.Dropout(dropout),
                nn.Linear(1280, num_classes)
            )
self._out_features = out_features
def forward(self, x):
outputs = {}
for idx, layer in enumerate(self.features):
x = layer(x)
if str(idx) in self._out_features:
outputs[str(idx)] = x
if "linear" in self._out_features:
            x = F.adaptive_avg_pool2d(x, 1)
            x = self.conv_head(x)
            x = x.flatten(1)
            x = self.classifier(x)
outputs["linear"] = x
return outputs
def _ghostnet(multiplier, cfg):
out_features = cfg.MODEL.BACKBONE.OUT_FEATURES
norm = cfg.GHOSTNET.NORM
return GhostNet(multiplier=multiplier, norm=norm, out_features=out_features)
@BACKBONE_REGISTRY.register("GhostNet-1.0")
def ghostnet_1_0(cfg):
return _ghostnet(1.0, cfg)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add status attributes
Revision ID: 12c1bc8d7026
Revises: 31b399f08b1c
Create Date: 2016-03-08 15:28:57.170563
"""
# revision identifiers, used by Alembic.
revision = '12c1bc8d7026'
down_revision = '31b399f08b1c'
from alembic import op
import sqlalchemy as sa
def upgrade():
table_names = ['gp_policy_targets', 'gp_policy_target_groups',
'gp_l2_policies', 'gp_l3_policies', 'gp_policy_rules',
'gp_policy_classifiers', 'gp_policy_actions',
'gp_policy_rule_sets', 'gp_nat_pools',
'gp_network_service_policies',
'gp_external_segments', 'gp_external_policies', 'sc_nodes',
'sc_instances', 'sc_specs', 'service_profiles']
for tname in table_names:
op.add_column(tname, sa.Column('status', sa.String(length=16),
nullable=True))
op.add_column(tname, sa.Column('status_details',
sa.String(length=4096), nullable=True))
def downgrade():
pass
|
#import io
#import re
from __future__ import print_function
from setuptools import setup
import os, sys
import shutil
NAME = "odoo_downloader"
def get_version():
"""Get version and version_info without importing the entire module."""
print("NAME:", NAME)
path = os.path.join(os.path.dirname(__file__), NAME, '__meta__.py')
if sys.version_info.major == 3:
import importlib.util
spec = importlib.util.spec_from_file_location("__meta__", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
vi = module.__version_info__
return vi._get_canonical(), vi._get_dev_status()
else:
import imp
        vi = imp.load_source("__meta__", path)
return vi.__version__, vi.__status__
def get_requirements(req):
"""Load list of dependencies."""
install_requires = []
with open(req) as f:
for line in f:
if not line.startswith("#"):
install_requires.append(line.strip())
return install_requires
def get_description():
"""Get long description."""
desc = ''
if os.path.isfile('README.md'):
with open("README.md", 'r') as f:
desc = f.read()
return desc
VER, DEVSTATUS = get_version()
try:
os.makedirs(os.path.join(os.path.dirname(__file__), NAME))
except OSError:
pass
try:
os.remove(os.path.join(NAME, '__version__.py'))
except OSError:
pass
shutil.copy2('__version__.py', NAME)
shutil.copy2('downloader.py', NAME)
shutil.copy2('odoo_downloader.ini', NAME)
shutil.copy2('odoo_downloader.py', NAME)
shutil.copy2('README.md', NAME)
shutil.copy2('__init__.py', NAME)
shutil.copy2('__meta__.py', NAME)
# with io.open("README.rst", "rt", encoding="utf8") as f:
# readme = f.read()
# with io.open("__version__.py", "rt", encoding="utf8") as f:
# version = re.search(r"version = \'(.*?)\'", f.read()).group(1)
import __version__
version = __version__.version
requirements = [
'configset',
'requests',
'make_colors',
'pydebugger',
'pheader',
'bs4',
'bitmath',
'clipboard',
'unidecode',
'lxml',
'pywget',
]
entry_points = {
"console_scripts": [
"odoo_downloader = odoo_downloader.odoo_downloader:usage",
]
}
if sys.version_info.major == 3:
entry_points = {
"console_scripts": [
"odoo_downloader3 = odoo_downloader.odoo_downloader:usage",
]
}
setup(
name=NAME,
version=VER or version,
url="https://github.com/cumulus13/{}".format(NAME),
keywords='odoo downloader',
project_urls={
"Documentation": "https://github.com/cumulus13/{}".format(NAME),
"Code": "https://github.com/cumulus13/{}".format(NAME),
},
license='MIT License',
author="Hadi Cahyadi LD",
author_email="[email protected]",
maintainer="cumulus13 Team",
maintainer_email="[email protected]",
description="Command line for download odoo installer or source",
# long_description=readme,
# long_description_content_type="text/markdown",
packages=[NAME],
install_requires=requirements,
entry_points = entry_points,
# data_files=['__version__.py'],
include_package_data=True,
python_requires=">=2.7",
classifiers=[
'Development Status :: %s' % DEVSTATUS,
'Environment :: Console',
"Intended Audience :: Developers",
'License :: OSI Approved :: MIT License',
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
import csv
# Simply write a list of rows
with open('outFile.csv', mode='w', encoding='utf-8', newline='') as wf:
outdata = [
['Id', 'Name', 'Cost'],
['01', '白き鋼鉄のX', '3818'],
['02', 'Ori and the Blind Forest Definitive Edition', '1900']
]
writer = csv.writer(wf)
writer.writerows(outdata)
# Read it back
with open('outFile.csv', mode='r', encoding='utf-8') as rf:
readlines = csv.reader(rf)
    # The reader is an iterator, so convert it to a list to print it
print(list(readlines))
# Write rows as dictionaries
with open('outFileDict.csv', mode='w', encoding='utf-8', newline='') as wf:
varline = {'Id': '01', 'Name': '白き鋼鉄のX', 'Cost':3818 }
writer = csv.DictWriter(wf, ['Id', 'Name', 'Cost'])
writer.writeheader()
writer.writerow(varline)
# How to read it back
with open('outFileDict.csv', mode='r', encoding='utf-8') as rf:
readlines = csv.DictReader(rf)
print(list(readlines))
|
# Copyright © 2021, Oracle and/or its affiliates. All rights reserved.
from oci.core.models import DrgRouteDistributionStatement, DrgRouteRule, VirtualCircuitBandwidthShape
# extend VirtualCircuitBandwidthShape to add provider id
class ExtendedVirtualCircuitBandwidthShape(VirtualCircuitBandwidthShape):
_fastconnect_provider_id = None
@property
def fastconnect_provider_id(self):
return self._fastconnect_provider_id
def __init__(self, fastconnect_provider_id, export: VirtualCircuitBandwidthShape):
attrs = [item for item in dir(VirtualCircuitBandwidthShape) if not item.startswith('_') and item[0].islower()]
init_args = {}
for attr in attrs:
init_args[attr] = getattr(export, attr)
super().__init__(**init_args)
self._fastconnect_provider_id = fastconnect_provider_id
self.swagger_types.update({"fastconnect_provider_id": "str"})
# extend DrgRouteDistributionStatement to add parent and compartment id
class ExtendedDrgRouteDistributionStatement(DrgRouteDistributionStatement):
_compartment_id = None
_drg_route_distribution_id = None
@property
def compartment_id(self):
return self._compartment_id
@property
def drg_route_distribution_id(self):
return self._drg_route_distribution_id
def __init__(self, compartment_id, drg_route_distribution_id, export: DrgRouteDistributionStatement):
attrs = [item for item in dir(DrgRouteDistributionStatement) if not item.startswith('_') and item[0].islower()]
init_args = {}
for attr in attrs:
init_args[attr] = getattr(export, attr)
super().__init__(**init_args)
self._compartment_id = compartment_id
self._drg_route_distribution_id = drg_route_distribution_id
self.swagger_types.update({"compartment_id": "str"})
self.swagger_types.update({"drg_route_distribution_id": "str"})
# extend DrgRouteRule to add parent and compartment id
class ExtendedDrgRouteRule(DrgRouteRule):
_compartment_id = None
_drg_route_table_id = None
@property
def compartment_id(self):
return self._compartment_id
@property
def drg_route_table_id(self):
return self._drg_route_table_id
def __init__(self, compartment_id, drg_route_table_id, export: DrgRouteRule):
attrs = [item for item in dir(DrgRouteRule) if not item.startswith('_') and item[0].islower()]
init_args = {}
for attr in attrs:
init_args[attr] = getattr(export, attr)
super().__init__(**init_args)
self._compartment_id = compartment_id
self._drg_route_table_id = drg_route_table_id
self.swagger_types.update({"compartment_id": "str"})
self.swagger_types.update({"drg_route_table_id": "str"}) |
from sanic import Sanic
from wadsworth.applications.redirect import attach_redirect_app
from wadsworth.blueprints.info.view import bp as info_view
from wadsworth.blueprints.view import bp
def create_app():
app = Sanic("MainApp")
app.config.SERVER_NAME = "localhost:8443"
app.blueprint(bp)
app.blueprint(info_view)
attach_redirect_app(app)
return app
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from copy import deepcopy, copy
import cython
TEMPORAL_IDX: cython.int = 0
ENC_IDX: cython.int = 1
@cython.cclass
class SDR(object):
# declare these class instance attributes with cython
#
encoding = cython.declare(dict, visibility='public')
encoders = cython.declare(dict, visibility='public')
def __init__(self, sdr=None):
# a map of keys to bit encodings
#
self.encoding = {}
# the encoders used
#
self.encoders = {}
if sdr is not None:
self.copy(sdr)
def to_dict(self, decode: bool = True, max_bit_weight: float = 1.0):
dict_sdr: dict = {}
sdr_key: tuple
if decode:
dict_sdr['encoding'] = self.decode(max_bit_weight)
else:
dict_sdr['encoding'] = deepcopy(self.encoding)
dict_sdr['encoders'] = {sdr_key: self.encoders[sdr_key].type if self.encoders[sdr_key] is not None else None
for sdr_key in self.encoders}
return dict_sdr
def copy(self, sdr):
self.encoding = deepcopy(sdr.encoding)
self.encoders = copy(sdr.encoders)
def copy_from(self, sdr, from_temporal_key: cython.int = 0, to_temporal_key: cython.int = 0):
sdr_key: tuple
for sdr_key in sdr.encoding:
if sdr_key[TEMPORAL_IDX] == from_temporal_key:
self.encoding[(to_temporal_key, sdr_key[ENC_IDX])] = sdr.encoding[sdr_key]
if sdr_key[ENC_IDX] not in self.encoders:
self.encoders[sdr_key[ENC_IDX]] = sdr.encoders[sdr_key[ENC_IDX]]
def add_encoding(self, enc_key: tuple, temporal_key: cython.int = 0, value=None, encoding: dict = None, encoder=None):
bit: cython.int
# the encoding key consists of a tuple of temporal key, encoding key
#
sdr_key: tuple = (temporal_key, enc_key)
if encoding is None:
self.encoding[sdr_key] = {bit: 1.0 for bit in encoder.encode(value)}
self.encoders[enc_key] = encoder
else:
self.encoding[sdr_key] = encoding
self.encoders[enc_key] = None
def decode(self, max_bit_weight: float = 1.0) -> dict:
decode_sdr: dict = {}
key: tuple
for key in self.encoding:
if self.encoders[key[ENC_IDX]] is not None:
decode_sdr[key] = self.encoders[key[ENC_IDX]].decode(self.encoding[key], max_bit_weight)
else:
# convert from frequency to probability
#
decode_sdr[key] = [(bit, self.encoding[key][bit] / max_bit_weight) for bit in self.encoding[key]]
decode_sdr[key].sort(key=lambda x: x[1], reverse=True)
return decode_sdr
def learn_delta(self, sdr, learn_temporal_keys: set = None, learn_enc_keys: set = None, learn_rate: float = 1.0, prune_threshold: float = 0.01):
temporal_keys: set
temporal_key: cython.int
bit: cython.int
sdr_key: tuple
keys_to_process = set(self.encoding.keys()) | set(sdr.encoding.keys())
if len(self.encoding) == 0:
learn_rate = 1.0
for sdr_key in keys_to_process:
if ((learn_temporal_keys is None or sdr_key[TEMPORAL_IDX] in learn_temporal_keys) and
(learn_enc_keys is None or sdr_key[ENC_IDX] in learn_enc_keys)):
# if sdr_key not in self then learn bits and encoder
#
if sdr_key not in self.encoding:
self.encoding[sdr_key] = {bit: sdr.encoding[sdr_key][bit] * learn_rate
for bit in sdr.encoding[sdr_key]
if sdr.encoding[sdr_key][bit] * learn_rate > prune_threshold}
self.encoders[sdr_key[ENC_IDX]] = sdr.encoders[sdr_key[ENC_IDX]]
# if sdr_key in both self and sdr then process each bit
#
elif sdr_key in self.encoding and sdr_key in sdr.encoding:
# will need to process all bits
#
bits_to_process = set(self.encoding[sdr_key].keys()) | set(sdr.encoding[sdr_key].keys())
for bit in bits_to_process:
                        # if we don't have the bit, learn it if it's above the prune threshold
#
if bit not in self.encoding[sdr_key]:
bit_value = sdr.encoding[sdr_key][bit] * learn_rate
if bit_value > prune_threshold:
self.encoding[sdr_key][bit] = bit_value
# if bit is in both then calculate the new bit value; assign it if above prune_threshold, else delete it
#
elif bit in self.encoding[sdr_key] and bit in sdr.encoding[sdr_key]:
bit_value = self.encoding[sdr_key][bit] + ((sdr.encoding[sdr_key][bit] - self.encoding[sdr_key][bit]) * learn_rate)
if bit_value > prune_threshold:
self.encoding[sdr_key][bit] = bit_value
else:
del self.encoding[sdr_key][bit]
# if bit is only in this sdr then decay the bit value, deleting it if not above prune_threshold
#
elif bit in self.encoding[sdr_key]:
bit_value = self.encoding[sdr_key][bit] - (self.encoding[sdr_key][bit] * learn_rate)
if bit_value > prune_threshold:
self.encoding[sdr_key][bit] = bit_value
else:
del self.encoding[sdr_key][bit]
# sdr_key only in self so decay bit values, deleting if below prune threshold
#
else:
bits_to_process = list(self.encoding[sdr_key].keys())
for bit in bits_to_process:
bit_value = self.encoding[sdr_key][bit] - (self.encoding[sdr_key][bit] * learn_rate)
if bit_value > prune_threshold:
self.encoding[sdr_key][bit] = bit_value
else:
del self.encoding[sdr_key][bit]
def learn_frequency(self, sdr, learn_temporal_keys: set = None, learn_enc_keys: set = None, min_frequency_prune: int = None):
temporal_keys: set
temporal_key: cython.int
bit: cython.int
sdr_key: tuple
for sdr_key in sdr.encoding.keys():
if ((learn_temporal_keys is None or sdr_key[TEMPORAL_IDX] in learn_temporal_keys) and
(learn_enc_keys is None or sdr_key[ENC_IDX] in learn_enc_keys)):
# if sdr_key not in self then learn bits and encoder
#
if sdr_key not in self.encoding:
self.encoding[sdr_key] = {bit: sdr.encoding[sdr_key][bit]
for bit in sdr.encoding[sdr_key]
if sdr.encoding[sdr_key][bit]}
self.encoders[sdr_key[ENC_IDX]] = sdr.encoders[sdr_key[ENC_IDX]]
# if sdr_key in both self and sdr then process each bit
#
else:
# will need to process all bits
#
for bit in sdr.encoding[sdr_key].keys():
# if we don't have this bit then learn its frequency, otherwise accumulate it
#
if bit not in self.encoding[sdr_key]:
self.encoding[sdr_key][bit] = sdr.encoding[sdr_key][bit]
else:
self.encoding[sdr_key][bit] += sdr.encoding[sdr_key][bit]
if min_frequency_prune is not None:
self.encoding = {sdr_key: {bit: self.encoding[sdr_key][bit]
for bit in self.encoding[sdr_key]
if self.encoding[sdr_key][bit] >= min_frequency_prune}
for sdr_key in self.encoding.keys()}
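# Minimal usage sketch (illustrative addition, not part of the original module):
# build two SDRs from raw bit encodings, learn the delta between them and decode
# the result. No encoder object is needed because raw encodings are passed in,
# which is one of the two paths add_encoding() supports.
if __name__ == '__main__':
    target = SDR()
    target.add_encoding(enc_key=('colour',), temporal_key=0, encoding={1: 1.0, 5: 1.0, 9: 1.0})

    observed = SDR()
    observed.add_encoding(enc_key=('colour',), temporal_key=0, encoding={1: 1.0, 7: 1.0})

    # move target half way towards observed, pruning negligible bit weights
    target.learn_delta(observed, learn_rate=0.5, prune_threshold=0.01)
    print(target.decode(max_bit_weight=1.0))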
|
# Generated by Django 3.1.3 on 2020-11-17 19:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('attendance', '0014_auto_20201117_1453'),
]
operations = [
migrations.AlterField(
model_name='case',
name='attorney',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
|
#!coding:utf8
#author:yqq
#date:2020/5/8 0008 15:15
#description: test cases
import json
import string
import time
import unittest
import requests
from ed25519 import SigningKey
from src.api.handlers.handler_base import sign_msg, verify_sig
class TestWalletAPI(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_generateaddress(self):
# url = 'http://127.0.0.1:59009/addaddresses'
address_count = 10
pro_id = 52
# for token_name in ['BTC', 'HTDF', 'ETH']:
for token_name in ['ETH']:
post_data = {
'address_count': address_count,
# 'account_index': account_index,
'pro_id': pro_id,
'token_name': token_name,
}
jdata = json.dumps(post_data, separators=(',', ':'), sort_keys=True) # sort keys alphabetically
# '1590040704197'
timestamp = str(int(time.time() * 1000))
# method = 'POST'
api_name = 'addaddresses'
param = '|'.join([timestamp, api_name, jdata])
# print(param)
msg = param.encode('utf8')
sign_key = '2ed28bf53120a4d07ce82e614be060a15322563bada59f16d2ac1f7c323acdb0'
# sk = SigningKey(sk_s=sign_key.encode('latin1'), prefix='', encoding='hex')
# sig = sk.sign(msg=msg, prefix='', encoding='base64')
sig = sign_msg(sign_key=sign_key, msg=msg)
sig = sig.decode('utf8')
print(f'sig:{sig}')
# if verify_sig(verify_key=ASCCI_VERIFY_KEY, sig=sig, msg=msg):
# print('verify ok')
# else:
# print('verify failed')
header = {
'ContentType': 'application/json',
'PG_API_KEY':'1f0e774c81e2b3545493a76873d019b42d2273a617535b3848dcca17e7334efe',
'PG_API_TIMESTAMP':timestamp,
'PG_API_SIGNATURE':sig
}
rsp = requests.post(url='http://192.168.10.174/api/wallet/addaddresses', json=post_data, headers=header)
# rsp = requests.post(url='http://192.168.10.155:59000/addaddresses', json=post_data, headers=header)
self.assertEqual(rsp.status_code, 200)
rsp_data = rsp.json()
print(rsp_data)
pass
def test_queryaddress(self):
pro_id = 1
url = 'http://127.0.0.1:59000/queryaddresses'
# url = 'http://192.168.10.174/api/wallet/queryaddresses'
for token_name in ['HTDF', 'ETH']:
post_data = {
'pro_id': pro_id,
'token_name': token_name,
'page_index':1,
'page_size':1,
'order_id':'202006081010508973077'
}
jdata = json.dumps(post_data, separators=(',', ':'), sort_keys=True) # sort keys alphabetically
timestamp = str(int(time.time() * 1000))
api_name = 'queryaddresses'
param = '|'.join([timestamp, api_name, jdata])
msg = param.encode('utf8')
sign_key = '3e4b948ae2a54554d13138d64c5fb6b9764489099803f2f7360306d8e9db98f8'
sig = sign_msg(sign_key=sign_key, msg=msg)
sig = sig.decode('utf8')
print(f'sig:{sig}')
header = {
'ContentType': 'application/json',
'PG_API_KEY': 'd5a08d84603f5714914bf39915d198b501e8f389e31fe12ec6f18d7b906f5c0c',
'PG_API_TIMESTAMP': timestamp,
'PG_API_SIGNATURE': sig
}
rsp = requests.post(url=url, json=post_data, headers=header)
self.assertEqual(rsp.status_code, 200)
rsp_data = rsp.json()
print(rsp_data)
break
pass
def test_queryaddaddressorder(self):
# pro_id = 1
# url = 'http://127.0.0.1:59009/queryaddaddressesorder'
url = 'http://192.168.10.174/api/wallet/queryaddaddressesorder'
if True:
post_data = {
'pro_id': 1,
'order_id':'202005281828093277212'
}
jdata = json.dumps(post_data, separators=(',', ':'), sort_keys=True) # sort keys alphabetically
timestamp = str(int(time.time() * 1000))
api_name = 'queryaddaddressesorder'
param = '|'.join([timestamp, api_name, jdata])
msg = param.encode('utf8')
sign_key = '3e4b948ae2a54554d13138d64c5fb6b9764489099803f2f7360306d8e9db98f8'
sig = sign_msg(sign_key=sign_key, msg=msg)
sig = sig.decode('utf8')
print(f'sig:{sig}')
header = {
'ContentType': 'application/json',
'PG_API_KEY': 'd5a08d84603f5714914bf39915d198b501e8f389e31fe12ec6f18d7b906f5c0c',
'PG_API_TIMESTAMP': timestamp,
'PG_API_SIGNATURE': sig
}
rsp = requests.post(url=url, json=post_data, headers=header)
self.assertEqual(rsp.status_code, 200)
rsp_data = rsp.json()
print(rsp_data)
def main():
suite = unittest.TestSuite()
suite.addTests([
TestWalletAPI('test_generateaddress'),
])
runner = unittest.TextTestRunner()
runner.run(test=suite)
pass
if __name__ == '__main__':
main() |
import pytest
import os
import itertools
from unittest.mock import patch, MagicMock, call
from test_utils.test_utils import AsyncContextManagerMock, coroutine_of
from boto3.dynamodb.conditions import Key
from decimal import Decimal
TEST_ENV = {
"REGION": "eu-west-wood",
"STAGE": "door",
"APP_NAME": "me-once",
"USE_XRAY": "0"
}
@pytest.fixture
async def scan_initiator():
with patch.dict(os.environ, TEST_ENV), \
patch("aioboto3.client") as boto_client, \
patch("aioboto3.resource") as boto_resource:
# ensure each client is a different mock
boto_client.side_effect = (MagicMock() for _ in itertools.count())
boto_resource.side_effect = (MagicMock() for _ in itertools.count())
from scan_initiator import scan_initiator
yield scan_initiator
scan_initiator.dynamo_resource.reset_mock()
scan_initiator.sqs_client.reset_mock()
await scan_initiator.clean_clients()
@patch.dict(os.environ, TEST_ENV)
def set_ssm_return_vals(ssm_client, period, buckets):
stage = os.environ["STAGE"]
app_name = os.environ["APP_NAME"]
ssm_prefix = f"/{app_name}/{stage}"
ssm_client.get_parameters.return_value = coroutine_of({
"Parameters": [
{"Name": f"{ssm_prefix}/scheduler/dynamodb/scans_planned/id", "Value": "MyTableId"},
{"Name": f"{ssm_prefix}/scheduler/dynamodb/scans_planned/plan_index", "Value": "MyIndexName"},
{"Name": f"{ssm_prefix}/scheduler/dynamodb/address_info/id", "Value": "MyIndexName"},
{"Name": f"{ssm_prefix}/scheduler/config/period", "Value": str(period)},
{"Name": f"{ssm_prefix}/scheduler/config/buckets", "Value": str(buckets)},
{"Name": f"{ssm_prefix}/scheduler/scan_delay_queue", "Value": "MyDelayQueue"}
]
})
def _mock_delete_responses(mock_plan_table, side_effects):
mock_batch_writer = AsyncContextManagerMock()
mock_plan_table.batch_writer.return_value = mock_batch_writer
mock_batch_writer.aenter.delete_item.side_effect = side_effects
return mock_batch_writer.aenter
@patch("time.time", return_value=1984)
@pytest.mark.unit
def test_paginates_scan_results(_, scan_initiator):
# ssm params don"t matter much in this test
set_ssm_return_vals(scan_initiator.ssm_client, 40, 10)
# access mock for dynamodb table
mock_info_table, mock_plan_table = _mock_resources(scan_initiator)
# return a single result but with a last evaluated key present; the second result won't have
# that key
mock_plan_table.scan.side_effect = iter([
coroutine_of({
"Items": [{
"Address": "123.456.123.456",
"DnsIngestTime": 12345,
"PlannedScanTime": 67890
}],
"LastEvaluatedKey": "SomeKey"
}),
coroutine_of({
"Items": [{
"Address": "456.345.123.123",
"DnsIngestTime": 123456,
"PlannedScanTime": 67890
}]
}),
])
mock_info_table.update_item.side_effect = iter([coroutine_of(None), coroutine_of(None)])
# pretend the sqs messages are all successfully dispatched
scan_initiator.sqs_client.send_message_batch.side_effect = [
coroutine_of(None),
coroutine_of(None)
]
# pretend the delete item calls are all successful too
writer = _mock_delete_responses(mock_plan_table, [coroutine_of(None), coroutine_of(None)])
# actually do the test
scan_initiator.initiate_scans({}, MagicMock())
# check the scan happens twice, searching for planned scans earlier than 1984 + 40/10 i.e. now + bucket_length
assert mock_plan_table.scan.call_args_list == [
call(
IndexName="MyIndexName",
FilterExpression=Key("PlannedScanTime").lte(Decimal(1988))
),
call(
IndexName="MyIndexName",
FilterExpression=Key("PlannedScanTime").lte(Decimal(1988)),
ExclusiveStartKey="SomeKey"
)
]
# Doesn"t batch across pages
assert scan_initiator.sqs_client.send_message_batch.call_count == 2
assert writer.delete_item.call_count == 2
def _mock_resources(scan_initiator):
mock_plan_table, mock_info_table = (MagicMock(), MagicMock())
scan_initiator.dynamo_resource.Table.side_effect = iter([mock_plan_table, mock_info_table])
scan_initiator.dynamo_resource.close.return_value = coroutine_of(None)
scan_initiator.ssm_client.close.return_value = coroutine_of(None)
scan_initiator.sqs_client.close.return_value = coroutine_of(None)
return mock_info_table, mock_plan_table
@patch("time.time", return_value=1984)
@pytest.mark.unit
def test_replace_punctuation_in_address_ids(_, scan_initiator):
# ssm params don"t matter much in this test
set_ssm_return_vals(scan_initiator.ssm_client, 100, 4)
# access mock for dynamodb table
mock_info_table, mock_plan_table = _mock_resources(scan_initiator)
# return one result with an IPv4 address and another with an IPv6 address
mock_plan_table.scan.side_effect = iter([
coroutine_of({
"Items": [
{
"Address": "123.456.123.456",
"DnsIngestTime": 12345,
"PlannedScanTime": 67890
},
{
"Address": "2001:0db8:85a3:0000:0000:8a2e:0370:7334",
"DnsIngestTime": 12345,
"PlannedScanTime": 67890
}
]
})
])
mock_info_table.update_item.side_effect = iter([coroutine_of(None), coroutine_of(None)])
# pretend the sqs and dynamo deletes are all ok
scan_initiator.sqs_client.send_message_batch.side_effect = [coroutine_of(None)]
_mock_delete_responses(mock_plan_table, [coroutine_of(None), coroutine_of(None)])
# actually do the test
scan_initiator.initiate_scans({}, MagicMock())
# check both addresses have : and . replaced with -
scan_initiator.sqs_client.send_message_batch.assert_called_once_with(
QueueUrl="MyDelayQueue",
Entries=[
{
"Id": "123-456-123-456",
"DelaySeconds": 67890-1984, # planned scan time minus now time
"MessageBody": "{\"AddressToScan\":\"123.456.123.456\"}"
},
{
"Id": "2001-0db8-85a3-0000-0000-8a2e-0370-7334",
"DelaySeconds": 67890-1984, # planned scan time minus now time
"MessageBody": "{\"AddressToScan\":\"2001:0db8:85a3:0000:0000:8a2e:0370:7334\"}"
}
]
)
@patch("time.time", return_value=1984)
@pytest.mark.unit
def test_batches_sqs_writes(_, scan_initiator):
# ssm params don"t matter much in this test
set_ssm_return_vals(scan_initiator.ssm_client, 100, 4)
# access mock for dynamodb table
mock_info_table, mock_plan_table = _mock_resources(scan_initiator)
# send 32 responses in a single scan result, will be batched into groups of 10 for
# sqs
mock_plan_table.scan.side_effect = iter([
coroutine_of({
"Items": [
{
"Address": f"123.456.123.{item_num}",
"DnsIngestTime": 12345,
"PlannedScanTime": 67890
}
for item_num in range(0, 32)
]
})
])
mock_info_table.update_item.side_effect = iter([coroutine_of(None) for _ in range(0, 32)])
# pretend the sqs sends and dynamo deletes are all ok; there are 4 calls to sqs
# and 32 individual deletes
scan_initiator.sqs_client.send_message_batch.side_effect = [
coroutine_of(None) for _ in range(0, 4)
]
writer = _mock_delete_responses(mock_plan_table, [
coroutine_of(None) for _ in range(0, 32)
])
# actually do the test
scan_initiator.initiate_scans({}, MagicMock())
# There will be 4 calls to sqs
assert scan_initiator.sqs_client.send_message_batch.call_count == 4
# The last batch will have 2 remaining items in it. N.B. a call object is a tuple of the
# positional args and then the kwargs
assert len(scan_initiator.sqs_client.send_message_batch.call_args_list[3][1]["Entries"]) == 2
# There will be individual deletes for each address i.e. 32 of them
assert writer.delete_item.call_count == 32
@patch("time.time", return_value=1984)
@pytest.mark.unit
def test_no_deletes_until_all_sqs_success(_, scan_initiator):
# ssm params don"t matter much in this test
set_ssm_return_vals(scan_initiator.ssm_client, 100, 4)
# access mock for dynamodb table
mock_info_table, mock_plan_table = _mock_resources(scan_initiator)
# send a single response
mock_plan_table.scan.side_effect = [
coroutine_of({
"Items": [
{
"Address": f"123.456.123.5",
"DnsIngestTime": 12345,
"PlannedScanTime": 67890
}
]
})
]
# pretend the sqs send fails, so no dynamo deletes should happen
scan_initiator.sqs_client.send_message_batch.side_effect = [
Exception("test error")
]
writer = _mock_delete_responses(mock_plan_table, [])
# actually do the test
with pytest.raises(Exception):
scan_initiator.initiate_scans({}, MagicMock())
# There will be 1 call to sqs
assert scan_initiator.sqs_client.send_message_batch.call_count == 1
# and none to dynamo
assert writer.delete_item.call_count == 0
|
from extra import strToBool
class Machine(object):
"""
Provides the implementation of a Machine in a Plant.
"""
def __init__(self, name, quantity = 1, canUnhook = False,
precedence = False, breaks = []):
"""
name is the unique Machine name.
precedence is whether the quantity should be dealt with as capacity or
as parallel different Machine instances.
canUnhook is whether a crane can leave an Order at this Machine or not.
quantity is the number of available machines of this type (name) in
the Plant.
"""
assert name != None
assert name != ""
assert breaks != None
assert quantity >= 1
self.quantity = quantity
self.canUnhook = canUnhook
self.precedence = precedence
self.name = name
self.breaks = breaks
def __repr__(self):
return str(self.name)
def setOfBreaks(self):
res = []
for b in self.breaks:
res.extend(range(b[0], b[0] + b[1]))
return res
@staticmethod
def fromXml(element):
"""
Creates a Machine instance from XML node tree element and returns it.
"""
breaks = []
for e in element.getElementsByTagName("break"):
breaks.append((int(e.getAttribute("start")),
int(e.getAttribute("duration"))))
return Machine(
name = element.getAttribute("name").lower(),
quantity = int(element.getAttribute("quantity")),
precedence = strToBool(element.getAttribute("precedence")),
canUnhook = strToBool(element.getAttribute("canUnhook")),
breaks = breaks
)
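# Illustrative sketch (not part of the original module): build a Machine from a
# small XML fragment via fromXml(). The attribute and element names mirror the
# ones fromXml() reads above; the behaviour of extra.strToBool is assumed.
if __name__ == "__main__":
    from xml.dom.minidom import parseString

    xml = ('<machine name="Lathe" quantity="2" precedence="false" canUnhook="true">'
           '<break start="10" duration="5"/></machine>')
    machine = Machine.fromXml(parseString(xml).documentElement)
    print(machine, machine.quantity, machine.setOfBreaks())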
|
from ode4jax._src.ODEBase.algorithms import (
Euler,
Midpoint,
Heun,
RK4,
RK12,
RK23,
RK4Fehlberg,
RK45,
)
|
get_all_user_in_pay = "SELECT * FROM tbl_user;"
get_all_user_in_dms = "SELECT * FROM student;"
find_user_by_uuid_in_pay = "SELECT * FROM tbl_user WHERE user_uuid = %s"
insert_user = "INSERT INTO tbl_user(user_number, coin, user_name, user_uuid) VALUE (%s, %s, %s, %s)"
insert_teacher = "INSERT INTO tbl_teacher(id, name, number, password) VALUE (%s, %s, %s, %s)"
insert_booth = "INSERT INTO tbl_booth(booth_id, booth_name, coin, password, total_coin) VALUE (%s, %s, %s, %s, %s)" |
#Standard library modules
import glob
import sys
import os
import logging
from fnmatch import fnmatch
import configparser
#Third Party imports
from music21 import *
import numpy as np
import torch
from sklearn import preprocessing
import pandas as pd
#Local Modules
from .midi_class_mapping import MidiClassMapping
from .midi_notes_mapping import MidiNotesMapping
class PreprocessingTrainingData():
"""
This class preprocesses the training data and returns the network input, output, and the min/max midi values required for training
"""
def __init__(self,sequence_length=50):
self.sequence_length=sequence_length
#Create a Logging File
logging.basicConfig(filename="test.log", level=logging.DEBUG)
"""
This function is to extract the notes from the midi file
Input Parameters: Absolute File path of the midi file
Output Parameters: List of notes
"""
def extract_notes(self,file_path):
#Initialize the notes container
notes = {}
#Read the notes from the midi file
notes = self.get_notes(file_path)
#Return the list of notes
return notes
"""
This function is to read midi file and get notes
Input Parameters:Absolute File path of the midi file
Output Parameters:List of notes
"""
def get_notes(self,filename):
#Read the midi file
print("Entered here")
midi = converter.parse(filename)
notes_i = []
notes_to_parse = None
#Logging file
logging.debug("File that is being parsed currently is {}".format(filename))
try:
# Extracting the instrument parts
parts = midi.getElementsByClass(stream.Part)
notes_to_parse= midi.getElementsByClass(stream.Part)
print(parts[1])
# print(midi.recurse())
except Exception as e:
print(e)
# Extracting the notes in a flat structure
notes_to_parse = midi.flat.notes
#Iterate through each and every element in the notes
for element in notes_to_parse:
if isinstance(element, note.Note):
# Taking the note
notes_i.append(str(element.pitch))
elif isinstance(element, chord.Chord):
# Taking the note with the highest octave.
notes_i.append(str(element.pitches[-1]))
return notes_i
"""
This function calculates the number of unique notes
Input Parameters: List of all notes from file
Output Parameters: Number of unique notes
"""
def number_of_output_notes_generated(self,notes):
#Convert 2D list into 1D list
all_notes=[]
#Iterate through the 2D list
for item in notes:
all_notes.extend(item)
#Number of unique notes
number_of_output_notes=len(set(all_notes))
return number_of_output_notes
"""
This function is to normalize data
Input Parameters: List of input values
Output Parameters: List of normalized data
"""
def normalize_data(self,list_of_input_values,max_value):
#Normalize each value of the list
for i in range(len(list_of_input_values)):
list_of_input_values[i]=list_of_input_values[i]/max_value
return list_of_input_values
"""
This function is to generate training data i.e model input,output,max value,min value
Input Parameters: Set of input notes read from midi files
Output Parameters: Network Input,Network Output, Max midi Value,Min midi value
"""
def generate_training_data(self,notes):
#Generate a flat list of input notes
notes_from_training_data = []
for item in notes:
notes_from_training_data.extend(item)
# get all right hand note names
right_hand_notes = sorted(set(item for item in notes_from_training_data))
#Get note to midi number mapping
note_to_midi_number_mapping=MidiNotesMapping().get_midi_number_notes_mapping("preprocessing/A.txt")
#Get maximum and minimum midi number values
note_to_int,int_to_note,max_midi_value,min_midi_value=MidiClassMapping().midi_notes_to_class_mapping(right_hand_notes,note_to_midi_number_mapping)
network_input = []
network_output = []
for song in notes:
for i in range(0, len(song) - self.sequence_length, 1):
sequence_in = song[i:i + self.sequence_length]
sequence_out = song[i + self.sequence_length]
for notes in range(len(sequence_in)):
for key,value in note_to_midi_number_mapping.items():
if str(sequence_in[notes]) in value:
sequence_in[notes]=key
for key,value in note_to_midi_number_mapping.items():
if str(sequence_out) in value:
sequence_out=key
network_input.append(sequence_in)
network_output.append(int(sequence_out) )
#Check if length of input and output is same
assert len(network_input) == len(network_output), len(network_input)
#Number of input batches
n_patterns = len(network_input)
#Normalize the input data
for i in range(len(network_input)):
network_input[i]=self.normalize_data(network_input[i],max_midi_value)
#Converting the output data in range of 0-37
for i in range(len(network_output)):
network_output[i]=note_to_int[network_output[i]]
#Converting 2d list to 2d numpy array
network_input=np.array(network_input)
#Reshaping the 2d numpy array to 3d array
network_input = np.reshape(network_input, (n_patterns, self.sequence_length, 1))
return (network_input, network_output,max_midi_value,min_midi_value,int_to_note)
"""
This is the main wrapper function to call for preprocessing
Input Parameters: Path to a midi file or a directory containing midi files
Output Parameters: Network input, network output, max midi number, min midi number, int-to-note mapping
"""
def preprocess_notes(self,path):
pattern = "*.mid"
notes=[]
print(os.getcwd())
if not path.endswith(".mid"):
for path, subdirs, files in os.walk(path):
for name in files:
if fnmatch(name, pattern):
notes.append(self.extract_notes(os.path.join(path, name)))
else:
notes.append(self.extract_notes(path))
number_of_output_notes=self.number_of_output_notes_generated(notes)
network_input,network_output,max_midi_number,min_midi_number,int_to_note=self.generate_training_data(notes)
for i in range(len(network_input)):
for j in range(len(network_input[i])):
temp=[]
temp.append((network_input[i][j]))
network_input[i][j]=temp
network_input = np.asarray(network_input,dtype=np.float32)
network_input=torch.tensor(network_input)
network_output=torch.tensor(network_output)
return network_input,network_output,max_midi_number,min_midi_number,int_to_note
# if __name__=="__main__":
# network_input,network_output,max_midi_number,min_midi_number,int_to_note=PreprocessingTrainingData().preprocess_notes("D:\\Prem\\Sem1\\MM in AI\\Project\\Project\\Sonification-using-Deep-Learning\\Dataset\\Clementi dataset\\Clementi dataset\\clementi_opus36_1_1.mid")
# print(max_midi_number)
# print(min_midi_number)
# print(int_to_note)
# print(network_input)
# network_input=network_input.cpu().numpy().tolist()
# network_output=network_output.cpu().numpy().tolist()
# final_array=[]
# for i in range(len(network_input)):
# temp=[]
# for j in range(len(network_input[i])):
# temp.extend(network_input[i][j])
# final_array.append(temp)
# df=pd.DataFrame(final_array)
# df.to_csv('network_input_original.csv', index=False, header=False)
# df=pd.DataFrame(network_output)
# df.to_csv('network_output_original.csv', index=False, header=False)
# network_input=network_input.cpu().numpy().tolist()
# network_output=network_output.cpu().numpy().tolist()
# temp=[]
# for i in range(len(network_input)):
# for j in range(len(network_input[i])):
# temp.extend(network_input[i][j])
# temp=sorted(temp)
# network_output=sorted(network_output)
# print(Counter(temp))
# print(Counter(network_output))
# labels, values = zip(*Counter(network_output).items())
# indexes = np.arange(len(labels))
# width = 1
# plt.bar(indexes, values, width)
# plt.xticks(indexes + width * 0.5, labels)
# plt.show()
# labels, values = zip(*Counter(temp).items())
# indexes = np.arange(len(labels))
# width = 1
# plt.bar(indexes, values, width)
# plt.xticks(indexes + width * 0.5, labels)
# plt.show()
#print("Hello")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" typing.py
"""
import datetime
import json
import os
import random
CHAR_COUNT = 6
TRIAL_COUNT = 5
CHARSET = (
' !"#$%&'
+ "'"
+ "()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
)
HISTORY_FILE = os.path.dirname(os.path.abspath(__file__)) + "/" + "history.json"
def main():
now = datetime.datetime.now()
print(" hit enter to start")
input("> ")
result = []
for i in range(TRIAL_COUNT):
result.append(do_trial(i + 1))
show_result(result)
write_history(now.isoformat(), result)
def do_trial(index):
problem = "".join(random.choices(CHARSET, k=CHAR_COUNT))
print(" %s (%d)" % (problem, index))
stat = []
while True:
st = datetime.datetime.now()
s = input("> ")
et = datetime.datetime.now()
ok = s == problem
dt = (et - st).total_seconds()
stat.append([ok, dt])
if ok:
break
return [problem, stat]
def show_result(result):
print("---------------------------")
for ret in result:
print(" %s" % ret[0])
for data in ret[1]:
ok = "ng"
if data[0]:
ok = "ok"
print(" %s %s" % (ok, data[1]))
def write_history(name, result):
history = {}
if os.path.exists(HISTORY_FILE):
fp = open(HISTORY_FILE)
history = json.load(fp)
fp.close()
fp = open(HISTORY_FILE, "w")
history[name] = result
fp.write(json.dumps(history, indent=4))
fp.close()
if __name__ == "__main__":
main()
|
# coding=utf-8
# This file is part of Rubber and thus covered by the GPL
# (c) Emmanuel Beffara, 2002--2006
# Modified by Olivier Verdier <[email protected]>
# LICENSE
# Copyright © 2010–2013, Olivier Verdier
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# * Neither the name of "pydflatex" nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import string
import codecs
# The function `_' is defined here to prepare for internationalization.
def _(txt):
return txt
re_loghead = re.compile("This is [0-9a-zA-Z-]*(TeX|Omega)")
re_rerun = re.compile("LaTeX Warning:.*Rerun")
re_file = re.compile("(\\((?P<file>[^ \n\t(){}]*)|\\))")
re_badbox = re.compile(r"(Ov|Und)erfull \\[hv]box ")
re_line = re.compile(r"(l\.(?P<line>[0-9]+)( (?P<code>.*))?$|<\*>)")
re_cseq = re.compile(r".*(?P<seq>\\[^ ]*) ?$")
re_page = re.compile("\[(?P<num>[0-9]+)\]")
re_atline = re.compile(
"( detected| in paragraph)? at lines? (?P<line>[0-9]*)(--(?P<last>[0-9]*))?")
re_reference = re.compile("LaTeX Warning: Reference `(?P<ref>.*)' \
on page (?P<page>[0-9]*) undefined on input line (?P<line>[0-9]*)\\.$")
re_citation = re.compile(
"^.*Citation `(?P<cite>.*)' on page (?P<page>[0-9]*) undefined on input line (?P<line>[0-9]*)\\.$")
re_label = re.compile("LaTeX Warning: (?P<text>Label .*)$")
re_warning = re.compile(
"(LaTeX|Package)( (?P<pkg>.*))? Warning: (?P<text>.*)$")
re_online = re.compile("(; reported)? on input line (?P<line>[0-9]*)")
re_ignored = re.compile("; all text was ignored after line (?P<line>[0-9]*).$")
re_missing_character = re.compile('^Missing character: There is no (?P<missing>\S)', flags=re.UNICODE)
class LogCheck(object):
"""
This class performs all the extraction of information from the log file.
For efficiency, the instances contain the whole file as a list of strings
so that it can be read several times with no disk access.
"""
#-- Initialization {{{2
def __init__(self):
self.lines = None
def read(self, name):
"""
Read the specified log file, checking that it was produced by the
right compiler. Returns true if the log file is invalid or does not
exist.
"""
self.lines = None
with codecs.open(name, encoding='utf-8', errors='replace') as log_file:
self.lines = log_file.readlines()
if not self.lines:
raise ValueError("Empty file")
line = self.lines[0]
if not re_loghead.match(line):
raise ValueError("This doesn't seem to be a tex log file")
#-- Process information {{{2
def errors(self):
"""
Returns true if there was an error during the compilation.
"""
skipping = 0
for line in self.lines:
if line.strip() == "":
skipping = 0
continue
if skipping:
continue
m = re_badbox.match(line)
if m:
skipping = 1
continue
if line[0] == "!":
# We check for the substring "pdfTeX warning" because pdfTeX
# sometimes issues warnings (like undefined references) in the
# form of errors...
if line.find("pdfTeX warning") == -1:
return True
return False
def run_needed(self):
"""
Returns true if LaTeX indicated that another compilation is needed.
"""
for line in self.lines:
if re_rerun.match(line):
return True
return False
#-- Information extraction {{{2
def continued(self, line):
"""
Check if a line in the log is continued on the next line. This is
needed because TeX breaks messages at 79 characters per line. We make
this into a method because the test is slightly different in Metapost.
"""
return len(line) == 79 and line[-3:] != '...'
def parse(self, errors=False, boxes=False, refs=False, warnings=False):
"""
Parse the log file for relevant information. The named arguments are
booleans that indicate which information should be extracted:
- errors: all errors
- boxes: bad boxes
- refs: warnings about references
- warnings: all other warnings
The function returns a generator. Each generated item is a dictionary
that contains (some of) the following entries:
- kind: the kind of information ("error", "box", "ref", "warning")
- text: the text of the error or warning
- code: the piece of code that caused an error
- file, line, last, pkg: as used by Message.format_pos.
"""
if not self.lines:
return
last_file = None
pos = [last_file]
page = 1
parsing = False # True if we are parsing an error's text
skipping = False # True if we are skipping text until an empty line
something = False # True if some error was found
prefix = None # the prefix for warning messages from packages
accu = "" # accumulated text from the previous line
for line in self.lines:
line = line[:-1] # remove the line feed
# TeX breaks messages at 79 characters, just to make parsing
# trickier...
if self.continued(line):
accu += line
continue
line = accu + line
accu = ""
# Text that should be skipped (from bad box messages)
if prefix is None and line == "":
skipping = False
continue
if skipping:
continue
# Errors (including aborted compilation)
if parsing:
if error == "Undefined control sequence.":
# This is a special case in order to report which control
# sequence is undefined.
m = re_cseq.match(line)
if m:
error = "Undefined control sequence %s." % m.group("seq")
m = re_line.match(line)
if m:
parsing = False
skipping = True
pdfTeX = line.find("pdfTeX warning") != -1
if (pdfTeX and warnings) or (errors and not pdfTeX):
if pdfTeX:
d = {
"kind": "warning",
"pkg": "pdfTeX",
"text": error[error.find(":") + 2:]
}
else:
d = {
"kind": "error",
"text": error
}
d.update(m.groupdict())
m = re_ignored.search(error)
if m:
d["file"] = last_file
if "code" in d:
del d["code"]
d.update(m.groupdict())
elif pos[-1] is None:
d["file"] = last_file
else:
d["file"] = pos[-1]
yield d
elif line[0] == "!":
error = line[2:]
elif line[0:3] == "***":
parsing = False
skipping = True
if errors:
yield {
"kind": "abort",
"text": error,
"why": line[4:],
"file": last_file
}
elif line[0:15] == "Type X to quit ":
parsing = False
skipping = False
if errors:
yield {
"kind": "error",
"text": error,
"file": pos[-1]
}
continue
if len(line) > 0 and line[0] == "!":
error = line[2:]
parsing = True
continue
if line == "Runaway argument?":
error = line
parsing = True
continue
# Long warnings
if prefix is not None:
if line[:len(prefix)] == prefix:
text.append(line[len(prefix):].strip())
else:
text = " ".join(text)
m = re_online.search(text)
if m:
info["line"] = m.group("line")
text = text[:m.start()] + text[m.end():]
if warnings:
info["text"] = text
d = {"kind": "warning"}
d.update(info)
yield d
prefix = None
continue
# Undefined references
m = re_reference.match(line)
if m:
if refs:
d = {
"kind": "warning",
"text": _("Reference `%s' undefined.") % m.group("ref"),
"file": pos[-1]
}
d.update(m.groupdict())
yield d
continue
m = re_citation.match(line)
if m:
if refs:
d = {
"kind": "warning",
"text": _("Citation `%s' undefined.") % m.group("cite"),
"file": pos[-1]
}
d.update(m.groupdict())
yield d
continue
m = re_label.match(line)
if m:
if refs:
d = {
"kind": "warning",
"file": pos[-1]
}
d.update(m.groupdict())
yield d
continue
missing_char = re_missing_character.match(line)
if missing_char:
mpos = {"file": pos[-1], "page": page}
if warnings:
info = missing_char.groupdict()
missing_char = info['missing']
## raise Exception(info)
d = {'kind': 'warning', 'text': u'Missing character: "{}"'.format(missing_char)}
d.update(mpos)
yield d
continue
# Other warnings
if line.find("Warning") != -1:
m = re_warning.match(line)
if m:
info = m.groupdict()
info["file"] = pos[-1]
info["page"] = page
if info["pkg"] is None:
del info["pkg"]
prefix = ""
else:
prefix = ("(%s)" % info["pkg"])
prefix = prefix.ljust(m.start("text"))
text = [info["text"]]
continue
# Bad box messages
m = re_badbox.match(line)
if m:
if boxes:
mpos = {"file": pos[-1], "page": page}
m = re_atline.search(line)
if m:
md = m.groupdict()
for key in "line", "last":
if md[key]: mpos[key] = md[key]
line = line[:m.start()]
d = {
"kind": "warning",
"text": line
}
d.update(mpos)
yield d
skipping = True
continue
# If there is no message, track source names and page numbers.
last_file = self.update_file(line, pos, last_file)
page = self.update_page(line, page)
def get_errors(self):
return self.parse(errors=True)
def get_boxes(self):
return self.parse(boxes=True)
def get_references(self):
return self.parse(refs=True)
def get_warnings(self):
return self.parse(warnings=True)
def update_file(self, line, stack, last):
"""
Parse the given line of log file for file openings and closings and
update the list `stack'. Newly opened files are at the end, therefore
stack[1] is the main source while stack[-1] is the current one. The
first element, stack[0], contains the value None for errors that may
happen outside the source. Return the last file from which text was
read (the new stack top, or the one before the last closing
parenthesis).
"""
m = re_file.search(line)
while m:
if line[m.start()] == '(':
last = m.group("file")
stack.append(last)
else:
last = stack[-1]
del stack[-1]
line = line[m.end():]
m = re_file.search(line)
return last
def update_page(self, line, before):
"""
Parse the given line and return the number of the page that is being
built after that line, assuming the current page before the line was
`before'.
"""
ms = re_page.findall(line)
if ms == []:
return before
return int(ms[-1]) + 1
if __name__ == '__main__':
parser = LogCheck()
parser.read('short.log')
errs = list(parser.get_errors())
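    # Illustrative follow-up (left as a comment): parse() yields plain dicts as
    # documented above, so warnings and undefined references can be listed the
    # same way as the errors, e.g.
    #     for item in parser.parse(refs=True, warnings=True):
    #         print("%s: %s" % (item["kind"], item.get("text", "")))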
|
"""DNA, by Al Sweigart [email protected]
A simple animation of a DNA double-helix. Press Ctrl-C to stop.
Thanks to matoken for inspiration: https://asciinema.org/a/155441"""
__version__ = 1
import random, sys, time
# Setup the constants:
# These are the individual rows of the DNA animation:
ROWS = [
' ##', # Index 0 has no {}.
' #{}-{}#',
' #{}---{}#',
' #{}-----{}#',
' #{}------{}#',
' #{}------{}#',
' #{}-----{}#',
' #{}---{}#',
' #{}-{}#',
' ##', # Index 9 has no {}.
' #{}-{}#',
' #{}---{}#',
' #{}-----{}#',
' #{}------{}#',
' #{}------{}#',
' #{}-----{}#',
' #{}---{}#',
' #{}-{}#']
def main():
"""Run the DNA animation."""
print('Press Ctrl-C to quit...')
time.sleep(2)
rowIndex = 0
while True: # Main program loop.
# Increment rowIndex to draw next row:
rowIndex += 1
if rowIndex == len(ROWS):
rowIndex = 0
# Row indexes 0 and 9 don't have nucleotides:
if rowIndex == 0 or rowIndex == 9:
print(ROWS[rowIndex])
continue
# Select random nucleotide pairs:
randomSelection = random.randint(1, 4)
if randomSelection == 1:
leftNucleotide, rightNucleotide = 'A', 'T'
elif randomSelection == 2:
leftNucleotide, rightNucleotide = 'T', 'A'
elif randomSelection == 3:
leftNucleotide, rightNucleotide = 'C', 'G'
elif randomSelection == 4:
leftNucleotide, rightNucleotide = 'G', 'C'
# Print the row.
print(ROWS[rowIndex].format(leftNucleotide, rightNucleotide))
time.sleep(0.15) # Add a slight pause.
# At this point, go back to the start of the main program loop.
# If this program was run (instead of imported), run the game:
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit() # When Ctrl-C is pressed, end the program.
|
# -*- coding: utf-8 -*-
'''
Plot Information Data
=====================
'''
from __future__ import annotations
__all__ = ('PlotInfo',)
from builder.utils import assertion
class PlotInfo(object):
''' Plot Info Data class.
'''
def __init__(self, title: str, *args):
self._title = assertion.is_str(title)
self._data = assertion.is_tuple(args)
#
# property
#
@property
def title(self) -> str:
return self._title
@property
def data(self) -> tuple:
return self._data
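# Illustrative sketch (not part of the original module): PlotInfo validates and
# stores a title plus an arbitrary tuple of plot data, exposed via the
# properties above.
if __name__ == '__main__':
    info = PlotInfo('opening', 'scene A', 'scene B')
    print(info.title, info.data)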
|
from .stream import DerivedStream, DATAFLAG_TRAIN  # assumption: DATAFLAG_TRAIN is defined in the same module as DerivedStream
class Augmentstream(DerivedStream):
def __init__(self, aug_dimensions, field, affect_flags=(DATAFLAG_TRAIN,)):
DerivedStream.__init__(self, [field])
self.field = self.inputs[0]
self.affect_flags = affect_flags
self.aug_dimensions = range(aug_dimensions) if isinstance(aug_dimensions, int) else aug_dimensions
self.augmentation_factor = 2 ** len(self.aug_dimensions)
def affects(self, datasource):
if not self.affect_flags:
return True
for flag in self.affect_flags:
if flag in datasource.flags:
return True
return False
def size(self, datasource, lookup=False):
return self.field.size(datasource, lookup=lookup) * (self.augmentation_factor if self.affects(datasource) else 1)
def shape(self, datasource):
return self.field.shape(datasource)
def get(self, datasource, indices):
if not self.affects(datasource):
return self.field.get(datasource, indices)
else:
return self._augment_interleave(datasource, indices)
def _augment_interleave(self, datasource, indices):
arrays = self.field.get(datasource, [i // self.augmentation_factor for i in indices])
for array, index in zip(arrays, indices):
perm = index % self.augmentation_factor
if perm != 0:
array = self.augment_single(array, perm)
yield array
def augment_single(self, array, perm):
raise NotImplementedError()
class AxisFlip(Augmentstream):
def __init__(self, flip_dimensions, field, flip_vectors=True, affect_flags=(DATAFLAG_TRAIN,)):
Augmentstream.__init__(self, flip_dimensions, field, affect_flags)
self.flip_vectors = flip_vectors
def augment_single(self, array, perm):
slices = [slice(None, None, -1) if d >= 1 and perm & 2 ** (d - 1) != 0 else slice(None) for d in range(len(array.shape))]
array = array[tuple(slices)]  # numpy requires a tuple (not a list) of slices here
if self.flip_vectors and array.shape[-1] == len(array.shape) - 2:
flipped_components = [len(array.shape) - d - 3 for d in self.aug_dimensions if perm & 2 ** (d) != 0]
array[..., flipped_components] *= -1
return array
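# Standalone illustration (does not use the classes above, which need a data
# source): the same permutation-bit trick AxisFlip.augment_single applies. Each
# bit of `perm` decides whether one spatial axis of a (batch, y, x, components)
# array is reversed; perm == 0 leaves the array untouched.
if __name__ == "__main__":
    import numpy as np

    array = np.arange(2 * 3 * 4 * 2, dtype=float).reshape(2, 3, 4, 2)
    for perm in range(4):  # 2 spatial dimensions -> 2 ** 2 flip variants
        slices = [slice(None, None, -1) if d >= 1 and perm & 2 ** (d - 1) != 0 else slice(None)
                  for d in range(array.ndim)]
        print(perm, array[tuple(slices)][0, 0, 0])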
|
#
# Copyright (c) 2019 by Delphix. All rights reserved.
#
# -*- coding: utf-8 -*-
"""UpgradeOperations for the Virtualization Platform
There are 5 different objects that we can upgrade. All migration ids must be
unique. To upgrade a specific schema, the plugin author would use that specific
decorator specifying the migration id. We save the implementations of each of
the upgrade functions in a dict for the specific schema. For each new upgrade
operation of the same schema, the key will be the migration id, and the value
will be the function that was implemented.
"""
import json
import logging
from dlpx.virtualization.api import platform_pb2
from dlpx.virtualization.platform import (LuaUpgradeMigrations, MigrationType,
PlatformUpgradeMigrations)
from dlpx.virtualization.platform.exceptions import (
IncorrectUpgradeObjectTypeError, UnknownMigrationTypeError)
logger = logging.getLogger(__name__)
__all__ = ['UpgradeOperations']
class UpgradeOperations(object):
def __init__(self):
self.platform_migrations = PlatformUpgradeMigrations()
self.lua_migrations = LuaUpgradeMigrations()
def repository(self, migration_id, migration_type=MigrationType.PLATFORM):
def repository_decorator(repository_impl):
if migration_type == MigrationType.PLATFORM:
self.platform_migrations.add_repository(
migration_id, repository_impl)
elif migration_type == MigrationType.LUA:
self.lua_migrations.add_repository(migration_id,
repository_impl)
else:
raise UnknownMigrationTypeError(migration_type)
return repository_impl
return repository_decorator
def source_config(self,
migration_id,
migration_type=MigrationType.PLATFORM):
def source_config_decorator(source_config_impl):
if migration_type == MigrationType.PLATFORM:
self.platform_migrations.add_source_config(
migration_id, source_config_impl)
elif migration_type == MigrationType.LUA:
self.lua_migrations.add_source_config(migration_id,
source_config_impl)
else:
raise UnknownMigrationTypeError(migration_type)
return source_config_impl
return source_config_decorator
def linked_source(self,
migration_id,
migration_type=MigrationType.PLATFORM):
def linked_source_decorator(linked_source_impl):
if migration_type == MigrationType.PLATFORM:
self.platform_migrations.add_linked_source(
migration_id, linked_source_impl)
elif migration_type == MigrationType.LUA:
self.lua_migrations.add_linked_source(migration_id,
linked_source_impl)
else:
raise UnknownMigrationTypeError(migration_type)
return linked_source_impl
return linked_source_decorator
def virtual_source(self,
migration_id,
migration_type=MigrationType.PLATFORM):
def virtual_source_decorator(virtual_source_impl):
if migration_type == MigrationType.PLATFORM:
self.platform_migrations.add_virtual_source(
migration_id, virtual_source_impl)
elif migration_type == MigrationType.LUA:
self.lua_migrations.add_virtual_source(migration_id,
virtual_source_impl)
else:
raise UnknownMigrationTypeError(migration_type)
return virtual_source_impl
return virtual_source_decorator
def snapshot(self, migration_id, migration_type=MigrationType.PLATFORM):
def snapshot_decorator(snapshot_impl):
if migration_type == MigrationType.PLATFORM:
self.platform_migrations.add_snapshot(migration_id,
snapshot_impl)
elif migration_type == MigrationType.LUA:
self.lua_migrations.add_snapshot(migration_id, snapshot_impl)
else:
raise UnknownMigrationTypeError(migration_type)
return snapshot_impl
return snapshot_decorator
@property
def migration_id_list(self):
return self.platform_migrations.get_sorted_ids()
@staticmethod
def _success_upgrade_response(upgraded_dict):
upgrade_result = platform_pb2.UpgradeResult(
post_upgrade_parameters=upgraded_dict)
upgrade_response = platform_pb2.UpgradeResponse(
return_value=upgrade_result)
return upgrade_response
@staticmethod
def _run_migration_upgrades(request, lua_impls_getter,
platform_impls_getter):
"""
Given the list of lua and platform migration to run, iterate and
invoke these migrations on each object and its metadata, and return a
dict containing the upgraded parameters.
"""
post_upgrade_parameters = {}
#
# For the request.migration_ids list, protobuf will preserve the
# ordering of repeated elements, so we can rely on the backend to
# give us the already sorted list of migrations
#
impls_list = lua_impls_getter(
request.lua_upgrade_version) + platform_impls_getter(
request.migration_ids)
for (object_ref, metadata) in request.pre_upgrade_parameters.items():
# Load the object metadata into a dictionary
current_metadata = json.loads(metadata)
for migration_function in impls_list:
current_metadata = migration_function(current_metadata)
post_upgrade_parameters[object_ref] = json.dumps(current_metadata)
return post_upgrade_parameters
def _internal_repository(self, request):
"""Upgrade repositories for plugins.
"""
if request.type != platform_pb2.UpgradeRequest.REPOSITORY:
raise IncorrectUpgradeObjectTypeError(
request.type, platform_pb2.UpgradeRequest.REPOSITORY)
logger.debug('Upgrade repositories [{}]'.format(', '.join(
sorted(request.pre_upgrade_parameters.keys()))))
post_upgrade_parameters = self._run_migration_upgrades(
request, self.lua_migrations.get_repository_impls_to_exec,
self.platform_migrations.get_repository_impls_to_exec)
return self._success_upgrade_response(post_upgrade_parameters)
def _internal_source_config(self, request):
"""Upgrade source configs for plugins.
"""
if request.type != platform_pb2.UpgradeRequest.SOURCECONFIG:
raise IncorrectUpgradeObjectTypeError(
request.type, platform_pb2.UpgradeRequest.SOURCECONFIG)
logger.debug('Upgrade source configs [{}]'.format(', '.join(
sorted(request.pre_upgrade_parameters.keys()))))
post_upgrade_parameters = self._run_migration_upgrades(
request, self.lua_migrations.get_source_config_impls_to_exec,
self.platform_migrations.get_source_config_impls_to_exec)
return self._success_upgrade_response(post_upgrade_parameters)
def _internal_linked_source(self, request):
"""Upgrade linked source for plugins.
"""
if request.type != platform_pb2.UpgradeRequest.LINKEDSOURCE:
raise IncorrectUpgradeObjectTypeError(
request.type, platform_pb2.UpgradeRequest.LINKEDSOURCE)
logger.debug('Upgrade linked sources [{}]'.format(', '.join(
sorted(request.pre_upgrade_parameters.keys()))))
post_upgrade_parameters = self._run_migration_upgrades(
request, self.lua_migrations.get_linked_source_impls_to_exec,
self.platform_migrations.get_linked_source_impls_to_exec)
return self._success_upgrade_response(post_upgrade_parameters)
def _internal_virtual_source(self, request):
"""Upgrade virtual sources for plugins.
"""
if request.type != platform_pb2.UpgradeRequest.VIRTUALSOURCE:
raise IncorrectUpgradeObjectTypeError(
request.type, platform_pb2.UpgradeRequest.VIRTUALSOURCE)
logger.debug('Upgrade virtual sources [{}]'.format(', '.join(
sorted(request.pre_upgrade_parameters.keys()))))
post_upgrade_parameters = self._run_migration_upgrades(
request, self.lua_migrations.get_virtual_source_impls_to_exec,
self.platform_migrations.get_virtual_source_impls_to_exec)
return self._success_upgrade_response(post_upgrade_parameters)
def _internal_snapshot(self, request):
"""Upgrade snapshots for plugins.
"""
if request.type != platform_pb2.UpgradeRequest.SNAPSHOT:
raise IncorrectUpgradeObjectTypeError(
request.type, platform_pb2.UpgradeRequest.SNAPSHOT)
logger.debug('Upgrade snapshots [{}]'.format(', '.join(
sorted(request.pre_upgrade_parameters.keys()))))
post_upgrade_parameters = self._run_migration_upgrades(
request, self.lua_migrations.get_snapshot_impls_to_exec,
self.platform_migrations.get_snapshot_impls_to_exec)
return self._success_upgrade_response(post_upgrade_parameters)
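# Illustrative sketch (not part of the original module): how a plugin author is
# expected to register upgrade migrations with the decorators above. The
# migration id strings and field names are assumptions for the example only.
if __name__ == '__main__':
    upgrade = UpgradeOperations()

    @upgrade.repository('2019.10.30')
    def rename_repository_field(old_repository):
        new_repository = dict(old_repository)
        new_repository['installPath'] = new_repository.pop('path', None)
        return new_repository

    @upgrade.snapshot('2019.11.02', migration_type=MigrationType.PLATFORM)
    def rename_snapshot_field(old_snapshot):
        new_snapshot = dict(old_snapshot)
        new_snapshot['snapshotName'] = new_snapshot.pop('name', None)
        return new_snapshot

    print(upgrade.migration_id_list)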
|
#32 - Top Scores.py
# You rank players in the game from highest to lowest score. So far you're using an algorithm that sorts in O(n lg n) time, but players are complaining that their rankings aren't updated fast enough. You need a faster sorting algorithm.
# Write a function that takes:
# a list of unsorted_scores
# the highest_possible_score in the game
# and returns a sorted list of scores in less than O(n log n) time.
unsorted_scores = [5,20, 3, 89, 89, 99, 98]
highest_possible_score = 100
def get_highest_scores(unsorted_scores, highest_possible_score):
# define a dictionary
scores_to_counts = {}
for score in unsorted_scores:
if score in scores_to_counts:
scores_to_counts[score] += 1
else:
scores_to_counts[score] = 1
sorted_scores = sorted(scores_to_counts, reverse = True)
full_sorted_list = []
for score in sorted_scores:
j = scores_to_counts[score]
while j > 0:
full_sorted_list.append(score)
j = j - 1
return(full_sorted_list)
print(get_highest_scores(unsorted_scores, highest_possible_score))
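# Alternative sketch (illustrative addition): a counting sort that actually uses
# highest_possible_score, giving O(n + k) time where k is the highest possible
# score; note the function above never uses that second argument.
def get_highest_scores_counting(unsorted_scores, highest_possible_score):
    # one counter per possible score
    counts = [0] * (highest_possible_score + 1)
    for score in unsorted_scores:
        counts[score] += 1
    # walk the counters from highest to lowest score
    full_sorted_list = []
    for score in range(highest_possible_score, -1, -1):
        full_sorted_list.extend([score] * counts[score])
    return full_sorted_list

print(get_highest_scores_counting(unsorted_scores, highest_possible_score))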
|
# Under MIT License, see LICENSE.txt
from PyQt5.QtGui import QPixmap
from PyQt5.QtGui import QPainter
from PyQt5.QtGui import QImage
from PyQt5.QtGui import QColor
from PyQt5.QtGui import QTransform
from PyQt5.QtCore import QThread
from PyQt5.QtCore import QRect
from Controller.DrawingObject.BaseDrawingObject import BaseDrawingObject
from Controller.QtToolBox import QtToolBox
from Model.DataObject.DrawingData.DrawInfluenceMapDataIn import DrawInfluenceMapDataIn
__author__ = 'RoboCupULaval'
class InfluenceMapDrawing(BaseDrawingObject):
def __init__(self, data_in):
BaseDrawingObject.__init__(self, data_in)
self._pixmap = None
self._thread = QThread()
self._thread.run = self._draw_image
self._thread.start()
def _draw_image(self):
width, height = self.data['size']
image = QImage(height, width, QImage.Format_RGB16)
for nb_line, line in enumerate(self.data['field_data']):
for nb_col, case in enumerate(line):
if case > self.data['hottest_numb']:
case = self.data['hottest_numb']
if case < self.data['coldest_numb']:
case = self.data['coldest_numb']
rgb_value = InfluenceMapDrawing._convert_rgb_value_with_minmax(case,
self.data['coldest_numb'],
self.data['hottest_numb'],
self.data['coldest_color'],
self.data['hottest_color'])
image.setPixel(nb_line, nb_col, QColor(*rgb_value).rgb())
self._pixmap = image.transformed(QTransform().rotate(90))
def draw(self, painter):
if self._pixmap is not None and self.isVisible():
x, y = QtToolBox.field_ctrl.get_top_left_to_screen()
width = QtToolBox.field_ctrl.size[0] * QtToolBox.field_ctrl.ratio_screen
height = QtToolBox.field_ctrl.size[1] * QtToolBox.field_ctrl.ratio_screen
pixmap = self._pixmap.mirrored(horizontal=QtToolBox.field_ctrl.is_x_axe_flipped,
vertical=QtToolBox.field_ctrl.is_y_axe_flipped)
painter.drawImage(QRect(x, y, width, height),
pixmap)
@staticmethod
def _convert_rgb_value_with_minmax(value, value_min, value_max, rgb_min, rgb_max):
""" Retourne une valeur RGB pour un dégradé de couleur en fonction de la valeur max et min """
value_r = InfluenceMapDrawing._convert_color_value_with_minmax(value, value_min, value_max, rgb_min[0], rgb_max[0])
value_g = InfluenceMapDrawing._convert_color_value_with_minmax(value, value_min, value_max, rgb_min[1], rgb_max[1])
value_b = InfluenceMapDrawing._convert_color_value_with_minmax(value, value_min, value_max, rgb_min[2], rgb_max[2])
return value_r, value_g, value_b
@staticmethod
def _convert_color_value_with_minmax(value, value_min, value_max, color_min, color_max):
""" Retourne une valeur pour un dégradé de couleur en fonction de la valeur max et min """
percentage_value = (value - value_min) / (value_max - value_min)
if color_max > color_min:
return int((color_max - color_min) * percentage_value + color_min)
else:
return int((color_min - color_max) * (1 - percentage_value) + color_max)
@staticmethod
def get_datain_associated():
return DrawInfluenceMapDataIn.__name__
|
load(
"@io_bazel_rules_dotnet//dotnet/private:common.bzl",
"paths",
)
def _vs2017_ref_net_impl(ctx):
prefix = "vs"
for vs_type in ["Community", "Professional", "Enterprise"]:
vs_ref_path = paths.join("C:/Program Files (x86)/Microsoft Visual Studio/2017",
vs_type, "Common7/IDE/ReferenceAssemblies")
if ctx.path(vs_ref_path).exists:
ctx.symlink(vs_ref_path, prefix)
build_file_content = r'''package(default_visibility = [ "//visibility:public" ])
load("@io_bazel_rules_dotnet//dotnet:defs.bzl", "net_import_library")
[net_import_library(
name = vs_ref_assembly[:-len(".dll")],
src = vs_ref_assembly
) for ref_version in ["v2.0", "v4.0", "v4.5"]
for vs_ref_assembly in glob(["%s/" + ref_version + "/*.dll"])]
''' % prefix
ctx.file("BUILD", build_file_content)
vs2017_ref_net = repository_rule(
implementation=_vs2017_ref_net_impl,
) |
# A class for performing hidden markov models
import copy
import numpy as np
class HMM():
def __init__(self, transmission_prob, emission_prob, obs=None):
'''
Note that this implementation assumes that n, m, and T are small
enough not to require underflow mitigation.
Required Inputs:
- transmission_prob: an (n+2) x (n+2) numpy array of initial transition probabilities, where n is
the number of hidden states
- emission_prob: an (m x n) 2-D numpy array, where m is the number of
possible observations
Optional Input:
- obs: a list of observation labels, in the same order as their
occurrence within the emission probability matrix; otherwise, will assume
that the emission probabilities are in alpha-numerical order.
'''
self.transmission_prob = transmission_prob
self.emission_prob = emission_prob
self.n = self.emission_prob.shape[1]
self.m = self.emission_prob.shape[0]
self.observations = None
self.forward = []
self.backward = []
self.psi = []
self.obs = obs
self.emiss_ref = {}
self.forward_final = [0 , 0]
self.backward_final = [0 , 0]
self.state_probs = []
if obs is None and self.observations is not None:
self.obs = self.assume_obs()
def assume_obs(self):
'''
If observation labels are not given, will assume that the emission
probabilities are in alpha-numerical order.
'''
obs = list(set(list(self.observations)))
obs.sort()
for i in range(len(obs)):
self.emiss_ref[obs[i]] = i
return obs
def train(self, observations, iterations = 10, verbose=True):
'''
Trains the model parameters according to the observation sequence.
Input:
- observations: 1-D string array of T observations
'''
self.observations = observations
self.obs = self.assume_obs()
self.psi = [[[0.0] * (len(self.observations)-1) for i in range(self.n)] for i in range(self.n)]
self.gamma = [[0.0] * (len(self.observations)) for i in range(self.n)]
for i in range(iterations):
old_transmission = self.transmission_prob.copy()
old_emission = self.emission_prob.copy()
if verbose:
print("Iteration: {}".format(i + 1))
self.expectation()
self.maximization()
def expectation(self):
'''
Executes expectation step.
'''
self.forward = self.forward_recurse(len(self.observations))
self.backward = self.backward_recurse(0)
self.get_gamma()
self.get_psi()
def get_gamma(self):
'''
Calculates the gamma matrix.
'''
self.gamma = [[0, 0] for i in range(len(self.observations))]
for i in range(len(self.observations)):
self.gamma[i][0] = (float(self.forward[0][i] * self.backward[0][i]) /
float(self.forward[0][i] * self.backward[0][i] +
self.forward[1][i] * self.backward[1][i]))
self.gamma[i][1] = (float(self.forward[1][i] * self.backward[1][i]) /
float(self.forward[0][i] * self.backward[0][i] +
self.forward[1][i] * self.backward[1][i]))
def get_psi(self):
'''
Runs the psi calculation.
'''
for t in range(1, len(self.observations)):
for j in range(self.n):
for i in range(self.n):
self.psi[i][j][t-1] = self.calculate_psi(t, i, j)
def calculate_psi(self, t, i, j):
'''
Calculates the psi for a transition from i->j for t > 0.
'''
alpha_tminus1_i = self.forward[i][t-1]
a_i_j = self.transmission_prob[j+1][i+1]
beta_t_j = self.backward[j][t]
observation = self.observations[t]
b_j = self.emission_prob[self.emiss_ref[observation]][j]
denom = float(self.forward[0][i] * self.backward[0][i] + self.forward[1][i] * self.backward[1][i])
return (alpha_tminus1_i * a_i_j * beta_t_j * b_j) / denom
def maximization(self):
'''
Executes maximization step.
'''
self.get_state_probs()
for i in range(self.n):
self.transmission_prob[i+1][0] = self.gamma[0][i]
self.transmission_prob[-1][i+1] = self.gamma[-1][i] / self.state_probs[i]
for j in range(self.n):
self.transmission_prob[j+1][i+1] = self.estimate_transmission(i, j)
for obs in range(self.m):
self.emission_prob[obs][i] = self.estimate_emission(i, obs)
def get_state_probs(self):
'''
Calculates total probability of a given state.
'''
self.state_probs = [0] * self.n
for state in range(self.n):
summ = 0
for row in self.gamma:
summ += row[state]
self.state_probs[state] = summ
def estimate_transmission(self, i, j):
'''
Estimates transmission probabilities from i to j.
'''
return sum(self.psi[i][j]) / self.state_probs[i]
def estimate_emission(self, j, observation):
'''
Estimate emission probability for an observation from state j.
'''
observation = self.obs[observation]
ts = [i for i in range(len(self.observations)) if self.observations[i] == observation]
for i in range(len(ts)):
ts[i] = self.gamma[ts[i]][j]
return sum(ts) / self.state_probs[j]
def backward_recurse(self, index):
'''
Runs the backward recursion.
'''
# Initialization at T
if index == (len(self.observations) - 1):
backward = [[0.0] * (len(self.observations)) for i in range(self.n)]
for state in range(self.n):
backward[state][index] = self.backward_initial(state)
return backward
# Recursion for T --> 0
else:
backward = self.backward_recurse(index+1)
for state in range(self.n):
if index >= 0:
backward[state][index] = self.backward_probability(index, backward, state)
if index == 0:
self.backward_final[state] = self.backward_probability(index, backward, 0, final=True)
return backward
def backward_initial(self, state):
'''
Initialization of backward probabilities.
'''
return self.transmission_prob[self.n + 1][state + 1]
def backward_probability(self, index, backward, state, final=False):
'''
Calculates the backward probability at index = t.
'''
p = [0] * self.n
for j in range(self.n):
observation = self.observations[index + 1]
if not final:
a = self.transmission_prob[j + 1][state + 1]
else:
a = self.transmission_prob[j + 1][0]
b = self.emission_prob[self.emiss_ref[observation]][j]
beta = backward[j][index + 1]
p[j] = a * b * beta
return sum(p)
def forward_recurse(self, index):
'''
Executes forward recursion.
'''
# Initialization
if index == 0:
forward = [[0.0] * (len(self.observations)) for i in range(self.n)]
for state in range(self.n):
forward[state][index] = self.forward_initial(self.observations[index], state)
return forward
# Recursion
else:
forward = self.forward_recurse(index-1)
for state in range(self.n):
if index != len(self.observations):
forward[state][index] = self.forward_probability(index, forward, state)
else:
# Termination
self.forward_final[state] = self.forward_probability(index, forward, state, final=True)
return forward
def forward_initial(self, observation, state):
'''
Calculates initial forward probabilities.
'''
        return self.transmission_prob[state + 1][0] * self.emission_prob[self.emiss_ref[observation]][state]
def forward_probability(self, index, forward, state, final=False):
'''
Calculates the alpha for index = t.
'''
p = [0] * self.n
for prev_state in range(self.n):
if not final:
# Recursion
obs_index = self.emiss_ref[self.observations[index]]
p[prev_state] = forward[prev_state][index-1] * self.transmission_prob[state + 1][prev_state + 1] * self.emission_prob[obs_index][state]
else:
# Termination
p[prev_state] = forward[prev_state][index-1] * self.transmission_prob[self.n][prev_state + 1]
return sum(p)
def likelihood(self, new_observations):
'''
Returns the probability of a observation sequence based on current model
parameters.
'''
new_hmm = HMM(self.transmission_prob, self.emission_prob)
new_hmm.observations = new_observations
new_hmm.obs = new_hmm.assume_obs()
forward = new_hmm.forward_recurse(len(new_observations))
return sum(new_hmm.forward_final)
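    # The value returned by likelihood() is the total observation probability:
    # P(O | model) = sum_i alpha_T(i), where the terminal forward probabilities
    # alpha_T(i) are accumulated in forward_final by forward_recurse().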
if __name__ == '__main__':
# Example inputs from Jason Eisner's Ice Cream and Baltimore Summer example
# http://www.cs.jhu.edu/~jason/papers/#eisner-2002-tnlp
emission = np.array([[0.7, 0], [0.2, 0.3], [0.1, 0.7]])
transmission = np.array([ [0, 0, 0, 0], [0.5, 0.8, 0.2, 0], [0.5, 0.1, 0.7, 0], [0, 0.1, 0.1, 0]])
observations = ['2','3','3','2','3','2','3','2','2','3','1','3','3','1','1',
'1','2','1','1','1','3','1','2','1','1','1','2','3','3','2',
'3','2','2']
model = HMM(transmission, emission)
model.train(observations)
print("Model transmission probabilities:\n{}".format(model.transmission_prob))
print("Model emission probabilities:\n{}".format(model.emission_prob))
# Probability of a new sequence
new_seq = ['1', '2', '3']
print("Finding likelihood for {}".format(new_seq))
likelihood = model.likelihood(new_seq)
print("Likelihood: {}".format(likelihood))
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
"""
"""
import sys
import os
import task3_utils
from fastsubs_utils import read_sub_vectors
sub_file = sys.argv[1]
test_f = sys.stdin
if len(sys.argv) == 3:
test_f = sys.argv[2]
sentences = task3_utils.get_sentences(test_f)
wordset = task3_utils.get_wordset(sentences)
sc_vecs = read_sub_vectors(sub_file, wordset)
sys.stderr.write("Substitute file reading done.\n")
|
import jane
jane.janefunc()
|
import os, sys
import tempfile
from Bio import SeqIO
import shutil
import networkx as nx
import argparse
import textwrap
import ast
from .isvalid import *
from .set_default_args import set_default_args
from .prokka import process_prokka_input
from .cdhit import check_cdhit_version
from .cdhit import run_cdhit
from .generate_network import generate_network
from .generate_output import *
from .clean_network import *
from .find_missing import find_missing
from .generate_alignments import check_aligner_install
from intbitset import intbitset
from .__init__ import __version__
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
lines = []
for l in text[2:].splitlines():
if l == "":
lines += [""]
else:
lines += textwrap.wrap(l, width=55)
return lines
# this is the RawTextHelpFormatter._split_lines
return argparse.HelpFormatter._split_lines(self, text, width)
def get_options(args):
description = 'panaroo: an updated pipeline for pangenome investigation'
parser = argparse.ArgumentParser(description=description,
prog='panaroo',
formatter_class=SmartFormatter)
io_opts = parser.add_argument_group('Input/output')
io_opts.add_argument(
"-i",
"--input",
dest="input_files",
required=True,
help=("input GFF3 files (usually output from running Prokka). " +
"Can also take a file listing each gff file line by line."),
type=str,
nargs='+')
io_opts.add_argument("-o",
"--out_dir",
dest="output_dir",
required=True,
help="location of an output directory",
type=str)
mode_opts = parser.add_argument_group('Mode')
mode_opts.add_argument(
"--clean-mode",
dest="mode",
help=
('''R|The stringency mode at which to run panaroo. Must be one of 'strict',\
'moderate' or 'sensitive'. Each of these modes can be fine tuned using the\
additional parameters in the 'Graph correction' section.
strict:
Requires fairly strong evidence (present in at least 5%% of genomes)\
to keep likely contaminant genes. Will remove genes that are refound more often than\
they were called originally.
moderate:
Requires moderate evidence (present in at least 1%% of genomes)\
to keep likely contaminant genes. Keeps genes that are refound more often than\
they were called originally.
sensitive:
    Does not delete any genes and only performs merge and refinding\
    operations. Useful if rare plasmids are of interest as these are often hard to\
    distinguish from contamination. Results will likely include a higher number of\
    spurious annotations.'''),
choices=['strict', 'moderate', 'sensitive'],
required=True)
mode_opts.add_argument(
"--remove-invalid-genes",
dest="filter_invalid",
action='store_true',
default=False,
help=(
"removes annotations that do not conform to the expected Prokka" +
" format such as those including premature stop codons."))
matching = parser.add_argument_group('Matching')
matching.add_argument("-c",
"--threshold",
dest="id",
help="sequence identity threshold (default=0.95)",
type=float)
matching.add_argument(
"-f",
"--family_threshold",
dest="family_threshold",
help="protein family sequence identity threshold (default=0.7)",
type=float)
matching.add_argument("--len_dif_percent",
dest="len_dif_percent",
help="length difference cutoff (default=0.98)",
type=float)
matching.add_argument("--merge_paralogs",
dest="merge_paralogs",
help="don't split paralogs",
action='store_true',
default=False)
refind = parser.add_argument_group('Refind')
refind.add_argument(
"--search_radius",
dest="search_radius",
help=("the distance in nucleotides surronding the " +
"neighbour of an accessory gene in which to search for it"),
default=5000,
type=int)
refind.add_argument(
"--refind_prop_match",
dest="refind_prop_match",
help=("the proportion of an accessory gene that must " +
"be found in order to consider it a match"),
default=0.2,
type=float)
graph = parser.add_argument_group('Graph correction')
graph.add_argument(
"--min_trailing_support",
dest="min_trailing_support",
help=("minimum cluster size to keep a gene called at the " +
"end of a contig"),
type=int)
graph.add_argument(
"--trailing_recursive",
dest="trailing_recursive",
help=("number of times to perform recursive trimming of low support " +
"nodes near the end of contigs"),
type=int)
graph.add_argument(
"--edge_support_threshold",
dest="edge_support_threshold",
help=(
"minimum support required to keep an edge that has been flagged" +
" as a possible mis-assembly"),
type=float)
graph.add_argument(
"--length_outlier_support_proportion",
dest="length_outlier_support_proportion",
help=
("proportion of genomes supporting a gene with a length more " +
"than 1.5x outside the interquatile range for genes in the same cluster"
+
" (default=0.01). Genes failing this test will be re-annotated at the "
+ "shorter length"),
type=float,
default=0.1)
graph.add_argument(
"--remove_by_consensus",
dest="remove_by_consensus",
type=ast.literal_eval,
choices=[True, False],
help=
("if a gene is called in the same region with similar sequence a minority "
+ "of the time, remove it. One of 'True' or 'False'"),
default=None)
graph.add_argument(
"--high_var_flag",
dest="cycle_threshold_min",
help=(
"minimum number of nested cycles to call a highly variable gene " +
"region (default = 5)."),
type=int,
default=5)
graph.add_argument(
"--min_edge_support_sv",
dest="min_edge_support_sv",
help=("minimum edge support required to call structural variants" +
" in the presence/absence sv file"),
type=int)
graph.add_argument(
"--all_seq_in_graph",
dest="all_seq_in_graph",
help=("Retains all DNA sequence for each gene cluster in the graph " +
"output. Off by default as it uses a large amount of space."),
action='store_true',
default=False)
graph.add_argument(
"--no_clean_edges",
dest="clean_edges",
help=("Turn off edge filtering in the final output graph."),
action='store_false',
default=True)
core = parser.add_argument_group('Gene alignment')
core.add_argument(
"-a",
"--alignment",
dest="aln",
help=("Output alignments of core genes or all genes. Options are" +
" 'core' and 'pan'. Default: 'None'"),
type=str,
choices=['core', 'pan'],
default=None)
core.add_argument(
"--aligner",
dest="alr",
help=
"Specify an aligner. Options:'prank', 'clustal', and default: 'mafft'",
type=str,
choices=['prank', 'clustal', 'mafft'],
default="mafft")
core.add_argument("--core_threshold",
dest="core",
help="Core-genome sample threshold (default=0.95)",
type=float,
default=0.95)
# Other options
parser.add_argument("-t",
"--threads",
dest="n_cpu",
help="number of threads to use (default=1)",
type=int,
default=1)
parser.add_argument("--quiet",
dest="verbose",
help="suppress additional output",
action='store_false',
default=True)
parser.add_argument('--version',
action='version',
version='%(prog)s ' + __version__)
args = parser.parse_args(args)
args = set_default_args(args)
return (args)
def main():
args = get_options(sys.argv[1:])
# Check cd-hit is installed
check_cdhit_version()
#Make sure aligner is installed if alignment requested
    if args.aln is not None:
check_aligner_install(args.alr)
# create directory if it isn't present already
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
# make sure trailing forward slash is present
args.output_dir = os.path.join(args.output_dir, "")
# Create temporary directory
temp_dir = os.path.join(tempfile.mkdtemp(dir=args.output_dir), "")
# check if input is a file containing filenames
if len(args.input_files) == 1:
files = []
with open(args.input_files[0], 'r') as infile:
for line in infile:
files.append(line.strip())
args.input_files = files
if args.verbose:
print("pre-processing gff3 files...")
# convert input GFF3 files into summary files
process_prokka_input(args.input_files, args.output_dir, args.filter_invalid,
(not args.verbose), args.n_cpu)
# Cluster protein sequences using cdhit
cd_hit_out = args.output_dir + "combined_protein_cdhit_out.txt"
run_cdhit(input_file=args.output_dir + "combined_protein_CDS.fasta",
output_file=cd_hit_out,
id=args.id,
s=args.len_dif_percent,
quiet=(not args.verbose),
n_cpu=args.n_cpu)
if args.verbose:
print("generating initial network...")
# generate network from clusters and adjacency information
G, centroid_contexts, seqid_to_centroid = generate_network(
cluster_file=cd_hit_out + ".clstr",
data_file=args.output_dir + "gene_data.csv",
prot_seq_file=args.output_dir + "combined_protein_CDS.fasta",
all_dna=args.all_seq_in_graph)
# merge paralogs
if args.verbose:
print("Processing paralogs...")
G = collapse_paralogs(G, centroid_contexts, quiet=(not args.verbose))
# write out pre-filter graph in GML format
for node in G.nodes():
G.nodes[node]['size'] = len(G.nodes[node]['members'])
G.nodes[node]['genomeIDs'] = ";".join(
[str(m) for m in G.nodes[node]['members']])
G.nodes[node]['geneIDs'] = ";".join(G.nodes[node]['seqIDs'])
G.nodes[node]['degrees'] = G.degree[node]
for edge in G.edges():
G.edges[edge[0], edge[1]]['genomeIDs'] = ";".join(
[str(m) for m in G.edges[edge[0], edge[1]]['members']])
nx.write_gml(G,
args.output_dir + "pre_filt_graph.gml",
stringizer=custom_stringizer)
if args.verbose:
print("collapse mistranslations...")
# clean up translation errors
G = collapse_families(G,
seqid_to_centroid=seqid_to_centroid,
outdir=temp_dir,
dna_error_threshold=0.98,
correct_mistranslations=True,
length_outlier_support_proportion=args.
length_outlier_support_proportion,
n_cpu=args.n_cpu,
quiet=(not args.verbose))[0]
if args.verbose:
print("collapse gene families...")
# collapse gene families
G, distances_bwtn_centroids, centroid_to_index = collapse_families(
G,
seqid_to_centroid=seqid_to_centroid,
outdir=temp_dir,
family_threshold=args.family_threshold,
correct_mistranslations=False,
length_outlier_support_proportion=args.
length_outlier_support_proportion,
n_cpu=args.n_cpu,
quiet=(not args.verbose))
if args.verbose:
print("trimming contig ends...")
# re-trim low support trailing ends
G = trim_low_support_trailing_ends(G,
min_support=args.min_trailing_support,
max_recursive=args.trailing_recursive)
if args.verbose:
print("refinding genes...")
# find genes that Prokka has missed
G = find_missing(G,
args.input_files,
dna_seq_file=args.output_dir + "combined_DNA_CDS.fasta",
prot_seq_file=args.output_dir +
"combined_protein_CDS.fasta",
gene_data_file=args.output_dir + "gene_data.csv",
remove_by_consensus=args.remove_by_consensus,
search_radius=args.search_radius,
prop_match=args.refind_prop_match,
pairwise_id_thresh=args.id,
merge_id_thresh=max(0.8, args.family_threshold),
n_cpu=args.n_cpu,
verbose=args.verbose)
# remove edges that are likely due to misassemblies (by consensus)
# merge again in case refinding has resolved issues
if args.verbose:
print("collapse gene families with refound genes...")
G = collapse_families(G,
seqid_to_centroid=seqid_to_centroid,
outdir=temp_dir,
family_threshold=args.family_threshold,
correct_mistranslations=False,
length_outlier_support_proportion=args.
length_outlier_support_proportion,
n_cpu=args.n_cpu,
quiet=(not args.verbose),
distances_bwtn_centroids=distances_bwtn_centroids,
centroid_to_index=centroid_to_index)[0]
if args.clean_edges:
G = clean_misassembly_edges(
G, edge_support_threshold=args.edge_support_threshold)
# if requested merge paralogs
if args.merge_paralogs:
G = merge_paralogs(G)
isolate_names = [
os.path.splitext(os.path.basename(x))[0] for x in args.input_files
]
G.graph['isolateNames'] = isolate_names
mems_to_isolates = {}
for i, iso in enumerate(isolate_names):
mems_to_isolates[i] = iso
if args.verbose:
print("writing output...")
# write out roary like gene_presence_absence.csv
    # get original annotation IDs, lengths and whether or
# not an internal stop codon is present
orig_ids = {}
ids_len_stop = {}
with open(args.output_dir + "gene_data.csv", 'r') as infile:
next(infile)
for line in infile:
line = line.split(",")
orig_ids[line[2]] = line[3]
ids_len_stop[line[2]] = (len(line[4]), "*" in line[4][1:-3])
G = generate_roary_gene_presence_absence(G,
mems_to_isolates=mems_to_isolates,
orig_ids=orig_ids,
ids_len_stop=ids_len_stop,
output_dir=args.output_dir)
#Write out presence_absence summary
generate_summary_stats(output_dir=args.output_dir)
# write pan genome reference fasta file
generate_pan_genome_reference(G,
output_dir=args.output_dir,
split_paralogs=False)
# write out common structural differences in a matrix format
generate_common_struct_presence_absence(
G,
output_dir=args.output_dir,
mems_to_isolates=mems_to_isolates,
min_variant_support=args.min_edge_support_sv)
# add helpful attributes and write out graph in GML format
for node in G.nodes():
G.nodes[node]['size'] = len(G.nodes[node]['members'])
G.nodes[node]['centroid'] = ";".join(G.nodes[node]['centroid'])
G.nodes[node]['dna'] = ";".join(conv_list(G.nodes[node]['dna']))
G.nodes[node]['protein'] = ";".join(conv_list(
G.nodes[node]['protein']))
G.nodes[node]['genomeIDs'] = ";".join(
[str(m) for m in G.nodes[node]['members']])
G.nodes[node]['geneIDs'] = ";".join(G.nodes[node]['seqIDs'])
G.nodes[node]['degrees'] = G.degree[node]
G.nodes[node]['members'] = list(G.nodes[node]['members'])
G.nodes[node]['seqIDs'] = list(G.nodes[node]['seqIDs'])
for edge in G.edges():
G.edges[edge[0], edge[1]]['genomeIDs'] = ";".join(
[str(m) for m in G.edges[edge[0], edge[1]]['members']])
G.edges[edge[0],
edge[1]]['members'] = list(G.edges[edge[0],
edge[1]]['members'])
nx.write_gml(G, args.output_dir + "final_graph.gml")
#Write out core/pan-genome alignments
if args.aln == "pan":
if args.verbose: print("generating pan genome MSAs...")
generate_pan_genome_alignment(G, temp_dir, args.output_dir, args.n_cpu,
args.alr, isolate_names)
core_nodes = get_core_gene_nodes(G, args.core, len(args.input_files))
concatenate_core_genome_alignments(core_nodes, args.output_dir)
elif args.aln == "core":
if args.verbose: print("generating core genome MSAs...")
generate_core_genome_alignment(G, temp_dir, args.output_dir,
args.n_cpu, args.alr, isolate_names,
args.core, len(args.input_files))
# remove temporary directory
shutil.rmtree(temp_dir)
return
if __name__ == '__main__':
main()
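# Example invocation (file names are hypothetical; the flags are the ones
# defined in get_options() above):
#
#     panaroo -i sample1.gff sample2.gff -o results --clean-mode strict -t 8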
|
letters = ["a", "b", "c"]
print(letters.index("a"))
if "d" in letters:
print(letters.index("d"))
if "c" in letters:
print(letters.index("c"))
print(letters.count("a"))
print(letters.count("d"))
|
"""Features control."""
from grow.common import base_config
class Features(object):
"""Control features."""
def __call__(self, feature):
"""Ability to call the instance to shortcut to test enabled features."""
return self.is_enabled(feature)
def __init__(self, enabled=None, disabled=None, default_enabled=True):
self._enabled = set()
self._disabled = set()
self._config = {}
self.default_enabled = default_enabled
if enabled is not None:
for feature in enabled:
self.enable(feature)
if disabled is not None:
for feature in disabled:
self.disable(feature)
def config(self, feature):
"""Configuration for a feature."""
return self._config.get(feature, base_config.BaseConfig())
def disable(self, feature):
"""Disable the feature."""
self._disabled.add(feature)
def enable(self, feature, config=None):
"""Enable the feature."""
self._enabled.add(feature)
self._disabled.discard(feature)
if config:
self._config[feature] = base_config.BaseConfig(config=config)
def is_disabled(self, feature):
"""Determine if the feature is disabled."""
return not self.is_enabled(feature)
def is_enabled(self, feature):
"""Determine if the feature is enabled."""
if feature in self._disabled:
return False
if feature in self._enabled:
return True
return self.default_enabled is True
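# Usage sketch (illustrative only; the feature names are made up):
#
#     features = Features(disabled=['translation'])
#     features.enable('editor', config={'theme': 'dark'})
#     features('editor')                  # True (__call__ delegates to is_enabled)
#     features.is_enabled('translation')  # False, explicitly disabled
#     features.config('editor')           # BaseConfig wrapping {'theme': 'dark'}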
|
from functools import wraps
from warnings import warn
def add_warning(func, oldname):
@wraps(func)
def _wrapped(*args, **kwds):
warn('Deprecated function %s being called' % oldname)
return func(*args, **kwds)
return _wrapped
def test(a=2, b=4):
print(a + b)
old_test = add_warning(test, 'old_test')
old_test(123)
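# Because of functools.wraps, the wrapper keeps test's name and docstring, so
# introspection still reports the original function. The call above emits a
# UserWarning ("Deprecated function old_test being called") and prints 127
# (a=123, b=4).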
|
#
# @lc app=leetcode id=236 lang=python
#
# [236] Lowest Common Ancestor of a Binary Tree
#
# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/description/
#
# algorithms
# Medium (43.56%)
# Total Accepted: 434K
# Total Submissions: 986.2K
# Testcase Example: '[3,5,1,6,2,0,8,null,null,7,4]\n5\n1'
#
# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes
# in the tree.
#
# According to the definition of LCA on Wikipedia: “The lowest common ancestor
# is defined between two nodes p and q as the lowest node in T that has both p
# and q as descendants (where we allow a node to be a descendant of itself).”
#
# Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
#
#
#
# Example 1:
#
#
# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
# Output: 3
# Explanation: The LCA of nodes 5 and 1 is 3.
#
#
# Example 2:
#
#
# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
# Output: 5
# Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant
# of itself according to the LCA definition.
#
#
#
#
# Note:
#
#
# All of the nodes' values will be unique.
# p and q are different and both values will exist in the binary tree.
#
#
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if root is None or root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left is None:
return right
elif right is None:
return left
else:
return root |
from abc import ABC, abstractmethod
import torch
class Algorithm(ABC):
"""Base class for all algorithms"""
@classmethod
@abstractmethod
def create_factory(cls):
"""Returns a function to create new Algo instances"""
raise NotImplementedError
@property
@abstractmethod
def gamma(self):
"""Returns discount factor gamma."""
raise NotImplementedError
@property
@abstractmethod
def start_steps(self):
"""Returns the number of steps to collect with initial random policy."""
raise NotImplementedError
@property
@abstractmethod
def num_epochs(self):
"""
Returns the number of times the whole buffer is re-used before data
collection proceeds.
"""
raise NotImplementedError
@property
@abstractmethod
def update_every(self):
"""
Returns the number of data samples collected between
network update stages.
"""
raise NotImplementedError
@property
@abstractmethod
def num_mini_batch(self):
"""
Returns the number of times the whole buffer is re-used before data
collection proceeds.
"""
raise NotImplementedError
@property
@abstractmethod
def mini_batch_size(self):
"""
Returns the number of mini batches per epoch.
"""
raise NotImplementedError
@property
@abstractmethod
def test_every(self):
"""Number of network updates between test evaluations."""
raise NotImplementedError
@property
@abstractmethod
def num_test_episodes(self):
"""
Returns the number of episodes to complete when testing.
"""
raise NotImplementedError
@abstractmethod
def acting_step(self, obs, rhs, done, deterministic=False, *args):
"""
Algorithm acting function.
Parameters
----------
obs: torch.tensor
Current world observation
rhs: torch.tensor
RNN recurrent hidden state (if policy is not a RNN, rhs will contain zeroes).
done: torch.tensor
1.0 if current obs is the last one in the episode, else 0.0.
deterministic: bool
Whether to randomly sample action from predicted distribution or take the mode.
Returns
-------
action: torch.tensor
Predicted next action.
clipped_action: torch.tensor
Predicted next action (clipped to be within action space).
rhs: torch.tensor
Policy recurrent hidden state (if policy is not a RNN, rhs will contain zeroes).
other: dict
Additional PPO predictions, value score and action log probability,
which are not used in other algorithms.
"""
raise NotImplementedError
@abstractmethod
def compute_gradients(self, batch, grads_to_cpu=True, *args):
"""
Compute loss and compute gradients but don't do optimization step,
return gradients instead.
Parameters
----------
data: dict
data batch containing all required tensors to compute PPO loss.
grads_to_cpu: bool
If gradient tensor will be sent to another node, need to be in CPU.
Returns
-------
grads: list of tensors
List of actor_critic gradients.
info: dict
Dict containing current PPO iteration information.
"""
raise NotImplementedError
@abstractmethod
def apply_gradients(self, gradients=None, *args):
"""
Take an optimization step, previously setting new gradients if provided.
Parameters
----------
gradients: list of tensors
List of actor_critic gradients.
"""
raise NotImplementedError
@abstractmethod
def set_weights(self, actor_weights, *args):
"""
Update actor with the given weights
Parameters
----------
actor_weights: dict of tensors
Dict containing actor_critic weights to be set.
"""
raise NotImplementedError
@abstractmethod
def update_algorithm_parameter(self, parameter_name, new_parameter_value, *args):
"""
        If `parameter_name` is an attribute of the algorithm, change its value
        to `new_parameter_value`.
Parameters
----------
parameter_name : str
Attribute name
new_parameter_value : int or float
New value for `parameter_name`.
"""
raise NotImplementedError
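# Minimal sketch (added for illustration, not part of the original module) of a
# concrete class satisfying this interface. Every numeric value and the zero
# "action" below are placeholders, not settings from any real algorithm.
class _RandomAlgorithm(Algorithm):
    """Toy algorithm returning a constant action; shows the required surface."""

    def __init__(self):
        self._gamma = 0.99  # placeholder discount factor

    @classmethod
    def create_factory(cls):
        # The factory is a zero-argument callable that builds a new instance.
        return lambda: cls()

    @property
    def gamma(self):
        return self._gamma

    @property
    def start_steps(self):
        return 0

    @property
    def num_epochs(self):
        return 1

    @property
    def update_every(self):
        return 1

    @property
    def num_mini_batch(self):
        return 1

    @property
    def mini_batch_size(self):
        return 32

    @property
    def test_every(self):
        return 1000

    @property
    def num_test_episodes(self):
        return 5

    def acting_step(self, obs, rhs, done, deterministic=False, *args):
        action = torch.zeros(1)  # placeholder action
        return action, action, rhs, {}

    def compute_gradients(self, batch, grads_to_cpu=True, *args):
        return [], {}  # no parameters, so no gradients

    def apply_gradients(self, gradients=None, *args):
        pass  # nothing to optimize in this toy class

    def set_weights(self, actor_weights, *args):
        pass  # no actor to update

    def update_algorithm_parameter(self, parameter_name, new_parameter_value, *args):
        # Attributes in this toy class are stored with a leading underscore.
        if hasattr(self, "_" + parameter_name):
            setattr(self, "_" + parameter_name, new_parameter_value)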
|
import unittest
from mock import patch
from pathlib import Path
from PyQt5.QtCore import QSettings
from PyQt5.QtWidgets import QFileDialog
from .gui_test import GUITest
from inselect.gui.user_template_choice import user_template_choice
from inselect.tests.utils import temp_directory_with_files
TESTDATA = Path(__file__).parent.parent / 'test_data'
class TestUserTemplateChoice(GUITest):
"""Test the template choice
"""
@patch.object(QSettings, 'setValue')
def test_select_default(self, mock_setvalue):
"User chooses the default template"
# Set a non-default template before testing the default user template
# method
t = user_template_choice()
t.load(TESTDATA / 'test.inselect_template')
self.assertEqual('Test user template', t.current.name)
self.window.view_metadata.popup_button.default()
self.assertEqual('Simple Darwin Core terms',
user_template_choice().current.name)
self.assertEqual('Simple Darwin Core terms',
self.window.view_metadata.popup_button.text())
mock_setvalue.assert_called_with(user_template_choice().PATH_KEY, '')
@patch.object(QSettings, 'setValue')
def test_chooses_template(self, mock_setvalue):
"User chooses a template"
w = self.window
# Select default template before testing the choose method
w.view_metadata.popup_button.default()
path = TESTDATA / 'test.inselect_template'
retval = str(path), w.view_metadata.popup_button.FILE_FILTER
with patch.object(QFileDialog, 'getOpenFileName', return_value=retval) as mock_gofn:
w.view_metadata.popup_button.choose()
self.assertEqual(1, mock_gofn.call_count)
self.assertEqual('Test user template',
user_template_choice().current.name)
self.assertEqual('Test user template',
self.window.view_metadata.popup_button.text())
mock_setvalue.assert_any_call(user_template_choice().PATH_KEY, str(path))
mock_setvalue.assert_any_call(user_template_choice().DIRECTORY_KEY, str(path.parent))
@patch.object(QFileDialog, 'getOpenFileName', return_value=(None, None))
def test_cancels_choose_template(self, mock_gofn):
"User cancels the 'choose template' box"
w = self.window
# Select default template before testing the choose method
w.view_metadata.popup_button.default()
w.view_metadata.popup_button.choose()
self.assertEqual('Simple Darwin Core terms',
user_template_choice().current.name)
self.assertEqual('Simple Darwin Core terms',
self.window.view_metadata.popup_button.text())
self.assertEqual(1, mock_gofn.call_count)
def test_refresh(self):
"User refreshes the current, non-default template"
w = self.window
with temp_directory_with_files(TESTDATA / 'test.inselect_template') as tempdir,\
patch.object(QSettings, 'setValue') as mock_setvalue:
path = tempdir / 'test.inselect_template'
retval = str(path), w.view_metadata.popup_button.FILE_FILTER
# Load the test template in tempdir
with patch.object(QFileDialog, 'getOpenFileName', return_value=retval) as mock_gofn:
w.view_metadata.popup_button.choose()
self.assertEqual(1, mock_gofn.call_count)
self.assertEqual('Test user template',
user_template_choice().current.name)
self.assertEqual('Test user template',
self.window.view_metadata.popup_button.text())
# Write a new template to the file and refresh
template = """Name: An updated test template
Fields:
- Name: catalogNumber
"""
with path.open('w') as outfile:
outfile.write(template)
# Refresh loaded template
with patch.object(QSettings, 'value', return_value=str(path)) as mock_value:
w.view_metadata.popup_button.refresh()
self.assertEqual(1, mock_value.call_count)
self.assertEqual("An updated test template",
user_template_choice().current.name)
self.assertEqual('An updated test template',
self.window.view_metadata.popup_button.text())
if __name__ == '__main__':
unittest.main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Operation(Model):
"""Record to track long running operation.
:param operation_state: Operation state. Possible values include:
'Failed', 'NotStarted', 'Running', 'Succeeded'
:type operation_state: str or
~azure.cognitiveservices.knowledge.qnamaker.authoring.models.OperationStateType
:param created_timestamp: Timestamp when the operation was created.
:type created_timestamp: str
:param last_action_timestamp: Timestamp when the current state was
entered.
:type last_action_timestamp: str
:param resource_location: Relative URI to the target resource location for
completed resources.
:type resource_location: str
:param user_id: User Id
:type user_id: str
:param operation_id: Operation Id.
:type operation_id: str
:param error_response: Error details in case of failures.
:type error_response:
~azure.cognitiveservices.knowledge.qnamaker.authoring.models.ErrorResponse
"""
_attribute_map = {
'operation_state': {'key': 'operationState', 'type': 'str'},
'created_timestamp': {'key': 'createdTimestamp', 'type': 'str'},
'last_action_timestamp': {'key': 'lastActionTimestamp', 'type': 'str'},
'resource_location': {'key': 'resourceLocation', 'type': 'str'},
'user_id': {'key': 'userId', 'type': 'str'},
'operation_id': {'key': 'operationId', 'type': 'str'},
'error_response': {'key': 'errorResponse', 'type': 'ErrorResponse'},
}
def __init__(self, **kwargs):
super(Operation, self).__init__(**kwargs)
self.operation_state = kwargs.get('operation_state', None)
self.created_timestamp = kwargs.get('created_timestamp', None)
self.last_action_timestamp = kwargs.get('last_action_timestamp', None)
self.resource_location = kwargs.get('resource_location', None)
self.user_id = kwargs.get('user_id', None)
self.operation_id = kwargs.get('operation_id', None)
self.error_response = kwargs.get('error_response', None)
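# Construction sketch (values are placeholders, not real operation data):
#
#     op = Operation(operation_state='Running', operation_id='1234',
#                    resource_location='/knowledgebases/1234')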
|
__all__ = [
'make_server_socket',
'make_ssl_context',
]
import socket
import ssl
from g1.asyncs.bases import adapters
def make_server_socket(
address,
*,
family=socket.AF_INET,
backlog=128,
reuse_address=False,
reuse_port=False,
ssl_context=None,
):
sock = socket.socket(family, socket.SOCK_STREAM)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, reuse_address)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, reuse_port)
sock.bind(address)
sock.listen(backlog)
if ssl_context:
sock = ssl_context.wrap_socket(sock, server_side=True)
except Exception:
sock.close()
raise
return adapters.SocketAdapter(sock)
def make_ssl_context(
certificate,
private_key,
client_authentication=False,
protocols=(),
):
if not certificate or not private_key:
return None
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(certificate, private_key)
if client_authentication:
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.load_verify_locations(cafile=certificate)
if protocols:
if ssl.HAS_ALPN:
ssl_context.set_alpn_protocols(protocols)
if ssl.HAS_NPN:
ssl_context.set_npn_protocols(protocols)
return ssl_context
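# Usage sketch (paths and address are hypothetical):
#
#     ssl_context = make_ssl_context('/path/to/cert.pem', '/path/to/key.pem')
#     server_sock = make_server_socket(
#         ('127.0.0.1', 8443),
#         reuse_address=True,
#         ssl_context=ssl_context,
#     )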
|
# coding: utf-8
"""
ExaVault API
See our API reference documentation at https://www.exavault.com/developer/api-docs/ # noqa: E501
OpenAPI spec version: 2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AddUserRequestBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'username': 'str',
'nickname': 'str',
'home_resource': 'str',
'email': 'str',
'password': 'str',
'role': 'str',
'permissions': 'UsersPermissions',
'time_zone': 'str',
'expiration': 'str',
'locked': 'bool',
'welcome_email': 'bool',
'onboarding': 'bool'
}
attribute_map = {
'username': 'username',
'nickname': 'nickname',
'home_resource': 'homeResource',
'email': 'email',
'password': 'password',
'role': 'role',
'permissions': 'permissions',
'time_zone': 'timeZone',
'expiration': 'expiration',
'locked': 'locked',
'welcome_email': 'welcomeEmail',
'onboarding': 'onboarding'
}
def __init__(self, username=None, nickname=None, home_resource=None, email=None, password=None, role=None, permissions=None, time_zone=None, expiration=None, locked=None, welcome_email=None, onboarding=None): # noqa: E501
"""AddUserRequestBody - a model defined in Swagger""" # noqa: E501
self._username = None
self._nickname = None
self._home_resource = None
self._email = None
self._password = None
self._role = None
self._permissions = None
self._time_zone = None
self._expiration = None
self._locked = None
self._welcome_email = None
self._onboarding = None
self.discriminator = None
self.username = username
if nickname is not None:
self.nickname = nickname
self.home_resource = home_resource
self.email = email
self.password = password
self.role = role
self.permissions = permissions
self.time_zone = time_zone
if expiration is not None:
self.expiration = expiration
if locked is not None:
self.locked = locked
if welcome_email is not None:
self.welcome_email = welcome_email
if onboarding is not None:
self.onboarding = onboarding
@property
def username(self):
"""Gets the username of this AddUserRequestBody. # noqa: E501
Username of the user to create. This should follow standard username conventions - spaces are not allowed, etc. We do allow email addresses as usernames. **Note** Usernames must be unique across all ExaVault accounts. # noqa: E501
:return: The username of this AddUserRequestBody. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this AddUserRequestBody.
Username of the user to create. This should follow standard username conventions - spaces are not allowed, etc. We do allow email addresses as usernames. **Note** Usernames must be unique across all ExaVault accounts. # noqa: E501
:param username: The username of this AddUserRequestBody. # noqa: E501
:type: str
"""
if username is None:
raise ValueError("Invalid value for `username`, must not be `None`") # noqa: E501
self._username = username
@property
def nickname(self):
"""Gets the nickname of this AddUserRequestBody. # noqa: E501
An optional nickname (e.g. 'David from Sales'). # noqa: E501
:return: The nickname of this AddUserRequestBody. # noqa: E501
:rtype: str
"""
return self._nickname
@nickname.setter
def nickname(self, nickname):
"""Sets the nickname of this AddUserRequestBody.
An optional nickname (e.g. 'David from Sales'). # noqa: E501
:param nickname: The nickname of this AddUserRequestBody. # noqa: E501
:type: str
"""
self._nickname = nickname
@property
def home_resource(self):
"""Gets the home_resource of this AddUserRequestBody. # noqa: E501
Resource identifier for the user's home folder. See details on [how to specify resources](#section/Identifying-Resources) above. The user will be locked to this directory and unable to move 'up' in the account. If the folder does not exist in the account, it will be created when the user is created. Users with the `role` **admin** should have their homeResource set to '/' # noqa: E501
:return: The home_resource of this AddUserRequestBody. # noqa: E501
:rtype: str
"""
return self._home_resource
@home_resource.setter
def home_resource(self, home_resource):
"""Sets the home_resource of this AddUserRequestBody.
Resource identifier for the user's home folder. See details on [how to specify resources](#section/Identifying-Resources) above. The user will be locked to this directory and unable to move 'up' in the account. If the folder does not exist in the account, it will be created when the user is created. Users with the `role` **admin** should have their homeResource set to '/' # noqa: E501
:param home_resource: The home_resource of this AddUserRequestBody. # noqa: E501
:type: str
"""
if home_resource is None:
raise ValueError("Invalid value for `home_resource`, must not be `None`") # noqa: E501
self._home_resource = home_resource
@property
def email(self):
"""Gets the email of this AddUserRequestBody. # noqa: E501
Email address for the user # noqa: E501
:return: The email of this AddUserRequestBody. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this AddUserRequestBody.
Email address for the user # noqa: E501
:param email: The email of this AddUserRequestBody. # noqa: E501
:type: str
"""
if email is None:
raise ValueError("Invalid value for `email`, must not be `None`") # noqa: E501
self._email = email
@property
def password(self):
"""Gets the password of this AddUserRequestBody. # noqa: E501
Password for the user # noqa: E501
:return: The password of this AddUserRequestBody. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this AddUserRequestBody.
Password for the user # noqa: E501
:param password: The password of this AddUserRequestBody. # noqa: E501
:type: str
"""
if password is None:
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
self._password = password
@property
def role(self):
"""Gets the role of this AddUserRequestBody. # noqa: E501
The type of user to create, either **user** or **admin**. # noqa: E501
:return: The role of this AddUserRequestBody. # noqa: E501
:rtype: str
"""
return self._role
@role.setter
def role(self, role):
"""Sets the role of this AddUserRequestBody.
The type of user to create, either **user** or **admin**. # noqa: E501
:param role: The role of this AddUserRequestBody. # noqa: E501
:type: str
"""
if role is None:
raise ValueError("Invalid value for `role`, must not be `None`") # noqa: E501
allowed_values = ["user", "admin"] # noqa: E501
if role not in allowed_values:
raise ValueError(
"Invalid value for `role` ({0}), must be one of {1}" # noqa: E501
.format(role, allowed_values)
)
self._role = role
@property
def permissions(self):
"""Gets the permissions of this AddUserRequestBody. # noqa: E501
:return: The permissions of this AddUserRequestBody. # noqa: E501
:rtype: UsersPermissions
"""
return self._permissions
@permissions.setter
def permissions(self, permissions):
"""Sets the permissions of this AddUserRequestBody.
:param permissions: The permissions of this AddUserRequestBody. # noqa: E501
:type: UsersPermissions
"""
if permissions is None:
raise ValueError("Invalid value for `permissions`, must not be `None`") # noqa: E501
self._permissions = permissions
@property
def time_zone(self):
"""Gets the time_zone of this AddUserRequestBody. # noqa: E501
Time zone, used for accurate time display within the application. See <a href='https://php.net/manual/en/timezones.php' target='blank'>this page</a> for allowed values. # noqa: E501
:return: The time_zone of this AddUserRequestBody. # noqa: E501
:rtype: str
"""
return self._time_zone
@time_zone.setter
def time_zone(self, time_zone):
"""Sets the time_zone of this AddUserRequestBody.
Time zone, used for accurate time display within the application. See <a href='https://php.net/manual/en/timezones.php' target='blank'>this page</a> for allowed values. # noqa: E501
:param time_zone: The time_zone of this AddUserRequestBody. # noqa: E501
:type: str
"""
if time_zone is None:
raise ValueError("Invalid value for `time_zone`, must not be `None`") # noqa: E501
self._time_zone = time_zone
@property
def expiration(self):
"""Gets the expiration of this AddUserRequestBody. # noqa: E501
Optional timestamp when the user should expire, formatted in date-time. # noqa: E501
:return: The expiration of this AddUserRequestBody. # noqa: E501
:rtype: str
"""
return self._expiration
@expiration.setter
def expiration(self, expiration):
"""Sets the expiration of this AddUserRequestBody.
Optional timestamp when the user should expire, formatted in date-time. # noqa: E501
:param expiration: The expiration of this AddUserRequestBody. # noqa: E501
:type: str
"""
self._expiration = expiration
@property
def locked(self):
"""Gets the locked of this AddUserRequestBody. # noqa: E501
If true, the user will not be able to log in # noqa: E501
:return: The locked of this AddUserRequestBody. # noqa: E501
:rtype: bool
"""
return self._locked
@locked.setter
def locked(self, locked):
"""Sets the locked of this AddUserRequestBody.
If true, the user will not be able to log in # noqa: E501
:param locked: The locked of this AddUserRequestBody. # noqa: E501
:type: bool
"""
self._locked = locked
@property
def welcome_email(self):
"""Gets the welcome_email of this AddUserRequestBody. # noqa: E501
If **true**, send this new user a welcome email upon creation. The content of the welcome email can be configured with the [PATCH /accounts](#operation/updateAccount) method. # noqa: E501
:return: The welcome_email of this AddUserRequestBody. # noqa: E501
:rtype: bool
"""
return self._welcome_email
@welcome_email.setter
def welcome_email(self, welcome_email):
"""Sets the welcome_email of this AddUserRequestBody.
If **true**, send this new user a welcome email upon creation. The content of the welcome email can be configured with the [PATCH /accounts](#operation/updateAccount) method. # noqa: E501
:param welcome_email: The welcome_email of this AddUserRequestBody. # noqa: E501
:type: bool
"""
self._welcome_email = welcome_email
@property
def onboarding(self):
"""Gets the onboarding of this AddUserRequestBody. # noqa: E501
Set this to **true** to enable extra help popups in the web file manager for this user. # noqa: E501
:return: The onboarding of this AddUserRequestBody. # noqa: E501
:rtype: bool
"""
return self._onboarding
@onboarding.setter
def onboarding(self, onboarding):
"""Sets the onboarding of this AddUserRequestBody.
Set this to **true** to enable extra help popups in the web file manager for this user. # noqa: E501
:param onboarding: The onboarding of this AddUserRequestBody. # noqa: E501
:type: bool
"""
self._onboarding = onboarding
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AddUserRequestBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddUserRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
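# Construction sketch (all values are placeholders):
#
#     body = AddUserRequestBody(
#         username='exampleuser', home_resource='/', email='[email protected]',
#         password='a-strong-password', role='user',
#         permissions=UsersPermissions(), time_zone='UTC')
#
# The required fields (username, home_resource, email, password, role,
# permissions, time_zone) raise ValueError in their setters if left as None.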
|
import logging
import os
import tempfile
from typing import Any, Dict
from allennlp.training.trainer import EpochCallback, GradientDescentTrainer
import mlflow
from xallennlp.utils import flatten_dict_for_mlflow_log, str_to_timedelta
logger = logging.getLogger(__name__)
@EpochCallback.register("mlflow_metrics")
class MLflowMetrics(EpochCallback):
def __call__(
self,
trainer: GradientDescentTrainer,
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
if mlflow.active_run() is None:
logger.warning("MLflow active run not found."
" Recommend to use 'train-with-mlflow' command.")
if "training_duration" in metrics:
            training_duration = str_to_timedelta(metrics["training_duration"])
            metrics["training_duration"] = training_duration.total_seconds()
flattened_metrics = flatten_dict_for_mlflow_log(metrics)
for key, value in flattened_metrics.items():
if isinstance(value, (int, float)):
mlflow.log_metric(key, float(value))
else:
log_nonnumerical_metric(key, value, epoch)
def log_nonnumerical_metric(key: str, value: Any, epoch: int):
with tempfile.TemporaryDirectory() as tempdir:
temppath = os.path.join(tempdir, key)
with open(temppath, "w") as f:
f.write(repr(value))
mlflow.log_artifact(temppath, f"metrics/epoch_{epoch}")
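# Registration sketch (assumes an AllenNLP training config; the exact key name
# may vary between AllenNLP versions):
#
#     "trainer": {
#         "epoch_callbacks": [{"type": "mlflow_metrics"}]
#     }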
|
from httpx.client import Dispatcher
from httpx import Request, Response
from httpx.config import CertTypes, TimeoutTypes, VerifyTypes
from httpx.status_codes import codes
from tests import get_fixture_abspath
class MockDispatch(Dispatcher):
"""Network dispatcher for httpx that returns the given fixture file content instead of making a network call."""
def __init__(self, filename: str = None):
self.filename = filename
async def send(self, request: Request, verify: VerifyTypes = None,
cert: CertTypes = None, timeout: TimeoutTypes = None) -> Response:
if self.filename:
with open(get_fixture_abspath(self.filename), 'rb') as fixture_file:
content = fixture_file.read()
return Response(codes.OK, content=content, request=request)
return Response(codes.NOT_FOUND, request=request)
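# Usage sketch (assumes an httpx release whose client accepts a `dispatch`
# argument, as this Dispatcher subclass implies; the URL and fixture name are
# hypothetical):
#
#     client = httpx.AsyncClient(dispatch=MockDispatch('example_response.json'))
#     response = await client.get('https://example.test/anything')
#     assert response.status_code == codes.OK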
|
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.spatial.distance import pdist, squareform
from scipy.misc import toimage
import numpy as np
from glymur import Jp2k
import StringIO
def hide_this(xxx):
qstrings = '''FGIJJJKFLLGLGPKMFMMILMHMMKIKIKMJKLLKLMHIIDKNHLIKKOLJLNELNLLFKKKKIM8FKLNJJ?KLJIKIKLEILKNJDILJIHDECAA@
FEIFGKFFLLLJGDKKFGHCINNHHIHEJKMHKDLKLH;JLHGMHMJJLKIJLKFLJLCHKKKIIMJGLFNJJIKJJJA?G6HJEMAJK1IB?FGECBA5
BEIGHGKLHHMLGFKMHGKNINNMMHKMJK=JGMIHLHMHKJKILLLINKLJKGELKLKHGKKIBE8GJFJJLLKMJLKJHLE7>KIJK>HG6BDACA3C
EEIBJKKLJLDJILKMKMJKMMNMJMJEJIMHHGJLLKMJLKKLLHNKLOJJBKFLILKMJMKILMJ,KMNLJLKJMIMJKMJHI@LEKM>JFGGDC?AC
F?F6AGCBBBDJBGKM/AHGIGCDMF?@I/=BG;EDHH;CGN=ILHL:LAL;KGFA:LG@-KI7IDJ7K8@F7I7987KJ6GKIMJIJKIIE??,D>+>&
B?IJILCFLGGJJKKMKMMJHKNDMMJKMLIKNGLIHH4ILNKJLMNKLILJ?KKCKLMMKJKIIMJJKKLJ7L7JJJJJDLJJEJNJKM>5,E@AC?A=
FEEBILELJHJJLPCMHIJMHFNMM<EIIIIKKLJHLMMGIKKMLMII:KJJKKK9NLKFH,IIIK8FKMM7JIFLEI6JGIE7'FI76JLBIHD6AA>=
BEGGJKKFLLKFKDKMIGKNANHMHMEIIIMMKLLHHHMHIJKJHHJLLOIJKKL-NLI8:MLJMD8ALKJJ7GKMJIJJJLKJEKIJKMIJJGGACAAA
F9@=5C:JBBLL7:+C:AA:4AN32<2E.<F==.>=L/;:DKKM:L;BLACHDK:--FKFCF'7B7J,6L77J?7C87676G5'5@IJ%>6G?F6,A?3=
AEDBJIKFLKJGGKKG;MKNIMHMMHKMMGIMHHLK=KMLHHLJLENHD:HEMGLGALCMK8LJBEBJKKMIJ?CIEJ6J@>>LJJIJKDHGBEGDA5?=
EGIJIIKLLLJJJMIMKMKIIHHFEMIIM<MMKJGHGMMJLNKLLEKILKCDMLKLHLKMKG7KHKHHKKLFJJKGHJ@JJI>LMK7J6GHEIBGDC<A:
FGDJJKKIKGLKGJKMIMKEHKNMMHJKILFKHMKHLKMNLDKLLHNHKOLHLKKLILMKKKKIBMJHKLNJLICMJJJJNMHHIJLEKI6>IHEDCBA5
EEI=JCCBLKLGJ5KC2AJJFHCDHHKIMCFFKDLH=HFCIKKE:H;I:D:H9:LCAA;>9J,KJ7HJGKEL7JEM8J@J@,HI5FN7KD%7IB,6,??(
HGILJKKJJLKLGKKMKGKMMINMMMKMMLMKKGLLLHMIHKLILMNJLOLJKKKLNLCGJJLKJMJ,KL7@LLKLAGIJK>JJD7IJK5HJ,,=AA<?@
FGGFJIKLLKGLIJKHEGKGL4N?JMJGJKMHNM>KLHMHDKKLFHGBDA:JK:KGHFLK-BLKJDJ,JK'JDGKME,JJ6G+IDJ7JFDIJ6,DA<<A:
EEIGJKCLFLJGJJKMIIAEHHCHJMJIILMKGJLIHKEIHJGLFHLLKKLJLKKLHLGH:MHKIEG'KLJILLFJA>>B@MK7JFF7=16+IFG,<?3A
F.IKJKGLHKLAOMKMEDKCIHNMMMHMJLMBKDLHFKHGHKKIJHJKNGICLNKLILKMLKOKMJJJKFILL?KMJIJJKMJ7DFIJKIEJJ,GECB3(
BCIFJCKBLLGFBGKCEGNJMANFEME@IMHHKMCIHHHJIJFEAMCHKGLJMGLIK9;JHGHFIMILKMIJJGKMMJKJJLJJJMICCG>I66CDCA>-
HEIJJGKJLMJJIMCMFMHMIKNMMMIKMNMMMJGKLKMNLKKMLLKJLKLJLKML:LIMKJOILMJMKKLJHK@MJIIJNMHJMFLEEMIGIHDD6?A=
BEIBHKKIILKKJMKMKIDIMIJMHM?IJNIKGLLILKMFLNKGLHNBLOIJLKKLNLIMKKIFJMJLFKJLLLFCJ@KJKLJJIKLJDMHJJG=DCB;:
A?@BACCIFKDEGDCEFGAEIKCM?I?KDIMBGJJIFHFFHNKIC;CKN.J;B:FADFGBKF7AJAH,?L,7J7E-A7IIH>5HIFAJFD,>'B,6<5A(
BGIJIKKIBHKEBFIJKGKCLKNMJH?KMIMJGMLIFHMGI;KLF;KKLGJDMKLKGLLGKKKKHJJFKMNJFDKGMJKDNI>JJKNJKMHGF?DD<AA=
FEFKJIKLJLKGIMKHKIAIHKCM?MKKE<MMGJIKLD<HIHKIFMJKJKLJLLEL:LLH:FIAMMHGKK7JFIKJJJEINIKAIK7EDM>GI6DAA<A(
=CIFILCJLLKLBJKMKJHJLHNMEMKEMLMMMDLLLKMIHHFELHKHMOIJLKLLNLKHLMHA9KJG>0J7HGCGM@IBJGKIIMNA+@HGI?,DC+A@
FGIJJKKLJKLJLJKMCMJMMMNMJMHIIMMMNHLKLMMNHKKOLMGLJOLJLLLGHLCHKMIIHMJJJKIL7DKMJJJJHLHJJJOJDDH5FGDACA;=
>#IJJLKIJLJGIKKMHMDKHHGMMMIIJNIMMMLKHHMJIKLLLMNLJKJJLKMLKLK@6KLKHMJLKJJLHLKGM@E7KL>IJMLJD5A3?FE=CA?=
HEIKJKKJKLJJJGKMIMJKMMNMMMLKJLMJMMLKLMMIIKNNLMJJNKLJKLLLNLKMJMIILMIJLLLJJKKIJLKJKMJAMJIJKIHHJEGDCB>>
FE@KJGGILLKLJFEM:GKKMNNMMM?MILFHMJLDLHIHHJKOLJJJLILJLKEGJLKJKKIKJMJKGKLLJLKJJJJJNMJLJKIJKMEGBGGEC?;>
<CGFJJKLKLKJFJFMFMKN4KC3?H?@I/HBGC>HFMF:LKG<JLL:LE7AFABIKLK@HGLIRMHAJEEJ7I7GJJK?,>>ED@7,EDEGF6C6@B=0
CEIKJKKLLKJKJMKJHMHMMKNMEMJGMMKMNHGKLMHJLKNMLHLKLOLJKKMLNLLMLJKIREJKLMLLLLKNJIMIGMKEMKL7KMIGJBDECB>(
EEGJHKKILLDJODKMHAJELKNMHMH@MIMHMGCIGMIILNGILMJLL<:JMKKLNLGKKKHFJKJGJLNJLLKIJIJJDLKHJJIJKDLJFBGEC<>0
FGIKJLKLJKKLLMKMHMMJFMNMMMJIJLIMNMILLMHJKNKJLMNJKOLJLKMLNLLMKMKFIMJHKMMJJJKJMLMJNMEEIJLJKAHHIEDEC<AC
;EDFJKKJKMJKILEMKMKMIMNMMMKMJLMMK(GHGHAHIJDI:EIBJ:H;D-LIHF.,(B7'9'8A>#%J7,,JH7,7,,E,D'',#/6+''@D6+A-
FCEKJGKLHHLJJDKMHJKHLMNFMH2GII=BKMCHLDMGIH=IHHGIND:JMGLGJLLM:KKAB,JJ6EJIJKEIJGEJGCJEIKNJD5E+IHDAC5>5
FEI=JIFLFKKJODIMIMMILMDMHKLIJMMMKGIKFHMJIHK<LEIKLKLJ?GKLILKMKFIFMKJL?LMJII7JHGKJK,HEDKLE=MBII?DE<B?-
CGFJJKKBLKIGFPKMKMKCLNC?MMJIJMMKKLLKGHGJHNKOJHC:NKLJFKMLILKHKJIFJMEHLMILF?BIJJKJNLJLJKIEKILJBGEECBA(
<C@GJKKLJLDJOMKMCMDKLHCMMIEKMIMJNMELHHEGLJFMLLIJFILFLGKEJFKHJ8HKLKJKFKELJKKIJJJJ@GJ7LKLJ@IIHIB@ECBA-
EGIGJIKLJLILGLEMKMGKIKNMMMJKMLHHK;>LLHELLJKLLHGLLKLJLKLLKFKMKKIKHKJLGJNLLJKLMLKJKLKHEMNJFGHBFFGDCBA>
FGIJJKKJHLKJJJKMKGGMIKNMMMJIMNIMKJLKEKMJHKKOLLJKLOIJLKKLILLMKBKFLMHLKKNIJLKLJGKINMKEI@NJDJLHFGGDCBA-
EGIFJLKILKDLGJKMHMNJIMNFMMJKMMMJLLLLLMMGINNGLLJLJOJJLKMLNLLKJKOORMJLLMEJLJKCJJJJNMHHJJFEKM>BIGDAAAA@'''
qstrings = '''!111177777@@@@@@@@@C@@@C@@@CC@@@;@@C@@CC@C@C@@C@C@####!!!!!!!!!!##!###!!!#!!!!!!!!!!!!!!#####!###!!!!
!#################################!!#!###############!!!!!!!!!!!!#!###!!!#!!!!!!!!!!!!!!####!!###!!!!
!#################################!!#!#!####!########!!!!!!!!!!!!#!###!!!#!!!!!!!!!!!!!!####!!###!!!!
IIIGIGIIIHIDIIIDIIIGHIIHIIG@GIIHIFIDIGIIDGEGEGIIIDIFIDI=<?!!!!!############!!!!!!!!!!!!##########!!!!
!######################################################!!!!!!!!!###########!!!!!!!!!!!!!#########!!!!
!,2+*24525@@@@@@@@@@@@@@@@?@?@@@@@@@@@@@@@@@@@@@@@#####!!!!!!!!!###########!!!!!!!!!!!!!#########!!!!
!--++22355@@C@@C@@@@@@@@@@@@@@@@@@@@@@C@@@@@@@@@@@#####!!!!!!!!!##########!!!!!!!!!!!!!!#########!!!!
!121-88888C@@C@@@@C@@C@CC@@@@@@@@@@<<<::@@@@@@@@@######!!!!!!!!!###########!!!!!!!!!!!!!#########!!!!
IIIIIHIIIIIGHIIHDIIIGG8GGIGIGIIGGIHHI<HGIDIEEEI@BDE>FEEE<DDG@@EECAACC<8>=A?###!!!##!!!############!!#
##########################################################!#!!!#############!!!!!!!!!!!###########!!#
############################################################################!#!!!#!!!!!###########!!#
HIIIIHIIIIIIIIIIIIHIIIIDIHHBHIIHIIIIBIIFIIIDHBDDDGDGGG@GGICGHFHEIIBIEHB-=A?#!#!!!#!!!!############!!#
GGDGG@GGGEGGG@GGEGGGHHFHHHHHGHHFHHHBEEGGHHGHHBEBGGEBEEDEF<G@DEDE>>AAAC?#######!!!##!!!############!!#
DDDB?=0B?=DBDDDDDD@DBB@>1B@BB?>B<>>D><DBD@D@@DDBDDCBBAB5BB####################!!!##!!!############!!#
HHHHHEGEGGFGGGGHHGHHHHHHEGHHFHHHHFGHHHHBGDGBEEHHFHHGFGHGDGHEFEF3E####################################
IIIIIIIIIIIIIIGIIIIIIHIIHIIHIDIIGIHIIIIIIHIH>CEEHHGDHBGGDBEDEB@8C=BB@ACBA@>B@BC@@BB>>?76?#########!!#
HHHHHHHHHHHGHHHFGHHHHHHGDHHHHHBHHHFEDDFFHHGGGHHDG8EEEEEAE<EE@BBDB#################################!##
#####################################################################################################
IIIIGIHIIIIIIIIIIIIIIHIIIHIHDIGIIIGIGHIBBG@GGIGGIDGGHIHGIBEGEB@FEB<@DBD@A@D###!#####!#############!!#
?BDDD@D?DDBBD@B:?4BCDDD@DD>DBD################################################!#!###!!############!!#
HHHHHHDHHHHHGHHGHHBHHGHHHHHHHHDDFGEHHHHHHHHHH>DBBDGGEGBFEBG@BDDGBHFHHEEG8G>E@C7A/1646<C##############
IIIIIIIIIIIHHIIHIIIHIHHIHIIIIFHHIIIHHIGE>IHEFFHHHCFGHGCGGBGCCBC@CDD>EB>9:B@##########################
GGGGGGGGGGGG>EEGGDG>E,CACD@DDGCC+>>=8=?ADDEBEA<>CA###################################################
IFHIIIIHIIIIIIIIGIDDIIIHIIIIDD??+B###################################################################
B:DD@D:DDDDDD<:D?B@@@B>B8?D<><.0'0/B>B>=DDDDB########################################################
IHIIIHIFIIIHIIIIHIIGIIIFHBIIIGE?C2CADGGDIHHDHGFHFGHF@HEGEGGEDEHEGAA?7CBCBB@@B@B@=?A?A################
:DGDG?EEE:BGGEGGEGGGDGG@EGG@G8EGBGGGDDGGGDG>>GGGEE<GG3DGG@DGD@GDBGDBB>D@A@BD8DD>D>B@#################
@DFGFEDGGBEDEEBEEEDBEBEEEDEBDE=B9?AC9A;>;A>9=?>6A955-@###############################################
GHIIIIIIIIIIIIIIIIHIIIHIHIIIIIIFIIIIIIHIHIIIGIGDFEDBFIGEF@DFGGHHFHHHFBBDDB@E<EEBBBBCEB@EAA:8>=0;7<50@
GDGGGDEGDGEDDGDGGE@GEGGGDDCFEGGG>EEC8A;ACCA9ABB>@B@@BB9BBDD5=A;A0?2-36=:748;8@>>99@<@################
HHHHHHGHHHHHHHHHHHHHHGHHHHHHHHHGHHHBHHGDHHHHHHHHHBHHHFHCHHFHDBDDDBDG<D@EFFEFECBC#####################
#####################################################################################################
EBBBEB>BBBGDG<<E?EB?8AAAA0<74;<8?2<:1;?##############################################################
IIIIIIIIIIIIIIDGIIIIIIIHIIHIIHIHGIGHHIIIBIHHHHEHG8EEEHEBECEE@BEEDEAEEC>>CB=A?########################
IIIIIIIIIIIIIIHIIIIIIIIIIHIIIIHIIIIIHHIHHIIHHBHHDEHHGGGF@DEH>FAA<A?AA?5<'8<@@BB=?=??3A=??>*-7;9?8:?7@
IIIIIIIIIIIIIIHHIIIIIFIIIIIHIHIIIIIIIHHHIHIHGFEHFFDEIBDFEEEBDEBDGBB?CF@3@?@<>?;74=7=:@@>@@@@#########
HIHIIIIIHIIIIIIIIHHIIGIGIIIIIHGIIHIGHIIHIDHDIHIHFGHFGGDEFEEF?CACC>?@?*=A<<<?#########################
IIIIIIIIIIHIIIIIIIIIIIHHIIIIIIIIIIIIIIIIGIHHHHHIIICGGEEGHHHGGEHFHADC=B<BBBB?@?@B)<=7<??@??AA;@#######
IIIIIIIIIIIIIHIHHIIHIIIIGHGIGD>EC####################################################################
IIIIIIIIIIIIIIIIIIIIIIIIIIIIIGIIIIIHII@IIBFIHIEBAIDD@BDHEEEB8EECEEE0;C@B@@=@::A?0???;3@##############
IIHIHIIIIIIIIIIIGIIIIIIIIIIIHHD>EEGGIIDBHIIHIIIFBFGFHFBEDF>,<3<;=;?2+A###############################
HIIIIIIIIIIIIIIIHIIIIIIIIIIIHIIGGIIIIHIFIHHIFGEIDCIDDEFHEBFEGGIEHD@G8DAD@BDD@CEE<@DDBBD@BB?D<B>@@?,<?
CBCBABEE@@GGEG8GGGGGGEGDGHHHHFGGEGGGHHDDHHBHDHFHHHHHBHDHEHDEHCHDEB<CEEHEFEHD<@FEDBD0B@D@=D07.7783?###
#####################################################################################################
IIIGIHIIIIIHIIIIGIIIIIIIHBIIHIIIHIIEHIHHHEIHFIIIIHGGHFHBIDEEGGEGEGDEGF@BB@>@;9=;;?1;?5?##############
#####################################################################################################
GG@GDGEGGE@@B8E>>9/;?::=8B??8?#######################################################################
IIIIIIIIIIIIIIIGIIIIIIIIIIIIIHGIIIIIHHHIHHHHIGIIIGIFFIH<CEEBCFB@G>C??8DBBBB2@CA8<<;&=?8=A############
GGGGGGGEGGGGGGGGGGGGE@###############################################################################
IIIIHIIIIIIDIIIIIIIIIIIIHIIIHIGIIGHIIHDIIGIIHIIHEIHEHHEFCFFBADEBDFEBBBBBBD<+==?;BCB##################
HHHHHHHHHHFHHDHHHHHHHHGGHHHGHHHHHGGGGGGBBBD),42454A?#################################################
IIIIIIIIIIHIIIIIIGIIIIIIIIHIIHHIIIIHHIIIHHIHEIFDIIIHFDFEIEIEC:AEA?CA>CA?@E@?8?A?5;<2;=?AA############
#####################################################################################################
G>>DGGBGGGG>DGGHB8GEE8DGGBG3DBGGEGFE>DCEEFFFG2BD>EBFCCB=?3A>?7;=:>@,>@@1::?##########################
DGEDHGG?<BBFBAFGBBEEBGEGG<GBEG2<9?7EB8CAB?<<B:BB6?###################################################
IIGIHIIGIIFIIHIIIIIIIIIIIHIHIIIIGHHIHHEHCGHEEEEE@FBEF@@DBDCBED@EDBDBBC@2B@B@==A=<9')<=6;?=?##########
HIIIIIIIFIIIIIIIIIIGHIIIIIIHEIIIHHHGGGHIGFIGHGHHDEFDDEEAEA;BCB>>BC@@?################################
HHHHHGHHDHBGGGGGGGGGGBGDBF<GEEGFC<E?+=>:BD8@>EEE>F?>A<>AD85*8/28===C@################################
IIIIIIIIGIIIIIIIEIIIHIIIIIIIGIIIIIEEIIIIIIFIHGIIIGIGEIBFFEDFDIBFHEC>DEEHHF@A3=8?=<4><@@8?:C6?@?######
GGG@EGEGG>DGGGDHHHHHHHHHHGGGHH<EEGFE>DGGGHHBHDD-B####################################################
#####################################################################################################
IIIIIIIIIIIIIIIIIIIIIIHHIIIIIIIHIIDEB>F;AD<ABB3A;@?=?;=@@86?@@BC>BBB2B=1<7.;;00;?6=70??##############
:DGDBEDGGGGGGGBGDGGBGGGGGGGBBGGGG?EGG<EG<8BB=0796:=4=B###############################################
HHHHHHHHHHHHHHGHHHHFHHHHHHHHDHHHDEHB@GBGFFEFDHHHHHFHGHEC;D>FDA6@BB@B@BEBBD###########################
#####################################################################################################
IIIIHIIIIIIIIGIIIIIIIIGDFIIIIIBFIIIIIIIHIIIIEIGIIIHHIIHIIIHGBFIDG@BD>G<GCEEEGED8BBB<@>BDDD3??>?AC2>><
DDDDDDDDDDDBD########################################################################################
CCCCCCCCCCCCCCCBB@###################################################################################
HHHHHHHHHFHHDHHBHHHGHHHHGHHHHHHGHHHHHEEBAC>3A>8A#####################################################
GBEEEBDFFFHHHHHHHHHBGGGEBG@GGGEF@GEGBD<BG8DADD@D+DAEA8,A>=A=8A#######################################
GEGGGG@G<GB=1BBGGG@GBGDGGBAEEA8?BB?08:==GFGBDEEBBD>AA8AA>CA>@BD@B2D@;;BB@BB?53?######################
IHIIIIIIIIIIIIIIIIIIIIIIIIHIIIIIIIHIHIHIHIHHHHHGGGEGG>GGEEDGD@CDB####################################
HHHHHHHHHHHHHEHEGEGDHHHHHHHHHHHCHCHBCEGEHHBHHE>FHHE@@CEBBBBBEBDHCB1DAA>CAA>B<@B>=3.:>9A=@############
DHHHGHHDHHHGEHHHHHHHDHHHHHHHHHHGHHHHGGGEDHBGEHEF@HGBGGHHGEGDFBD=>GDDDBBEBD3GG<38ABEEB8<8ABGE<BD8<B88D
IIIIIIIIIIIHIIIIIIIIHIIIIIIIIIHIIIEBHIHIIHIIIHIIGFGFFGIGGHIDGEGAGHBBB@EBEDDGGE<GBDB##################
;-<)===>3'/705<183;9,96=(?;2?===/592'42)@@@@@@@@@@###################################################
HHFHHHHHHHHHHDHHBHHHHHHHHHHDHFBGGGDHGHHHHHHEHFEDEBDDDBB==<?=A?=?ABDB@D###############################
HHHHDHGHBDGGBGGGDGGGHHHHFEDDGE<GDDDHEHHHFBDD>B8B@BDCEC@CBC###########################################
IHIIIIIIIIIIIIIIIIHIHIIIIIIIHIGIHIIHIIIGFIIGGDIFEHHHHDF<BB8D=<?######################################'''
# Imports needed by the code below, added here in case they are not already
# present earlier in the file (assumed environment: legacy Python 2 with an
# older SciPy that still ships scipy.misc.toimage).
import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.misc import toimage

def qual_ord(phred_string, offset=33):
return [ord(x)-offset for x in phred_string]
#quals = [qual_ord(qq)[1] for qq in qstrings.split('\n')]
def img_cluster(quals, hackish_counter=[0]):
aa = np.array(quals)
for linkage_method in ['average']: # ['average', 'complete', 'median', 'single', 'ward', 'weighted', 'centroid']:
D = squareform(pdist(aa, metric='euclidean'))
Y = linkage(D, method=linkage_method)
reorder = dendrogram(Y, no_plot=True)['leaves']
img = aa[reorder]
return img, reorder
# #j = Jp2k('dummy.jp2', 'wb')
# #j.write(img.astype(np.uint8))
# im = toimage(img, cmin=0, cmax=255)
# im.save('clustered_pngs/pil_dummy_%d.png' % hackish_counter[0])
# hackish_counter[0] = hackish_counter[0]+1
# im = toimage(img, cmin=0, cmax=255)
# im.save('clustered_quals_%s.bmp' % linkage_method)
CHUNKS = 256
png_counter = 0
with open('gage_rhodo/frag_2.quals', 'r') as infile:
quals = []
png_buffer = []
for i, line in enumerate(infile):
quals.append(qual_ord(line.rstrip()))
if (i % CHUNKS) == CHUNKS-1:
image_array, order = img_cluster(quals)
png_buffer.append((image_array, order))
if len(png_buffer) == 10:
print i+1
to_png = toimage(np.vstack([x[0] for x in png_buffer]), cmin=0, cmax=255)
to_png.save('clustered_pngs/shortjump/frag_2_%d.png' % png_counter)
with open(('clustered_pngs/shortjump/frag_2_%d.txt' % png_counter), 'w') as order_out:
ordering = [x[1] for x in png_buffer]
for oo in ordering:
order_out.write(' '.join(map(str, oo)) + '\n')
png_counter += 1
png_buffer = []
quals = []
#print i+1
#import Image
#im = Image.fromarray(np.uint8(cm.gist_earth(myarray)*255)) # apply colormap directly, convert type |
#!/usr/bin/env python
# encoding: utf-8
"""
Loupe.py
Created by Rui Carmo on 2007-01-11.
Published under the MIT license.
"""
import yaki.Engine, yaki.Plugins, yaki.Store  # yaki.Plugins is needed for WikiPlugin below
from yaki.Utils import *
from BeautifulSoup import *
import re, urlparse
template = """
<div id="loupe%(serial)d" style="width:%(width)spx; height:%(height)spx; background:url(%(small)s) no-repeat; border:1px solid gray; margin-right: 1em; margin-bottom: 0.25em;">
<img id="loupeimg%(serial)d" onLoad="initLoupe(this.id,true);" src="%(large)s" style="cursor:wait; margin:0px; padding:0px; border: none;" width="%(width)s" height="%(height)s" border="0" />
</div>
"""
class LoupeWikiPlugin(yaki.Plugins.WikiPlugin):
def __init__(self, registry, webapp):
self.ac = webapp.getContext()
registry.register('markup', self, 'plugin','loupe')
def run(self, serial, tag, tagname, pagename, soup, request, response):
params = {'serial':serial}
try:
params['large'] = tag['src']
params['small'] = tag['alt']
params['width'] = tag['width']
params['height'] = tag['height']
except KeyError:
return True
for image in ['large','small']:
# Try to handle the uri as a schema/path pair
(schema,netloc,path,parameters,query,fragment) = urlparse.urlparse(params[image])
if schema.lower() in ATTACHMENT_SCHEMAS or self.ac.store.isAttachment(pagename, path):
params[image] = self.ac.media + pagename + "/" + path
else:
return True
tag.replaceWith(template % params)
# No further processing is required
return False
|
from datetime import datetime
from typing import Optional
from fastapi import APIRouter
from sqlmodel import Field, SQLModel
router = APIRouter()
class Right(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Province(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Amphoe(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
province_id: int
name: str
class Tambon(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
amphoe_id: int
name: str
class Religion(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class National(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Occupation(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class MaritalStatus(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class AcademicDegree(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Allergy(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Vehicle(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Language(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Relationship(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class IdType(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class FeedbackType(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class VisibilityLevel(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
class Module(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
detail: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
class ModuleFunction(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
detail: str
created_at: datetime
updated_at: datetime
created_by: int
updated_by: Optional[int] = None
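# Illustrative sketch (not part of the original module): create the tables above
# in a throwaway SQLite database. The in-memory engine URL is an assumption made
# only for this demo; the real application presumably configures its own engine.
if __name__ == "__main__":
    from sqlmodel import create_engine

    engine = create_engine("sqlite://")  # in-memory SQLite, demo assumption
    SQLModel.metadata.create_all(engine)  # one table per SQLModel class defined above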
|
#!/usr/bin/env python3
import os
import sys
import time
import json
import re
import hashlib
import subprocess
import yaml
import git
from termcolor import colored
class WebAppUpdateChecker():
def __init__(self, rootdir):
self._apps = []
self._testdir = os.path.join(rootdir, "test")
self._configdir = os.path.join(rootdir, "config")
self._cachedir = os.path.join(rootdir, "cache")
self._cache_age = 3600
def run(self):
with open(os.path.join(self._configdir, "apps.yml"), 'r') as ymlfile:
self._apps = yaml.safe_load(ymlfile)
self._clean_cache()
command = "short"
if len(sys.argv) > 1:
command = sys.argv[1]
if command == "short":
f = os.path.join(self._configdir, "installations.yml")
self.check_versions(f, verbose=False)
elif command == "full":
f = os.path.join(self._configdir, "installations.yml")
self.check_versions(f)
elif command == "test_prepare":
self.test_prepare()
elif command == "test_run":
f = os.path.join(self._configdir, "installations-test.yml")
self.check_versions(f)
elif command == "help":
print("Usage: {:} <command>".format(sys.argv[0]))
else:
print("Unknown command")
def test_prepare(self):
installations = {}
if not os.path.exists(self._testdir):
os.makedirs(self._testdir)
for app in self._apps:
print(app)
repo_path = os.path.join(self._testdir, app + ".git")
installations[app + "-test"] = {"app": app, "path": repo_path}
if not os.path.exists(repo_path):
print(" Cloning repository...")
git.Repo.clone_from(self._apps[app]["url"], repo_path, depth=1)
f = os.path.join(self._configdir, "installations-test.yml")
with open(f, 'w') as outfile:
yaml.dump(installations, outfile, default_flow_style=False)
def check_versions(self, configfile, verbose=True):
with open(configfile, 'r') as ymlfile:
installations = yaml.safe_load(ymlfile)
max_app_len = len(max(installations, key=len))
for inst in sorted(installations):
if verbose:
print("=== " + inst + " ===")
print(" App: " + installations[inst]['app'])
print(" Path: " + installations[inst]['path'])
print(" Version: ", end="")
else:
print(inst.ljust(max_app_len + 2), end="")
app = installations[inst]['app']
path = installations[inst]['path']
current = self.get_current_version(app, path)
latest = self.get_latest_version(app)
if len(current) > 0 and len(latest) > 0:
lv = self._format_version(latest)
compare = self._compare_versions(current, latest)
if compare < 0:
cv = colored(self._format_version(current), "red")
print("{:} < {:}".format(cv, lv))
elif compare == 0:
cv = colored(self._format_version(current), "green")
print("{:}".format(cv))
elif compare > 0:
cv = colored(self._format_version(current), "yellow")
print("{:} > {:}".format(cv, lv))
if verbose:
print("")
def _clean_cache(self):
for fn in os.listdir(self._cachedir):
f = os.path.join(self._cachedir, fn)
if not os.path.isfile(f):
continue
if os.path.getmtime(f) < time.time() - self._cache_age:
os.remove(f)
def _format_version(self, version):
s = ""
for n in version:
if len(s) > 0:
if n.isdigit():
s += "."
else:
s += "-"
s += n
return s
def _compare_versions(self, v1a, v2a):
v1 = v1a[:]
v2 = v2a[:]
        if len(v1) < len(v2):
            v1.extend(["0"] * (len(v2) - len(v1)))
        if len(v2) < len(v1):
            v2.extend(["0"] * (len(v1) - len(v2)))
for i in range(0, min(len(v1), len(v2))):
if (not v1[i].isdigit()) or not (v2[i].isdigit()):
break
if int(v1[i]) > int(v2[i]):
return 1
elif int(v1[i]) < int(v2[i]):
return -1
return 0
def get_current_version(self, app, path):
current_version = []
contents = ""
pattern = ""
if 'current-file' in self._apps[app]:
pattern = self._apps[app]['current-file-regex']
file = os.path.join(path, self._apps[app]['current-file'])
if os.path.isfile(file):
with open(file, "r", encoding='utf8') as f:
contents = f.read()
if contents == '' and 'current-command' in self._apps[app]:
pattern = self._apps[app]['current-command-regex']
cmd = os.path.join(path, self._apps[app]['current-command'])
contents = subprocess.check_output(cmd, shell=True).decode("utf-8")
if contents == '':
print(colored("could not get current version", "red"))
return current_version
if isinstance(pattern, str):
pattern = [pattern]
for p in pattern:
re.compile(p)
for match in re.finditer(p, contents):
current_version.extend(match.groups())
return [x for x in current_version if x is not None and x.strip() != ""]
def get_latest_version(self, app):
app = self._apps[app]
pattern = re.compile(app['tag-regex'])
versions = []
tags = self._get_tags(app['url'])
for tag in tags:
match = re.match(app['tag-exclude'], tag, flags=re.IGNORECASE)
if match is not None:
continue
match = pattern.match(tag)
if match is not None:
versions.append(match.groups())
versions.sort(key=lambda row:
tuple(0 if item is None or item == "" else (
int(item) if item.isdigit() else item)
for item in row))
return [x for x in versions[-1] if x is not None and x != ""]
def _get_tags(self, url):
refs = self._lsremote_tags_cached(url)
tags = []
for r in refs:
r = r.replace("refs/tags/", "")
r = r.replace("^{}", "")
if r not in tags:
tags.append(r)
return tags
def _lsremote_tags_cached(self, url):
key = hashlib.sha1(url.encode()).hexdigest()
if not os.path.exists(self._cachedir):
os.makedirs(self._cachedir)
cachefile = os.path.join(self._cachedir, key)
if os.path.isfile(cachefile):
if os.path.getmtime(cachefile) > time.time() - self._cache_age:
with open(cachefile, "r", encoding='utf8') as f:
data = f.read()
return json.loads(data)
data = self._lsremote_tags(url)
with open(cachefile, "w", encoding='utf8') as f:
jdata = json.dumps(data)
f.write(jdata)
return data
def _lsremote_tags(self, url):
remote_refs = {}
g = git.cmd.Git()
for ref in g.ls_remote("--tags", url).split('\n'):
hash_ref_list = ref.split('\t')
remote_refs[hash_ref_list[1]] = hash_ref_list[0]
return remote_refs
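# Illustrative sketch (not from the original repository): judging from the keys
# read in get_current_version() and get_latest_version(), an entry in
# config/apps.yml could look roughly like the commented example below. The app
# name, URL and regexes are made-up placeholders, not real settings.
#
# someapp:
#   url: https://example.com/someapp.git
#   current-file: VERSION
#   current-file-regex: '(\d+)\.(\d+)\.(\d+)'
#   tag-regex: 'v?(\d+)\.(\d+)\.(\d+)'
#   tag-exclude: '.*(alpha|beta|rc).*'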
if __name__ == '__main__':
rootdir = os.path.dirname(os.path.realpath(__file__))
rootdir = os.path.dirname(rootdir)
myWauc = WebAppUpdateChecker(rootdir)
myWauc.run() |
from project_example.settings import * # pylint: disable=W0614,W0401
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
ROOT_URLCONF = 'project_example.conf.test.urls'
INSTALLED_APPS += (
    'django.contrib.admin',
'django_nose',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
# -*- coding: utf-8 -*-
"""Test the data module."""
from adnipy import data
def test_image_id_from_filename():
"""Test extracting image id from filename."""
correct = 123456789
filename = "_I123456789.nii"
image_id = data.image_id_from_filename(filename)
assert correct == image_id
|
# importing anything from analysis segfaults java with netlogo on a mac
# for now no clue why
#
from . import pairs_plotting
from .b_and_w_plotting import set_fig_to_bw
from .cart import setup_cart, CART
from .feature_scoring import (get_ex_feature_scores, get_feature_scores_all,
get_rf_feature_scores,
get_univariate_feature_scores)
from .logistic_regression import Logit
from .plotting import lines, envelopes, kde_over_time, multiple_densities
from .plotting_util import Density, PlotType
from .prim import Prim, run_constrained_prim, pca_preprocess, setup_prim
from .scenario_discovery_util import RuleInductionType
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module computes the neutral and ionized populations of H in the
upper atmosphere.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import numpy as np
import astropy.units as u
import astropy.constants as c
from scipy.integrate import simps, solve_ivp, cumtrapz
from scipy.interpolate import interp1d
from p_winds import parker, tools, microphysics
__all__ = ["radiative_processes_exact", "radiative_processes",
"radiative_processes_mono", "recombination", "ion_fraction"]
# Exact calculation of hydrogen photoionization
def radiative_processes_exact(spectrum_at_planet, r_grid, density, f_r,
h_fraction):
"""
Calculate the photoionization rate of hydrogen as a function of radius based
on the EUV spectrum arriving at the planet and the neutral H density
profile.
Parameters
----------
spectrum_at_planet (``dict``):
Spectrum of the host star arriving at the planet covering fluxes at
least up to the wavelength corresponding to the energy to ionize
hydrogen (13.6 eV, or 911.65 Angstrom).
r_grid (``numpy.ndarray``):
Radius grid for the calculation, in units of cm.
density (``numpy.ndarray``):
Number density profile for the atmosphere, in units of 1 / cm ** 3.
f_r (``numpy.ndarray`` or ``float``):
Ionization fraction profile for the atmosphere.
h_fraction (``float``):
Hydrogen number fraction of the outflow.
Returns
-------
    phi_prime (``numpy.ndarray``):
Ionization rate of hydrogen for each point on r_grid in unit of 1 / s.
"""
wavelength = (spectrum_at_planet['wavelength'] *
spectrum_at_planet['wavelength_unit']).to(u.angstrom).value
flux_lambda = (spectrum_at_planet['flux_lambda'] * spectrum_at_planet[
'flux_unit']).to(u.erg / u.s / u.cm ** 2 / u.angstrom).value
energy = (c.h * c.c).to(u.erg * u.angstrom).value / wavelength
# Wavelength corresponding to the energy to ionize H
wl_break = 911.65 # angstrom
# Index of the lambda_0 in the wavelength array
i_break = tools.nearest_index(wavelength, wl_break)
# Auxiliary definitions
wavelength_cut = wavelength[:i_break + 1]
flux_lambda_cut = flux_lambda[:i_break + 1]
energy_cut = energy[:i_break + 1]
# 2d grid of radius and wavelength
xx, yy = np.meshgrid(wavelength_cut, r_grid)
    # Photoionization cross-section as a function of wavelength
a_lambda = microphysics.hydrogen_cross_section(wavelength=xx)
# Optical depth to hydrogen photoionization
    m_h = 1.67262192E-24 # Proton mass in unit of g
r_grid_temp = r_grid[::-1]
# We assume that the atmosphere is made of only H + He
he_fraction = 1 - h_fraction
f_he_to_h = he_fraction / h_fraction
mu = (1 + 4 * f_he_to_h) / (1 + f_r + f_he_to_h)
n_tot = density / mu / m_h
n_htot = 1 / (1 + f_r + f_he_to_h) * n_tot
n_h = n_htot * (1 - f_r)
n_hetot = n_htot * f_he_to_h
n_he = n_hetot * (1 - f_r)
n_h_temp = n_h[::-1]
column_h = cumtrapz(n_h_temp, r_grid_temp, initial=0)
column_density_h = -column_h[::-1]
tau_rnu = column_density_h[:, None] * a_lambda
# Optical depth to helium photoionization
n_he_temp = n_he[::-1]
column_he = cumtrapz(n_he_temp, r_grid_temp, initial=0)
column_density_he = -column_he[::-1]
a_lambda_he = microphysics.helium_total_cross_section(wavelength=xx)
tau_rnu += column_density_he[:, None] * a_lambda_he
# Finally calculate the photoionization rate
phi_prime = abs(simps(flux_lambda_cut * a_lambda / energy_cut *
np.exp(-tau_rnu), wavelength_cut, axis=-1))
return phi_prime
# Stellar flux-average calculation of hydrogen photoionization
def radiative_processes(spectrum_at_planet):
"""
Calculate the photoionization rate of hydrogen at null optical depth based
on the EUV spectrum arriving at the planet.
Parameters
----------
spectrum_at_planet (``dict``):
Spectrum of the host star arriving at the planet covering fluxes at
least up to the wavelength corresponding to the energy to ionize
hydrogen (13.6 eV, or 911.65 Angstrom).
Returns
-------
phi (``float``):
Ionization rate of hydrogen at null optical depth in unit of 1 / s.
a_0 (``float``):
Flux-averaged photoionization cross-section of hydrogen in unit of
cm ** 2.
"""
wavelength = (spectrum_at_planet['wavelength'] *
spectrum_at_planet['wavelength_unit']).to(u.angstrom).value
flux_lambda = (spectrum_at_planet['flux_lambda'] * spectrum_at_planet[
'flux_unit']).to(u.erg / u.s / u.cm ** 2 / u.angstrom).value
energy = (c.h * c.c).to(u.erg * u.angstrom).value / wavelength
# Wavelength corresponding to the energy to ionize H
wl_break = 911.65 # angstrom
# Index of the lambda_0 in the wavelength array
i_break = tools.nearest_index(wavelength, wl_break)
# Auxiliary definitions
wavelength_cut = wavelength[:i_break + 1]
flux_lambda_cut = flux_lambda[:i_break + 1]
energy_cut = energy[:i_break + 1]
    # Photoionization cross-section as a function of wavelength
a_lambda = microphysics.hydrogen_cross_section(wavelength=wavelength_cut)
# Flux-averaged photoionization cross-section
# Note: For some reason the Simpson's rule implementation of ``scipy`` may
# yield negative results when the flux varies by a few orders of magnitude
# at the edges of integration. So we take the absolute values of a_0 and phi
a_0 = abs(simps(flux_lambda_cut * a_lambda, wavelength_cut) /
simps(flux_lambda_cut, wavelength_cut))
# Finally calculate the photoionization rate
phi = abs(simps(flux_lambda_cut * a_lambda / energy_cut, wavelength_cut))
return phi, a_0
# Hydrogen photoionization if you have only a monochromatic channel flux
def radiative_processes_mono(flux_euv, average_photon_energy=20.):
"""
Calculate the photoionization rate of hydrogen at null optical depth based
on the monochromatic EUV flux arriving at the planet.
Parameters
----------
flux_euv (``float``):
Monochromatic extreme-ultraviolet (0 - 912 Angstrom) flux arriving at
the planet in unit of erg / s / cm ** 2.
average_photon_energy (``float``, optional):
Average energy of the photons ionizing H in unit of eV. Default is 20 eV
(as in Murray-Clay et al 2009, Allan & Vidotto 2019).
Returns
-------
phi (``float``):
Ionization rate of hydrogen at null optical depth in unit of 1 / s.
a_0 (``float``):
Flux-averaged photoionization cross-section of hydrogen in unit of
cm ** 2.
"""
# Average cross-section
    a_0 = 6.3E-18 * (average_photon_energy / 13.6) ** (-3) # Unit: cm ** 2.
# Monochromatic ionization rate
flux_euv *= 6.24150907E+11 # Convert erg to eV
phi = flux_euv * a_0 / average_photon_energy
return phi, a_0
# Case-B hydrogen recombination
def recombination(temperature):
"""
Calculates the case-B hydrogen recombination rate for a gas at a certain
temperature.
Parameters
----------
temperature (``float``):
Isothermal temperature of the upper atmosphere in unit of Kelvin.
Returns
-------
alpha_rec (``float``):
Recombination rate of hydrogen in units of cm ** 3 / s.
"""
alpha_rec = 2.59E-13 * (temperature / 1E4) ** (-0.7)
return alpha_rec
# Fraction of ionized hydrogen vs. radius profile
def ion_fraction(radius_profile, planet_radius, temperature, h_fraction,
mass_loss_rate, planet_mass, mean_molecular_weight_0=1.0,
spectrum_at_planet=None, flux_euv=None, initial_f_ion=0.0,
relax_solution=False, convergence=0.01, max_n_relax=10,
exact_phi=False, return_mu=False, **options_solve_ivp):
"""
    Calculate the fraction of ionized hydrogen in the upper atmosphere as a
    function of radius, in units of planetary radii.
Parameters
----------
radius_profile (``numpy.ndarray``):
Radius in unit of planetary radii.
planet_radius (``float``):
Planetary radius in unit of Jupiter radius.
temperature (``float``):
Isothermal temperature of the upper atmosphere in unit of Kelvin.
h_fraction (``float``):
Total (ion + neutral) H number fraction of the atmosphere.
mass_loss_rate (``float``):
Mass loss rate of the planet in units of g / s.
planet_mass (``float``):
Planetary mass in unit of Jupiter mass.
mean_molecular_weight_0 (``float``):
Initial mean molecular weight of the atmosphere in unit of proton mass.
        Default value is 1.0 (100% neutral H). Since its final value depends on
the H ion fraction itself, the mean molecular weight can be
self-consistently calculated by setting `relax_solution` to `True`.
spectrum_at_planet (``dict``, optional):
Spectrum of the host star arriving at the planet covering fluxes at
least up to the wavelength corresponding to the energy to ionize
hydrogen (13.6 eV, or 911.65 Angstrom). Can be generated using
``tools.make_spectrum_dict``. If ``None``, then ``flux_euv`` must be
provided instead. Default is ``None``.
flux_euv (``float``, optional):
Extreme-ultraviolet (0-911.65 Angstrom) flux arriving at the planet in
units of erg / s / cm ** 2. If ``None``, then ``spectrum_at_planet``
must be provided instead. Default is ``None``.
initial_f_ion (``float``, optional):
The initial ionization fraction at the layer near the surface of the
planet. Default is 0.0, i.e., 100% neutral.
relax_solution (``bool``, optional):
        The first solution is calculated by initially assuming the entire
        atmosphere is in a neutral state. If ``True``, the solution will be
re-calculated in a loop until it converges to a delta_f of 1%, or for a
maximum of 10 loops (default parameters). Default is ``False``.
convergence (``float``, optional):
Value of delta_f at which to stop the relaxation of the solution for
``f_r``. Default is 0.01.
max_n_relax (``int``, optional):
Maximum number of loops to perform the relaxation of the solution for
``f_r``. Default is 10.
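    exact_phi (``bool``, optional):
        If ``True``, compute the hydrogen photoionization rate for each radius
        with ``radiative_processes_exact()`` instead of the flux-averaged
        approximation; this option requires ``spectrum_at_planet`` to be
        provided. Default is ``False``.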
return_mu (``bool``, optional):
If ``True``, then this function returns a second variable ``mu_bar``,
which is the self-consistent, density-averaged mean molecular weight of
the atmosphere. Equivalent to the ``mu_bar`` of Eq. A.3 in Lampón et
al. 2020.
**options_solve_ivp:
Options to be passed to the ``scipy.integrate.solve_ivp()`` solver. You
may want to change the options ``method`` (integration method; default
is ``'RK45'``), ``atol`` (absolute tolerance; default is 1E-6) or
``rtol`` (relative tolerance; default is 1E-3). If you are having
numerical issues, you may want to decrease the tolerance by a factor of
10 or 100, or 1000 in extreme cases.
Returns
-------
f_r (``numpy.ndarray``):
        Values of the fraction of ionized hydrogen as a function of radius.
mu_bar (``float``):
Mean molecular weight of the atmosphere, in unit of proton mass,
        averaged across the radial distance according to the function
`average_molecular_weight` in the `parker` module. Only returned when
``return_mu`` is set to ``True``.
"""
# Hydrogen recombination rate
alpha_rec = recombination(temperature)
# Hydrogen mass in g
m_h = 1.67262192E-24
# Photoionization rate at null optical depth at the distance of the planet
# from the host star, in unit of 1 / s.
if exact_phi and spectrum_at_planet is not None:
vs = parker.sound_speed(temperature, mean_molecular_weight_0)
rs = parker.radius_sonic_point(planet_mass, vs)
rhos = parker.density_sonic_point(mass_loss_rate, rs, vs)
_, rho_norm = parker.structure(radius_profile * planet_radius / rs)
        f_outer = 0.0  # First pass assumes a completely neutral atmosphere (f_ion = 0)
phi_abs = radiative_processes_exact(
spectrum_at_planet,
(radius_profile * planet_radius * u.Rjup).to(u.cm).value,
rho_norm * rhos, f_outer, h_fraction)
a_0 = 0.
elif spectrum_at_planet is not None:
phi_abs, a_0 = radiative_processes(spectrum_at_planet)
elif flux_euv is not None:
phi_abs, a_0 = radiative_processes_mono(flux_euv)
else:
raise ValueError('Either `spectrum_at_planet` or `flux_euv` must be '
'provided.')
# Multiplicative factor of Eq. 11 of Oklopcic & Hirata 2018, unit of
# cm ** 2 / g
# We assume that the remaining of the number fraction is pure He
he_fraction = 1 - h_fraction
he_h_fraction = he_fraction / h_fraction
k1_abs = h_fraction * a_0 / (h_fraction + 4 * he_fraction) / m_h
# Multiplicative factor of the second term in the right-hand side of Eq.
# 13 of Oklopcic & Hirata 2018, unit of cm ** 3 / s / g
k2_abs = h_fraction / (h_fraction + 4 * he_fraction) * alpha_rec / m_h
# In order to avoid numerical overflows, we need to normalize a few key
# variables. Since the normalization may need to be repeated to relax the
# solution, we have a function to do it.
def _normalize(_phi, _k1, _k2, _r, _mu):
# First calculate the sound speed, radius at the sonic point and the
# density at the sonic point. They will be useful to change the units of
# the calculation aiming to avoid numerical overflows
_vs = parker.sound_speed(temperature, _mu)
_rs = parker.radius_sonic_point(planet_mass, _vs)
_rhos = parker.density_sonic_point(mass_loss_rate, _rs, _vs)
# And now normalize everything
phi_unit = _vs * 1E5 / _rs / 7.1492E+09 # 1 / s
phi_norm = _phi / phi_unit
k1_unit = 1 / (_rhos * _rs * 7.1492E+09) # cm ** 2 / g
k1_norm = _k1 / k1_unit
k2_unit = _vs * 1E5 / _rs / 7.1492E+09 / _rhos # cm ** 3 / g / s
k2_norm = _k2 / k2_unit
r_norm = (_r * planet_radius / _rs)
# The differential r will be useful at some point
dr_norm = np.diff(r_norm)
dr_norm = np.concatenate((dr_norm, np.array([dr_norm[-1], ])))
# The structure of the atmosphere
v_norm, rho_norm = parker.structure(r_norm)
return phi_norm, k1_norm, k2_norm, r_norm, dr_norm, v_norm, rho_norm
phi, k1, k2, r, dr, velocity, density = _normalize(
phi_abs, k1_abs, k2_abs, radius_profile, mean_molecular_weight_0)
if exact_phi:
_phi_prime_fun = interp1d(r, phi, fill_value="extrapolate")
else:
# To start the calculations we need the optical depth, but technically
# we don't know it yet, because it depends on the ion fraction in the
# atmosphere, which is what we want to obtain. However, the optical
# depth depends more strongly on the densities of H than the ion
# fraction, so a good approximation is to assume the whole atmosphere is
# neutral at first.
column_density = np.flip(np.cumsum(np.flip(dr * density)))
tau_initial = k1 * column_density
# We do a dirty hack to make tau_initial a callable function so it's
# easily parsed inside the differential equation solver
_tau_fun = interp1d(r, tau_initial, fill_value="extrapolate")
# Now let's solve the differential eq. 13 of Oklopcic & Hirata 2018
    # The differential equation as a function of r
def _fun(_r, _f, _phi, _k2):
if exact_phi:
_phi_prime = _phi_prime_fun(np.array([_r, ]))[0]
else:
_t = _tau_fun(np.array([_r, ]))[0]
_phi_prime = np.exp(-_t)*_phi
_v, _rho = parker.structure(_r)
# In terms 1 and 2 we use the values of k2 and phi from above
term1 = (1. - _f) / _v * _phi_prime
term2 = _k2 * _rho * _f ** 2 / _v
df_dr = term1 - term2
return df_dr
# We solve it using `scipy.solve_ivp`
sol = solve_ivp(_fun, (r[0], r[-1],), np.array([initial_f_ion, ]),
t_eval=r, args=(phi, k2), **options_solve_ivp)
f_r = sol['y'][0]
# When `solve_ivp` has problems, it may return an array with different
# size than `r`. So we raise an exception if this happens
if len(f_r) != len(r):
raise RuntimeError('The solver ``solve_ivp`` failed to obtain a'
' solution.')
# Calculate the average mean molecular weight using Eq. A.3 from Lampón et
# al. 2020
mu_bar = parker.average_molecular_weight(f_r, radius_profile, velocity,
planet_mass, temperature,
he_h_fraction)
# For the sake of self-consistency, there is the option of repeating the
# calculation of f_r by updating the optical depth with the new ion
# fractions.
if relax_solution is True:
for i in range(max_n_relax):
previous_f_r = np.copy(f_r)
if exact_phi:
# phi_abs will need to be recomputed here with the new density
# structure
vs = parker.sound_speed(temperature, mu_bar)
rs = parker.radius_sonic_point(planet_mass, vs)
rhos = parker.density_sonic_point(mass_loss_rate, rs, vs)
_, rho_norm = parker.structure(
radius_profile * planet_radius / rs)
phi_abs = radiative_processes_exact(
spectrum_at_planet,
(radius_profile * planet_radius * u.Rjup).to(u.cm).value,
rho_norm * rhos, f_r, h_fraction)
# We re-normalize key parameters because the newly-calculated f_ion
# changes the value of the mean molecular weight of the atmosphere
phi, k1, k2, r, dr, velocity, density = _normalize(
phi_abs, k1_abs, k2_abs, radius_profile, mu_bar)
if exact_phi:
_phi_prime_fun = interp1d(r, phi, fill_value="extrapolate")
else:
# Re-calculate the column densities
column_density = np.flip(np.cumsum(np.flip(dr * density *
(1 - f_r))))
tau = k1 * column_density
_tau_fun = interp1d(r, tau, fill_value="extrapolate")
# And solve it again
sol = solve_ivp(_fun, (r[0], r[-1],), np.array([initial_f_ion, ]),
t_eval=r, args=(phi, k2), **options_solve_ivp)
f_r = sol['y'][0]
# Raise an error if the length of `f_r` is different from the length
# of `r`
if len(f_r) != len(r):
raise RuntimeError('The solver ``solve_ivp`` failed to obtain a'
' solution.')
# Here we update the average mean molecular weight
mu_bar = parker.average_molecular_weight(f_r, radius_profile,
velocity,
planet_mass, temperature,
he_h_fraction)
# Calculate the relative change of f_ion in the outer shell of the
# atmosphere (where we expect the most important change)
# relative_delta_f = abs(f_r[-1] - previous_f_r_outer_layer) \
# / previous_f_r_outer_layer
relative_delta_f = abs(
np.sum(f_r - previous_f_r) / np.sum(previous_f_r))
# Break the loop if convergence is achieved
if relative_delta_f < convergence:
break
else:
pass
else:
pass
if return_mu is False:
return f_r
else:
return f_r, mu_bar
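# Minimal usage sketch (not part of the original module): the planetary
# parameters below are arbitrary illustration values, and the monochromatic
# EUV flux path is used so that no input spectrum is required.
if __name__ == '__main__':
    r = np.linspace(1.0, 15.0, 200)  # radius grid in planetary radii
    f_ion = ion_fraction(radius_profile=r,
                         planet_radius=0.39,    # Jupiter radii (assumed value)
                         temperature=7000.0,    # K (assumed value)
                         h_fraction=0.9,        # assumed H number fraction
                         mass_loss_rate=2E+10,  # g / s (assumed value)
                         planet_mass=0.07,      # Jupiter masses (assumed value)
                         flux_euv=1000.0,       # erg / s / cm ** 2 (assumed value)
                         initial_f_ion=0.0)
    print(f_ion[:5])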
|
# Standard Library
import os
import re
import sys
# Functions for outputting message to stderr
def warning_message(message, newLine=True):
'''Output a warning message to stderr.'''
sys.stderr.write('WARNING: ' + message)
if newLine:
sys.stderr.write('\n')
def information_message(message, newLine=True):
'''Output an information message to stderr.'''
sys.stderr.write('INFO: ' + message)
if newLine:
sys.stderr.write('\n')
def error_message(message, newLine=True, terminate=True):
'''Output an error message to stderr.'''
global commandlineArguments
sys.stderr.write('ERROR: ' + message)
if newLine:
sys.stderr.write('\n')
if terminate:
sys.exit()
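# Usage sketch (added for illustration; the messages are placeholders):
if __name__ == '__main__':
    information_message('starting up')
    warning_message('something looks off, continuing anyway')
    error_message('giving up', terminate=True)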
|
from abc import ABCMeta, abstractmethod # Only Python 2.6 and above
"""
Defining an abstract class which the controller
modules will implement, forcing them to override
all the abstract methods
"""
class ControllerModule(object):
__metaclass__ = ABCMeta
def __init__(self):
self.pendingCBT = {}
self.CBTMappings = {}
@abstractmethod
def initialize(self):
pass
@abstractmethod
def processCBT(self):
pass
@abstractmethod
def timer_method(self):
pass
@abstractmethod
def terminate(self):
pass
# Check if the given cbt is a request sent by the current module
# If yes, returns the source CBT for which the request has been
# created, else return None
def checkMapping(self, cbt):
for key in self.CBTMappings:
if(cbt.uid in self.CBTMappings[key]):
return key
return None
# For a given sourceCBT's uid, check if all requests are serviced
def allServicesCompleted(self, sourceCBT_uid):
requested_services = self.CBTMappings[sourceCBT_uid]
for service in requested_services:
if(service not in self.pendingCBT):
return False
return True
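# Illustrative sketch (not part of the original framework): a do-nothing
# controller module showing how a concrete class satisfies the abstract
# interface above. The method bodies are placeholders only.
class NullControllerModule(ControllerModule):
    def initialize(self):
        pass

    def processCBT(self):
        pass

    def timer_method(self):
        pass

    def terminate(self):
        pass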
|
"""This pkg aims to implement serveral filtering methods for (un)directed
graphs.
Edge filtering methods allows to extract the backbone of a graph or sampling the
most important edges. You can use edge filtering methods as a preprocessing step
aiming to improve the performance/results of graph algorithms or to turn a graph
visualtzation more asthetic.
**See the example below for a simple usage of the package.**
```python
import networkx as nx
import edgeseraser as ee
g = nx.erdos_renyi_graph(100, 0.4)
ee.noise_score.filter_nx_graph(g)
g # filtered graph
```
## Available methods and details
| Method | Description | suitable for | limitations/restrictions/details |
| --- | --- |--- | --- |
| [Noise Score] | Filters edges with high noise score. Paper:[1]|Directed, Undirected, Weighted | Very good and fast! [4] |
| [Disparity] | Dirichlet process filter (stick-breaking) Paper:[2] | Directed, Undirected, Weighted |There are some criticism regarding the use in undirected graphs[3]|
| [Pólya-Urn]| Filters edges with Pólya-Urn method. Paper:[5]| Directed, Undirected, Integer Weighted||
[1]: https://arxiv.org/abs/1701.07336
[2]: https://arxiv.org/abs/0904.
[3]: https://arxiv.org/abs/2101.00863
[4]: https://www.michelecoscia.com/?p=1236
[5]: https://www.nature.com/articles/s41467-019-08667-3
[Noise Score]: https://devmessias.github.io/edgeseraser/api_docs/#edgeseraser.noise_score
[Disparity]: https://devmessias.github.io/edgeseraser/api_docs/#edgeseraser.disparity
[Pólya-Urn]: https://devmessias.github.io/edgeseraser/api_docs/#edgeseraser.polya
"""
__author__ = """Bruno Messias"""
__email__ = "[email protected]"
__version__ = "0.5.0"
|
# V0
class Solution:
# @param s, a string
# @return an integer
def numDecodings(self, s):
if s=="" or s[0]=='0': return 0
dp=[1,1]
for i in range(2,len(s)+1):
if 10 <=int(s[i-2:i]) <=26 and s[i-1]!='0':
dp.append(dp[i-1]+dp[i-2])
elif int(s[i-2:i])==10 or int(s[i-2:i])==20:
dp.append(dp[i-2])
elif s[i-1]!='0':
dp.append(dp[i-1])
else:
return 0
return dp[len(s)]
# V1
# https://blog.csdn.net/qian2729/article/details/50570960
# idea :
# dp[i] = dp[i-1] + dp[i-2]   if 10 <= int(s[i-2:i]) <= 26 and s[i-1] != '0'
# dp[i] = dp[i-2]             if int(s[i-2:i]) == 10 or int(s[i-2:i]) == 20
# dp[i] = dp[i-1]             otherwise (when s[i-1] != '0')
# DEMO
# my_s_list = ["22", "223", "2234", "22342", "223423", "2234234"]
# s = Solution()
# for i in my_s_list:
# output = s.numDecodings(i)
# print ('output :', output)
# dp : [1, 1, 2]
# output : 2
# dp : [1, 1, 2, 3]
# output : 3
# dp : [1, 1, 2, 3, 3]
# output : 3
# dp : [1, 1, 2, 3, 3, 3]
# output : 3
# dp : [1, 1, 2, 3, 3, 3, 6]
# output : 6
# dp : [1, 1, 2, 3, 3, 3, 6, 6]
# output : 6
class Solution(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) == 0 or s[0] == '0':
return 0
dp = [0] * (max(len(s) + 1,2))
dp[0],dp[1] = 1,1
for i in range(2,len(s) + 1):
if 10 <= int(s[i - 2:i]) <= 26 and s[i - 1] != '0':
dp[i] = dp[i - 1] + dp[i - 2]
elif int(s[i-2:i]) == 10 or int(s[i-2:i]) == 20:
dp[i] = dp[i - 2]
elif s[i-1] != '0':
dp[i] = dp[i - 1]
else:
return 0
return dp[len(s)]
# V1'
# https://www.jiuzhang.com/solution/decode-ways/#tag-highlight-lang-python
# IDEA : DP
class Solution:
# @param {string} s a string, encoded message
# @return {int} an integer, the number of ways decoding
def numDecodings(self, s):
if s == "" or s[0] == '0':
return 0
dp = [1, 1]
for i in range(2,len(s) + 1):
if 10 <= int(s[i - 2 : i]) <=26 and s[i - 1] != '0':
dp.append(dp[i - 1] + dp[i - 2])
elif int(s[i-2 : i]) == 10 or int(s[i - 2 : i]) == 20:
dp.append(dp[i - 2])
elif s[i-1] != '0':
dp.append(dp[i-1])
else:
return 0
return dp[len(s)]
# V2
# Time: O(n)
# Space: O(1)
class Solution(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) == 0 or s[0] == '0':
return 0
prev, prev_prev = 1, 0
for i in range(len(s)):
cur = 0
if s[i] != '0':
cur = prev
if i > 0 and (s[i - 1] == '1' or (s[i - 1] == '2' and s[i] <= '6')):
cur += prev_prev
prev, prev_prev = cur, prev
return prev
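# Quick sanity check (added for illustration, not part of the original snippets).
# The most recently defined Solution (the O(1)-space version above) should agree
# with the hand-worked values, e.g. "226" has 3 decodings: 2-2-6, 22-6, 2-26.
# s = Solution()
# assert s.numDecodings("226") == 3
# assert s.numDecodings("0") == 0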
|
#Number Guessing Game Objectives:
# Include an ASCII art logo.
# Allow the player to submit a guess for a number between 1 and 100.
# Check user's guess against actual answer. Print "Too high." or "Too low." depending on the user's answer.
# If they got the answer correct, show the actual answer to the player.
# Track the number of turns remaining.
# If they run out of turns, provide feedback to the player.
# Include two different difficulty levels (e.g., 10 guesses in easy mode, only 5 guesses in hard mode).
from random import randint
from art import logo
EASY_LEVEL_TURNS = 10
HARD_LEVEL_TURNS = 5
def check_answer(guess, answer, turns):
"""Checks answer against guess. Returns the number of turns remaining."""
if guess > answer:
print(" Too high.")
return turns -1
elif guess < answer:
print(" Too low.")
return turns -1
    else:
        print(f" You got it! The answer was {answer}.")
        return turns
def set_difficulty():
level = input("Choos a difficulty. Type 'easy' or 'hard': ")
if level == "easy":
return EASY_LEVEL_TURNS
else:
return HARD_LEVEL_TURNS
def game():
print(logo)
print("Welcome to the Number Guessing Game!")
print("I'm thinking of a number between 1 and 100.")
answer = randint(1, 100)
#print(f"Debugger -- the correct answer is {answer}")
turns = set_difficulty()
guess = 0
while guess != answer:
print(f"You have {turns} attempts remaining to guess the number.")
guess = int(input("Make a guess: "))
turns = check_answer(guess, answer, turns)
if turns == 0:
print("You've run out of guesses. You lose.")
return
elif guess != answer:
print(" Guess again.")
game()
|
class StopAt:
""" Represents the Stop At CLI parameter """
def check(self, context):
""" Return if this CLI should be used """
return context.config.stopAt is not None
def build(self, context):
""" Return the string parameters to add to the command string """
return ["--stop-at", context.config.stopAt] |
from Gaudi.Configuration import *
from Configurables import k4DataSvc, TestE4H2L, EDM4hep2LcioTool, Lcio2EDM4hepTool
algList = []
END_TAG = "END_TAG"
evtsvc = k4DataSvc('EventDataSvc')
# EDM4hep2lcio Tool
edmConvTool = EDM4hep2LcioTool("EDM4hep2lcio")
edmConvTool.Parameters = [
"E4H_CaloHitCollection", "LCIO_CaloHitCollection",
"E4H_RawCaloHitCollection", "LCIO_RawCaloHitCollection",
"E4H_TPCHitCollection", "LCIO_TPCHitCollection",
"E4H_TrackCollection", "LCIO_TrackCollection",
"E4H_SimTrackerHitCollection", "LCIO_SimTrackerHitCollection",
"E4H_TrackerHitCollection", "LCIO_TrackerHitCollection",
"E4H_MCParticleCollection", "LCIO_MCParticleCollection",
"E4H_SimCaloHitCollection", "LCIO_SimCaloHitCollection"
]
# LCIO2EDM4hep Tool
lcioConvTool = Lcio2EDM4hepTool("Lcio2EDM4hep")
lcioConvTool.Parameters = [
"LCIO_CaloHitCollection", "E4H_CaloHitCollection_conv",
# "LCIO_TrackerHitCollection", "E4H_TrackerHitCollection_conv",
"LCIO_SimTrackerHitCollection", "E4H_SimTrackerHitCollection_conv",
"LCIO_TrackCollection", "E4H_TrackCollection_conv",
"LCIO_MCParticleCollection", "E4H_MCParticleCollection_conv",
"LCIO_SimCaloHitCollection", "E4H_SimCaloHitCollection_conv"
]
TestConversion = TestE4H2L("TestConversion")
TestConversion.EDM4hep2LcioTool=edmConvTool
TestConversion.Lcio2EDM4hepTool=lcioConvTool
# Output_DST = MarlinProcessorWrapper("Output_DST")
# Output_DST.OutputLevel = WARNING
# Output_DST.ProcessorType = "LCIOOutputProcessor"
# Output_DST.Parameters = [
# "DropCollectionNames", END_TAG,
# "DropCollectionTypes", "MCParticle", "LCRelation", "SimCalorimeterHit", "CalorimeterHit", "SimTrackerHit", "TrackerHit", "TrackerHitPlane", "Track", "ReconstructedParticle", "LCFloatVec", "Clusters", END_TAG,
# "FullSubsetCollections", "EfficientMCParticles", "InefficientMCParticles", "MCPhysicsParticles", END_TAG,
# "KeepCollectionNames", "MCParticlesSkimmed", "MCPhysicsParticles", "RecoMCTruthLink", "SiTracks", "SiTracks_Refitted", "PandoraClusters", "PandoraPFOs", "SelectedPandoraPFOs", "LooseSelectedPandoraPFOs", "TightSelectedPandoraPFOs", "LE_SelectedPandoraPFOs", "LE_LooseSelectedPandoraPFOs", "LE_TightSelectedPandoraPFOs", "LumiCalClusters", "LumiCalRecoParticles", "BeamCalClusters", "BeamCalRecoParticles", "MergedRecoParticles", "MergedClusters", "RefinedVertexJets", "RefinedVertexJets_rel", "RefinedVertexJets_vtx", "RefinedVertexJets_vtx_RP", "BuildUpVertices", "BuildUpVertices_res", "BuildUpVertices_RP", "BuildUpVertices_res_RP", "BuildUpVertices_V0", "BuildUpVertices_V0_res", "BuildUpVertices_V0_RP", "BuildUpVertices_V0_res_RP", "PrimaryVertices", "PrimaryVertices_res", "PrimaryVertices_RP", "PrimaryVertices_res_RP", "RefinedVertices", "RefinedVertices_RP", "PFOsFromJets", END_TAG,
# "LCIOOutputFile", "Output_DST.slcio", END_TAG,
# "LCIOWriteMode", "WRITE_NEW", END_TAG
# ]
# from Configurables import PodioOutput
# out = PodioOutput("PodioOutput", filename = "output_k4SimDelphes.root")
# out.outputCommands = ["keep *"]
algList.append(TestConversion)
# algList.append(Output_DST)
# algList.append(out)
from Configurables import ApplicationMgr
ApplicationMgr( TopAlg = algList,
EvtSel = 'NONE',
EvtMax = 1,
ExtSvc = [evtsvc],
OutputLevel=DEBUG
)
|
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('ipl_predictor.pkl', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
'''
For rendering results on HTML GUI
'''
stads = {'Rajiv Gandhi International Stadium, Uppal': 0, 'Maharashtra Cricket Association Stadium': 1, 'Saurashtra Cricket Association Stadium': 2, 'Holkar Cricket Stadium': 3, 'M Chinnaswamy Stadium': 4, 'Wankhede Stadium': 5, 'Eden Gardens': 6, 'Feroz Shah Kotla': 7, 'Punjab Cricket Association IS Bindra Stadium, Mohali': 8, 'Green Park': 9, 'Punjab Cricket Association Stadium, Mohali': 10, 'Sawai Mansingh Stadium': 11, 'MA Chidambaram Stadium, Chepauk': 12, 'Dr DY Patil Sports Academy': 13, 'Newlands': 14, "St George's Park": 15, 'Kingsmead': 16, 'SuperSport Park': 17, 'Buffalo Park': 18, 'New Wanderers Stadium': 19, 'De Beers Diamond Oval': 20, 'OUTsurance Oval': 21,
'Brabourne Stadium': 22, 'Sardar Patel Stadium, Motera': 23, 'Barabati Stadium': 24, 'Vidarbha Cricket Association Stadium, Jamtha': 25, 'Himachal Pradesh Cricket Association Stadium': 26, 'Nehru Stadium': 27, 'Dr. Y.S. Rajasekhara Reddy ACA-VDCA Cricket Stadium': 28, 'Subrata Roy Sahara Stadium': 29, 'Shaheed Veer Narayan Singh International Stadium': 30, 'JSCA International Stadium Complex': 31, 'Sheikh Zayed Stadium': 32, 'Sharjah Cricket Stadium': 33, 'Dubai International Cricket Stadium': 34, 'M. A. Chidambaram Stadium': 35, 'Feroz Shah Kotla Ground': 36, 'M. Chinnaswamy Stadium': 37, 'Rajiv Gandhi Intl. Cricket Stadium': 38, 'IS Bindra Stadium': 39, 'ACA-VDCA Stadium': 40}
toss = {'field': 0, 'bat': 1}
teams = {'MI': 1,
'KKR': 2,
'RCB': 3,
'DC': 4,
'CSK': 5,
'RR': 6,
'DCS': 7,
'GL': 8,
'KXIP': 9,
'SRH': 10,
'RPS': 11,
'KTK': 12,
'PW': 13,
'tie': 14,
}
team1 = request.form['tem1']
team2 = request.form['tem2']
toss_winner = request.form['win']
toss_decision = request.form['toss']
venue = request.form['venue']
int_features = [teams[team1],teams[team2],teams[toss_winner],toss[toss_decision],stads[venue]]
# print(int_features)
final_features = [np.array(int_features)]
prediction = model.predict(final_features)
output = prediction[0]
outs = list(teams.keys())[list(teams.values()).index(output)]
return render_template('index.html', prediction_text='The winner will be {}'.format(outs))
if __name__ == "__main__":
app.run(debug=True)
|
abc=1,2,3,4 #by default it is taken as tuple
print(type(abc))
print(abc)
tupl1=('a','xyz',10.90)
print(tupl1)
tup=()
print(tup)
mtup=(1,)
print(mtup)
mtup1=(1,2,3,4,"abc")
mtup2=mtup1 # copying of tuples
print(mtup2)
print(mtup2[0]) #print the first element
print(mtup2[-1]) #print the last element
print(mtup2[:]) #print all the elements using slicing
print(mtup2[1:])
print(mtup2[1:3])
print(mtup2[1:-2])
mytuple1=(1,2,3,4,"SWE",2,0)
mytuple2=(23,4343,555,"AWE")
print(mytuple1+mytuple2)#merge two tuples using + operator
mytuple3=(1,2,3,'Hello'*3)
print(mytuple3)
try:
    mytuple3[0]=99 # cannot modify: tuples are immutable, so this raises TypeError
except TypeError as err:
    print("Cannot modify a tuple:", err)
del(tupl1) # can delete the whole tuple
#print(tupl1) # would raise NameError: the name no longer exists after del
print("Length of tuple:",len(mytuple3)) |
from pprint import pformat
import logging
import os
def configure_root_logger():
"""
    I prefer to use this instead of logging.basicConfig because it is more flexible,
    e.g. in terms of controlling the encoding of the log file.
"""
module_name=None # root
log_level = logging.DEBUG # default of level for root is WARNING
formatter = 'logger name : %(name)s , %(levelname)s , func : %(funcName)s , %(message)s , module : %(module)s ,line : %(lineno)d , %(asctime)s'
# no need to use the return , it is accessed by logging
get_logger_with_file_handler(module_name,log_level,formatter)
def get_logger_with_file_handler(module_name : str, log_level : int,formatter : str,)->logging.Logger:
"""
get logger with file handler
the log file is the same as the module name with extension .log
Args:
        module_name (str): pass here __name__
log_level (int): pass here e.g. logger.DEBUG
formatter (str): pass here e.g. 'logger name : %(name)s , %(levelname)s,
func : %(funcName)s , %(message)s , module : %(module)s ,
line : %(lineno)d , %(asctime)s'
Returns:
        logging.Logger: the configured logger with a file handler attached
"""
logger = logging.getLogger(module_name) # use with module_name
logger.setLevel(log_level) # e.g logging.DEBUG
    if module_name is None:
file_name = 'root'
else:
file_name = module_name
log_path = os.path.join('logs',f'{file_name}.log')
file_handler = logging.FileHandler(filename=log_path,encoding='utf-8')
oFormatter = logging.Formatter(formatter)
file_handler.setFormatter(oFormatter)
logger.addHandler(file_handler)
return logger
SEP = '\n'
def args_to_string(**kwargs)->str:
args_as_string=SEP
index=0
for key,value in kwargs.items():
formated_value = pformat(value)
args_as_string += f'arg{index} {key} : {formated_value}{SEP}'
index += 1
return args_as_string
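# Usage sketch (added for illustration; only names defined in this module are
# used, the log directory and the message content are assumptions for the demo):
if __name__ == '__main__':
    os.makedirs('logs', exist_ok=True)  # the file handlers above expect a logs/ folder
    configure_root_logger()
    logger = get_logger_with_file_handler(__name__, logging.DEBUG,
                                          '%(name)s , %(levelname)s , %(message)s')
    logger.debug('called with %s', args_to_string(alpha=1, beta=[1, 2, 3]))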
|
# Lesson3: List is mutable
# source: code/list_is_mutable.py
original_list = list(range(1, 6))
print(original_list)
new_list = original_list
new_list[2] = 128
print(original_list)
print(new_list) |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
sys.path.append('..')
import numpy as np
from tk.TKGame import TKGame
from tk.keras.NNet import NNetWrapper as NNet
from tk.test.testTKLogick import generate_encoded_state
class TestNNet(unittest.TestCase): #TODO: rename to testTKLogick
def setUp(self):
self.g = TKGame()
self.n1 = NNet(self.g)
self.n1.load_checkpoint('temp/','best.pth.tar')
self.n1.nnet.model._make_predict_function()
def tearDown(self):
self.g = None
self.n1 = None
def testNNOutputs(self):
state = [9, 9, 9, 9, 9, 9, 9, 9, 9, 1, 10, 10, 10, 10, 10, 10, 10, 10, 0, 0, None, None]
encoded_state = generate_encoded_state(state)
canonical_form = self.g.getCanonicalForm(encoded_state, -1)
prediction = self.n1.predict(canonical_form)
action = np.argmax(prediction[0])
# print(canonical_form)
# print(prediction)
self.assertEqual(action,10)
if __name__ == '__main__':
unittest.main() |
# Generated by Django 4.0.3 on 2022-03-22 15:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bank_account', '0005_bankaccount_created_at'),
]
operations = [
migrations.AddField(
model_name='transaction',
name='recipient_bank_account',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='recipient_bank_account', to='bank_account.bankaccount', verbose_name='Recipient bank account'),
),
]
|
from scrapli.driver import GenericDriver
from scrapli.driver.core import IOSXEDriver
from scrapli.response import Response
from nornir_scrapli.exceptions import NornirScrapliNoConfigModeGenericDriver
def test_send_configs(nornir, monkeypatch):
from nornir_scrapli.tasks import send_configs
def mock_open(cls):
pass
def mock_send_configs(
cls,
configs,
strip_prompt,
failed_when_contains="",
stop_on_failed=False,
privilege_level="",
timeout_ops=None,
):
responses = []
response = Response(host="fake_as_heck", channel_input=configs[0])
response._record_response(b"")
responses.append(response)
response = Response(host="fake_as_heck", channel_input=configs[1])
response._record_response(b"")
responses.append(response)
return responses
monkeypatch.setattr(IOSXEDriver, "open", mock_open)
monkeypatch.setattr(IOSXEDriver, "send_configs", mock_send_configs)
result = nornir.run(task=send_configs, configs=["interface loopback123", "description neat"])
assert result["sea-ios-1"][0].result == "interface loopback123\ndescription neat\n"
assert result["sea-ios-1"].failed is False
assert result["sea-ios-1"].changed is True
def test_send_configs_dry_run(nornir, monkeypatch):
from nornir_scrapli.tasks import send_configs
def mock_open(cls):
pass
def mock_acquire_priv(cls, priv):
return
monkeypatch.setattr(IOSXEDriver, "open", mock_open)
monkeypatch.setattr(IOSXEDriver, "acquire_priv", mock_acquire_priv)
result = nornir.run(
task=send_configs,
dry_run=True,
configs=["interface loopback123", "description neat"],
)
assert result["sea-ios-1"].result is None
assert result["sea-ios-1"].failed is False
assert result["sea-ios-1"].changed is False
def test_send_configs_generic_driver(nornir_generic, monkeypatch):
from nornir_scrapli.tasks import send_configs
def mock_open(cls):
pass
monkeypatch.setattr(GenericDriver, "open", mock_open)
result = nornir_generic.run(
task=send_configs,
dry_run=True,
configs=["interface loopback123", "description neat"],
)
assert (
"nornir_scrapli.exceptions.NornirScrapliNoConfigModeGenericDriver"
in result["sea-ios-1"].result
)
assert result["sea-ios-1"].failed is True
assert result["sea-ios-1"].changed is False
assert isinstance(result["sea-ios-1"].exception, NornirScrapliNoConfigModeGenericDriver)
|
import numpy as np
import pdb
def trim(w, wvs):
if w not in wvs:
if w[:-1] in wvs: return w[:-1]
elif w.replace('-','') in wvs: return w.replace('-','')
elif w[:-4] + 'lize' in wvs: return w[:-4] + 'lize'
return w
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
#def readDataset(filename, wvs=[], no_skip=False, sort=True, printout=False):
def readDataset(filename, no_skip=False, sort=True, printout=False):
pairs = []
scores = []
with open(filename) as f:
lines = f.readlines()
for line in lines:
split = line.split()
#if no_skip or not wvs or (split[0].lower() in wvs and split[1].lower() in wvs):
pairs.append([ split[0].lower(), split[1].lower(), float(split[2]) ])
if sort:
pairs = sorted(pairs, key=lambda x: x[2])
if printout:
for pair in pairs:
print(" %+14s %+14s : %.2f" % (pair[0], pair[1], pair[2]))
for pair in pairs:
scores.append(pair[2])
return (pairs, scores)
def readWordVecs(filename):
wvs = {}
with open(filename) as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if line != '':
split = line.split()
word = split[0]
vec = []
#try:
wvs[word] = np.array([float(split[i]) for i in range(1, len(split))], dtype='float64')
#except:
# load word vector error, replace with zeros
#continue
#wvs[word] = np.array([float(0.0) for i in range(1, len(split))], dtype='float64')
#pdb.set_trace()
#print('error load word vector')
return wvs
def readWordVecsList(filename):
words = []
vecs = []
with open(filename) as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if line != '':
split = line.split()
words.append(split[0])
vec = []
for i in range(1, len(split)):
vec.append(float(split[i]))
vecs.append(np.array(vec, dtype='float64'))
vecs = np.asarray(vecs, dtype='float64')
return (words, vecs)
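# Usage sketch (added for illustration): the file paths are placeholders, and
# the cosine-similarity loop is one common way these helpers are used, not
# something prescribed by this module.
if __name__ == '__main__':
    wvs = readWordVecs('vectors.txt')                # placeholder path: word -> vector
    pairs, gold_scores = readDataset('wordsim.txt')  # placeholder path
    for w1, w2, gold in pairs:
        w1, w2 = trim(w1, wvs), trim(w2, wvs)
        if w1 in wvs and w2 in wvs:
            cos = np.dot(wvs[w1], wvs[w2]) / (
                np.linalg.norm(wvs[w1]) * np.linalg.norm(wvs[w2]))
            print("%s %s gold=%.2f cos=%.3f" % (w1, w2, gold, cos))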
|