max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
haptools/data/__init__.py | aryarm/admixtools | 0 | 12793951 | <gh_stars>0
from .data import Data
from .genotypes import Genotypes, GenotypesRefAlt
from .phenotypes import Phenotypes
from .covariates import Covariates
from .haplotypes import Extra, Variant, Haplotype, Haplotypes
| 0.984375 | 1 |
src/utils/utils.py | dsikar/sdsandbox | 1 | 12793952 | <filename>src/utils/utils.py<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[40]:
def sort_unity_files(path, mask):
"""
Create a sorted dictionary from unity (SDSandbox) files e.g.
C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\logs_Mon_Jul_13_08_29_01_2020\\0_cam-image_array_.jpg
    Where the key in the example above is 0 (the characters before the first underscore in 0_cam-image_array_.jpg)
Parameters
----------
path : string
path to files
mask : string
file type
Returns
-------
fdict: dictionary
Sorted dictionary containing key and file path
Example
-------
path = 'C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\logs_Mon_Jul_13_08_29_01_2020\\'
mask = '*.jpg'
fdict = sort_unity_files(path, mask)
for key in sorted(fdict):
print("key: {}, value:{}".format(key,fdict[key]))
Note
-------
    File path format is OS dependent. The OrderedDict must be sorted to iterate over the files in the right order.
"""
import fnmatch
import os
from collections import OrderedDict
#path = 'C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\logs_Mon_Jul_13_08_29_01_2020\\'
#mask = '*.jpg'
filemask = os.path.expanduser(path + mask)
path, mask = os.path.split(filemask)
fdict = OrderedDict()
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, mask):
fdict[int(filename.split('_')[0])] = os.path.join(root, filename)
print("Use sorted() function in your for loop to sort the output of this sort_unity_files().")
return fdict
# In[41]:
def overlay_imgs(s_img, l_img, x_offset=50, y_offset=50):
"""
Overlay two numpy array images
Parameters
----------
s_img: numpy array, small image
l_img: numpy array, large image
x_offset: left padding from large to small overlaid image
y_offset: top padding from large to small overlaid image
Returns
-------
image_arr: numpy array containing large image with insert
of small image inlaid
Example
--------
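    A minimal sketch (array sizes and offsets below are illustrative assumptions, not taken from the project):
    import numpy as np
    small = np.zeros((100, 160, 3), dtype=np.uint8)
    large = np.zeros((600, 800, 3), dtype=np.uint8)
    combined = overlay_imgs(small, large)  # small inlaid at the default 50,50 offset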
"""
l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
return l_img
# In[42]:
def plot_img_hist(img, scheme='rgb'):
"""
Plot histogram for an rgb array
Parameters
-------
img: numpy array
    scheme: string, 'rgb' (default) or 'yuv-rgb'
        If scheme is 'rgb', the maximum count in a bin is expected to have 3 digits; otherwise
        6 digits, and the y-axis is plotted on a log scale.
Returns
-------
fig: matplotlib.pyplot figure
Example
-------
import cv2
import numpy as np
import matplotlib.pyplot as plt
ipath = 'C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\logs_Mon_Jul_13_08_29_01_2020\\12893_cam-image_array_.jpg'
img1 = cv2.imread(ipath) # 120x160x3
plt.rcParams["figure.figsize"] = (6,4)
    myfig = plot_img_hist(img1)
"""
# from https://discuss.pytorch.org/t/plot-a-histogram-for-multiple-images-full-dataset/67600
# https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
nb_bins = 256
count_r = np.zeros(nb_bins)
count_g = np.zeros(nb_bins)
count_b = np.zeros(nb_bins)
#img = Image.open('16_left-2.jpeg')
# Calculate manual hist
x = np.array(img)
x = x.transpose(2, 0, 1)
hist_r = np.histogram(x[0], bins=nb_bins, range=[0, 255])
hist_g = np.histogram(x[1], bins=nb_bins, range=[0, 255])
hist_b = np.histogram(x[2], bins=nb_bins, range=[0, 255])
count_r = hist_r[0]
count_g = hist_g[0]
count_b = hist_b[0]
# Plot manual
bins = hist_r[1]
fig = plt.figure()
plt.bar(bins[:-1], count_r, color='r', alpha=0.5)
plt.bar(bins[:-1], count_g, color='g', alpha=0.5)
plt.bar(bins[:-1], count_b, color='b', alpha=0.5)
return fig
# In[43]:
# fpath = fdict[key]
def get_sdsandbox_json_steer_angle(fpath):
"""
Get steering angle stored in json file.
    The argument passed is the path to an image file that was stored alongside a corresponding json file
    containing a steering angle, and looks something like:
C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\logs_Mon_Jul_13_08_29_01_2020\\ \
12893_cam-image_array_.jpg
    The json file with the steering angle, in the same path, will be named record_12893.json
We open that file and return the steering angle.
Parameters
-------
fpath: string, filepath
Returns
-------
st_angle: steering angle
Example
-------
fpath = 'C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\logs_Mon_Jul_13_08_29_01_2020\\12893_cam-image_array_.jpg'
jsa = get_sdsandbox_json_steer_angle(fpath)
print(jsa)
"""
import json
# split string
fsplit = fpath.split('\\')
# get name e.g. 12893_cam-image_array_.jpg
fname = fsplit[-1]
# get number e.g. 12893
fnumber = fsplit[-1].split('_')
fnumber = fnumber[0]
# build json file name e.g. record_12893.json
fname = 'record_' + fnumber + '.json'
# build file path e.g. 'C:\Users\aczd097\Downloads\dataset\unity\log_sample\logs_Mon_Jul_13_08_29_01_2020\record_12893.json'
idx = fpath.rindex('\\') + 1
fname = fpath[0:idx] + fname
    # open and read the json file, making sure the handle is closed
    with open(fname, "r") as f:
        file = f.read()
# load json
fjson = json.loads(file)
# get and return steering angle attribute
st_angle = fjson['user/angle']
return st_angle
# In[ ]:
def overlay_imgs(s_img, l_img, x_offset=50, y_offset=50):
"""
Overlay two numpy array images
Parameters
----------
s_img: numpy array, small image
l_img: numpy array, large image
x_offset: left padding from large to small overlaid image
y_offset: top padding from large to small overlaid image
Returns
-------
image_arr: numpy array containing large image with insert
of small image inlaid
Example
--------
"""
#import cv2
#s_img = cv2.imread("smaller_image.png")
#l_img = cv2.imread("larger_image.jpg")
# x_offset=y_offset=50
l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
return l_img
# In[ ]:
# TODO, need to bring this from existing module
def add_rain(image_arr, rt=None, st=0):
"""
Add rain to image
Parameters
----------
image_arr: numpy array containing image
rt: string, rain type "heavy" or "torrential"
st: range to draw a random slant from
Returns
-------
image_arr: numpy array containing image with rain
Example
--------
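    A minimal sketch (the image below is a blank placeholder; the Automold helper imported
    inside this function must be available for the call to succeed):
    import numpy as np
    img = np.zeros((120, 160, 3), dtype=np.uint8)
    rainy = add_rain(img, rt='heavy', st=10)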
"""
    import numpy as np
    import Automold as am
# print("Adding rain...")
if(st != 0):
# draw a random number for slant
st = np.random.randint(-1 * st, st)
if(rt!='light'): # heavy or torrential
image_arr = am.add_rain_single(image_arr, rain_type=rt, slant=st)
else:
# no slant
image_arr = am.add_rain_single(image_arr)
return image_arr
# In[52]:
def make_video(fdict, model, preproc=False):
"""
Make video from image dictionary.
video.avi is written to disk
Parameters
-------
fdict: collections.OrderedDict, ordered dictionary of file names
model: string, model name
preproc: boolean, show preprocessed image next to original
Returns
none
Example
-------
path = 'C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\logs_Mon_Jul_13_08_29_01_2020\\'
mask = '*.jpg'
fdict = sort_unity_files(path, mask)
model = 'nvidia2'
make_video(fdict, model, True) # saved as nvidia2.avi
"""
import os
import sys
# append local path so we can make use
# of locally defined modules
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import argparse
import fnmatch
import json
import os
from io import BytesIO
from PIL import Image
import base64
import numpy as np
import matplotlib.pyplot as plt
import Augment_cls as Augmentation
import cv2
import conf
# instantiate augmentation class
ag = Augmentation.Augment_cls(model)
# video name
video_name = model + '.avi'
VIDEO_WIDTH, VIDEO_HEIGHT = 800, 600
IMAGE_WIDTH, IMAGE_HEIGHT = 800, 600
if(preproc == True): # wide angle
VIDEO_WIDTH = IMAGE_WIDTH*2
video = cv2.VideoWriter(video_name, 0, 11, (VIDEO_WIDTH, VIDEO_HEIGHT)) # assumed 11fps
# font
font = cv2.FONT_HERSHEY_SIMPLEX
# frame count
fno = 1
try:
for key in sorted(fdict):
image = cv2.imread(fdict[key]) # 120x160x3
# get histogram
myfig = plot_img_hist(image, 'rgb')
myfig.savefig("temp_plot.png")
image2 = cv2.imread("temp_plot.png")
# save
plt.close(myfig)
image_copy = image
# resize so we can write some info onto image
image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA)
# add Info to frame
cv2.putText(image, model, (50, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
# overlay histogram
image = overlay_imgs(image2, image)
pst = get_sdsandbox_json_steer_angle(fdict[key])
pst *= conf.norm_const
simst = "Frame: {}, Actual steering angle: {:.2f}".format(str(fno), pst)
cv2.putText(image, simst, (50, 115), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
# create a preprocessed copy to compare what simulator generates to what network "sees"
if (preproc == True): # wide angle
image2 = ag.preprocess(image_copy)
image2 = cv2.resize(image2, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA)
cv2.putText(image2, 'Network Image', (50, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
# histogram on network image
myfig = plot_img_hist(image, 'yuv-rgb')
myfig.savefig("temp_plot.png")
image4 = cv2.imread("temp_plot.png")
# save
plt.close(myfig)
# overlay
image2 = overlay_imgs(image4, image2)
# concatenate
if (preproc == True): # wide angle
cimgs = np.concatenate((image, image2), axis=1)
image = cimgs
# write to video
video.write(image);
# increment frame counter
fno = fno + 1
except Exception as e:
print("Exception raise: " + str(e))
cv2.destroyAllWindows()
video.release()
# subset 100 images - should be quicker
#path = 'C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\subset_100\\'
#mask = '*.jpg'
#fdict = sort_unity_files(path, mask)
#model = 'nvidia2'
#make_video(fdict, model, True) # saved as nvidia2.avi
def plot_img_hist(img, scheme='rgb'):
"""
Plot histogram for an rgb array
Parameters
-------
img: numpy array
    scheme: string, 'rgb' (default) or 'yuv-rgb'
        If scheme is 'rgb', the maximum count in a bin is expected to have 3 digits; otherwise
        6 digits, and the y-axis is plotted on a log scale.
Returns
-------
fig: matplotlib.pyplot figure
Example
-------
import cv2
import numpy as np
import matplotlib.pyplot as plt
ipath = 'C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\logs_Mon_Jul_13_08_29_01_2020\\12893_cam-image_array_.jpg'
img1 = cv2.imread(ipath) # 120x160x3
plt.rcParams["figure.figsize"] = (6,4)
    myfig = plot_img_hist(img1)
"""
# from https://discuss.pytorch.org/t/plot-a-histogram-for-multiple-images-full-dataset/67600
# https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
nb_bins = 256
count_r = np.zeros(nb_bins)
count_g = np.zeros(nb_bins)
count_b = np.zeros(nb_bins)
# Calculate manual hist
x = np.array(img)
x = x.transpose(2, 0, 1)
hist_r = np.histogram(x[0], bins=nb_bins, range=[0, 255])
hist_g = np.histogram(x[1], bins=nb_bins, range=[0, 255])
hist_b = np.histogram(x[2], bins=nb_bins, range=[0, 255])
count_r = hist_r[0]
count_g = hist_g[0]
count_b = hist_b[0]
# Plot manual
bins = hist_r[1]
fig = plt.figure()
# figure()
#plt.yscale('log')
Rmean = "{:.2f}".format(np.mean(x[0]))
plt.bar(bins[:-1], count_r, color='r', alpha=0.5, label="red (mean = " + Rmean + ")")
Gmean = "{:.2f}".format(np.mean(x[1]))
plt.bar(bins[:-1], count_g, color='g', alpha=0.45, label="green (mean = " + Gmean + ")")
Bmean = "{:.2f}".format(np.mean(x[2]))
plt.bar(bins[:-1], count_b, color='b', alpha=0.4, label="blue (mean = " + Bmean + ")")
# show labels
plt.legend(loc='upper right')
plt.xlabel("Bins")
plt.xticks(np.arange(0, 255, step=25))
plt.ylabel("Pixels")
RGBmean = "{:.2f}".format(np.mean(x))
plt.title("RGB intensity value distributions (mean = " + RGBmean + ")")
# add a grid
plt.grid()
# make y scale logarithmic
# plt.yscale('log', nonposy='clip')
# set y limit, may need to change
# No plotting max for one off images
#ymax = 10000
#plt.ylim(0, ymax)
plt.savefig("temp_plot.jpg")
plt.close(fig)
#return fig
# change rgb values
# https://stackoverflow.com/questions/59320564/how-to-access-and-change-color-channels-using-pil
def changeRGB(img, rv=0, gv=0, bv=0):
"""
Change RGB values using PIL
Parameters
-------
img: uint8 numpy image array
rv: integer, value to be added to red channel
gv: integer, value to be added to green channel
bv, integer, value to be added to blue channel
Output
-------
myimg: uint8 numpy image array
Example
-------
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('steph.jpeg')
myimg = changeRGB(img, 60, 0, 0)
plt.imshow(myimg)
"""
from PIL import Image
import numpy as np
im = Image.fromarray(np.uint8(img))
# Split into 3 channels
r, g, b = im.split()
# Red
r = r.point(lambda i: i + rv)
# Green
g = g.point(lambda i: i + gv)
# Blue
b = b.point(lambda i: i + bv)
# Recombine back to RGB image
result = Image.merge('RGB', (r, g, b))
# Convert to uint8 numpy array
myimg = np.asarray(result)
return myimg
# subset 100 images - should be quicker
#path = 'C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\subset_100\\'
#mask = '*.jpg'
#fdict = sort_unity_files(path, mask)
#model = 'nvidia2'
#make_video(fdict, model, True) # saved as nvidia2.avi
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Make Video script')
parser.add_argument('--filepath', type=str, help='tcpflow log')
parser.add_argument('--model', type=str, help='model name for video label')
parser.add_argument('--mask', type=str, help='image file suffix')
args = parser.parse_args()
#fdict = sort_unity_files(args.filepath, args.mask)
#make_video(fdict, model, True) # saved as nvidia2.avi
#make_video(args.filepath, args.model, True)
# example
# python utils.py --filepath=C:\\Users\\aczd097\\Downloads\\dataset\\unity\\log_sample\\subset_100\\ \
# --model=nvidia2 --mask=*.jpg
# In[ ]:
| 3.046875 | 3 |
a10_neutron_lbaas/tests/unit/test_a10_config.py | hthompson6/a10-neutron-lbaas | 10 | 12793953 | # Copyright 2014, <NAME> (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from a10_neutron_lbaas.tests.unit import test_base
class TestA10Config(test_base.UnitTestBase):
def test_verify_appliances(self):
self.assertTrue(self.a.config.get('verify_appliances'))
def test_num_appliances(self):
        # Every time we update the test config, this test has to be updated.
        # A better test would be to parse the JSON structure found in the file
        # and compare that against what we get in devices.
        # This actually tests the number of devices with status == True
self.assertEqual(10, len(self.a.config.get_devices()))
def test_expected_ports(self):
self.assertEqual(8443, self.a.config.get_device('ax1')['port'])
self.assertEqual(80, self.a.config.get_device('ax3')['port'])
self.assertEqual(443, self.a.config.get_device('ax4')['port'])
def test_expected_protocols(self):
self.assertEqual('https', self.a.config.get_device('ax1')['protocol'])
self.assertEqual('http', self.a.config.get_device('ax3')['protocol'])
self.assertEqual('https', self.a.config.get_device('ax4')['protocol'])
def test_v_method(self):
for k, v in self.a.config.get_devices().items():
self.assertEqual('LSI', v['v_method'].upper())
def test_alternate_shared_partition(self):
self.assertTrue(self.a.config.get_device('axadp-alt')['shared_partition'])
def test_ip_in_ip(self):
expected = True
actual = False
for k, v in self.a.config.get_devices().items():
if "ip_in_ip" in v:
actual = v['ip_in_ip']
self.assertEqual(expected, actual)
# TODO(dougwig) -- test new a10_config members
# def test_image_defaults(self):
# self.assertIsNotNone(self.a.config.image_defaults)
# def test_image_defaults_members(self):
# image_defaults = self.a.config.image_defaults
# actual = image_defaults.keys()
# expected = ["name", "id", "visibility", "tags", "min_disk",
# "min_ram", "container_format", "protected",
# "properties", "disk_format"]
# self.assertListEqual(sorted(expected), sorted(actual))
# def test_instance_defaults(self):
# self.assertIsNotNone(self.a.config.instance_defaults)
def test_backwards_compat(self):
self.assertEqual(self.a.config.get_devices(), self.a.config.devices)
self.assertEqual(self.a.config.get_devices(), self.a.config.config.devices)
self.assertEqual(self.a.config.get(
'database_connection'), self.a.config.database_connection)
self.assertEqual(self.a.config.get('use_database'), self.a.config.use_database)
self.assertEqual(self.a.config.get('verify_appliances'), self.a.config.verify_appliances)
self.assertEqual(self.a.config.get(
'database_connection'), self.a.config.config.database_connection)
self.assertEqual(self.a.config.get('use_database'), self.a.config.config.use_database)
self.assertEqual(self.a.config.get(
'verify_appliances'), self.a.config.config.verify_appliances)
self.assertEqual(self.a.config.get(
'vport_defaults'), self.a.config.get_vport_defaults())
class TestA10ConfigProvider(test_base.UnitTestBase):
def setUp(self):
super(TestA10ConfigProvider, self).setUp({'provider': 'prov1'})
def test_top_level(self):
self.assertEqual(self.a.config.get('who_should_win'), 'the-doctor')
self.assertEqual(self.a.config.get('best_spaceship'), 'tardis')
def test_vthunder_api_version(self):
v = self.a.config.get_vthunder_config()
self.assertEqual(v['api_version'], '9.9')
self.assertEqual(v['nova_flavor'], 'acos.min')
| 2.203125 | 2 |
overhave/cli/db/regular.py | TinkoffCreditSystems/overhave | 33 | 12793954 | <reponame>TinkoffCreditSystems/overhave
import click
import sqlalchemy_utils as sau
from alembic.config import Config
from sqlalchemy.engine.url import URL
from sqlalchemy.exc import OperationalError
from overhave import db as database
from overhave.base_settings import DataBaseSettings
def _create_all(config: Config) -> None:
click.echo("Creating...")
config.attributes["metadata"].create_all()
click.secho("Completed.", fg="green")
@click.command(short_help="Create all metadata tables")
@click.pass_obj
def create_all(config: Config) -> None:
""" Create all metadata tables. """
_create_all(config)
def _drop_all(config: Config) -> None:
click.echo("Dropping...")
meta = config.attributes["metadata"]
engine = config.attributes["engine"]
for table in meta.tables:
engine.execute(f'DROP TABLE IF EXISTS "{table}" CASCADE')
engine.execute("DROP TABLE IF EXISTS alembic_version")
engine.execute("DROP SCHEMA IF EXISTS huey")
meta.drop_all()
click.secho("Completed.", fg="green")
@click.command(short_help="Drop all metadata tables, attributes, schema")
@click.pass_obj
def drop_all(config: Config) -> None:
""" Drop all metadata tables, attributes, schema. """
click.confirm("Does it really need?", abort=True)
_drop_all(config)
def _ensure_database_exists(db_url: URL) -> None:
try:
if not sau.database_exists(db_url):
sau.create_database(db_url)
except OperationalError as e:
click.echo(e)
click.echo("Catched error when trying to check database existence!")
def set_config_to_context(context: click.Context, settings: DataBaseSettings) -> None:
""" Set Alembic config to Click context for easy operations and migrations ability. """
_ensure_database_exists(settings.db_url)
settings.setup_db()
config = Config()
config.attributes["engine"] = settings.create_engine()
config.attributes["metadata"] = database.metadata
context.obj = config
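

# Usage sketch (the group name below is hypothetical, not part of this module): this helper is meant
# to be called from a Click group callback so that commands such as `create_all` / `drop_all`
# receive the Alembic config through `click.pass_obj`, e.g.:
#
#   @click.group()
#   @click.pass_context
#   def db(context: click.Context) -> None:
#       set_config_to_context(context, DataBaseSettings())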
| 2.140625 | 2 |
basta/migrations/0018_initialize_categories.py | lorenzosp93/basta_app | 1 | 12793955 | <filename>basta/migrations/0018_initialize_categories.py
from django.db import migrations, transaction, IntegrityError
from django.utils.translation import gettext_lazy as _
CATEGORIES = (
('elements', _('Chemical element')),
('petnames', _('Pet name')),
('transportation', _('Means of transportation')),
('furniture', _('Furniture')),
('disease', _('Disease / Illness')),
('genres', _('Musical genre')),
('villains', _('Villain')),
('excuses', _('Excuses not to go to a party')),
('reasonsquitjob',_('Reasons to quit your job')),
('doatdate', _('Things to do on a date')),
('hobbies', _('Hobby / Activity')),
('uniforms', _('People in uniform')),
('literary', _('Work of literature'))
)
DEFAULTS = [
'name', 'surname','plant', 'animal',
'location', 'film', 'object', 'brand'
]
def initialize_categories(apps, schema_editor):
Category = apps.get_model('basta', 'Category')
for category in CATEGORIES:
try:
with transaction.atomic():
Category.objects.create(
name = category[0],
default = category[0] in DEFAULTS
)
except IntegrityError:
pass
def uninitialize_categories(apps, schema_editor):
Category = apps.get_model('basta', 'Category')
for category in CATEGORIES:
try:
with transaction.atomic():
Category.objects.get(
name = category[0],
).delete()
except IntegrityError:
pass
class Migration(migrations.Migration):
dependencies = [
('basta', '0017_auto_20200503_2155')
]
operations = [
migrations.RunPython(
initialize_categories,
uninitialize_categories
),
] | 2.28125 | 2 |
ansiscape/interpreters/font.py | cariad/ansiscape | 0 | 12793956 | from ansiscape.enums import Font, InterpretationKey, SelectGraphicRendition
from ansiscape.interpreter import Interpreter
class FontValue(Interpreter[Font]):
def __init__(self) -> None:
super().__init__(
key=InterpretationKey.FONT,
lookup={
SelectGraphicRendition.DEFAULT: Font.DEFAULT,
SelectGraphicRendition.FONT_ALT_0: Font.ALT_0,
SelectGraphicRendition.FONT_ALT_1: Font.ALT_1,
SelectGraphicRendition.FONT_ALT_2: Font.ALT_2,
SelectGraphicRendition.FONT_ALT_3: Font.ALT_3,
SelectGraphicRendition.FONT_ALT_4: Font.ALT_4,
SelectGraphicRendition.FONT_ALT_5: Font.ALT_5,
SelectGraphicRendition.FONT_ALT_6: Font.ALT_6,
SelectGraphicRendition.FONT_ALT_7: Font.ALT_7,
SelectGraphicRendition.FONT_ALT_8: Font.ALT_8,
SelectGraphicRendition.FONT_DEFAULT: Font.DEFAULT,
},
)
| 2.453125 | 2 |
wave/wave/freq_old/domain/__init__.py | jedhsu/wave | 0 | 12793957 | <gh_stars>0
from .param import *
from .spectrum import *
| 1.203125 | 1 |
backend/apps/risks/views.py | intellisense/risks | 0 | 12793958 | <filename>backend/apps/risks/views.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from .models import RiskType, RiskField
from .serializers import RiskTypeSerializer
class RiskTypeList(generics.ListCreateAPIView):
queryset = RiskType.objects.all()
serializer_class = RiskTypeSerializer
class RiskTypeDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = RiskType.objects.all()
serializer_class = RiskTypeSerializer
class FieldTypes(APIView):
def get(self, request, **kwargs):
"""
Return a list of RiskField.FIELD_TYPES.
"""
data = [{'field_type': k, 'name': v} for k, v in RiskField.FIELD_TYPES]
return Response(data)
| 2.0625 | 2 |
script/core/runner/grid5k/Grid5kRunner.py | tdurieux/RepairThemAll | 49 | 12793959 | <filename>script/core/runner/grid5k/Grid5kRunner.py
import time
import subprocess
import json
import os
import re
import sys
from core.runner.RepairTask import RepairTask
from core.runner.Runner import Runner
from core.renderer.renderer import get_renderer
from config import REPAIR_ROOT, OUTPUT_PATH, GRID5K_MAX_NODE, GRID5K_TIME_OUT
class Grid5kRunner(Runner):
def __init__(self, tasks, args):
"""
:type tasks: list of RepairTask
"""
super(Grid5kRunner, self).__init__(tasks, args)
def get_running(self):
cmd = 'oarstat --json -u `whoami`'
devnull = open('/dev/null', 'w')
try:
cmd_output = subprocess.check_output(cmd, shell=True, stdin=None, stderr=devnull)
jobs = json.loads(cmd_output)
running_ids = []
waiting_ids = []
for job_id in jobs:
if jobs[job_id]['state'] == "Running":
running_ids.append(int(job_id))
else:
waiting_ids.append(int(job_id))
for task in self.running:
if task.id not in running_ids:
self.running.remove(task)
task.end_date = time.time()
result_path = os.path.join(OUTPUT_PATH, task.benchmark.name, task.bug.project,
str(task.bug.bug_id),
task.tool.name,
str(task.tool.seed), "result.json")
if os.path.exists(result_path):
try:
with open(result_path) as fd:
task.results = json.load(fd)
if 'patches' in task.results and len(task.results['patches']) > 0:
task.status = "PATCHED"
else:
task.status = "DONE"
except Exception:
task.status = "ERROR"
pass
else:
task.status = "ERROR"
self.finished.append(task)
for task in self.waiting:
if task.id not in waiting_ids:
self.waiting.remove(task)
if task.id in running_ids:
task.status = "STARTED"
task.starting_date = time.time()
self.running.append(task)
except subprocess.CalledProcessError:
pass
finally:
return self.running
def start_task(self, task):
"""
:param task:
:type task: RepairTask
:return:
"""
log_root_path = os.path.join(OUTPUT_PATH, task.benchmark.name, task.bug.project, str(task.bug.bug_id),
task.tool.name,
str(task.tool.seed))
stdout_log = os.path.join(log_root_path, 'grid5k.stdout.log')
stderr_log = os.path.join(log_root_path, 'grid5k.stderr.log')
if not os.path.exists(log_root_path):
os.makedirs(log_root_path)
elif os.path.exists(stderr_log):
os.remove(stderr_log)
if os.path.exists(stdout_log):
os.remove(stdout_log)
bug_id = task.bug.project
if task.bug.bug_id != "" and task.bug.bug_id is not None:
bug_id = "%s_%s" % (task.bug.project, task.bug.bug_id)
parameters = []
current_parameter = None
for a in sys.argv:
if a[0] == '-':
if current_parameter is not None:
parameters.append(current_parameter)
param = a[1:]
if param[0] == '-':
param = param[1:]
current_parameter = {
"separator": '-' if len(param) == 1 else '--',
"parameter": param,
"value": ""
}
elif current_parameter is not None:
current_parameter['value'] += " " + a
if current_parameter is not None:
parameters.append(current_parameter)
node_cmd_args = "%s %s --id %s" % (
os.path.join(REPAIR_ROOT, 'script', 'repair.py'),
task.tool.name,
bug_id
)
for param in parameters:
if param["parameter"] == "i" or param["parameter"] == "id":
continue
node_cmd_args += " %s%s%s" % (param["separator"], param["parameter"], param["value"])
node_cmd = "python %s" % node_cmd_args
cmd = "oarsub -l nodes=1,walltime=%s -O %s -E %s \"%s\"" % (
GRID5K_TIME_OUT,
stdout_log,
stderr_log,
node_cmd)
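        # For illustration only (tool/bug names are hypothetical), the submitted command looks like:
        #   oarsub -l nodes=1,walltime=<GRID5K_TIME_OUT> -O .../grid5k.stdout.log -E .../grid5k.stderr.log \
        #     "python .../script/repair.py <tool> --id <project>_<bug_id> ..."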
devnull = open('/dev/null', 'w')
cmd_output = subprocess.check_output(cmd, shell=True, stdin=None, stderr=devnull)
m = re.search('OAR_JOB_ID=([0-9]+)', cmd_output)
if m:
task.id = int(m.group(1))
task.status = "WAITING"
self.waiting.append(task)
def execute(self):
renderer = get_renderer(self)
to_run = self.tasks[:]
while (len(to_run) > 0 or len(self.running) > 0 or len(self.waiting) > 0) and not self.is_end_time():
if len(to_run) > 0 and len(self.running) + len(self.waiting) < GRID5K_MAX_NODE:
task = to_run.pop()
if task.bug is not None:
self.start_task(task)
time.sleep(1)
renderer.render()
self.get_running()
renderer.render_final_result()
| 2.203125 | 2 |
datasets/code_x_glue_cc_cloze_testing_all/generated_definitions.py | WojciechKusa/datasets | 10,608 | 12793960 | DEFINITIONS = {
"go": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "go",
"parameters": {"language": "go"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/go",
"sizes": {"train": 25282},
},
"java": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "java",
"parameters": {"language": "java"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/java",
"sizes": {"train": 40492},
},
"javascript": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "javascript",
"parameters": {"language": "javascript"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/javascript",
"sizes": {"train": 13837},
},
"php": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "php",
"parameters": {"language": "php"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/php",
"sizes": {"train": 51930},
},
"python": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "python",
"parameters": {"language": "python"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/python",
"sizes": {"train": 40137},
},
"ruby": {
"class_name": "CodeXGlueCcClozeTestingAll",
"dataset_type": "Code-Code",
"description": "CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"dir_name": "ClozeTesting-all",
"name": "ruby",
"parameters": {"language": "ruby"},
"project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all",
"raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/ruby",
"sizes": {"train": 4437},
},
}
| 1.460938 | 1 |
wmm/wrapper.py | ESA-VirES/WMM | 1 | 12793961 | #!/usr/bin/env python
import sys
import argparse
import os
import os.path
import subprocess
import numpy
import gdal
import osr
import ogr
from uuid import uuid4
import logging
logger = logging.getLogger(__name__)
zoomresolution = {
0.70312500:0,
0.35156250:1,
0.17578125:2,
0.08789063:3,
0.04394531:4,
0.02197266:5,
0.01098633:6,
0.00549316:7,
0.00274658:8,
0.00137329:9,
0.00068665:10
}
# product names:(id, iso_interval flavor)
products = {
"Decl":(1, 1),
"Incl":(2, 1),
"F":(3, 2),
"H":(4, 2),
"X":(5, 2),
"Y":(6, 2),
"Z":(7, 2)
#"GV":{8,
#"Ddot":{9.,
#"Idot":{10,
#"Fdot":{11,
#"Hdot":{12,
#"Xdot":{13,
#"Ydot":{14,
#"Zdot":{15,
#"GVdot":{16
}
iso_intervals = {
1:{
0: 10,
1: 10,
2: 5,
3: 5,
4: 1,
5: 1,
6: 0.5,
7: 0.5,
8: 0.1
},
2:{
0: 5000,
1: 5000,
2: 2500,
3: 1000,
4: 1000,
5: 1000,
6: 500,
7: 500,
8: 100
}
}
def main(filename, product, bbox, pixelsize, height, time):
# configuration
## resolutions (in degrees) per zoom level for EPSG 4326
exe = os.path.join(os.path.dirname(__file__), 'wmm_grid.exe')
"""if not os.path.exists("WMM.COF"):
raise Exception(
"Coefficient file missing: %s"
% os.path.join(os.path.dirname(__file__), "WMM.COF")
)"""
proc = subprocess.Popen(
[exe], 1, exe, subprocess.PIPE, open(os.devnull),
subprocess.STDOUT, cwd=os.path.dirname(__file__)
)
latmin, lonmin, latmax, lonmax = bbox
# Step Size (in decimal degrees)
#deg_interval = resolution
deg_interval = (latmax-latmin)/pixelsize
# Minimum Height above the WGS-84 Ellipsoid (in km)
heightmax = height
# Maximum Height above the WGS-84 Ellipsoid (in km)
heightmin = height
# height step size (in km)
height_interval = 0
# decimal year starting time
timestart = time
# decimal year ending time
timeend = time
# time step size
time_interval = "0"
# geomagnetic element to print. Your options are :
# 1. Declination 9. Ddot
# 2. Inclination 10. Idot
# 3. F 11. Fdot
# 4. H 12. Hdot
# 5. X 13. Xdot
# 6. Y 14. Ydot
# 7. Z 15. Zdot
# 8. GV 16. GVdot
product_id = products[product][0]
# select output (1 for file)
output = "1"
# output filename
tempfile = "/tmp/%s" % uuid4().hex[:10]
# generate geotransform values
geotransform = [
lonmin,
deg_interval,
0,
latmax,
0,
-deg_interval
]
wmmxmin = lonmin-deg_interval/2
wmmxmax = lonmax+deg_interval/2
wmmymin = latmin-deg_interval/2
wmmymax = latmax+deg_interval/2
print >>proc.stdin, wmmymin
print >>proc.stdin, wmmymax
print >>proc.stdin, wmmxmin
print >>proc.stdin, wmmxmax
print >>proc.stdin, deg_interval
print >>proc.stdin, height
print >>proc.stdin, heightmax
print >>proc.stdin, heightmin
print >>proc.stdin, height_interval
print >>proc.stdin, timestart
print >>proc.stdin, timeend
print >>proc.stdin, time_interval
print >>proc.stdin, product_id
print >>proc.stdin, output
print >>proc.stdin, tempfile
print >>proc.stdin
status = proc.wait()
print "STATUS:", status
proc.stdin.close()
values = []
xvalues = set()
yvalues = set()
with open(tempfile) as f:
for line in f:
line_array = line.split( )
values.append(str(line_array[4]))
xvalues.add(str(line_array[1]))
yvalues.add(str(line_array[0]))
rasterxsize = len(xvalues)
rasterysize = len(yvalues)
logger.info("size %d %d " %(rasterxsize, rasterysize))
os.remove(tempfile)
# create 1d numpy array, reshape to 2d, and flip to correct order
raster_out = numpy.fromiter(values, "float32")
raster_2d = numpy.flipud(raster_out.reshape(rasterysize,rasterxsize))
#print raster_2d.shape
#print rasterxsize, rasterysize
driver = gdal.GetDriverByName('GTiff')
ds = driver.Create(
filename, rasterxsize, rasterysize, 1, gdal.GDT_Float32
)
ds.GetRasterBand(1).WriteArray(raster_2d)
ds.SetGeoTransform(geotransform)
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
ds.SetProjection(srs.ExportToWkt())
return ds
#contour_steps = parsed.interval[0]
"""
zoom = round(deg_interval, 8)
contour_steps = iso_intervals[
products[product][1]
][zoomresolution[zoom]]
#print "zoomlevel: " + zoomresolution[deg_interval]
#print iso_intervals[products[parsed.product[0]][1]][zoomresolution[deg_interval]]
# clean up from previous runs
try:
os.remove('contour.shp')
except:
pass
try:
os.remove('contour.dbf')
except:
pass
try:
os.remove('contour.shx')
except:
pass
ogr_ds = ogr.GetDriverByName('ESRI Shapefile').CreateDataSource(filename)
ogr_lyr = ogr_ds.CreateLayer('contour')
field_defn = ogr.FieldDefn('ID', ogr.OFTInteger)
ogr_lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('elev', ogr.OFTReal)
ogr_lyr.CreateField(field_defn)
gdal.ContourGenerate(mem_ds.GetRasterBand(1), contour_steps, 0, [], 0, 0, ogr_lyr, 0, 1)
#print min(raster_out), max(raster_out)
"""
if __name__ == "__main__":
args = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument("bbox", nargs=4, type=float)
#parser.add_argument("resolution", nargs=1, type=float)
parser.add_argument("pixelsize", nargs=1, type=int)
parser.add_argument("height", nargs=1, type=int)
parser.add_argument("time", nargs=1, type=float)
parser.add_argument("product", nargs=1, type=str)
parser.add_argument("--contour", action="store_true", default=False)
#parser.add_argument("interval", nargs=1, type=float)
parsed = parser.parse_args(args)
main(
"contour.shp", parsed.product[0], parsed.bbox, parsed.pixelsize[0],
parsed.height[0], parsed.time[0], parsed.contour
)
| 2.015625 | 2 |
epoch.py | InterImm/marclock-serverless | 1 | 12793962 | <gh_stars>1-10
from http.server import BaseHTTPRequestHandler
from urllib.parse import urlparse
import json
import debugserver
import datetime
from functions.mars import CoordinatedMarsTime
from functions.interimm import MartianTime
class handler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
url = self.path
parsed_url = urlparse(url)
path = parsed_url.path
epoch_time = path.split('/')[-1]
epoch_time = int(float(epoch_time))
mars_time = CoordinatedMarsTime.from_unix_time(epoch_time)
mars_interimm_now = MartianTime()
mars_interimm_now_hms = mars_interimm_now.interimm_clock(mars_time.msd)
mars_interimm_now_cal = mars_interimm_now.interimm_calendar(mars_time.msd)
dt_time = datetime.datetime.fromtimestamp(epoch_time).isoformat()
res = {
"earth_utc_time": dt_time,
"mars_timezone": 0,
"mars24": {
'msd': mars_time.msd,
'day': mars_time.days,
'hour': mars_time.hours,
'minute': mars_time.minutes,
'second': mars_time.seconds,
'millisecond': mars_time.milliseconds
},
"interimm": {
"msd": mars_interimm_now_hms.get('msd'),
"year": mars_interimm_now_cal.get('year'),
"month": mars_interimm_now_cal.get('month'),
"day": mars_interimm_now_cal.get('day'),
"hour": mars_interimm_now_hms.get('hour'),
"minute": mars_interimm_now_hms.get('minute'),
"second": mars_interimm_now_hms.get('second')
}
}
self.wfile.write(json.dumps(res).encode("utf-8"))
return
if __name__ == '__main__':
debugserver.serve(handler) | 2.75 | 3 |
docs/demo/IRE2017/RobotCAR/MLP/train_model.py | FaBoPlatform/RobotCarAI | 10 | 12793963 | # coding: utf-8
# Multilayer Perceptron (MLP)
# Training driven by a FIFO queue
# Records the number of training steps in the model
# Training data comes from a generator instead of CSV files
# Changed to a 3x11x4 NN model
# Added a softmax score output
import os
_FILE_DIR=os.path.abspath(os.path.dirname(__file__))
import time
import tensorflow as tf
import threading
from sklearn.utils import shuffle
import sys
sys.path.append(_FILE_DIR+'/..')
from generator import SensorGenerator
import numpy as np
tf.reset_default_graph()
MODEL_DIR=_FILE_DIR+"/model"
SUMMARY_LOG_DIR=_FILE_DIR+"/log"
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
n_nodes_hl1 = 11
data_cols = 3 # number of sensors: left45, front, right45
n_classes = 4 # number of prediction classes: stop, left, forward, right
batch_size = 100 # batch size, typically around 10-100
chunk_size = 100 # FIFOQueue capacity
target_step = 10000000 # number of training steps
TEST_NUM = 10000 # number of test samples
generator = SensorGenerator()
def generate_random_train_data(batch_size):
CSVDATA=[]
    # train on judgments within 10 m
    #sensors = np.random.randint(0,1000,[batch_size,3])
    # train on judgments within 20 cm ahead
    #LEFT45 = np.random.randint(0,1000,batch_size)
    #FRONT = np.random.randint(0,20,batch_size)
    #RIGHT45 = np.random.randint(0,1000,batch_size)
    # train on judgments for 20 cm-100 cm ahead and within 100 cm to the left/right
    #LEFT45 = np.random.randint(0,100,batch_size)
    #FRONT = np.random.randint(20,200,batch_size)
    #RIGHT45 = np.random.randint(0,100,batch_size)
    # train on judgments within 2 m
    #LEFT45 = np.random.randint(0,200,batch_size)
    #FRONT = np.random.randint(0,200,batch_size)
    #RIGHT45 = np.random.randint(0,200,batch_size)
    # train on judgments within 1 m
    #LEFT45 = np.random.randint(0,100,batch_size)
    #FRONT = np.random.randint(0,100,batch_size)
    #RIGHT45 = np.random.randint(0,100,batch_size)
    # train on judgments within 2 m
sensors = np.random.randint(0,200,[batch_size,3])
#sensors = np.c_[LEFT45,FRONT,RIGHT45]
for i in range(batch_size):
GENERATOR_RESULT = generator.driving_instruction(sensors[i])
CSVROW = np.hstack((sensors[i],GENERATOR_RESULT[0:4]))
CSVDATA.append(CSVROW)
CSVDATA = np.array(CSVDATA)
batch_data = CSVDATA[0:batch_size,0:data_cols]
batch_target = CSVDATA[0:batch_size,data_cols:]
return batch_data, batch_target
def load_and_enqueue(sess):
while True:
try:
batch_data, batch_target = generate_random_train_data(batch_size)
sess.run(enqueue_op, feed_dict={placeholder_input_data:batch_data, placeholder_input_target:batch_target})
except tf.errors.CancelledError as e:
break
print("finished enqueueing")
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
with tf.variable_scope("input"):
placeholder_input_data = tf.placeholder('float', [None, data_cols], name='input_data') # for load_and_enqueue. use dequeue_data_op for prediction
placeholder_input_target = tf.placeholder('float', name='input_target') # for load_and_enqueue. use dequeue_target_op for prediction
placeholder_batch_size = tf.placeholder(tf.int32, name='batch_size') # need feed_dict in training sess.run(). don't need for prediction.
with tf.variable_scope("step"):
    placeholder_step = tf.placeholder(tf.int32, name='input_step') # placeholder used to feed in the step value
    variable_step = tf.Variable(initial_value=0, name="step") # variable that stores the training step count
step_op = variable_step.assign(placeholder_step)
with tf.variable_scope("queue"):
queue = tf.FIFOQueue(
capacity=chunk_size, # enqueue size
dtypes=['float', 'float'],
shapes=[[data_cols], [n_classes]],
name='FIFOQueue'
)
# Enqueue and dequeue operations
enqueue_op = queue.enqueue_many([placeholder_input_data, placeholder_input_target], name='enqueue_op')
dequeue_data_op, dequeue_target_op = queue.dequeue_many(placeholder_batch_size, name='dequeue_op') # instead of data/target placeholder
with tf.variable_scope('neural_network_model'):
hidden_1_layer = {'weights':tf.Variable(weight_variable([data_cols, n_nodes_hl1])),
'biases':tf.Variable(bias_variable([n_nodes_hl1]))}
output_layer = {'weights':tf.Variable(weight_variable([n_nodes_hl1, n_classes])),
'biases':tf.Variable(bias_variable([n_classes])),}
l1 = tf.add(tf.matmul(dequeue_data_op,hidden_1_layer['weights']), hidden_1_layer['biases'])
l1 = tf.nn.relu(l1)
    # prediction output
prediction = tf.add(tf.matmul(l1,output_layer['weights']), output_layer['biases'], name='output_y')
    # softmax score
score = tf.nn.softmax(prediction, name='score')
with tf.variable_scope('loss'):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=dequeue_target_op)
loss_op = tf.reduce_mean(losses, name='cost')
tf.summary.scalar('loss', loss_op)
with tf.variable_scope('accuracy'):
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(dequeue_target_op, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'), name='accuracy')
tf.summary.scalar('accuracy', accuracy)
summary_op = tf.summary.merge_all()
train_op = tf.train.AdamOptimizer(0.0001).minimize(loss_op, name='train_op')
saver = tf.train.Saver(max_to_keep=1000)
test_data, test_target =generate_random_train_data(TEST_NUM)
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(MODEL_DIR)
if ckpt:
        # get the path to the most recently saved model from the checkpoint file
last_model = ckpt.model_checkpoint_path
print("load {0}".format(last_model))
        # load the trained model
saver.restore(sess, last_model)
LOAD_MODEL = True
else:
print("initialization")
        # run variable initialization
init_op = tf.global_variables_initializer()
sess.run(init_op)
writer = tf.summary.FileWriter(SUMMARY_LOG_DIR, sess.graph)
start_time, start_clock = time.time(), time.clock()
# Start a thread to enqueue data asynchronously, and hide I/O latency.
coord = tf.train.Coordinator()
enqueue_thread = threading.Thread(target=load_and_enqueue, args=[sess])
enqueue_thread.isDaemon()
enqueue_thread.start()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    step = 0 # keep the step count in a variable so it can be recorded in the model at the end
try:
# check the accuracy before training (without feed_dict!)
print(sess.run(accuracy, feed_dict={placeholder_batch_size:chunk_size})) # check batch_size's data
        # get the previously recorded step count
_step = sess.run(variable_step)
print("learned step:{}".format(_step))
for step in range(_step+1, target_step+1):
batch_loss=0
w_summary=None
_, batch_loss, w_summary = sess.run([train_op, loss_op, summary_op],
feed_dict={placeholder_batch_size:batch_size})
if step % 1000 == 0:
if not w_summary is None:
writer.add_summary(w_summary, step)
ac = sess.run(accuracy, feed_dict={placeholder_batch_size:chunk_size}) # check batch_size's data
                    # check the accuracy on the test data
test_accuracy = accuracy.eval({'queue/dequeue_op:0':test_data,
'queue/dequeue_op:1':test_target})
if step % 10000 == 0:
print("Step:%d accuracy:%.8f test_accuracy:%.8f loss:%.8f time:%.8f clock:%.14f" % (step,ac,test_accuracy,batch_loss,time.time()-start_time,time.clock()-start_clock))
            # save the model every 1,000,000 steps
if step % 1000000 == 0:
                _step = sess.run(step_op,feed_dict={placeholder_step:step}) # record the step count into variable_step
saver.save(sess, MODEL_DIR + '/model-'+str(step)+'.ckpt')
sess.run(queue.close(cancel_pending_enqueues=True))
except Exception as e:
# Report exceptions to the coodinator.
print(e)
coord.request_stop(e)
finally:
coord.request_stop()
coord.join(threads)
        # if any new steps were trained, save the model
if step > _step:
            _step = sess.run(step_op,feed_dict={placeholder_step:step}) # record the step count into variable_step
saver.save(sess, MODEL_DIR + '/model-'+str(step)+'.ckpt')
        # generate fresh test data and check the accuracy
test_data, test_target =generate_random_train_data(TEST_NUM)
print('Accuracy:',accuracy.eval({dequeue_data_op:test_data,
dequeue_target_op:test_target}))
        # print the total number of training steps
print('step:{}'.format(sess.run(variable_step)))
print("end")
| 2.390625 | 2 |
tests/h/services/annotation_moderation_test.py | rickyhan/h | 2 | 12793964 | <filename>tests/h/services/annotation_moderation_test.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from h import models
from h.services.annotation_moderation import AnnotationModerationService
from h.services.annotation_moderation import annotation_moderation_service_factory
class TestAnnotationModerationServiceHide(object):
def test_it_creates_annotation_moderation(self, svc, factories, db_session):
annotation = factories.Annotation()
svc.hide(annotation)
mod = db_session.query(models.AnnotationModeration) \
.filter_by(annotation=annotation) \
.first()
assert mod is not None
def test_it_skips_creating_moderation_when_already_exists(self, svc, factories, db_session):
existing = factories.AnnotationModeration()
svc.hide(existing.annotation)
count = db_session.query(models.AnnotationModeration) \
.filter_by(annotation=existing.annotation) \
.count()
assert count == 1
@pytest.fixture
def svc(self, db_session):
return AnnotationModerationService(db_session)
class TestAnnotationNipsaServiceFactory(object):
def test_it_returns_service(self, pyramid_request):
svc = annotation_moderation_service_factory(None, pyramid_request)
assert isinstance(svc, AnnotationModerationService)
def test_it_provides_request_db_as_session(self, pyramid_request):
svc = annotation_moderation_service_factory(None, pyramid_request)
assert svc.session == pyramid_request.db
| 2.109375 | 2 |
rdmo/questions/migrations/0051_sites_blank.py | ItsNotYou/rdmo | 77 | 12793965 | # Generated by Django 2.2.9 on 2020-02-14 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0050_data_migration'),
]
operations = [
migrations.AlterField(
model_name='catalog',
name='sites',
field=models.ManyToManyField(blank=True, help_text='The sites this catalog belongs to (in a multi site setup).', to='sites.Site', verbose_name='Sites'),
),
]
| 1.773438 | 2 |
members/context_processors.py | AsinineFatuity/tukulelunch-django-python | 0 | 12793966 | <filename>members/context_processors.py
from .models import Category,PledgeItem,Pledge
from .views import pledge_id_fn
def counter(request):
item_count=0
if 'admin' in request.path:
return {}
else:
try:
            pledge=Pledge.objects.filter(pledge_id=pledge_id_fn(request)) # get the Pledge queryset for the current session
            # find the pledge items for that pledge and return the PledgeItem objects (limited to the first matching pledge)
pledge_items=PledgeItem.objects.all().filter(pledge=pledge[:1])
for pledge_item in pledge_items:
item_count+=pledge_item.quantity
except Pledge.DoesNotExist:
item_count=0
return dict(item_count=item_count)
def menu_links(request):
links=Category.objects.all()
return dict(links=links)
| 1.992188 | 2 |
tests/conftest.py | Agilicus/copper-sdk | 4 | 12793967 | import pytest
from copper_sdk import COPPER_API_TOKEN, COPPER_API_EMAIL
from copper_sdk.copper import Copper
@pytest.fixture(scope='session')
def copper():
return Copper(COPPER_API_TOKEN, COPPER_API_EMAIL)
| 1.742188 | 2 |
cefiro_customizations/helpers.py | saeedkola/cefiro_customizations | 0 | 12793968 | <reponame>saeedkola/cefiro_customizations
import frappe
from frappe.utils import get_files_path, get_site_path, get_site_base_path
import openpyxl,re
def get_absolute_path(file_name, is_private=False):
site_name = get_site_base_path()
if(file_name.startswith('/files/')):
file_name = file_name[7:]
return frappe.utils.get_bench_path()+ "/sites/"+site_name[2:]+"/"+ frappe.utils.get_path('private' if is_private else 'public', 'files', file_name)[1:]
@frappe.whitelist()
def get_column_names(file_name):
excel_file_path = get_absolute_path(file_name)
wb = openpyxl.load_workbook(excel_file_path)
sheet = wb.active
max_col = sheet.max_column
column_map = []
for i in range(1,max_col+1):
cell_obj = sheet.cell(row=1,column=i)
column_map.append({
"excel_column" : cell_obj.value,
"import_column": ""
})
return column_map | 2.515625 | 3 |
PaddleFSL/test/backbones/rc_init_vector_test.py | Chaoqun-Guo/FSL-Mate | 0 | 12793969 | # Copyright 2021 PaddleFSL Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlefsl.backbones import RCInitVector
vector_initializer = RCInitVector(corpus='glove-wiki', embedding_dim=50)
def get_idx_list_from_words_test():
idx_list = vector_initializer.get_idx_list_from_words('[PAD]')
print(idx_list)
idx_list = vector_initializer.get_idx_list_from_words(['i', 'love', 'you'])
print(idx_list)
def search_tokens_test():
vector = vector_initializer.search_tokens(['i', 'love', 'robin', '[PAD]'])
print(vector)
print(vector.shape)
def rc_init_vector_test():
vector = vector_initializer(
tokens=['yes', 'it', 'is', '*9*', '6$'],
head_position=[0],
tail_position=[2],
max_len=6
)
print(len(vector_initializer))
print(vector)
print(vector.shape)
if __name__ == '__main__':
get_idx_list_from_words_test()
search_tokens_test()
rc_init_vector_test()
| 2.203125 | 2 |
docs/source/plots/var_plot_forecast.py | madhushree14/statsmodels | 6,931 | 12793970 | from var_plots import plot_forecast
plot_forecast()
| 1.101563 | 1 |
_old/LennieTheLeaker.py | tmp63498/Lestat | 13 | 12793971 | #!/usr/bin/python3
import argparse
import sys
def readHashFile(hashfile):
f = open(hashfile)
hashes = f.read().split('\n')[:-1]
ntlm ={"cracked":{}, "safe":{}}
f.close()
for i in hashes:
try:
h = i.split(':')
ntlm["safe"][h[3].upper()] = h[0].lower()
except Exception as e:
pass
return hashes, ntlm
def searchLeaked(leakfile, ntlm, verbose):
leak = open(leakfile,"r")
cpt = 0
print("[*] Checking leaked database against hashes (long) ...", file=sys.stderr)
for line in leak:
if line[:-1] in ntlm["safe"]:
ntlm["cracked"][line[:-1]] = ntlm["safe"][line[:-1]]
cpt += 1
del(ntlm["safe"][line[:-1]])
if verbose:
print(line[:-1], ntlm["cracked"][line[:-1]])
print(f"{cpt} compromised", file=sys.stderr)
leak.close()
def export(ntlm, john_result_file='', output=''):
john = ''
if john_result_file:
f = open(john_result_file)
john = f.read().lower()
f.close()
if output:
f = open(output, "a+")
cpt = 0
for c in ntlm["cracked"]:
line = f"{ntlm['cracked'][c]}:<LeakTheWeak>:LEAK:NOLM:{c}:::"
if ntlm["cracked"][c] not in john:
if output :
f.write(line+'\n')
else:
print(line)
cpt += 1
if john_result_file:
print(f"New {cpt} compromised")
if output:
f.close()
def main():
parser = argparse.ArgumentParser(description='List accounts compromised in public leaked NTLMs', add_help=True)
parser.add_argument('-w', '--write', action="store", dest="path", default='',
help='A path to store the results. Default is stdout')
parser.add_argument('HASH_FILE', action="store",
help="The result file of impacket-secretsdump")
parser.add_argument('-j', '--john', action="store", dest="john_file", default='',
help="If used, only the accounts not cracked by john are displayed")
parser.add_argument('-v', '--verbose', action="store_true", dest="verbose", default=False,
help="display the cracked accounts in real time")
parser.add_argument('LEAK_FILE', action="store",
help="The wordlist containing the NTLM leaked")
args = parser.parse_args()
hashes, ntlm = readHashFile(args.HASH_FILE)
searchLeaked(args.LEAK_FILE, ntlm, args.verbose)
export(ntlm, args.john_file, args.path)
if __name__ == '__main__':
main()
| 2.796875 | 3 |
baseq/fastq/split_barcode.py | basedata10/baseq | 1 | 12793972 | <gh_stars>1-10
import os
def write_buffer(buffer, filehandles):
for key in buffer.keys():
filehandles[key].writelines(buffer[key])
buffer[key] = []
def write_and_close(buffer, filehandles):
for key in buffer.keys():
filehandles[key].writelines(buffer[key])
filehandles[key].close()
def split_barcode(barcode_file, fastq, outdir, suffix):
"""
    barcode_file: TSV file, one sample per line: samplename followed by its barcode string
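    Illustrative layout (sample names and barcodes are made-up placeholders):
        sampleA    ACGTACGT
        sampleB    TTGCAAGC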
"""
with open(barcode_file, 'r') as file:
barcodes = file.readlines()
bc_files = {}
bc_buffers = {}
for bc in barcodes:
info = [x.strip() for x in bc.split()]
if len(info) == 2:
sample = info[0]
barcode = info[1]
print("[info] The barcode for {} is {}".format(info[0], info[1]))
path = os.path.join(outdir, sample) + "." + suffix
print("[info] File for '{}' is '{}'".format(sample, path))
bc_files[barcode] = open(path, "w")
bc_buffers[barcode] = []
#Iterating the reads...
from baseq.utils.file_reader import read_file_by_lines
inlines = read_file_by_lines(fastq, 1000 * 1000 * 1000, 4)
count = 0
for line in inlines:
count += 1
barcode = line[0].strip().split(":")[-1]
if barcode in bc_files:
bc_buffers[barcode].append("".join(line))
if count % 1000000 == 1:
write_buffer(bc_buffers, bc_files)
write_and_close(bc_buffers, bc_files) | 2.796875 | 3 |
Exercicios/ex072.py | mauroalbuquerque/Python-CursoEmVideo | 0 | 12793973 | <filename>Exercicios/ex072.py
contagem = ('Zero', 'Um', 'Dois', 'Três',
'Quatro', 'Cinco', 'Seis', 'Sete',
'Oito', 'Nove', 'Dez', 'Onze', 'Doze',
'Treze', 'Quartoze', 'Quinze', 'Dezesseis',
'Dezesete', 'Dezoito', 'Dezenove', 'Vinte')
selecao = input('Informe um número entre 0 ~ 20: ')
while not int(selecao) >= 0 and int(selecao) <= 20:
selecao = input('Tente Novamente informando um número válido. Informe um número entre 0 ~ 20: ')
print(selecao)
| 3.9375 | 4 |
tools/blender_script.py | aspadm/DEMD-database-old | 3 | 12793974 | import os
import bpy
import sys
# Names of folder and files
args = sys.argv
source_file = args[-2]
convert_file = args[-1]
save_type = convert_file.split(".")[-1]
# Deleting all objects
for scene in bpy.data.scenes:
for obj in scene.objects:
scene.objects.unlink(obj)
for bpy_data_iter in (
bpy.data.objects,
bpy.data.meshes,
bpy.data.lamps,
bpy.data.cameras,
):
for id_data in bpy_data_iter:
bpy_data_iter.remove(id_data)
bpy.ops.object.select_by_type(type = "MESH")
bpy.ops.object.delete(use_global=False)
for item in bpy.data.meshes:
for scene in bpy.data.scenes:
for obj in scene.objects:
scene.objects.unlink(obj)
item.user_clear()
bpy.data.meshes.remove(item)
print("Scene cleared")
# Open model and save
try:
try:
print("Try to use plugin...")
bpy.ops.import_scene.deusexmd(filepath=source_file)
print("Success")
except:
try:
print("Fail")
print("Try to use outer script...")
try:
import import_DeusExMD
except:
print("Fail to import")
exit(2)
print("Successful module import; try to open model...")
import_DeusExMD.import_DeusExMD(source_file, #filepath
bpy.context, #context
False, #randomize_colors
True, #import_vertcolors
False, #skip_blank
False, #use_layers
1.0) #mesh_scale
print("Success")
except:
print("Fail")
exit(1)
print("\nModel opened\n")
if save_type == "obj":
bpy.ops.export_scene.obj(filepath=convert_file)
elif save_type == "fbx":
bpy.ops.export_scene.fbx(filepath=convert_file)
elif save_type == "3ds":
bpy.ops.export_scene.autodesk_3ds(filepath=convert_file)
elif save_type == "stl":
bpy.ops.export_mesh.stl(filepath=convert_file,
check_existing=False,
ascii=False)
else:
print("Incorrect save format")
print("\nConvertions done!")
exit(0)
# In case of error
except Exception:
print("\nSome errors here")
exit(1)
| 2.265625 | 2 |
powerwatch/analysis/old_analysis_scripts/monthly_pw_dw_correlate.py | nklugman/PlugWatch | 0 | 12793975 | #!/usr/bin/env python
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, window, asc, desc, lead, lag, udf, hour, month, dayofmonth, collect_list, lit, year, coalesce, mean
import pyspark.sql.functions as F
from pyspark.sql.window import Window
from pyspark.sql.types import FloatType, IntegerType, DateType, TimestampType
from pyspark import SparkConf
import yaml
import datetime
import os
from math import isnan
conf = SparkConf()
conf.set("spark.jars", os.getenv("HOME") + "/.ivy2/jars/org.postgresql_postgresql-42.1.1.jar")
conf.set("spark.executor.extrajavaoptions", "-Xmx15000m")
conf.set("spark.executor.memory", "15g")
conf.set("spark.driver.memory", "15g")
conf.set("spark.storage.memoryFraction", "0")
spark = SparkSession.builder \
.config(conf=conf) \
.master("local[4]") \
.appName("SAIDI/SAIFI cluster size") \
.getOrCreate()
config = open('config.yaml')
config = yaml.load(config)
#connect to the database
pw_df = spark.read.jdbc("jdbc:postgresql://timescale.lab11.eecs.umich.edu/powerwatch", "pw_dedupe",
properties={"user": config['user'], "password": config['password'],"driver":"org.postgresql.Driver"})
#read the data that we care about
pw_df = pw_df.select(pw_df['core_id'],pw_df['time'],pw_df['is_powered'],pw_df['product_id'],pw_df['millis'],pw_df['last_unplug_millis'],pw_df['last_plug_millis'])
pw_df = pw_df.filter("product_id = 7008 OR product_id= 7009")
#now we need to create a window function that looks at the leading/lagging edge of is_powered and detects transitions
#then we can filter out all data that is not a transition
def detectTransition(value1, value2):
if(value1 == value2):
return 0
else:
return 1
udfDetectTransition = udf(detectTransition, IntegerType())
w = Window.partitionBy("core_id").orderBy(asc("time"))
is_powered_lag = lag("is_powered",1).over(w)
pw_df = pw_df.withColumn("transition", udfDetectTransition("is_powered",is_powered_lag))
#filter out all transitions
pw_df = pw_df.filter("transition != 0")
#now count each outage (really restoration)
def countOutage(value1, value2, value3):
if(value1 == False and value2 == True and value3 == True):
return 1
else:
return 0
udfCountTransition = udf(countOutage, IntegerType())
is_powered_lead = lead("is_powered",1).over(w)
is_powered_lag = lag("is_powered",1).over(w)
pw_df = pw_df.withColumn("outage", udfCountTransition("is_powered", is_powered_lead, is_powered_lag))
#now find all the exact outage and restore times using millis
def timeCorrect(time, millis, unplugMillis):
if(unplugMillis == 0 or millis == None or unplugMillis == None or isnan(millis) or isnan(unplugMillis)):
return time
elif unplugMillis > millis:
return time
else:
return time - datetime.timedelta(microseconds = (int(millis)-int(unplugMillis))*1000)
udftimeCorrect = udf(timeCorrect, TimestampType())
pw_df = pw_df.withColumn("outage_time", udftimeCorrect("time","millis","last_unplug_millis"))
pw_df = pw_df.withColumn("r_time", udftimeCorrect("time","millis","last_plug_millis"))
#now denote the end time of the outage for saidi reasons
time_lead = lead("r_time",1).over(w)
pw_df = pw_df.withColumn("restore_time", time_lead)
#now filter out everything that is not an outage. We should have a time and end_time for every outage
pw_df = pw_df.filter("outage != 0")
#record the duration of the outage
def calculateDuration(startTime, endTime):
delta = endTime-startTime
seconds = delta.total_seconds()
return int(seconds)
udfcalculateDuration = udf(calculateDuration, IntegerType())
pw_df = pw_df.withColumn("outage_duration", udfcalculateDuration("outage_time","restore_time"))
window_size = 150
w = Window.orderBy(asc("outage_time")).rowsBetween(-1*window_size,window_size)
pw_df = pw_df.withColumn("outage_window_list",collect_list(F.struct("outage_time","core_id")).over(w))
def filterOutage(time, core_id, timeList):
count = 1
used = []
used.append(core_id)
for i in timeList:
if abs((time - i[0]).total_seconds()) < 120 and i[1] not in used:
used.append(i[1])
count += 1
if count > window_size:
return window_size
else:
return count
udfFilterTransition = udf(filterOutage, IntegerType())
pw_df = pw_df.withColumn("outage_cluster_size", udfFilterTransition("outage_time","core_id","outage_window_list"))
pw_df = pw_df.filter("outage_cluster_size > 1")
#now we need to collapse these individual outage events into actual outages
#we can use a similar method of windowing as before but only look at the row before
w = Window.orderBy(asc("outage_time"))
outage_time_lag = lag("outage_time",1).over(w)
def onlyOutages(time, lag_time):
if(lag_time is not None):
if((time - lag_time).total_seconds() < 120):
return 0
else:
return 1
else:
return 1
udfFilterTransition = udf(onlyOutages, IntegerType())
pw_df = pw_df.withColumn("outage_cluster", udfFilterTransition("outage_time",outage_time_lag))
pw_df = pw_df.filter("outage_cluster = 1")
pw_df = pw_df.select("outage_time","outage_cluster_size")
#okay we now have a list of all Powerwatch outages with an outage time
#now we should take all DW unplug events and for each outage see how many unplug events occur
dw_df = spark.read.jdbc("jdbc:postgresql://timescale.lab11.eecs.umich.edu/powerwatch", "dumsorwatch",
properties={"user": config['user'], "password": config['password'],"driver":"org.postgresql.Driver"})
#read the data that we care about
dw_df = dw_df.select(dw_df['phone_imei'],dw_df['time'],dw_df['type'],dw_df['fft_cnt'],dw_df['fft_base'])
dw_df = dw_df.filter(year("time") == 2018)
#get the avg fft_cnt as a baseline
#plugged_50_df = dw_df.filter("type = 'plugged' AND fft_cnt > -1 AND (fft_base = 50 OR fft_base = '50hz')")
#plugged_50_df.select(mean('fft_cnt')).show()
#print(plugged_50_df.count())
#plugged_60_df = dw_df.filter("type = 'plugged' AND fft_cnt > -1 AND (fft_base = 60 OR fft_base = '60hz')")
#plugged_60_df.select(mean('fft_cnt')).show()
#print(plugged_60_df.count())
#
#unplugged_50_df = dw_df.filter("type = 'unplugged' AND fft_cnt > -1 AND (fft_base = 50 OR fft_base = '50hz')")
#unplugged_50_df.select(mean('fft_cnt')).show()
#print(unplugged_50_df.count())
#unplugged_60_df = dw_df.filter("type = 'unplugged' AND fft_cnt > -1 AND (fft_base = 60 OR fft_base = '60hz')")
#unplugged_60_df.select(mean('fft_cnt')).show()
#print(unplugged_60_df.count())
dw_df = dw_df.select("phone_imei","time","fft_cnt","fft_base")
#dw_df = dw_df.filter("type = 'unplugged' OR type = 'plugged'")
#okay now we want the fft_cnt drop between plugged and unplugged for each phone imei
#we will first try per-plug differencing (although it may be better to take the averaged plug cnt for each phone)
#w = Window.groupBy('phone_imei').orderBy(asc("time"))
#fft_lag = lag("fft_cnt",1).over(w)
dw_df = dw_df.filter("type = 'unplugged'")
#now we need to join the data on time/outage time
joined_df = pw_df.join(dw_df, col("outage_time") == col("time"), "fullouter")
joined_df = joined_df.withColumn("agg_time",coalesce("outage_time","time"))
joined_df = joined_df.select("agg_time","phone_imei","outage_cluster_size","fft_cnt","fft_base")
#now run a similar metric as above
#for each outage time we need to see how many DW unplug events occurred within some time window
window_size = 100
w = Window.orderBy(asc("agg_time"),).rowsBetween(-1*window_size,window_size)
joined_df = joined_df.withColumn("imei_list",collect_list(F.struct("agg_time","phone_imei")).over(w))
def filterOutage(time, phone_imei, imeiList):
count = 0
#we want phone imei to be none
if phone_imei is not None:
return -1
used = []
for i in imeiList:
if abs((time - i[0]).total_seconds()) < 15 and i[1] not in used:
used.append(i[1])
count += 1
if count > window_size:
return window_size
else:
return count
udfFilterTransition = udf(filterOutage, IntegerType())
joined_df = joined_df.withColumn("phones_detecting", udfFilterTransition("agg_time","phone_imei","imei_list"))
#For the same window size average the fft_cnt to see if there is a signal in that metric
window_size = 100
w = Window.orderBy(asc("agg_time"),).rowsBetween(-1*window_size,window_size)
joined_df = joined_df.withColumn("fft_list",collect_list(F.struct("agg_time","phone_imei","fft_cnt","fft_base")).over(w))
def filterOutage(time, phone_imei, imeiList, base, return_count):
count = 0
fft_cnt = 0
#we want phone imei to be none
if phone_imei is not None:
return -1
for i in imeiList:
if(base == 50):
if abs((time - i[0]).total_seconds()) < 15 and i[2] is not None and (i[3] == '50' or i[3] == '50hz'):
count += 1
fft_cnt += i[2]
elif(base == 60):
if abs((time - i[0]).total_seconds()) < 15 and i[2] is not None and (i[3] == '60' or i[3] == '60hz'):
count += 1
fft_cnt += i[2]
if(count == 0):
return float(-1)
print("{},{},{}".format(base, count, float(fft_cnt)/float(count)))
if(return_count):
return float(count)
else:
return float(float(fft_cnt)/float(count))
udfFilterTransition = udf(filterOutage, FloatType())
joined_df = joined_df.withColumn("avg_50_fft_cnt", udfFilterTransition("agg_time","phone_imei","fft_list",lit(50),lit(False)))
joined_df = joined_df.withColumn("50_fft_cnt", udfFilterTransition("agg_time","phone_imei","fft_list",lit(50),lit(True)))
joined_df = joined_df.withColumn("avg_60_fft_cnt", udfFilterTransition("agg_time","phone_imei","fft_list",lit(60),lit(False)))
joined_df = joined_df.withColumn("60_fft_cnt", udfFilterTransition("agg_time","phone_imei","fft_list",lit(60),lit(True)))
#now remove all of the phone records
joined_df = joined_df.filter("phones_detecting > -1")
joined_df = joined_df.select("agg_time","outage_cluster_size","phones_detecting","avg_50_fft_cnt","50_fft_cnt","avg_60_fft_cnt","60_fft_cnt")
joined_df.show(1000)
#now join the two datasets together so that we have a
#pw_df = pw_df.select("time","outage_duration","outage_cluster_size")
#pw_df = pw_df.withColumn("outage_events",lit(1))
#pw_df = pw_df.groupBy(month("time"),"outage_cluster_size").sum().orderBy(month("time"),"outage_cluster_size")
#pw_df = pw_df.select("month(time)","outage_cluster_size","sum(outage_duration)","sum(outage_events)")
#pw_df = pw_df.withColumn("num_outages",pw_df["sum(outage_events)"]/pw_df["outage_cluster_size"])
#pw_df.show(500)
#pw_cp = pw_df
#
##now filter out all single outages
#pw_df = pw_df.filter("outage_cluster_size > 1")
#pw_df = pw_df.select("month(time)","sum(outage_duration)","sum(outage_events)","num_outages")
#pw_df = pw_df.groupBy("month(time)").sum().orderBy("month(time)")
#pw_df = pw_df.show(200)
#
#pw_cp.repartition(1).write.format("com.databricks.spark.csv").option("header", "true").save("monthly_outages_aggregate_cluster_size_time_corrected").groupBy(month("time")).sum().orderBy(month("time"))
| 2.390625 | 2 |
tests/anomaly/test_default.py | cnll0075/Merlion | 2,215 | 12793976 | #
# Copyright (c) 2022 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
from abc import ABC
import logging
import os
from os.path import abspath, dirname, join
import sys
import unittest
import torch
import random
import numpy as np
import pandas as pd
from merlion.models.defaults import DefaultDetector, DefaultDetectorConfig
from merlion.plot import plot_anoms_plotly
from merlion.post_process.threshold import AggregateAlarms
from merlion.utils import TimeSeries
from ts_datasets.anomaly import *
rootdir = dirname(dirname(dirname(abspath(__file__))))
logger = logging.getLogger(__name__)
def set_random_seeds():
torch.manual_seed(12345)
random.seed(12345)
np.random.seed(12345)
def get_train_test_splits(df: pd.DataFrame, metadata: pd.DataFrame, n: int) -> (pd.DataFrame, pd.DataFrame, np.ndarray):
train_df = df[metadata.trainval]
test_df = df[~metadata.trainval]
test_labels = pd.DataFrame(metadata[~metadata.trainval].anomaly)
return train_df.tail(n), test_df.head(n), test_labels[:n]
class Mixin(ABC):
def test_score(self):
print("-" * 80)
logger.info("test_score\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
test_ts = TimeSeries.from_pd(self.test_df)
score_ts = self.model.get_anomaly_score(test_ts)
scores = score_ts.to_pd().values.flatten()
min_score, max_score, sum_score = min(scores), max(scores), sum(scores)
logger.info(f"scores look like: {scores[:10]}")
logger.info(f"min score = {min_score}")
logger.info(f"max score = {max_score}")
logger.info(f"sum score = {sum_score}")
def test_save_load(self):
print("-" * 80)
logger.info("test_save_load\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
multi = train_ts.dim > 1
path = join(rootdir, "tmp", "default", "anom", "multi" if multi else "uni")
self.model.save(dirname=path)
loaded_model = DefaultDetector.load(dirname=path)
test_ts = TimeSeries.from_pd(self.test_df)
scores = self.model.get_anomaly_score(test_ts)
scores_np = scores.to_pd().values.flatten()
loaded_model_scores = loaded_model.get_anomaly_score(test_ts)
loaded_model_scores = loaded_model_scores.to_pd().values.flatten()
self.assertEqual(len(scores_np), len(loaded_model_scores))
alarms = self.model.post_rule(scores)
loaded_model_alarms = loaded_model.post_rule(scores)
self.assertSequenceEqual(list(alarms), list(loaded_model_alarms))
def test_plot(self):
try:
import plotly
print("-" * 80)
logger.info("test_plot\n" + "-" * 80 + "\n")
self.run_init()
logger.info("Training model...\n")
train_ts = TimeSeries.from_pd(self.train_df)
self.model.train(train_ts)
multi = train_ts.dim > 1
savedir = join(rootdir, "tmp", "default", "anom")
os.makedirs(savedir, exist_ok=True)
path = join(savedir, ("multi" if multi else "uni") + ".png")
test_ts = TimeSeries.from_pd(self.test_df)
fig = self.model.plot_anomaly_plotly(
time_series=test_ts, time_series_prev=train_ts, plot_time_series_prev=True
)
plot_anoms_plotly(fig, TimeSeries.from_pd(self.test_labels))
try:
import kaleido
fig.write_image(path, engine="kaleido")
except ImportError:
logger.info("kaleido not installed, not trying to save image")
except ImportError:
logger.info("plotly not installed, skipping test case")
class TestUnivariate(unittest.TestCase, Mixin):
def run_init(self):
set_random_seeds()
self.model = DefaultDetector(
DefaultDetectorConfig(granularity="1h", threshold=AggregateAlarms(alm_threshold=1.5))
)
# Time series with anomalies in both train split and test split
df = pd.read_csv(join(rootdir, "data", "synthetic_anomaly", "horizontal_spike_anomaly.csv"))
df.timestamp = pd.to_datetime(df.timestamp, unit="s")
df = df.set_index("timestamp")
# Get training & testing splits
self.train_df = df.iloc[: -len(df) // 2, :1]
self.test_df = df.iloc[-len(df) // 2 :, :1]
self.test_labels = df.iloc[-len(df) // 2 :, -1:]
class TestMultivariate(unittest.TestCase, Mixin):
def run_init(self):
set_random_seeds()
self.model = DefaultDetector(DefaultDetectorConfig(threshold=AggregateAlarms(alm_threshold=2)))
self.dataset = MSL(rootdir=join(rootdir, "data", "smap"))
df, metadata = self.dataset[0]
self.train_df, self.test_df, self.test_labels = get_train_test_splits(df, metadata, 2000)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", stream=sys.stdout, level=logging.INFO
)
unittest.main()
| 1.976563 | 2 |
models.py | chawlaj100/P1_Facial_Keypoints | 1 | 12793977 | ## TODO: define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
## TODO: Define all the layers of this CNN, the only requirements are:
## 1. This network takes in a square (same width and height), grayscale image as input
## 2. It ends with a linear layer that represents the keypoints
## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# As an example, you've been given a convolutional layer, which you may (but don't have to) change:
# 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
        # Input image size: 224 x 224 (grayscale)
        self.conv1 = nn.Conv2d(1, 32, 5) # (224 - 5)/1 + 1 = 220 -> (32, 220, 220)
        self.act = nn.ReLU()
        self.pool = nn.MaxPool2d(2, 2) # 220/2 = 110 -> (32, 110, 110)
        self.drop1 = nn.Dropout(p=0.2)
        self.conv2 = nn.Conv2d(32, 64, 4) # (110 - 4)/1 + 1 = 107 -> (64, 107, 107)
        self.drop2 = nn.Dropout(p=0.2) # after pooling -> (64, 53, 53)
        self.conv3 = nn.Conv2d(64, 128, 3) # (53 - 3)/1 + 1 = 51 -> (128, 51, 51)
        self.drop3 = nn.Dropout(p=0.2) # after pooling -> (128, 25, 25)
        self.dense1 = nn.Linear(80000,1000) # 128*25*25 = 80000
self.drop4 = nn.Dropout(p=0.2)
self.dense2 = nn.Linear(1000,500)
self.drop5 = nn.Dropout(p=0.2)
self.dense3 = nn.Linear(500,136)
def forward(self, x):
## TODO: Define the feedforward behavior of this model
## x is the input image and, as an example, here you may choose to include a pool/conv step:
x = self.drop1(self.pool(self.act(self.conv1(x))))
x = self.drop2(self.pool(self.act(self.conv2(x))))
x = self.drop3(self.pool(self.act(self.conv3(x))))
x = x.view(x.size(0), -1)
x = self.drop4(self.act(self.dense1(x)))
x = self.drop5(self.act(self.dense2(x)))
out = self.dense3(x)
return out
| 3.65625 | 4 |
scripts/python/remoteAccessScripts/microThoughts.py | jeremiahmarks/dangerzone | 1 | 12793978 | <gh_stars>1-10
#intent: create a script that will create an HTML page and post it to my website
#via python and html. The argument it will accept is the text for the newest post.
# server.org/SERVERDIRECTORY/yyyyMonDayhhmmss.html
SERVER='ftp.yourhost.org'
USERNAME='yourFTPusername'
PASSWORD='<PASSWORD>'
SERVERDIRECTORY='theDirectoryOnTheServer' #this currently must be created already, but
# '/' should be acceptable as well.
def getContent():
import argparse
parser=argparse.ArgumentParser(description="Retrieves the content and returns it to the main running process.")
parser.add_argument('--c',dest='c', help="This is where the content would go")
args=parser.parse_args()
return args.c
def getTime():
import time
currentTime=time.localtime()
currentTime=str(currentTime.tm_year)+'_'+str(currentTime.tm_mon)+'_'+str(currentTime.tm_mday)+'_'+str(currentTime.tm_hour)+'_'+str(currentTime.tm_min)+'_'+str(currentTime.tm_sec)
return currentTime
def htmlMaker(content, time):
from string import Template
htmlTemplate=Template('<html>\n<head>\n<title>$thetime</title>\n</head>\n<body>\n$thecontent\n</body>\n</html>')
htmlTemplate=htmlTemplate.substitute(thetime=time, thecontent=content)
htmlFile=file(time+'.html', 'w')
htmlFile.write(htmlTemplate)
htmlFile.close()
return htmlFile
def uploadPage(newFile):
from ftplib import FTP
ftp=FTP(SERVER)
ftp.login(USERNAME, PASSWORD)
ftp.cwd(SERVERDIRECTORY)
newFile=open(newFile.name, 'rb')
ftp.storbinary('STOR '+newFile.name, newFile)
newFile.close()
ftp.close()
print "it should be live at "+SERVER+'/'+SERVERDIRECTORY+'/'+newFile.name
if __name__ == '__main__':
content=getContent()
time=getTime()
newFile=htmlMaker(content, time)
uploadPage(newFile)
| 3.46875 | 3 |
tests/test_api/test_FCNet.py | levi131/PaddleScience | 0 | 12793979 | <reponame>levi131/PaddleScience<filename>tests/test_api/test_FCNet.py<gh_stars>0
"""
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import numpy as np
import paddlescience as psci
import pytest
import paddle
from apibase import APIBase
from apibase import randtool
np.random.seed(22)
paddle.seed(22)
def cal_FCNet(ins,
num_ins,
num_outs,
num_layers,
hidden_size,
dtype='float64',
activation='tanh'):
"""
calculate FCNet api
"""
net = psci.network.FCNet(
num_ins=num_ins,
num_outs=num_outs,
num_layers=num_layers,
hidden_size=hidden_size,
dtype=dtype,
activation=activation)
for i in range(num_layers):
net.weights[i] = paddle.ones_like(net.weights[i])
res = net.nn_func(ins)
return res
def cal_with_np(ins,
num_ins,
num_outs,
num_layers,
hidden_size,
activation='tanh'):
"""
calculate with numpy
"""
w = []
for i in range(num_layers):
if i == 0:
lsize = num_ins
rsize = hidden_size
elif i == (num_layers - 1):
lsize = hidden_size
rsize = num_outs
else:
lsize = hidden_size
rsize = hidden_size
w.append(np.ones((lsize, rsize)))
u = ins
for i in range(num_layers - 1):
u = np.matmul(u, w[i])
if activation == 'tanh':
u = np.tanh(u)
elif activation == 'sigmoid':
u = 1 / (1 + np.exp(-u))
u = np.matmul(u, w[-1])
return u
class TestFCNet(APIBase):
"""
    test FCNet
"""
def hook(self):
"""
implement
"""
self.types = [np.float64]
# self.debug = True
# enable check grad
self.static = False
obj = TestFCNet(cal_FCNet)
@pytest.mark.api_network_FCNet
def test_FCNet0():
"""
default
"""
xy_data = np.array([[0.1, 0.5]])
u = cal_with_np(xy_data, 2, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=2,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet1():
"""
xy shape (9, 2)
"""
xy_data = randtool("float", 0, 10, (9, 2))
u = cal_with_np(xy_data, 2, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=2,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet2():
"""
xy shape (9, 3)
"""
xy_data = randtool("float", 0, 1, (9, 3))
u = cal_with_np(xy_data, 3, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=3,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet3():
"""
xy shape (9, 4)
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet4():
"""
xy shape (9, 4)
num_outs: 2
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 2, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=2,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet5():
"""
xy shape (9, 4)
num_outs: 3
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet6():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 2, 20)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=2,
hidden_size=20)
@pytest.mark.api_network_FCNet
def test_FCNet7():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
num_layers: 5
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 5, 20)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=5,
hidden_size=20)
@pytest.mark.api_network_FCNet
def test_FCNet8():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
num_layers: 5
activation='sigmoid'
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 5, 20, activation='sigmoid')
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=5,
hidden_size=20)
| 2.234375 | 2 |
bitey/cpu/addressing_mode_factory.py | jgerrish/bitey | 0 | 12793980 | <reponame>jgerrish/bitey
from dataclasses import dataclass
from typing import ClassVar
from bitey.cpu.addressing_mode import (
AddressingMode,
AbsoluteAddressingMode,
AbsoluteIndirectAddressingMode,
AbsoluteXAddressingMode,
AbsoluteYAddressingMode,
AccumulatorAddressingMode,
ImmediateAddressingMode,
ImpliedAddressingMode,
IndirectXAddressingMode,
IndirectYAddressingMode,
RelativeAddressingMode,
ZeroPageAddressingMode,
ZeroPageXAddressingMode,
ZeroPageYAddressingMode,
)
@dataclass
class AddressingModeFactory:
addressing_mode_map: ClassVar[dict[str, AddressingMode]] = {
"absolute": AbsoluteAddressingMode,
"absolute_indirect": AbsoluteIndirectAddressingMode,
"absolute_x": AbsoluteXAddressingMode,
"absolute_y": AbsoluteYAddressingMode,
"accumulator": AccumulatorAddressingMode,
"immediate": ImmediateAddressingMode,
"implied": ImpliedAddressingMode,
"indirect_x": IndirectXAddressingMode,
"indirect_y": IndirectYAddressingMode,
"relative": RelativeAddressingMode,
"zeropage": ZeroPageAddressingMode,
"zeropage_x": ZeroPageXAddressingMode,
"zeropage_y": ZeroPageYAddressingMode,
}
def build(addressing_mode):
"Build an AddressingMode instance from a string"
return AddressingModeFactory.get_mode_from_str(addressing_mode)()
def get_mode_from_str(addressing_mode):
"Given an addressing mode string, return the addressing mode class"
m = AddressingModeFactory.addressing_mode_map[addressing_mode]
return m
| 2.921875 | 3 |
libs/simulate_robot.py | yuk6heo/GIS-RAmap | 33 | 12793981 | <reponame>yuk6heo/GIS-RAmap
from __future__ import absolute_import, division
from davisinteractive import logging
from davisinteractive.metrics import batched_jaccard
from davisinteractive.utils.operations import bezier_curve
from davisinteractive.robot.interactive_robot import InteractiveScribblesRobot
from davisinteractive.evaluation.service import EvaluationService
import time
import numpy as np
ROBOT_DEFAULT_PARAMETERS = {
'kernel_size': .2,
'max_kernel_radius': 16,
'min_nb_nodes': 4,
'nb_points': 1000
}
class Interactrobot(InteractiveScribblesRobot):
def __init__(self,
kernel_size=.2,
max_kernel_radius=16,
min_nb_nodes=4,
nb_points=1000):
""" Robot constructor
"""
super(Interactrobot, self).__init__()
if kernel_size >= 1. or kernel_size < 0:
raise ValueError('kernel_size must be a value between [0, 1).')
self.kernel_size = kernel_size
self.max_kernel_radius = max_kernel_radius
self.min_nb_nodes = min_nb_nodes
self.nb_points = nb_points
def interact_singleimg(self,
pred_mask,
gt_mask,
nb_objects=None,):
""" Interaction of the Scribble robot given a prediction.
Given the sequence and a mask prediction, the robot will return a
scribble in the region that fails the most.
# Arguments
sequence: String. Name of the sequence to interact with.
pred_masks: Numpy Array. Array with the prediction masks. It must
be an integer array with shape (H x W), one-hot vector for multi object
gt_masks: Numpy Array. Array with the ground truth of the sequence.
It must have the same data type and shape as `pred_masks`, one-hot vector for multi object
nb_objects: Integer. Number of objects in the ground truth mask. If
`None` the value will be infered from `y_true`. Setting this
value will speed up the computation.
frame: Integer. Frame to generate the scribble. If not given, the
worst frame given by the jaccard will be used.
# Returns
dict: Return a scribble (default representation).
"""
robot_start = time.time()
predictions = np.asarray(pred_mask, dtype=np.int)
annotations = np.asarray(gt_mask, dtype=np.int)
if nb_objects is None:
obj_ids = np.unique(annotations)
obj_ids = obj_ids[(obj_ids > 0) & (obj_ids < 255)]
nb_objects = len(obj_ids)
obj_ids = [i for i in range(nb_objects + 1)]
# Infer height and width of the sequence
h, w = annotations.shape
img_shape = np.asarray([w, h], dtype=np.float)
pred, gt = predictions, annotations
scribbles = []
for obj_id in obj_ids:
logging.verbose(
'Creating scribbles from error mask at object_id={}'.format(
obj_id), 2)
start_time = time.time()
error_mask = (gt == obj_id) & (pred != obj_id)
if error_mask.sum() == 0:
logging.info(
'Error mask of object ID {} is empty. Skip object ID.'.
format(obj_id))
continue
# Generate scribbles
skel_mask = self._generate_scribble_mask(error_mask)
skel_time = time.time() - start_time
logging.verbose(
'Time to compute the skeleton mask: {:.3f} ms'.format(
skel_time * 1000), 2)
if skel_mask.sum() == 0:
continue
G, P = self._mask2graph(skel_mask)
mask2graph_time = time.time() - start_time - skel_time
logging.verbose(
'Time to transform the skeleton mask into a graph: ' +
'{:.3f} ms'.format(mask2graph_time * 1000), 2)
t_start = time.time()
S = self._acyclics_subgraphs(G)
t = (time.time() - t_start) * 1000
logging.verbose(
'Time to split into connected components subgraphs ' +
'and remove the cycles: {:.3f} ms'.format(t), 2)
t_start = time.time()
longest_paths_idx = [self._longest_path_in_tree(s) for s in S]
longest_paths = [P[idx] for idx in longest_paths_idx]
t = (time.time() - t_start) * 1000
logging.verbose(
'Time to compute the longest path on the trees: {:.3f} ms'.
format(t), 2)
t_start = time.time()
scribbles_paths = [
bezier_curve(p, self.nb_points) for p in longest_paths
]
t = (time.time() - t_start) * 1000
logging.verbose(
'Time to compute the bezier curves: {:.3f} ms'.format(t), 2)
end_time = time.time()
logging.verbose(
'Generating the scribble for object id {} '.format(obj_id) +
'took {:.3f} ms'.format((end_time - start_time) * 1000), 2)
# Generate scribbles data file
for p in scribbles_paths:
p /= img_shape
path_data = {
'path': p.tolist(),
'object_id': int(obj_id),
'start_time': start_time,
'end_time': end_time
}
scribbles.append(path_data)
scribbles_data = {'scribbles': scribbles,}
t = time.time() - robot_start
logging.info(('The robot took {:.3f} s to generate all the '
'scribbles for {} objects.').format(
t, nb_objects))
return scribbles_data
| 2.09375 | 2 |
src/nti/app/pyramid_zope/traversal.py | NextThought/nti.app.pyramid_zope | 1 | 12793982 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Support for resource tree traversal.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from pyramid import traversal
from pyramid.compat import is_nonstr_iter
from pyramid.compat import decode_path_info
from pyramid.exceptions import URLDecodeError
from pyramid.httpexceptions import HTTPNotFound
from pyramid.interfaces import VH_ROOT_KEY
from pyramid.interfaces import ITraverser
from zope import interface
from zope.component import queryMultiAdapter
from zope.event import notify
from zope.location.interfaces import LocationError
from zope.traversing import api as ztraversing
from zope.traversing.interfaces import ITraversable
from zope.traversing.interfaces import BeforeTraverseEvent
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from zope.traversing.namespace import resource as _zresource
lineage = traversal.lineage
find_interface = traversal.find_interface
empty = traversal.empty
split_path_info = traversal.split_path_info
logger = __import__('logging').getLogger(__name__)
__all__ = [
'ZopeResourceTreeTraverser',
'resource',
]
def _notify_before_traverse_event(ob, request):
"""
Notifies a BeforeTraverseEvent, but safely: if the
handlers themselves raise a location error, turn that into
a HTTP 404 exception.
Because handlers are deliberately doing this, we stop
traversal and abort rather than try to return an information
dictionary and find a view and context, etc. This is limiting, but
safe.
"""
try:
notify(BeforeTraverseEvent(ob, request))
except LocationError:
# this is often a setup or programmer error
logger.debug("LocationError from traverse subscribers", exc_info=True)
raise HTTPNotFound("Traversal failed")
@interface.implementer(ITraverser)
class ZopeResourceTreeTraverser(traversal.ResourceTreeTraverser):
"""
A :class:`pyramid.interfaces.ITraverser` based on pyramid's
default traverser, but modified to use the
:mod:`zope.traversing.api` machinery instead of (only) dictionary
lookups. This provides is with the flexibility of the
:obj:`zope.traversing.interfaces.ITraversable` adapter pattern,
plus the support of namespace lookups
(:func:`zope.traversing.namespace.nsParse` and
:func:`zope.traversing.namespace.namespaceLookup`).
As this object traverses, it fires :obj:`~.IBeforeTraverseEvent`
events. If you either load the configuration from
:mod:`zope.app.publication` or manually enable the
:obj:`zope.site.site.threadSiteSubscriber <zope.site.site>` to
subscribe to this event, then any Zope site managers found along
the way will be made the current site.
"""
def __init__(self, root):
traversal.ResourceTreeTraverser.__init__(self, root)
def __call__(self, request): # pylint:disable=too-many-locals,too-many-branches,too-many-statements
"""
See :meth:`pyramid.interfaces.ITraversar.__call__`.
"""
# JAM: Unfortunately, the superclass implementation is entirely monolithic
# and we so we cannot reuse any part of it. Instead,
# we copy-and-paste it. Unless otherwise noted, comments below are
# original.
# JAM: Note the abundance of no covers. These are for features we are
# not currently using and the code is lifted directly from pyramid.
environ = request.environ
if request.matchdict is not None:
matchdict = request.matchdict
path = matchdict.get('traverse', '/') or '/'
if is_nonstr_iter(path):
# this is a *traverse stararg (not a {traverse})
# routing has already decoded these elements, so we just
# need to join them
path = '/'.join(path) or '/'
subpath = matchdict.get('subpath', ())
if not is_nonstr_iter(subpath): # pragma: no cover
# this is not a *subpath stararg (just a {subpath})
# routing has already decoded this string, so we just need
# to split it
subpath = split_path_info(subpath)
else: # pragma: no cover
# this request did not match a route
subpath = ()
try:
# empty if mounted under a path in mod_wsgi, for example
path = decode_path_info(environ['PATH_INFO'] or '/')
except KeyError:
path = '/'
except UnicodeDecodeError as e:
raise URLDecodeError(e.encoding, e.object, e.start, e.end,
e.reason)
if VH_ROOT_KEY in environ: # pragma: no cover
# HTTP_X_VHM_ROOT
vroot_path = decode_path_info(environ[VH_ROOT_KEY])
vroot_tuple = split_path_info(vroot_path)
# both will (must) be unicode or asciistr
vpath = vroot_path + path
vroot_idx = len(vroot_tuple) - 1
else:
vroot_tuple = ()
vpath = path
vroot_idx = -1
root = self.root
ob = vroot = root
if vpath == '/': # invariant: vpath must not be empty
# prevent a call to traversal_path if we know it's going
# to return the empty tuple
vpath_tuple = ()
else:
i = 0
view_selector = self.VIEW_SELECTOR
# A list so that remaining_path can be modified
vpath_tuple = list(split_path_info(vpath))
for segment in vpath_tuple:
# JAM: Fire traversal events, mainly so sites get installed. See
# zope.publisher.base.
_notify_before_traverse_event(ob, request)
# JAM: Notice that checking for '@@' is special cased, and
# doesn't go through the normal namespace lookup as it would in
# plain zope traversal. (XXX: Why not?)
if segment.startswith(view_selector): # pragma: no cover
return {'context': ob,
'view_name': segment[2:],
'subpath': vpath_tuple[i + 1:],
'traversed': vpath_tuple[:vroot_idx + i + 1],
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root}
try:
# JAM: This is where we differ. instead of using __getitem__,
# we use the traversing machinery.
# The zope app would use IPublishTraverser, which
# would install security proxies along the way. We probably don't need to
# do that? TODO:
# NOTE: By passing the request here, we require all traversers
# (including the namespace traversers) to be registered as multi-adapters.
# None of the default namespaces are. See our
# configure.zcml for what is.
# JAM: Damn stupid implementation of traversePathElement ignores
# the request argument to find a traversable /except/ when a namespace is found.
# therefore, we explicitly query for the multi adapter ourself in the non-namespace case
# (In the namespace case, we let traversing handle it, because it needs a named adapter
# after parsing)
traversable = None
if segment and segment[0] not in '+@' \
and not ITraversable.providedBy(ob):
try:
# Use the installed component registry
# instead of the request registry (which
# is the global component registry if
# pyramid was configured that way, or a
# standalone registry) in case the act of
# traversing has changed the site manager;
# zope.site.site.threadSiteSubscriber will
# do this for each BeforeTraverseEvent
# that's fired (though that's not
# registered by default).
traversable = queryMultiAdapter((ob, request),
ITraversable)
except TypeError:
# Some things are registered for "*" (DefaultTraversable)
# which means they get called here. If they can't take
# two arguments, then we bail. Sucks.
pass
remaining_path = vpath_tuple[i + 1:]
next_ob = ztraversing.traversePathElement(ob,
segment,
remaining_path,
traversable=traversable,
request=request)
if remaining_path != vpath_tuple[i + 1:]:
# Is this if check necessary? It would be faster to
# always assign
vpath_tuple[i + 1:] = remaining_path
except LocationError:
# LocationError is a type of KeyError. The DefaultTraversable turns
# plain KeyError and TypeErrors into LocationError.
return {'context': ob,
'view_name': segment,
'subpath': vpath_tuple[i + 1:],
'traversed': vpath_tuple[:vroot_idx + i + 1],
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root}
if i == vroot_idx: # pragma: no cover
vroot = next_ob
ob = next_ob
i += 1
# JAM: Also fire before traversal for the actual context item, since we
# won't actually traverse into it. Be sure not to fire multiple times
# for this (E.g., the root). This logic is complicated by the
# multi-returns above.
_notify_before_traverse_event(ob, request)
return {'context': ob,
'view_name': empty,
'subpath': subpath,
'traversed': vpath_tuple,
'virtual_root': vroot,
'virtual_root_path': vroot_tuple,
'root': root}
class resource(_zresource):
"""
Handles resource lookup in a way compatible with :mod:`zope.browserresource`.
This package registers resources as named adapters from :class:`.IDefaultBrowserLayer`
to Interface. We connect the two by making the pyramid request implement
the right thing.
"""
def __init__(self, context, request):
request = IBrowserRequest(request)
if not IDefaultBrowserLayer.providedBy(request):
interface.alsoProvides(request, IDefaultBrowserLayer) # We lie
super(resource, self).__init__(context, request)
| 2.109375 | 2 |
crsbot_source/consts.py | Qman11010101/chunithmSelectorBot | 0 | 12793983 | import json
import os
from os import environ as env
from distutils.util import strtobool
if os.path.isfile("setting.json"):
with open("setting.json", "r", encoding="UTF-8_sig") as s:
setting = json.load(s)
else:
setting = {
"token": {
"discord": env["discord_token"],
"chunirec": env["chunirec_token"]
},
"logging": {
"logging": strtobool(env["logging"]),
"loglevel_stdio": env["loglevel_stdio"],
"loglevel_file": env["loglevel_file"],
"log_filename": env["log_filename"]
},
"misc": {
"channel_id": int(env["channel_id"]),
"timezone": env["timezone"],
"api_lifetime": int(env["api_lifetime"]),
"max_musics": int(env["max_musics"]),
"command_prefix": env["command_prefix"]
}
}
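# --- Editor's note (not part of the original file) ---
# Expected shape of setting.json, inferred from the keys read above
# (all values are placeholders):
#
# {
#     "token": {"discord": "DISCORD_TOKEN", "chunirec": "CHUNIREC_TOKEN"},
#     "logging": {"logging": true, "loglevel_stdio": "INFO",
#                 "loglevel_file": "WARNING", "log_filename": "crsbot.log"},
#     "misc": {"channel_id": 123456789012345678, "timezone": "Asia/Tokyo",
#              "api_lifetime": 3600, "max_musics": 10, "command_prefix": "c!"}
# }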
# URL
URL_chunirec = "https://reiwa.f5.si/chunirec_all.json"
URL_ONGEKI = "https://ongeki.sega.jp/assets/json/music/music.json"
URL_MAIMAI = "https://maimai.sega.jp/data/DXsongs.json"
URL_WACCA = "https://reiwa.f5.si/wacca_all.json"
URL_ARCAEA = "https://reiwa.f5.si/arcaea_all.json"
URL_PHIGROS = "https://reiwa.f5.si/phigros_all.json"
# Tokens
CHUNIREC_TOKEN = setting["token"]["chunirec"]
DISCORD_TOKEN = setting["token"]["discord"]
# API settings
API_LIFETIME = int(setting["misc"]["api_lifetime"])
# Logger settings
tz = setting["misc"]["timezone"]
is_logging = setting["logging"]["logging"]
loglevel_stdio = setting["logging"]["loglevel_stdio"]
loglevel_file = setting["logging"]["loglevel_file"]
log_filename = setting["logging"]["log_filename"]
# Miscellaneous
MAX_MUSICS = setting["misc"]["max_musics"]
CMDPREF = setting["misc"]["command_prefix"]
APP_VERSION = "3.0"
CHANNEL_NAME = "選曲bot"
# Help messages
DECLARATION_NAME = f"**CHUNITHM Random Selector bot v{APP_VERSION}**"
HIGHLOW = f"""※`(:high/low)`がついているパラメータは、後ろに『:high』もしくは『:low』を付け足すことで『以上』『以下』を表すことができます。
`:up/:down`や`:big/:small`でも可能です。"""
HELPMES_CHUNITHM = f"""
{DECLARATION_NAME}
【コマンド文字列】
`{CMDPREF}random [曲数] [レベル(:high/low)] [ジャンル] [アーティスト] [ノーツ数(:high/low)] [BPM(:high/low)] [難易度]`
`{CMDPREF}search [レベル(:high/low)] [ジャンル] [アーティスト] [ノーツ数(:high/low)] [BPM(:high/low)] [難易度]`
{HIGHLOW}
【パラメータ】
指定しないパラメータは、`-`もしくは`none`と入力してください。
**曲数**
> 表示する曲数を指定します。最大{MAX_MUSICS}曲まで表示できます。
> それ以上の数字を入力した場合、ランダム選曲コマンドにおいては{MAX_MUSICS}曲として扱われ、検索コマンドではエラー扱いになります。
> 指定されなかった場合、3曲として扱われます。
> 半角数字で入力してください。
**レベル**
> 楽曲のレベルを指定します。
> 『10』『13+』のようなレベル表記形式、もしくは『12.6』『13.7』のような譜面定数形式で入力してください。
**ジャンル**
> 楽曲が属するジャンルを指定します。
> 『ORIGINAL』『POPS&ANIME』『niconico』『東方Project』『VARIETY』『イロドリミドリ』『ゲキマイ』から1つ選んで入力してください。
**アーティスト**
> 楽曲のアーティスト名を指定します。
> アーティスト名を入力してください。
> 大文字・小文字および全角・半角を考慮しない部分一致で検索されます。
**ノーツ数**
> 楽曲のノーツ数を指定します。
> 半角数字で入力してください。
**BPM**
> 楽曲のBPMを指定します。
> 半角数字で入力してください。
**難易度**
> 楽曲の難易度を指定します。EXPERTのみもしくはMASTERのみの検索をする場合に使用します。
> 指定する場合、『exp』もしくは『mas』と指定してください。
> 指定されないか、不正な値を指定した場合は自動的にEXPERTとMASTERの両方から検索します。
> レベルもしくはノーツ数が指定されたときのみ機能します。
【コマンド例】
`{CMDPREF}random`: 全楽曲の中からランダムに3曲選びます。
`{CMDPREF}random 5 13+:up`: レベル13+以上の楽曲の中からランダムに5曲選びます。
`{CMDPREF}random - 13 - - - - exp`: レベル13のEXPERTの楽曲をランダムに3曲選びます。
`{CMDPREF}search none 東方Project none 1000:low`: 東方Projectの楽曲の中からノーツ数が1000以下の楽曲を検索します。
`{CMDPREF}search - - - - 300:high`: 全楽曲の中からBPM300以上の楽曲を検索します。
【注意点】
- ジャンルは1つのみ指定可能です。
- WORLD'S ENDには対応していません。
- 一部の値が未登録になっている場合があります。
他、以下の楽曲の検索機能があります。
- オンゲキ: `{CMDPREF}help_ongeki`
- WACCA: `{CMDPREF}help_wacca`
"""
HELPMES_ONGEKI = f"""
{DECLARATION_NAME}
**オンゲキ選曲機能**
【コマンド文字列】
`{CMDPREF}random_ongeki [曲数] [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}rgeki`でも可)
`{CMDPREF}search_ongeki [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}sgeki`でも可)
{HIGHLOW}
【パラメータ】
指定しないパラメータは、`-`もしくは`none`と入力してください。
**曲数**
> 表示する曲数を指定します。最大{MAX_MUSICS}曲まで表示できます。
> それ以上の数字を入力した場合、ランダム選曲コマンドにおいては{MAX_MUSICS}曲として扱われ、検索コマンドではエラー扱いになります。
> 指定されなかった場合、3曲として扱われます。
> 半角数字で入力してください。
**レベル**
> 楽曲のレベルを指定します。
> 『10』『13+』のようなレベル表記形式で入力してください。
> 譜面定数は使えません。
**ジャンル**
> 楽曲が属するジャンルを指定します。
> 『オンゲキ』『POPS&ANIME』『niconico』『東方Project』『VARIETY』『チュウマイ』から1つ選んで入力してください。
**アーティスト**
> 楽曲のアーティスト名を指定します。
> アーティスト名を入力してください。
> 大文字・小文字および全角・半角を考慮しない部分一致で検索されます。
**難易度**
> 楽曲の難易度を指定します。EXPERTのみもしくはMASTERのみの検索をする場合に使用します。
> 指定する場合、『exp』もしくは『mas』と指定してください。
> 指定されないか、不正な値を指定した場合は自動的にEXPERTとMASTERの両方から検索します。
> レベルが指定されたときのみ機能します。
【コマンド例】
`{CMDPREF}random_ongeki`: 全楽曲の中からランダムに3曲選びます。
`{CMDPREF}random_ongeki 5 13+:up`: レベル13+以上の楽曲の中からランダムに5曲選びます。
`{CMDPREF}random_ongeki - 13 - - - - exp`: レベル13のEXPERTの楽曲をランダムに3曲選びます。
`{CMDPREF}search_ongeki 14 東方Project`: 東方Projectの楽曲の中からレベル14の曲を検索します。
【注意点】
- ジャンルは1つのみ指定可能です。
- LUNATICおよびボーナストラックには対応していません。
"""
# 書きかけ
HELPMES_MAIMAI = f"""
{DECLARATION_NAME}
**maimaiでらっくす選曲機能**
【コマンド文字列】
`{CMDPREF}random_maimai [曲数] [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}rmai`でも可)
`{CMDPREF}search_maimai [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}smai`でも可)
{HIGHLOW}
【パラメータ】
指定しないパラメータは、`-`もしくは`none`と入力してください。
**曲数**
> 表示する曲数を指定します。最大{MAX_MUSICS}曲まで表示できます。
> それ以上の数字を入力した場合、ランダム選曲コマンドにおいては{MAX_MUSICS}曲として扱われ、検索コマンドではエラー扱いになります。
> 指定されなかった場合、3曲として扱われます。
> 半角数字で入力してください。
**レベル**
> 楽曲のレベルを指定します。
> 『10』『13+』のようなレベル表記形式で入力してください。
> 譜面定数は使えません。
**ジャンル**
> 楽曲が属するジャンルを指定します。
"""
HELPMES_WACCA = f"""
{DECLARATION_NAME}
**WACCA選曲機能**
【コマンド文字列】
`{CMDPREF}random_wacca [曲数] [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}rwacca`でも可)
`{CMDPREF}search_wacca [レベル(:high/low)] [ジャンル] [アーティスト] [難易度]` (`{CMDPREF}swacca`でも可)
{HIGHLOW}
【パラメータ】
指定しないパラメータは、`-`もしくは`none`と入力してください。
**曲数**
> 表示する曲数を指定します。最大{MAX_MUSICS}曲まで表示できます。
> それ以上の数字を入力した場合、ランダム選曲コマンドにおいては{MAX_MUSICS}曲として扱われ、検索コマンドではエラー扱いになります。
> 指定されなかった場合、3曲として扱われます。
> 半角数字で入力してください。
**レベル**
> 楽曲のレベルを指定します。
> 『10』『13+』のようなレベル表記形式で入力してください。
> 譜面定数は使えません。
**ジャンル**
> 楽曲が属するジャンルを指定します。
> 『アニメ/POP』『ボカロ』『東方アレンジ』『2.5次元』『バラエティ』『オリジナル』『TANO\\*C』『TANO\\*C(オリジナル)』から1つ選んで入力してください。
**アーティスト**
> 楽曲のアーティスト名を指定します。
> アーティスト名を入力してください。
> 大文字・小文字および全角・半角を考慮しない部分一致で検索されます。
**難易度**
> 楽曲の難易度を指定します。EXPERTのみもしくはINFERNOのみの検索をする場合に使用します。
> 指定する場合、『exp』もしくは『inf』と指定してください。
> 指定されないか、不正な値を指定した場合は自動的にEXPERTとINFERNOの両方から検索します。
> レベルが指定されたときのみ機能します。
【注意点】
- ジャンルは1つのみ指定可能です。
【不具合】
- 『オリジナル』を指定すると『TANO\\*C(オリジナル)』も同時に検索されてしまいます。
"""
| 2.15625 | 2 |
src/models/hmc_vae.py | ipeis/HH-VAEM | 2 | 12793984 | <reponame>ipeis/HH-VAEM
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2022 by <NAME>, UC3M. +
# All rights reserved. This file is part of the HH-VAEM, and is released under +
# the "MIT License Agreement". Please see the LICENSE file that should have +
# been included as part of this package. +
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from src.models.base import *
from src.models.hmc import *
# ============= HMCVAE ============= #
class HMCVAE(BaseVAE):
"""
Implements a Hamiltonian VAE (HMC-VAE) as described in https://arxiv.org/abs/2202.04599
"""
def __init__(self,
dataset: str, dim_x: int, dim_y: int, latent_dim = 10, arch='base', dim_h=256,
likelihood_x = 'gaussian', likelihood_y = 'gaussian', variance=0.1, imbalanced_y = False,
categories_y = 1, prediction_metric='rmse',
batch_size=128, lr=1e-3, samples_MC = 1, data_path='../data/', split_idx=0,
L=5, T=10, chains=1, chains_sksd=30, sksd=1, pre_steps=2e3, lr_pre=1e-3,
lr_encoder=1e-3, lr_decoder=1e-3, lr_predictor=1e-3, lr_hmc=1e-3, lr_scale = 1e-2,
update_s_each=10
):
"""
HMCVAE Initialization
Args:
dataset (str): name of the dataset (boston, mnist, ...)
dim_x (int): input data dimension
dim_y (int): target data dimension
latent_dim (int, optional): dimension of the latent space. Defaults to 10.
arch (str, optional): name of the architecture for encoder/decoder from the 'archs' file. Defaults to 'base'.
dim_h (int, optional): dimension of the hidden vectors. Defaults to 256.
likelihood_x (str, optional): input data likelihood type. Defaults to 'gaussian'.
likelihood_y (str, optional): target data likelihood type. Defaults to 'gaussian'.
variance (float, optional): fixed variance for Gaussian likelihoods. Defaults to 0.1.
imbalanced_y (bool, optional): True for compensating imbalanced classification. Defaults to False.
categories_y (int, optional): number of categories when the target is categorical. Defaults to 1.
prediction_metric (str, optional): name of the prediction metric for validation ('rmse', 'accuracy'). Defaults to 'rmse'.
batch_size (int, optional): batch size. Defaults to 128.
lr (float, optional): learning rate for the parameter optimization. Defaults to 1e-3.
samples_MC (int, optional): number of MC samples for computing the ELBO. Defaults to 1.
data_path (str, optional): path to load/save the data. Defaults to '../data/'.
split_idx (int, optional): idx of the training split. Defaults to 0.
L (int, optional): number of Leapfrog steps. Defaults to 5.
T (int, optional): length of the HMC chains. Defaults to 10.
chains (int, optional): number of parallel HMC chains. Defaults to 1.
chains_sksd (int, optional): number of parallel HMC chains for computing the SKSD. Defaults to 30.
sksd (int, optional): learn a scale factor for q(eps|zy) using the SKSD regularizer (1) or not (0). Defaults to 1.
            pre_steps (float, optional): number of standard VI training steps (before using HMC). Defaults to 2e3.
lr_pre (float, optional): learning reate for all the parameters during the VI training stage. Defaults to 1e-3.
lr_encoder (float, optional): Learning rate for the encoder parameters. Defaults to 1e-3.
lr_decoder (float, optional): Learning rate for the decoder (p(x|z1)). Defaults to 1e-3.
lr_predictor (float, optional): Learning rate for the predictor. Defaults to 1e-3.
lr_hmc (float, optional): Learning rate for the HMC hyperparameters (matrix of step sizes). Defaults to 1e-3.
            lr_scale (float, optional): Learning rate for the scale (inflation) factor. Defaults to 1e-2.
update_s_each (int, optional): Interval of steps for optimizing the scale factor. Defaults to 10.
"""
super(HMCVAE, self).__init__(dataset=dataset, dim_x=dim_x, dim_y=dim_y,
latent_dim = latent_dim, arch=arch, dim_h=dim_h, likelihood_x = likelihood_x, likelihood_y = likelihood_y,
variance=variance, imbalanced_y=imbalanced_y,
categories_y=categories_y, prediction_metric=prediction_metric, batch_size=batch_size, lr=lr,
samples_MC = samples_MC, data_path=data_path, split_idx=split_idx)
self.HMC = HMC(dim=latent_dim, L=L, T=T, chains=chains, chains_sksd=chains_sksd, logp=None)
self.automatic_optimization=False
self.L = L
self.T = T
self.chains = chains
self.chains_sksd = chains_sksd
self.sksd = sksd
self.pre_steps = pre_steps
self.lr_pre = lr_pre
self.lr_encoder = lr_encoder
self.lr_decoder = lr_decoder
self.lr_predictor = lr_predictor
self.lr_hmc = lr_hmc
self.lr_scale = lr_scale
self.update_s_each = update_s_each
self.hmc=True
self.save_hyperparameters('L', 'T', 'chains', 'sksd', 'pre_steps',
'lr_pre', 'lr_encoder', 'lr_decoder', 'lr_predictor', 'lr_hmc',
'lr_scale', 'update_s_each')
self.step_idx=0 # training step index
# ============= Modified base functions ============= #
def forward(self, batch: tuple, hmc=True, samples=1) -> tuple:
"""
Forward data through the model. For the pretraining stage, use the ELBO. For the rest, use HMC
Args:
batch (tuple): contains (data, observed_data, target, observed_target)
hmc (bool): sample posterior using HMC (True). Defaults to True
samples (int): number of MC samples for computing the ELBO
Returns:
If hmc=False, returns:
loss_VI, rec_x, rec_y, kl
If hmc=True, returns:
loss_VI, loss_HMC, loss_SKSD, rec_x, rec_y, kl
"""
if hmc==True:
# Activate only encoder
activate(self.encoder)
deactivate(self.decoder)
deactivate(self.predictor)
self.HMC.log_eps.requires_grad = False
self.HMC.log_inflation.requires_grad = False
# Get data
x, observed_x, y, observed_y = batch
xn = self.normalize_x(x)
xt, yt, xy, observed = self.preprocess_batch(batch)
# xt is the preprocessed input (xt=x if no preprocessing)
# observed is observed_x OR observed_y (for not using kl if no observed data)
mu_z, logvar_z = self.encoder(xy)
z = self.sample_z(mu_z, logvar_z, samples=samples, hmc=False)
theta_x = self.decoder(z)
x_hat = self.build_x_hat(xn, observed_x, theta_x)
zx = torch.cat([z,x_hat],dim=-1)
rec_x = self.decoder.logp(xt, observed_x, z=z, theta=theta_x).sum(-1)
rec_y = self.predictor.logp(yt, observed_y, z=zx).sum(-1)
kls = self.encoder.regularizer(mu_z, logvar_z, observed)
elbo = rec_x + rec_y - kls.sum(0).unsqueeze(-1)
elbo = elbo[elbo!=0].mean()
rec_x = rec_x[rec_x!=0].mean()
rec_y = rec_y[rec_y!=0].mean()
kl_mean = torch.zeros(len(kls)).to(self.device)
for l, kl in enumerate(kls):
kl_mean[l]= kl[kl!=0].mean()
loss_3 = -elbo
if hmc==False: # returns elbo
return loss_3, rec_x, rec_y, kl_mean
else: # returns elbo, logp and sksd
# Activate decoder, predictor and hmc
activate(self.decoder)
activate(self.predictor)
self.HMC.log_eps.requires_grad = True
deactivate(self.encoder)
self.HMC.log_inflation.requires_grad = False
# Encoder again for not sharing gradients
mu_z, logvar_z = self.encoder(xy)
zT = self.sample_z(mu_z, logvar_z, samples=samples)
loss_1 = -self.HMC.logp(zT)
loss_1 = loss_1[loss_1!=0].mean()
if self.sksd==1:
# Deactivate everything except scale
self.HMC.log_inflation.requires_grad = True
deactivate(self.encoder)
deactivate(self.decoder)
deactivate(self.predictor)
self.HMC.log_eps.requires_grad = False
loss_2 = self.HMC.evaluate_sksd(mu_z, torch.exp(logvar_z))
else:
loss_2 = None
return loss_3, loss_1, loss_2
def training_step(self, batch: tuple, batch_idx: int, logging: bool=True):
"""
Perform a traning step following https://arxiv.org/abs/2202.04599
- For the first pre_steps, optimize parameters by maximizing the ELBO
- For the rest, optimize encoder using ELBO, and the rest using HMC objective and SKSD
Args:
batch (tuple): contains (data, observed_data, target, observed_target)
batch_idx (int): batch index from the training set
logging (bool): log metrics into Tensorboard (True). Default True
"""
(opt_vae, opt_decoder, opt_predictor, opt_encoder, opt_hmc, opt_scale) = self.optimizers(use_pl_optimizer=True)
if self.step_idx < self.pre_steps:
self.hmc=False
loss_3, rec_x, rec_y, kls = self.forward(batch, hmc=False, samples=self.samples_MC)
opt_vae.zero_grad()
self.manual_backward(loss_3)
opt_vae.step()
self.log('ELBO', -loss_3, on_step=False, on_epoch=True, prog_bar=True, logger=True)
if logging:
self.log('-rec_x', -rec_x, on_step=False, on_epoch=True, prog_bar=False, logger=True)
self.log('-rec_y', -rec_y, on_step=False, on_epoch=True, prog_bar=False, logger=True)
for l, kl in enumerate(kls):
self.log('kl_{:d}'.format(l), kl, on_step=False, on_epoch=True, prog_bar=False, logger=True)
else:
self.hmc=True
loss_3, loss_1, loss_2 = self.forward(batch, samples=self.chains)
##### Optimization
# Optimize psi (encoder)
activate(self.encoder)
deactivate(self.decoder)
deactivate(self.predictor)
self.HMC.log_eps.requires_grad = False
self.HMC.log_inflation.requires_grad = False
opt_encoder.zero_grad()
opt_decoder.zero_grad()
opt_predictor.zero_grad()
opt_hmc.zero_grad()
opt_scale.zero_grad()
self.manual_backward(loss_3)
opt_encoder.step()
# Optimize theta_x, theta_y and phi (decoders and HMC)
activate(self.decoder)
activate(self.predictor)
self.HMC.log_eps.requires_grad = True
deactivate(self.encoder)
self.HMC.log_inflation.requires_grad = False
opt_encoder.zero_grad()
opt_decoder.zero_grad()
opt_predictor.zero_grad()
opt_hmc.zero_grad()
opt_scale.zero_grad()
self.manual_backward(loss_1)#, [opt_decoder, opt_predictor, opt_hmc])
opt_decoder.step()
opt_predictor.step()
opt_hmc.step()
if self.sksd and self.step_idx % self.update_s_each == 0:
self.HMC.log_inflation.requires_grad = True
deactivate(self.encoder)
deactivate(self.decoder)
deactivate(self.predictor)
self.HMC.log_eps.requires_grad = False
opt_encoder.zero_grad()
opt_decoder.zero_grad()
opt_predictor.zero_grad()
opt_hmc.zero_grad()
opt_scale.zero_grad()
self.manual_backward(loss_2)#, opt_scale)
opt_scale.step()
scale = torch.exp(self.HMC.log_inflation)
self.log('scale', scale, on_step=False, on_epoch=True, prog_bar=True, logger=True)
if logging:
self.log('SKSD', loss_2, on_step=False, on_epoch=True, prog_bar=False, logger=True)
self.log('HMC_objective', -loss_1, on_step=False, on_epoch=True, prog_bar=True, logger=True)
self.step_idx += 1
def preprocess_batch(self, batch: tuple):
"""
Preprocessing operations for the batch (overrides the base class function) for defining the HMC objective p(epsilon)(x, y)
Args:
batch (tuple): contains (data, observed_data, target, observed_target)
Returns:
tuple: preprocessed batch, contains (data, observed_data, target, observed_target)
"""
batch = [b.to(self.device) for b in batch]
x, observed_x, y, observed_y = batch
x = x.view(-1, self.dim_x)
# Normalize the data
xn = self.normalize_x(x)
xo = xn * observed_x
x_tilde = torch.cat([xo, observed_x], axis=1)
y = y.view(-1, self.dim_y)
observed_y = observed_y.view(-1, self.dim_y)
# Normalize the target
yn = self.normalize_y(y)
yon = yn * observed_y
y_tilde = torch.cat([yon, observed_y], axis=1)
xy = torch.cat([x_tilde, y_tilde], axis=1)
observed = torch.logical_or(observed_x.sum(-1, keepdim=True)>0, observed_y.sum(-1, keepdim=True)>0)
# Define the HMC objective
self.HMC.logp = self.logp_func(xo, observed_x, yon, observed_y)
return xn, yn, xy, observed
def sample_z(self, mu: torch.Tensor, logvar: torch.Tensor, samples=1, hmc=True) -> torch.Tensor:
"""
Draw latent samples from a given approx posterior parameterized by mu and logvar
Args:
mu (torch.Tensor): tensor with the means (batch_size, latent_dim)
logvar (torch.Tensor): tensor with the log variances (batch_size, latent_dim)
samples (int, optional): number of samples. Defaults to 1.
hmc (bool, optional): draw hmc samples or Gaussian samples from the proposal. Defaults to True.
Returns:
torch.Tensor: latent samples
"""
if hmc==False or self.validation and self.global_step < self.pre_steps:
# Repeat samples_MC times for Monte Carlo
mu = mu.repeat(samples, 1, 1).transpose(0, 1)
logvar = logvar.repeat(samples, 1, 1).transpose(0, 1)
# Reparametrization
z = reparameterize(mu, torch.exp(logvar))
else: # sample from the true posterior
z, _ = self.HMC.generate_samples_HMC(mu, torch.exp(logvar), chains=samples)
return z
# ============= Modified PL functions ============= #
def configure_optimizers(self):
opt_vae = torch.optim.Adam(list(self.decoder.parameters()) + list(self.predictor.parameters())
+ list(self.encoder.parameters()), lr=self.lr_pre, weight_decay=0.01)
opt_decoder = torch.optim.Adam(list(self.decoder.parameters()), lr=self.lr_decoder, weight_decay=0.01)
opt_predictor = torch.optim.Adam(list(self.predictor.parameters()), lr=self.lr_predictor, weight_decay=0.01)
opt_encoder = torch.optim.Adam(list(self.encoder.parameters()), lr=self.lr_encoder, weight_decay=0.01)
opt_hmc = torch.optim.Adam([self.HMC.log_eps], lr=self.lr_hmc)
opt_scale = torch.optim.Adam([self.HMC.log_inflation], lr=self.lr_scale)
return [opt_vae, opt_decoder, opt_predictor, opt_encoder, opt_hmc, opt_scale]
| 1.59375 | 2 |
analyzePenalties.py | tgadf/football | 0 | 12793985 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 18:29:44 2019
@author: tgadfort
"""
from debug import debugclass
from playTypes import noplay
from playYards import playyards
#from copy import deepcopy, copy
# create logger
import logging
module_logger = logging.getLogger('log.{0}'.format(__name__))
############################################################################################################
## Drive Class
############################################################################################################
class analyzepenalties:
def __init__(self):
self.logger = logging.getLogger('log.{0}.{1}'.format(__name__, self.__class__))
self.ind = 2*" "
self.sep = "======================================================"
self.dc = debugclass()
self.py = playyards()
def isPenaltyAdditive(self, gameData):
self.logger.debug("\n{0}".format(2*self.sep))
self.logger.debug("{0}Analyzing Penalty Additiveness".format(self.ind))
for idr,driveData in enumerate(gameData):
drivePlays = driveData.plays
for ipl,drivePlay in enumerate(drivePlays):
play = drivePlay.play
if play.penalty.isPenalty is False:
continue
penaltyyards = play.penalty.yards
playyards = play.yds.yards
nextYards = drivePlay.nextDiffYards
if all([penaltyyards, playyards, nextYards]):
if penaltyyards + playyards == nextYards:
continue
elif penaltyyards == playyards and penaltyyards == nextYards:
play.yds.yards = 0
continue
else:
self.logger.debug("{0}Penalty Analysis: Penalty Yards=={1}\tPlay Yards=={2}\tNext Yards=={3}\tText=={4}".format(self.ind, penaltyyards, playyards, nextYards, play.text))
else:
self.logger.debug("{0}Penalty Analysis: Penalty Yards=={1}\tPlay Yards=={2}\tNext Yards=={3}\tText=={4}".format(self.ind, penaltyyards, playyards, nextYards, play.text))
self.logger.debug("{0}Analyzing Penalty Additiveness -> Done".format(self.ind))
def penalties(self, gameData):
self.logger.debug("\n{0}".format(2*self.sep))
self.logger.debug("{0}Analyzing Penalties".format(self.ind))
for idr,driveData in enumerate(gameData):
drivePlays = driveData.plays
for ipl,drivePlay in enumerate(drivePlays):
play = drivePlay.play
if play.penalty.isPenalty is False:
continue
penaltyyards = self.py.findPenaltyYards(play.text)
nextYards = drivePlay.nextDiffYards
if isinstance(play, noplay):
if play.yds.yards == 0 and penaltyyards is not None:
play.yds.yards = penaltyyards
elif play.yds.yards == 0 and penaltyyards is None:
play.yds.yards = nextYards
else:
if play.yds.yards is None:
play.yds.yards = nextYards
else:
print("Not sure...")
if nextYards == 0 and play.yds.yards == 0:
penaltyyards = 0
if sum([x in play.text for x in ["Personal Foul", "Unsportsmanlike Conduct", "Face Mask"]]) > 0:
if nextYards == 15:
penaltyyards = 15
play.yds.yards = 0
                    elif nextYards == -15:
penaltyyards = -15
play.yds.yards = 0
if nextYards == penaltyyards:
if play.yds.yards == 0:
play.yds.yards = nextYards
play.penalty.yards = penaltyyards
if nextYards == play.yds.yards and nextYards == penaltyyards:
continue
self.logger.debug("{0}Penalty Analysis: Penalty=={1}\tPlay=={2}\tNext=={3}\tYards=={4}\tPYards=={5}\tText=={6}".format(self.ind, play.penalty.isPenalty, play.name, nextYards, play.yds.yards, penaltyyards, play.text))
self.logger.debug("{0}Analyzing Penalties -> Done".format(self.ind))
return gameData | 2.421875 | 2 |
battle.py | AvenirX/Meicraft | 0 | 12793986 | import random
import time
from Character import *
from Item import create_item
def battle(fighters, max_turn=10):
"""
Battle process start->loot
:param max_turn: int turns for 1 battle, default 10
:param fighters: list of fighter
:return: None
"""
# Enter battle_process
print('Enter battle_process')
# Begin battle_process
# Init skills (for n)
for fighter in fighters:
fighter.init_battle()
fighter.report_status()
# Turns begin
turn = 1
# Init turns
# fighters_this_turn = list(fighters)
fighters_remain = len(fighters)
while turn <= max_turn and fighters_remain >= 2:
# Enter Turn #turn
print('\n#{t}'.format(t=turn))
# Begin Turn #turn
# Init turn
# Construct fighters participate in this turn
# & Init fighter turn paras
fighters_this_turn = []
for fighter in fighters:
fighter.init_turn()
if fighter.is_alive():
fighters_this_turn.append(fighter)
# toimpr magical nb, in right place ?
# if turn != 1:
# fighter.gain_score(1)
# Choose skill
for fighter in fighters_this_turn:
# NPC choose skill
if fighter.is_npc:
npc = fighter
target = ''
target_list = list(set(fighters_this_turn) - {npc})
key = random.choice(npc.get_available_skills())
skill = npc.BATTLESKILLBOOK[key]
# If it's an A skill, choose its target
if skill.phase_type == 'A':
target = random.choice(target_list)
# Player input skill
else:
player = fighter
target_list = list(set(fighters_this_turn) - {player})
target = '' # dummy?
target_name = '' # dummy?
key = '' # dummy?
while True:
input_raw = input('BoLoBoLo...{s}:'
.format(s=str(player.get_available_skills())))
input_args = input_raw.strip().split()
key = input_args[0].upper()
target_name = input_args[1] if len(input_args) > 1 else ''
if key in player.get_available_skills():
break
skill = player.BATTLESKILLBOOK[key]
# If it's an A skill, choose its target
if skill.phase_type == 'A':
# Auto choose target when only 1 enemy
if len(target_list) == 1:
target = target_list[0]
else:
while True:
for target_fighter in target_list:
if target_fighter.name == target_name:
target = target_fighter
if target:
break
target_name = input('target...')
# Cast skill = record move, create proj, deliver proj
skill.cast(target)
# Billing
for fighter in fighters:
# Start billing by order
for category in ['potion', 'arrow']:
for tag in ['fill', 'drain', 'heal', 'damage']:
prjs = fighter.incoming_projectiles[category][tag]
if prjs:
for prj in prjs:
prj.billing()
# All billed
# Check new death
if fighter.is_alive() and fighter.HP <= 0:
# fighter will die
# fighter did die
# go_die = set alive false, record turn, leave death message
fighter.go_die(turn)
fighters_remain -= 1
# killer gain score for killing
for prj in fighter.lethal_projectiles:
# toImpr move magical nb to global setting
prj.caster.killed_someone()
# Output turn info
# Moves
for fighter in fighters:
print('{hp}hp {mp}mp\t[{f}]|\t{m}'
.format(hp=fighter.HP, mp=fighter.MP,
f=fighter.name, m=str(fighter.last_move)))
time.sleep(0.2)
# Deaths
for fighter in fighters_this_turn:
if not fighter.is_alive():
                print('{f} died'.format(f=fighter.name))
turn += 1
continue
# Exit turns
print('\nExit turns')
# Exit battle_process
# Battle result
score_board = sorted(fighters, key=lambda f: (f.score, f.died_turn), reverse=True)
for index, fighter in enumerate(score_board):
if fighter.is_alive():
            status = 'Alive ({hp}HP)'.format(hp=fighter.HP)
else:
killers = []
for pj in fighter.lethal_projectiles:
                killer = "{owner}'s {skill}". \
format(owner=pj.caster.name, skill=pj.skill.alias)
killers.append(killer)
killers_msg = '&'.join(killers)
                status = 'Died (Turn{t}, {hp}HP, killed by {killer})' \
.format(t=fighter.died_turn, hp=fighter.HP,
killer=killers_msg)
print('#{i}\t{f}\t\t*{score}*\t\t{status}'.format(i=index+1, f=fighter.name, score=fighter.score, status=status))
# distribute_loot(player, enemy, battle_result)
# toAdd loot system
# def distribute_loot(player, enemy, battle_result):
# """
# Distribute enemy loot to player according to battle result
# :param player: obj
# :param enemy: obj
# :param battle_result: string 'WIN'/'LOSE'/'TIE'
# :return: None
# """
# if battle_result == 'WIN':
# c_money = 1
# c_exp = 1
# loot_item_dict = enemy.loot['item_dict']
# elif battle_result == 'TIE':
# c_money = 0
# c_exp = 0.5
# loot_item_dict = {}
# elif battle_result == 'LOSE':
# c_money = 0
# c_exp = 0
# loot_item_dict = {}
#
# else:
# return
#
# player.add_money(enemy.loot['money'] * c_money)
# player.add_exp(enemy.loot['EXP'] * c_exp)
# for loot_item in loot_item_dict.keys():
# player.get_item(loot_item, loot_item_dict[loot_item])
# toImpr separated gameplay
if __name__ == "__main__":
glove1 = create_item('A2')
glove2 = create_item('A2')
# toAdd item database
# game_items = [glove1, glove2]
zcx = Fighter('zcx')
zcx.put_in_item(glove1)
zcx.equip_item(glove1)
iii = Fighter('i', is_npc=True)
iii.put_in_item(glove2)
iii.equip_item(glove2)
j = Fighter('j', is_npc=True)
k = Fighter('k', is_npc=True)
allFighters = [zcx, iii, j, k]
battle(allFighters)
# player1.report_wealth()
# print(player1.level)
| 3.453125 | 3 |
SA_SVM/pySVM.py | Bowenislandsong/Medical-Document-Classification- | 0 | 12793987 | <gh_stars>0
from sklearn import svm
from sklearn.datasets import load_files
import numpy as np
import scipy.io as spio
from scipy.sparse import csr_matrix
from scipy.special import gammaln
frommat = spio.loadmat('sbowen_woStop.mat', squeeze_me=True)
xTrain = frommat['X_train_woSTOP'] # 'X_train_woSTOP' matrix (training data)
yTrain = frommat['X_test_woSTOP'] # 'X_test_woSTOP' matrix (test data)
xTest = frommat['Y_train'] # 'Y_train' vector (training labels)
yTest = frommat['Y_test'] # 'Y_test' vector (test labels)
vocab = frommat['vocab'] # vocabulary
print(len(vocab))
def RRNpreprocessing(M,tune,vocablen,special=False):
u, doc_indices = np.unique(M[:,0],return_inverse=True)
xProcessed = csr_matrix((M[:,2], (doc_indices,M[:,1])), shape=(len(doc_indices), vocablen))
print(xProcessed)
alpha = 2*gammaln(tune+1) - gammaln(2*tune+vocablen)
print(xProcessed.sum(axis=1))
rowsum = xProcessed.sum(axis=1)
    WordProb = xProcessed.multiply(np.power(rowsum, -1.0))
# print(WordProb)
# if (special):
# for idx, val in enumerate(u):
# print(idx, val)
# xProcessed[idx,:] = np.random.choice(xProcessed[idx,:],tune,WordProb[idx,:])
# pass
return [xProcessed,alpha]
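
# Illustrative sketch (not called anywhere): what the csr_matrix construction
# in RRNpreprocessing does with a few made-up (doc_id, word_id, count) rows.
def _demo_doc_term_matrix():
    M_demo = np.array([[10, 0, 2],   # doc 10 contains word 0 twice
                       [10, 3, 1],   # doc 10 contains word 3 once
                       [11, 0, 4]])  # doc 11 contains word 0 four times
    u, doc_indices = np.unique(M_demo[:, 0], return_inverse=True)  # doc_indices == [0, 0, 1]
    X = csr_matrix((M_demo[:, 2], (doc_indices, M_demo[:, 1])), shape=(len(u), 5))
    # X.toarray() -> [[2, 0, 0, 1, 0],
    #                 [4, 0, 0, 0, 0]]
    # (RRNpreprocessing sizes its matrix with len(doc_indices) rather than
    # len(u), so it keeps one row per input row instead of one per document.)
    return X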
#tuning = [150,200]
itune = 150
print("tuning at :"+str(itune))
[xTrainProce,alpha] = RRNpreprocessing(xTrain,itune,len(vocab))
print("Alpha is: "+ str(alpha))
print("Xtrain is :"+str(xTrainProce))
| 2.25 | 2 |
trybs4.py | melvin0008/pythoncodestrial | 0 | 12793988 | from bs4 import BeautifulSoup
import xlsxwriter
workbook= xlsxwriter.Workbook("data.xlsx")
worksheet = workbook.add_worksheet()
f = open('rough.html',"r")
data=f.read()
soup=BeautifulSoup(data)
div = soup.find('div', {"class":'dataTables_scroll'})
table=div.find('table')
tbody=div.find('tbody')
rows=tbody.find_all('tr')
rowno = 0
for row in rows:
a=row.find_all('a')
td=row.find_all('td')
worksheet.write(rowno, 1, a[2].text)
worksheet.write(rowno, 2, td[3].text[td[3].text.find('P:'):])
worksheet.write(rowno, 3, a[3].text)
worksheet.write(rowno, 4, a[4].text)
worksheet.write(rowno, 5, a[3].text)
worksheet.write(rowno, 6, td[6].text)
rowno=rowno+1
workbook.close()
print "Done"
| 3.078125 | 3 |
plugins/option.py | loktacar/wallpapermaker | 1 | 12793989 | from plugin import Plugin
class Option(Plugin):
"""
Base class for option plugins
"""
    # Default value; if default is None, the option is required to be set to something else
default = None
# Option name
option = None
# Short command line option
cmd_short = None
# Command line argument name
cmd_argument = None
# Description, duhh
description = ''
@staticmethod
def parse(value):
return value
@classmethod
def folder_module(cls):
return cls.__module__.split('.')[-2]
@classmethod
def get_doc_line(cls):
s = ' '*4
if cls.cmd_short is not None:
s += '-%s ' % cls.cmd_short
if cls.option is not None:
folder_module = cls.folder_module()
if folder_module == 'options':
s += '--%s' % cls.option
else:
s += '--%s.%s' % (folder_module, cls.option)
if cls.cmd_argument is not None:
s += '=%s' % cls.cmd_argument
if len(s) < 30:
s += ' ' * (30 - len(s))
else:
s += '\n' + ' ' * 30
s += cls.description
return s
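
# For illustration, a hypothetical concrete option plugin might look like the
# commented-out sketch below (the name and values are invented, not taken from
# this project):
#
#     class UpdateInterval(Option):
#         default = 60
#         option = 'update'
#         cmd_short = 'u'
#         cmd_argument = 'SECONDS'
#         description = 'Seconds between wallpaper changes'
#
#         @staticmethod
#         def parse(value):
#             return int(value)
#
# get_doc_line() would then render roughly
#     "-u --update=SECONDS          Seconds between wallpaper changes"
# (the exact option prefix depends on the folder the plugin module lives in).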
| 2.78125 | 3 |
lang/Python/longest-common-subsequence-1.py | ethansaxenian/RosettaDecode | 0 | 12793990 | <gh_stars>0
def lcs(xstr, ystr):
"""
    >>> lcs('thisisatest', 'testing123testing')
'tsitest'
"""
if not xstr or not ystr:
return ""
x, xs, y, ys = xstr[0], xstr[1:], ystr[0], ystr[1:]
if x == y:
return x + lcs(xs, ys)
else:
return max(lcs(xstr, ys), lcs(xs, ystr), key=len)
| 3.15625 | 3 |
docandcover.py | softwaresaved/docandcover | 0 | 12793991 | #input_var='https://github.com/softwaresaved/docandcover/'
input_var='https://github.com/Axelrod-Python/Axelrod'
import os
import shutil
from pygit2 import clone_repository
try:
shutil.rmtree('tmp-repo')
except:
pass
try:
repo = clone_repository(input_var,'tmp-repo')
except:
pass
from fileListGetter import fileListGetter
from functionDetailGetter import functionDetailGetter
#fileLists = fileListGetter('../snippets/tmp-repo/')
fileLists = fileListGetter('tmp-repo')
for i in functionDetailGetter(fileLists):
print i
#printFileLists(fileLists)
| 2.546875 | 3 |
galerias/migrations/0003_auto_20190806_1038.py | shiminasai/cantera | 0 | 12793992 | # Generated by Django 2.1.7 on 2019-08-06 16:38
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
('galerias', '0002_auto_20190805_1410'),
]
operations = [
migrations.AddField(
model_name='galeriaimagenes',
name='aprobado',
field=models.BooleanField(default=True),
preserve_default=False,
),
migrations.AddField(
model_name='galeriavideos',
name='aprobado',
field=models.BooleanField(default=True),
preserve_default=False,
),
migrations.AddField(
model_name='galeriavideos',
name='descripcion',
field=ckeditor_uploader.fields.RichTextUploadingField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='galeriavideos',
name='portada',
field=sorl.thumbnail.fields.ImageField(blank=True, null=True, upload_to='galerias/', verbose_name='Imagen'),
),
migrations.AlterField(
model_name='galeriaimagenes',
name='usuario',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Autor'),
),
migrations.AlterField(
model_name='galeriavideos',
name='usuario',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Autor'),
),
]
| 1.703125 | 2 |
star_resty/payload/json.py | lazy-labs/star_resty | 3 | 12793993 | <gh_stars>1-10
import types
from typing import Mapping, Type, TypeVar, Union
import ujson
from marshmallow import EXCLUDE, Schema
from starlette.requests import Request
from star_resty.exceptions import DecodeError
from .base import SchemaParser, set_parser
__all__ = ('json_schema', 'json_payload', 'JsonParser')
P = TypeVar('P')
def json_schema(schema: Union[Schema, Type[Schema]], cls: P,
unknown: str = EXCLUDE) -> P:
return types.new_class('JsonInputParams', (cls,),
exec_body=set_parser(JsonParser.create(schema, unknown=unknown)))
def json_payload(schema: Union[Schema, Type[Schema]], unknown=EXCLUDE) -> Type[Mapping]:
return json_schema(schema, Mapping, unknown=unknown)
class JsonParser(SchemaParser):
__slots__ = ()
@property
def location(self):
return 'body'
@property
def media_type(self):
return 'application/json'
async def parse(self, request: Request):
body = await request.body()
if body is None:
data = {}
else:
try:
data = ujson.loads(body)
except (TypeError, ValueError) as e:
raise DecodeError('Invalid json body') from e
return self.schema.load(data, unknown=self.unknown)
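
# Hypothetical usage sketch (the schema and wiring below are invented for
# illustration and may not match the project's intended integration):
#
#     class ItemSchema(Schema):
#         name = fields.Str(required=True)
#
#     Payload = json_payload(ItemSchema)   # a Mapping subtype carrying a JsonParser
#
# When the framework later calls JsonParser.parse(request), the body is decoded
# with ujson and validated via the schema's load(), raising DecodeError on
# malformed JSON.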
| 2.140625 | 2 |
2-resources/_External-learning-resources/02-pyth/python-patterns-master/patterns/creational/prototype.py | eengineergz/Lambda | 0 | 12793994 | <reponame>eengineergz/Lambda
"""
*What is this pattern about?
This patterns aims to reduce the number of classes required by an
application. Instead of relying on subclasses it creates objects by
copying a prototypical instance at run-time.
This is useful as it makes it easier to derive new kinds of objects,
when instances of the class have only a few different combinations of
state, and when instantiation is expensive.
*What does this example do?
When the number of prototypes in an application can vary, it can be
useful to keep a Dispatcher (aka, Registry or Manager). This allows
clients to query the Dispatcher for a prototype before cloning a new
instance.
Below provides an example of such Dispatcher, which contains three
copies of the prototype: 'default', 'objecta' and 'objectb'.
*TL;DR
Creates new object instances by cloning prototype.
"""
from typing import Any, Dict
class Prototype:
def __init__(self, value: str = "default", **attrs: Any) -> None:
self.value = value
self.__dict__.update(attrs)
    def clone(self, **attrs: Any) -> "Prototype":
"""Clone a prototype and update inner attributes dictionary"""
# Python in Practice, <NAME>field
# copy.deepcopy can be used instead of next line.
obj = self.__class__(**self.__dict__)
obj.__dict__.update(attrs)
return obj
class PrototypeDispatcher:
def __init__(self):
self._objects = {}
def get_objects(self) -> Dict[str, Prototype]:
"""Get all objects"""
return self._objects
def register_object(self, name: str, obj: Prototype) -> None:
"""Register an object"""
self._objects[name] = obj
def unregister_object(self, name: str) -> None:
"""Unregister an object"""
del self._objects[name]
def main() -> None:
"""
>>> dispatcher = PrototypeDispatcher()
>>> prototype = Prototype()
>>> d = prototype.clone()
>>> a = prototype.clone(value='a-value', category='a')
>>> b = a.clone(value='b-value', is_checked=True)
>>> dispatcher.register_object('objecta', a)
>>> dispatcher.register_object('objectb', b)
>>> dispatcher.register_object('default', d)
>>> [{n: p.value} for n, p in dispatcher.get_objects().items()]
[{'objecta': 'a-value'}, {'objectb': 'b-value'}, {'default': 'default'}]
>>> print(b.category, b.is_checked)
a True
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3.0625 | 3 |
compile.py | Brikwerk/cvpr-explorer | 0 | 12793995 | <reponame>Brikwerk/cvpr-explorer
import json
import time
import os
from urllib.parse import urljoin
import argparse
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('year', type=int,
help="""Specifies the CVPR year to compile into a library.""")
parser.add_argument('-d', '--delay', required=False, default=0.25,
help="""Specifies the delay between publications requests.""")
parser.add_argument('-u', '--useragent', required=False, default="CVPR-Explorer",
help="""Specifies the user-agent string for publication requests.""")
args = parser.parse_args()
def get_paged_papers(soup, base_url):
all_papers = False
page_links = []
for page in soup.findAll('dd'):
page_link = page.findAll("a")[0].get('href')
if "all" in page_link:
all_papers = page_link
break
else:
page_links.append(page_link)
if all_papers:
url = urljoin(base_url, all_papers)
html_text = requests.get(url).text
page_soup = BeautifulSoup(html_text, 'html.parser')
return page_soup.findAll('dt', {'class': 'ptitle'})
else:
paper_elms = []
for page_link in page_links:
url = urljoin(base_url, page_link)
html_text = requests.get(url).text
page_soup = BeautifulSoup(html_text, 'html.parser')
paper_elms = paper_elms + page_soup.findAll('dt', {'class': 'ptitle'})
time.sleep(args.delay)
return paper_elms
if __name__ == "__main__":
    user_agent = args.useragent
headers = {
'User-Agent': user_agent
}
cvpr_base_url = "https://openaccess.thecvf.com"
cvpr_year = args.year
cvpr_url = f"{cvpr_base_url}/CVPR{cvpr_year}"
    html_text = requests.get(cvpr_url, headers=headers).text
soup = BeautifulSoup(html_text, 'html.parser')
print(f"Getting the publication list for CVPR {args.year}")
if "Day 1: " in soup.select_one('dd').text:
paper_elms = get_paged_papers(soup, cvpr_base_url)
else:
paper_elms = soup.findAll('dt', {'class': 'ptitle'})
print(len(paper_elms), "publications found.")
print("Compiling library...")
papers = {}
for i in tqdm(range(len(paper_elms))):
paper_elm = paper_elms[i]
try:
paper_anchor = paper_elm.findAll('a')[0]
paper_info_link = urljoin(cvpr_base_url, paper_anchor.get('href'))
paper_title = paper_anchor.contents[0]
            html_text = requests.get(paper_info_link, headers=headers).text
soup = BeautifulSoup(html_text, "html.parser")
paper_abstract = soup.find('div', {'id': 'abstract'}).contents[0]
paper_link = soup.findAll("a", string="pdf")[0].get('href')
paper_link = urljoin(cvpr_base_url, paper_link)
papers[i] = {
"paper_title": paper_title,
"paper_info_link": paper_info_link,
"paper_link": paper_link,
"paper_abstract": paper_abstract
}
time.sleep(args.delay)
except Exception as e:
print("\n\n--> ERROR <--")
print(e)
print("\n\n")
time.sleep(args.delay)
print("Writing library...")
if not os.path.isdir("./libraries"):
os.mkdir("./libraries")
with open(f"./libraries/cvpr{cvpr_year}.json", "w") as f:
f.write(json.dumps(papers, indent=4))
print("Done!")
| 2.90625 | 3 |
recursion/sumTriangleFromArrray.py | kushvr7/High-On-DSA | 76 | 12793996 | <gh_stars>10-100
def triangle(array, result):
result.insert(0,array)
array = [array[i-1]+array[i] for i in range(1, len(array))]
return triangle(array , result) if len(array)>0 else result
for i in triangle([1,2,3,4,5], []):
print(i)
'''
triangle(3,5,7,9)
-> triangle (8,12,16)
-> triangle (20 ,28)
-> triangle (48)
-> triangle () return
-> print(48)
-> print(20, 28)
->print(8,12,16)
->print(3,5,7,9)
->print(1,2,3,4,5)
''' | 3.65625 | 4 |
hal_fuzz/hal_fuzz/handlers/debug.py | diagprov/hal-fuzz | 117 | 12793997 | <gh_stars>100-1000
from unicorn.arm_const import *
def stop(uc):
print_context(uc)
input("...")
def print_context(uc):
print("==== State ====")
r0 = uc.reg_read(UC_ARM_REG_R0)
r1 = uc.reg_read(UC_ARM_REG_R1)
r2 = uc.reg_read(UC_ARM_REG_R2)
r3 = uc.reg_read(UC_ARM_REG_R3)
r4 = uc.reg_read(UC_ARM_REG_R4)
r5 = uc.reg_read(UC_ARM_REG_R5)
r7 = uc.reg_read(UC_ARM_REG_R7)
sp = uc.reg_read(UC_ARM_REG_SP)
pc = uc.reg_read(UC_ARM_REG_PC)
print("r0: 0x{:x}\nr1: 0x{:x}\nr2: 0x{:x}\nr3: 0x{:x}\nr4: 0x{:x}\nr5: 0x{:x}\nr7: 0x{:x}\npc: 0x{:x}\nsp: 0x{:x}".format(r0, r1, r2, r3, r4, r5, r7, pc, sp))
def breakpoint(uc):
import ipdb; ipdb.set_trace()
| 2.125 | 2 |
artellapipe/tools/welcome/widgets/frame.py | ArtellaPipe/artellapipe-tools-welcome | 0 | 12793998 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains frame widget implementation
"""
from __future__ import print_function, division, absolute_import
from Qt.QtCore import Qt
from Qt.QtWidgets import QSizePolicy, QFrame
from Qt.QtGui import QPainter, QPainterPath
class WelcomeFrame(QFrame, object):
def __init__(self, pixmap, parent=None):
super(WelcomeFrame, self).__init__(parent)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setFrameShape(QFrame.NoFrame)
self.setFrameShadow(QFrame.Plain)
self._pixmap = pixmap
self.setStyleSheet('QFrame { border-radius: 10px; }')
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
def paintEvent(self, event):
"""
Override base QFrame paintEvent function
:param event: QPaintEvent
"""
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
path = QPainterPath()
path.addRoundedRect(0, 0, self.width(), self.height(), 10, 10)
painter.setClipPath(path)
painter.drawPixmap(0, 0, self.width(), self.height(), self._pixmap)
| 2.1875 | 2 |
tests/blackbox_tests/__init__.py | stevecotton/i18nspector | 1 | 12793999 | <gh_stars>1-10
# Copyright © 2012-2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import difflib
import inspect
import io
import multiprocessing as mp
import os
import re
import shlex
import signal
import subprocess as ipc
import sys
import traceback
import unittest
import nose
import nose.plugins
from .. import tools
here = os.path.dirname(__file__)
# ----------------------------------------
def this():
'''
Return function that called this function. (Hopefully!)
'''
return globals()[inspect.stack()[1][0].f_code.co_name]
# ----------------------------------------
_parse_etag = re.compile(r'([A-Z]): (([\w-]+).*)').match
def parse_etag(contents, path):
match = _parse_etag(contents)
if match is None:
return
t = ETag(match.group(1), path, match.group(2))
return t
def etags_from_tagstring(obj, path):
try:
docstring = obj.tagstring
except AttributeError:
return
for line in docstring.splitlines():
line = line.lstrip()
t = parse_etag(line, path)
if t is not None:
yield t
def tagstring(s):
def update(x):
x.tagstring = s
return x
return update
# ----------------------------------------
class ETag():
_ellipsis = '<...>'
_split = re.compile('({})'.format(re.escape(_ellipsis))).split
def __init__(self, code, path, rest):
self._s = s = '{code}: {path}: {rest}'.format(
code=code,
path=path,
rest=rest,
)
self.tag = rest.split(None, 1)[0]
regexp = ''.join(
'.*' if chunk == self._ellipsis else re.escape(chunk)
for chunk in self._split(s)
)
self._regexp = re.compile('^{}$'.format(regexp))
def __eq__(self, other):
if isinstance(other, str):
return self._regexp.match(other)
else:
return NotImplemented
def __str__(self):
return self._s
def __repr__(self):
return repr(self._s)
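
def _etag_matching_example():
    # Illustration only (not used by the test suite): '<...>' in an expected
    # tag acts as a wildcard when an ETag is compared against an emitted line.
    t = ETag('E', 'foo.po', 'invalid-date <...>')
    assert t == 'E: foo.po: invalid-date 2001-02-30'
    assert not (t == 'W: foo.po: invalid-date 2001-02-30')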
# ----------------------------------------
def _get_signal_names():
data = dict(
(name, getattr(signal, name))
for name in dir(signal)
if re.compile('^SIG[A-Z0-9]*$').match(name)
)
try:
if data['SIGABRT'] == data['SIGIOT']:
del data['SIGIOT']
except KeyError:
pass
try:
if data['SIGCHLD'] == data['SIGCLD']:
del data['SIGCLD']
except KeyError:
pass
for name, n in data.items():
yield n, name
_signal_names = dict(_get_signal_names())
def get_signal_name(n):
try:
return _signal_names[n]
except KeyError:
return str(n)
# ----------------------------------------
test_file_extensions = ('.mo', '.po', '.pot', '.pop')
# .pop is a special extension to trigger unknown-file-type
class Plugin(nose.plugins.Plugin):
name = 'po-plugin'
enabled = True
def options(self, parser, env):
pass
def wantFile(self, path):
if path.endswith(test_file_extensions):
if path.startswith(os.path.join(os.path.abspath(here), '')):
return True
def loadTestsFromFile(self, path):
if self.wantFile(path):
yield TestCase(path)
def wantFunction(self, func):
if getattr(func, 'redundant', False):
return False
class TestCase(unittest.TestCase):
def __init__(self, path):
super().__init__('_test')
self.path = os.path.relpath(path)
def _test(self):
_test_file(self.path, basedir=None)
def __str__(self):
return self.path
class SubprocessError(Exception):
pass
def queue_get(queue, process):
'''
Remove and return an item from the queue.
Block until the process terminates.
'''
while True:
try:
return queue.get(timeout=1)
        # This semi-active waiting is ugly, but there doesn't seem to be any
# obvious better way.
except mp.queues.Empty:
if process.exitcode is None:
continue
else:
raise
def run_i18nspector(options, path):
commandline = os.environ.get('I18NSPECTOR_COMMANDLINE')
if commandline is None:
# We cheat here a bit, because exec(3)ing is very expensive.
# Let's load the needed Python modules, and use multiprocessing to
# “emulate” the command execution.
import lib.cli # pylint: disable=import-outside-toplevel
assert lib.cli # make pyflakes happy
prog = os.path.join(here, os.pardir, os.pardir, 'i18nspector')
commandline = [sys.executable, prog]
queue = mp.Queue()
child = mp.Process(
target=_mp_run_i18nspector,
args=(prog, options, path, queue)
)
child.start()
[stdout, stderr] = (
s.splitlines()
for s in queue_get(queue, child)
)
child.join()
rc = child.exitcode
else:
commandline = shlex.split(commandline)
commandline += options
commandline += [path]
fixed_env = dict(os.environ, PYTHONIOENCODING='UTF-8')
with ipc.Popen(commandline, stdout=ipc.PIPE, stderr=ipc.PIPE, env=fixed_env) as child:
stdout, stderr = (
s.decode('UTF-8').splitlines()
for s in child.communicate()
)
rc = child.poll()
assert isinstance(rc, int)
if rc == 0:
return stdout
if rc < 0:
message = ['command was interrupted by signal {sig}'.format(sig=get_signal_name(-rc))] # pylint: disable=invalid-unary-operand-type
else:
message = ['command exited with status {rc}'.format(rc=rc)]
message += ['']
if stdout:
message += ['stdout:']
message += ['| ' + s for s in stdout] + ['']
else:
message += ['stdout: (empty)']
if stderr:
message += ['stderr:']
message += ['| ' + s for s in stderr]
else:
message += ['stderr: (empty)']
raise SubprocessError('\n'.join(message))
def _mp_run_i18nspector(prog, options, path, queue):
with open(prog, 'rt', encoding='UTF-8') as file:
code = file.read()
sys.argv = [prog] + list(options) + [path]
orig_stdout = sys.stdout
orig_stderr = sys.stderr
code = compile(code, prog, 'exec')
io_stdout = io.StringIO()
io_stderr = io.StringIO()
gvars = dict(
__file__=prog,
)
(sys.stdout, sys.stderr) = (io_stdout, io_stderr)
try:
try:
exec(code, gvars) # pylint: disable=exec-used
finally:
(sys.stdout, sys.stderr) = (orig_stdout, orig_stderr)
stdout = io_stdout.getvalue()
stderr = io_stderr.getvalue()
except SystemExit:
queue.put([stdout, stderr])
raise
except: # pylint: disable=bare-except
exctp, exc, tb = sys.exc_info()
stderr += ''.join(
traceback.format_exception(exctp, exc, tb)
)
queue.put([stdout, stderr])
sys.exit(1)
raise # hi, pydiatra!
else:
queue.put([stdout, stderr])
sys.exit(0)
def assert_emit_tags(path, etags, *, options=()):
etags = list(etags)
stdout = run_i18nspector(options, path)
expected_failure = os.path.basename(path).startswith('xfail-')
if stdout != etags:
if expected_failure:
raise nose.SkipTest('expected failure')
str_etags = [str(x) for x in etags]
message = ['Tags differ:', '']
diff = list(
difflib.unified_diff(str_etags, stdout, n=9999)
)
message += diff[3:]
raise AssertionError('\n'.join(message))
elif expected_failure:
raise AssertionError('unexpected success')
class TestFileSyntaxError(Exception):
pass
def _parse_test_header_file(file, path, *, comments_only):
etags = []
options = []
for n, line in enumerate(file):
orig_line = line
if comments_only:
if n == 0 and line.startswith('#!'):
continue
if line.startswith('# '):
line = line[2:]
else:
break
if line.startswith('--'):
options += shlex.split(line)
else:
etag = parse_etag(line, path)
if etag is None:
if comments_only:
break
else:
raise TestFileSyntaxError(orig_line)
etags += [etag]
return etags, options
def _parse_test_headers(path):
# <path>.tags:
try:
file = open(path + '.tags', encoding='UTF-8') # pylint: disable=consider-using-with
except FileNotFoundError:
pass
else:
with file:
return _parse_test_header_file(file, path, comments_only=False)
# <path>.gen:
try:
file = open(path + '.gen', encoding='UTF-8', errors='ignore') # pylint: disable=consider-using-with
except FileNotFoundError:
pass
else:
with file:
return _parse_test_header_file(file, path, comments_only=True)
# <path>:
with open(path, 'rt', encoding='UTF-8', errors='ignore') as file:
return _parse_test_header_file(file, path, comments_only=True)
def _test_file(path, basedir=here):
if basedir is not None:
path = os.path.relpath(os.path.join(basedir, path), start=os.getcwd())
options = []
etags, options = _parse_test_headers(path)
assert_emit_tags(path, etags, options=options)
def get_coverage_for_file(path):
etags, options = _parse_test_headers(path)
del options
return (t.tag for t in etags)
def get_coverage_for_function(fn):
for etag in etags_from_tagstring(fn, ''):
yield etag.tag
def _get_test_filenames():
for root, dirnames, filenames in os.walk(here):
del dirnames
for filename in filenames:
if not filename.endswith(test_file_extensions):
continue
yield os.path.join(root, filename)
def test_file():
for filename in _get_test_filenames():
path = os.path.relpath(filename, start=here)
yield _test_file, path
test_file.redundant = True # not needed if the plugin is enabled
@tagstring('''
E: os-error No such file or directory
''')
def test_os_error_no_such_file():
with tools.temporary_directory() as tmpdir:
path = os.path.join(tmpdir, 'nonexistent.po')
expected = etags_from_tagstring(this(), path)
assert_emit_tags(path, expected)
@tagstring('''
E: os-error Permission denied
''')
def test_os_error_permission_denied():
if os.getuid() == 0:
raise nose.SkipTest('this test must not be run as root')
with tools.temporary_directory() as tmpdir:
path = os.path.join(tmpdir, 'denied.po')
with open(path, 'wb'):
pass
os.chmod(path, 0)
expected = etags_from_tagstring(this(), path)
assert_emit_tags(path, expected)
# ----------------------------------------
def get_coverage():
coverage = set()
for filename in _get_test_filenames():
for tag in get_coverage_for_file(filename):
coverage.add(tag)
for objname, obj in globals().items():
if not objname.startswith('test_'):
continue
for tag in get_coverage_for_function(obj):
coverage.add(tag)
return coverage
# vim:ts=4 sts=4 sw=4 et
| 1.78125 | 2 |
minydra/console.py | pg2455/minydra | 0 | 12794000 | <gh_stars>0
class bcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
class MinyConsole:
def _end(self, key, *args):
return f"{key}{' '.join(args)}{bcolors.ENDC}"
def okblue(self, *args):
return self._end(bcolors.OKBLUE, *args)
def warn(self, *args):
return self._end(bcolors.WARNING, *args)
def okgreen(self, *args):
return self._end(bcolors.OKGREEN, *args)
def fail(self, *args):
return self._end(bcolors.FAIL, *args)
def bold(self, *args):
return self._end(bcolors.BOLD, *args)
def underline(self, *args):
return self._end(bcolors.UNDERLINE, *args)
| 2.515625 | 3 |
imperative/python/megengine/traced_module/_passes/fuse_pass.py | Olalaye/MegEngine | 1 | 12794001 | <reponame>Olalaye/MegEngine
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import operator
from collections import defaultdict
from typing import Any, Callable, List
from ... import functional as F
from ... import module as M
from ...logger import get_logger
from ...tensor import Parameter, Tensor
from ...utils.bn_fusion import fold_weight_bias
from ..expr import Expr, is_call_function
from ..utils import assign_attr, get_subattr
from .matcher import PatternMatcher
from .pass_base import BackwardPass, register_pass
from .pattern import ExprPattern, any_node, is_const, is_op, is_var
from .utils import get_const_value, register_obj
logger = get_logger(__name__)
@register_pass("FuseAddMul")
class FuseAddMul(BackwardPass):
"""Fold adjacent const add or mul binary operations.
For example, the following code
.. code-block::
x = x + 1
x = 2 + x
x = x * 4
x = x * 0.25
will be changed to
.. code-block::
x = x + 3
"""
name = "FuseAddMul"
required_pass = ["NormElemWise"]
run_once = False
def __init__(self,):
super().__init__()
def _make_pattern(op_0, op_1) -> ExprPattern:
x = is_var().check_users(False)
if op_0 not in [operator.add, operator.mul]:
op_0 = is_op(op_0)
if op_1 not in [operator.add, operator.mul]:
op_1 = is_op(op_1)
pattern = op_0(x, is_const()) | op_0(x, "*")
pattern = op_1(pattern, is_const()) | op_1(pattern, "*")
return pattern
self.pattern_dict = {}
for op, func in zip([operator.add, F.pow], [self.fold_add, self.fold_pow],):
self.pattern_dict[_make_pattern(op, op)] = func
for op_0 in [F.neg, operator.mul]:
for op_1 in [F.neg, operator.mul]:
self.pattern_dict[_make_pattern(op_0, op_1)] = self.fold_mul
def run_transform(self, expr: Expr):
matcher = PatternMatcher()
for pattern, func in self.pattern_dict.items():
res = matcher.match(pattern, expr)
if res:
break
if not res:
return expr
return func(expr)
def _fold_helper(self, expr: Expr, op_c: Callable, op_t: Callable):
const_0 = self.get_const_value(expr)
# todo: support more shape
if isinstance(const_0, Tensor) and const_0._tuple_shape not in [(1,), tuple()]:
return expr
const_1 = self.get_const_value(expr.inputs[0].expr)
if isinstance(const_1, Tensor) and const_1._tuple_shape not in [(1,), tuple()]:
return expr
inp_node = expr.inputs[0].expr.inputs[0]
const = op_c(const_0, const_1)
graph = expr.top_graph
if (const == 1 and op_t in [operator.pow, operator.mul]) or (
const == 0 and op_t in [operator.add]
):
graph.replace_node({expr.outputs[0]: inp_node})
graph.compile()
return expr
with expr.top_graph.insert_exprs():
out_node = op_t(inp_node, const)
graph.replace_node({expr.outputs[0]: out_node})
graph.compile()
return out_node.expr
def fold_add(self, expr: Expr):
return self._fold_helper(expr, operator.add, operator.add)
def fold_mul(self, expr):
return self._fold_helper(expr, operator.mul, operator.mul)
def fold_pow(self, expr):
return self._fold_helper(expr, operator.mul, F.pow)
def get_const_value(self, expr: Expr):
if is_call_function(expr, F.neg):
return -1
if len(expr.inputs) == 2:
value = get_const_value(expr.inputs[1].expr, None)
            assert value is not None, "expected a constant second input"
return value
value = expr.const_val[0][-1]
return value
@register_pass("FuseConvBn")
class FuseConvBn(BackwardPass):
r"""Fuse BN layers into conv2d."""
name = "FuseConvBn"
required_pass = ["AttrToConstant"]
run_once = True
def __init__(self):
super().__init__()
self.used_name = defaultdict(int)
def run_transform(self, expr: Expr):
conv_pat_0 = is_op(M.Conv2d)
conv_pat_1 = is_op(F.conv2d)
bn_pat_0 = is_op(M.BatchNorm2d)(conv_pat_0 | conv_pat_1)
bn_pat_1 = is_op(F.batch_norm)
# inp, running_mean, running_var, weight, bias
bn_inps = (
conv_pat_0 | conv_pat_1,
is_const(),
is_const(),
is_const(),
is_const(),
)
bn_pat = (
(bn_pat_1(*bn_inps[:3]))
| (bn_pat_1(*bn_inps[:4]))
| (bn_pat_1(*bn_inps))
| bn_pat_0
)
matcher = PatternMatcher()
if not matcher.match(bn_pat, expr):
return expr
matched_exprs = matcher.matched_exprs
if conv_pat_0 in matched_exprs:
return self.fold_convm_bn(matched_exprs[conv_pat_0], matched_exprs[bn_pat])
else:
return self.fold_convf_bn(matched_exprs[conv_pat_1], matched_exprs[bn_pat])
def fold_convm_bn(self, conv: Expr, bn: Expr):
mnode, inp_node = conv.inputs[:2]
self_node = mnode.expr.inputs[0]
attr_name = conv.inputs[0].expr.name
graph = conv.top_graph
if len(mnode.users) > 1:
self.used_name[mnode.qualname] += 1
attr_name = "{}_{}".format(attr_name, self.used_name[mnode.qualname])
logger.warning(
"{} is used {} times and its name will be reset to {}.{}".format(
mnode.qualname, len(mnode.users), graph.qualname, attr_name
)
)
conv_module = mnode.owner
weight, bias = conv_module.weight, conv_module.bias
mean, var, gamma, beta, eps = self.get_bn_params(bn)
weight, bias = fold_weight_bias(weight, bias, gamma, beta, mean, var, eps)
new_conv = M.Conv2d(
in_channels=conv_module.in_channels,
out_channels=conv_module.out_channels,
kernel_size=conv_module.kernel_size,
stride=conv_module.stride,
padding=conv_module.padding,
dilation=conv_module.dilation,
groups=conv_module.groups,
bias=conv_module.bias is not None,
conv_mode=conv_module.conv_mode,
compute_mode=conv_module.compute_mode,
name=conv_module.name,
)
new_conv.weight = Parameter(weight)
new_conv.bias = Parameter(bias)
new_conv.training = conv_module.training
assign_attr(new_conv, self_node.owner, attr_name)
with graph.insert_exprs(mnode.expr):
out_node = get_subattr(self_node, attr_name)(inp_node)
graph.replace_node({bn.outputs[0]: out_node})
graph.compile()
out_node.name = conv.outputs[0].name
return out_node.expr
def fold_convf_bn(self, conv: Expr, bn: Expr):
named_args = conv.named_args
weight = get_const_value(named_args["weight"], named_args["weight"])
bias = get_const_value(named_args["bias"], named_args["bias"])
mean, var, gamma, beta, eps = self.get_bn_params(bn)
weight, bias = fold_weight_bias(weight, bias, gamma, beta, mean, var, eps)
named_args["weight"] = weight
named_args["bias"] = bias
graph = conv.top_graph
with graph.insert_exprs():
out_node = F.conv2d(**named_args)
graph.replace_node({bn.outputs[0]: out_node})
graph.compile()
out_node.name = conv.outputs[0].name
return out_node.expr
def get_bn_params(self, bn: Expr):
if is_call_function(bn):
named_args = bn.named_args
mean = get_const_value(
named_args["running_mean"], named_args["running_mean"]
)
var = get_const_value(named_args["running_var"], named_args["running_var"])
gamma = get_const_value(named_args["weight"], named_args["weight"])
beta = get_const_value(named_args["bias"], named_args["bias"])
eps = named_args["eps"]
return mean, var, gamma, beta, eps
else:
bn_module = bn.inputs[0].owner
mean = bn_module.running_mean
var = bn_module.running_var
gamma = bn_module.weight
beta = bn_module.bias
eps = bn_module.eps
return mean, var, gamma, beta, eps
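
# For reference, a common formulation of the conv/BN folding performed above
# (a sketch only; the exact broadcasting inside fold_weight_bias may differ):
#     scale  = gamma / sqrt(var + eps)
#     w_fold = w * scale.reshape(-1, 1, 1, 1)
#     b_fold = (b - mean) * scale + beta    # with b = 0 when the conv has no bias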
| 2.015625 | 2 |
treemodels.py | zgana/manatee | 0 | 12794002 | # treemodels.py
from __future__ import division
import gtk
from debug import *
import numpy as np
import matplotlib.pyplot as plt
class CountingActivitiesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for CountingActivity's in a Log."""
def __init__ (self, log):
gtk.GenericTreeModel.__init__ (self)
self.log = log
@property
def n_rows (self):
return len (self.log.counting_activities)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 2
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.log.counting_activities):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if len (self.log.counting_activities) == 0:
return None
activity = sorted (self.log.counting_activities)[row]
if col == 0:
return activity.name
elif col == 1:
return activity.unit
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class TimingActivitiesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for TimingActivity's in a Log."""
def __init__ (self, log):
gtk.GenericTreeModel.__init__ (self)
self.log = log
@property
def n_rows (self):
return len (self.log.timing_activities)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 1
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.log.timing_activities):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if len (self.log.timing_activities) == 0:
return None
activity = sorted (self.log.timing_activities)[row]
if col == 0:
return activity.name
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class CountingEntriesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for CountingEntry's in a Log."""
def __init__ (self, log, activity_name):
gtk.GenericTreeModel.__init__ (self)
self.log = log
self.activity_name = activity_name
@property
def entries (self):
return self.log.get_entries (self.activity_name)
@property
def n_rows (self):
return len (self.entries)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 3
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.entries):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if self.n_rows == 0:
return None
entry = self.entries[row]
if col == 0:
return str (entry.date)
elif col == 1:
return str (entry.n)
elif col == 2:
return str (entry.error)
elif col == 3:
return str (entry.note)
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class TimingEntriesModel (gtk.GenericTreeModel):
"""Gtk TreeModel for TimingEntry's in a Log."""
def __init__ (self, log, activity_name):
gtk.GenericTreeModel.__init__ (self)
self.log = log
self.activity_name = activity_name
@property
def entries (self):
return self.log.get_entries (self.activity_name)
@property
def n_rows (self):
return len (self.entries)
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 2
def on_get_column_type (self, index):
return str
def on_get_iter (self, path):
if len (self.entries):
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
def fmt (t):
return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}'.format (
t.year, t.month, t.day, t.hour, t.minute)
if self.n_rows == 0:
return None
entry = self.entries[row]
if col == 0:
return fmt (entry.start_time)
elif col == 1:
return fmt (entry.end_time)
elif col == 2:
return str (entry.note)
else:
return None
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
class ActivityDrawModel (gtk.GenericTreeModel):
"""Gtk TreeModel for drawing Activity's in a Log."""
def __init__ (self, activities):
gtk.GenericTreeModel.__init__ (self)
self.activities = sorted (activities)
self.checks = [
False for activity in self.activities]
n = len (self.activities)
##mpl_colors = [
## (0.0, 0.0, 1.0),
## (0.0, 0.5, 0.0),
## (1.0, 0.0, 0.0),
## (0.0, 0.75, 0.75),
## (0.75, 0.0, 0.75),
## (0.75, 0.75, 0.0),
## (0.0, 0.0, 0.0),
## (0.0, 0.0, 1.0) ]
##n_color = len (mpl_colors)
##self.colors = [
## gtk.gdk.Color (*mpl_colors[i % n_color]) for i in xrange (n)]
cm = plt.get_cmap ('rainbow')
self.colors = [
gtk.gdk.Color (*cm (i / n)[:3])
for i in xrange (n)]
self.color_tuples = [
cm (i / n)[:3]
for i in xrange (n)]
self.alphas = [
int (.8 * 65535) for activity in self.activities]
@property
def n_rows (self):
return len (self.activities)
# toggle
def toggle (self, path):
row = int (path)
self.checks[row] = not self.checks[row]
def toggle_all (self):
if np.sum (self.checks) == len (self.checks):
value = False
else:
value = True
for row in xrange (len (self.checks)):
self.checks[row] = value
# Implementation of gtk.GenericTreeModel
def on_get_flags (self):
return gtk.TREE_MODEL_LIST_ONLY
def on_get_n_columns (self):
return 3
def on_get_column_type (self, index):
if index == 0:
return bool
elif index == 1:
return str
elif index == 2:
return gtk.gdk.Pixbuf
def on_get_iter (self, path):
if self.n_rows:
return path[0]
def on_get_path (self, rowref):
return (rowref,)
def on_get_value (self, row, col):
if self.n_rows == 0:
return None
activity = sorted (self.activities)[row]
if col == 0:
return self.checks[row]
elif col == 1:
return activity.name
else:
pb = gtk.gdk.Pixbuf (
gtk.gdk.COLORSPACE_RGB, True, 8, 16, 16)
color = self.colors[row]
color_str = '{0:02x}{1:02x}{2:02x}{3:02x}'.format (
*map (int,
(color.red / 256, color.green / 256, color.blue / 256,
self.alphas[row] / 256)))
pb.fill (int (color_str, 16))
return pb
def on_iter_next (self, rowref):
if rowref == self.n_rows - 1 or self.n_rows == 0:
return None
else:
return rowref + 1
def on_iter_children (self, parent):
return 0 # TODO: is this right?
def on_iter_has_child (self, rowref):
return False
def on_iter_n_children (self, rowref):
if rowref:
return 0
else:
return self.n_rows
def on_iter_nth_child (self, parent, n):
if parent:
return None
elif n < self.n_rows:
return n
else:
return None
def on_iter_parent (self, child):
return None
| 2.609375 | 3 |
python/interpret-core/interpret/ext/glassbox/__init__.py | prateekiiest/interpret | 2,674 | 12794003 | <reponame>prateekiiest/interpret<gh_stars>1000+
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import sys
from interpret.ext.extension_utils import load_class_extensions
from interpret.ext.extension import GLASSBOX_EXTENSION_KEY, _is_valid_glassbox_explainer
load_class_extensions(
sys.modules[__name__], GLASSBOX_EXTENSION_KEY, _is_valid_glassbox_explainer
)
| 1.203125 | 1 |
rlo/scripts/azureml_ray.py | tomjaguarpaw/knossos-ksc | 31 | 12794004 | <reponame>tomjaguarpaw/knossos-ksc
"""
Quick start instructions:
1. Make sure you have access to the knossos Azure subscription.
> az login
> az account set -s Knossos
You'll need to have at least Contributor access to the resource group (knossosamlrg).
You can (not recommended) temporarily elevate yourself to Contributor using Azure
Privileged Identity Management (PIM), but you will need to have said level of access
_for_the_duration_of_the_run_ not just when it starts, or the run will fail to complete.
Alternatively (recommended), you can temporarily elevate yourself to Owner using PIM, and
give yourself permanent Contributor rights to the knossosaml resource group.
2. Install dependencies (better to do it in an isolated conda environment because it may clash)
```
pip install -r test/builds/ci-requirements.txt
```
3. Try to run `fewer_simplify_rules` scenario
```
python scripts/azureml_ray.py fewer_simplify_rules --num_workers=2
```
Note: any additional arguments will be passed to `ray_main.py`.
"""
import argparse
from datetime import datetime
import os
import subprocess
import sys
from tempfile import NamedTemporaryFile
from azureml.core import Workspace, Experiment, Environment
from azureml.core.authentication import AzureCliAuthentication
from azureml.pipeline.core import PipelineRun
# Azure ML Reinforcement Learning imports
from azureml.contrib.train.rl import ReinforcementLearningEstimator, Ray
from azureml.contrib.train.rl import WorkerConfiguration
from azureml_util import combined_source_directory
sys.path.append(os.path.join(os.path.dirname(__file__), "../src"))
from rlo import git_utils
def get_environment():
print("Getting docker password...")
password = (
subprocess.check_output(["bash", "./test/builds/docker_password.sh"])
.decode("ASCII")
.strip()
)
docker_tag = (
subprocess.check_output(["bash", "./test/builds/docker_tag.sh"])
.decode("ASCII")
.strip()
)
print("Docker tag", docker_tag)
env = Environment("my_ray_env")
# ws.register(env) - no, let's not
env.docker.base_image = "rlo_linux_base:" + docker_tag
env.docker.base_image_registry.address = "knossos.azurecr.io"
env.docker.base_image_registry.password = password
env.docker.base_image_registry.username = "00000000-0000-0000-0000-000000000000"
env.python.user_managed_dependencies = True
return env
def check_params(scenario, params):
# import locally so that CI can run without dependencies
from rlo.flags import make_config_for_scenario
from ray_main import ray_run_arguments
if not os.path.isfile(scenario):
scenario = os.path.join("src", "scenarios", f"{scenario}.json")
make_config_for_scenario(scenario, ray_run_arguments, cmdline=params)
def check_best_episodes(run):
""" Checks best_episodes.kso file does not contain NaNs
Regression test for #1438
"""
head_run = next(rn for rn in run.get_children() if rn.id.endswith("_head"))
kso_file = next(
f for f in head_run.get_file_names() if f.endswith("best_episodes.kso")
)
with NamedTemporaryFile(mode="r") as f:
head_run.download_file(kso_file, output_file_path=f.name)
content = f.read()
print(f"Downloaded {kso_file} (length={len(content)})")
if "NaN" in content:
lines = [line for line in content.split("\n") if "NaN" in line]
raise ValueError(
"Expected no NaNs, but found the following lines:\n" + "\n".join(lines)
)
print("Found no NaNs!")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("scenario", type=str, help="Scenario to run")
parser.add_argument(
"--aml_config_file",
type=str,
default="aml_config.json",
help="Path to the Azure ML config file",
)
parser.add_argument(
"--head_cluster",
type=str,
default="ray-head",
help="Name of the headnode cluster",
)
parser.add_argument(
"--body_cluster",
type=str,
default="ray-p40",
help="Name of the worker cluster",
)
parser.add_argument(
"--num_workers", type=int, default=10, help="Number of worker nodes",
)
parser.add_argument(
"--enable_cli_auth",
action="store_true",
help="Enable AzureCliAuthentication (for CI)",
)
parser.add_argument(
"--wait_for_completion",
action="store_true",
help="Wait for the completion of the run",
)
parser.add_argument(
"--no-check-params",
action="store_false",
dest="check_params",
help="Don't check if the parameters are valid locally before submission",
)
args, additional_args = parser.parse_known_args()
run_id = "{}_{}".format(datetime.now().strftime("%Y-%m-%d-%H-%M-%S"), args.scenario)
params = [
args.scenario,
"--run_id",
run_id,
"--gitlog",
git_utils.get_git_revision_short_hash(),
] + additional_args
# Check arguments are valid
if args.check_params:
check_params(args.scenario, params)
if args.enable_cli_auth:
cli_auth = AzureCliAuthentication()
else:
cli_auth = None
ws = Workspace.from_config(args.aml_config_file, auth=cli_auth)
head_compute = ws.compute_targets[args.head_cluster]
body_compute = ws.compute_targets[args.body_cluster]
env = get_environment()
# Specify the Ray worker configuration
worker_conf = WorkerConfiguration(
compute_target=body_compute,
node_count=args.num_workers,
use_gpu=True,
environment=env,
)
with combined_source_directory() as source_dir:
# This defines the head configuration - note, no GPU required, the head is expected to be only a co-ordinator.
est = ReinforcementLearningEstimator(
source_directory=source_dir,
compute_target=head_compute,
environment=env,
entry_script="ray_main.py",
script_params=dict(
[(p, "") for p in params]
+ [
# This tells ray_main.py to connect to an existing Ray/redis server rather than start its own
("--address", "auto"),
# The command-line ray process run by the AzureML RL framework seems to default to this.
# This can be seen by "pip install ray==0.8.7; ray start --head".
("--redis_token", "<PASSWORD>"),
# Ensure workers are using GPUs
("--force_gpu", ""),
]
),
rl_framework=Ray(version="0.8.3"),
worker_configuration=worker_conf,
use_gpu=False,
cluster_coordination_timeout_seconds=3600, # How long to wait for whole cluster to start
max_run_duration_seconds=40 * 3600,
# Allow the docker container Ray runs in to make full use
# of the shared memory available from the host OS.
shm_size=24 * 1024 * 1024 * 1024,
)
# This runs the estimator, but doesn't do anything to save the outputs (PNGs or events.json)
experiment = Experiment(ws, args.scenario)
run = experiment.submit(config=est)
print(run)
print("Run submitted!", run.get_portal_url())
if args.wait_for_completion:
run.wait_for_completion()
# Check no NaN in best_episodes.kso
check_best_episodes(run)
print("Run succeeded!")
print(
"Download from container azureml path ExperimentRun/dcid.{run_id}/outputs"
)
print("Note: run_id is the ID of the head step:")
pipeline_run = PipelineRun(experiment, run.id)
child_ids = [child_run.id for child_run in pipeline_run.get_steps()]
print([id for id in child_ids if "head" in id])
if __name__ == "__main__":
main()
| 1.960938 | 2 |
awacs/devicefarm.py | alanjjenkins/awacs | 358 | 12794005 | <filename>awacs/devicefarm.py
# Copyright (c) 2012-2021, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "AWS Device Farm"
prefix = "devicefarm"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
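# Usage sketch (hedged: `Allow` and `Statement` are assumed to be importable from
# awacs.aws, as in other awacs service modules); the Action helpers defined below
# can be combined into an IAM policy statement like so:
#
#   from awacs.aws import Allow, Statement
#   Statement(
#       Effect=Allow,
#       Action=[Action("ListProjects"), Action("GetProject")],
#       Resource=["*"],
#   )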
CreateDevicePool = Action("CreateDevicePool")
CreateInstanceProfile = Action("CreateInstanceProfile")
CreateNetworkProfile = Action("CreateNetworkProfile")
CreateProject = Action("CreateProject")
CreateRemoteAccessSession = Action("CreateRemoteAccessSession")
CreateTestGridProject = Action("CreateTestGridProject")
CreateTestGridUrl = Action("CreateTestGridUrl")
CreateUpload = Action("CreateUpload")
CreateVPCEConfiguration = Action("CreateVPCEConfiguration")
DeleteDevicePool = Action("DeleteDevicePool")
DeleteInstanceProfile = Action("DeleteInstanceProfile")
DeleteNetworkProfile = Action("DeleteNetworkProfile")
DeleteProject = Action("DeleteProject")
DeleteRemoteAccessSession = Action("DeleteRemoteAccessSession")
DeleteRun = Action("DeleteRun")
DeleteTestGridProject = Action("DeleteTestGridProject")
DeleteUpload = Action("DeleteUpload")
DeleteVPCEConfiguration = Action("DeleteVPCEConfiguration")
GetAccountSettings = Action("GetAccountSettings")
GetDevice = Action("GetDevice")
GetDeviceInstance = Action("GetDeviceInstance")
GetDevicePool = Action("GetDevicePool")
GetDevicePoolCompatibility = Action("GetDevicePoolCompatibility")
GetInstanceProfile = Action("GetInstanceProfile")
GetJob = Action("GetJob")
GetNetworkProfile = Action("GetNetworkProfile")
GetOfferingStatus = Action("GetOfferingStatus")
GetProject = Action("GetProject")
GetRemoteAccessSession = Action("GetRemoteAccessSession")
GetRun = Action("GetRun")
GetSuite = Action("GetSuite")
GetTest = Action("GetTest")
GetTestGridProject = Action("GetTestGridProject")
GetTestGridSession = Action("GetTestGridSession")
GetUpload = Action("GetUpload")
GetVPCEConfiguration = Action("GetVPCEConfiguration")
InstallToRemoteAccessSession = Action("InstallToRemoteAccessSession")
ListArtifacts = Action("ListArtifacts")
ListDeviceInstances = Action("ListDeviceInstances")
ListDevicePools = Action("ListDevicePools")
ListDevices = Action("ListDevices")
ListInstanceProfiles = Action("ListInstanceProfiles")
ListJobs = Action("ListJobs")
ListNetworkProfiles = Action("ListNetworkProfiles")
ListOfferingPromotions = Action("ListOfferingPromotions")
ListOfferingTransactions = Action("ListOfferingTransactions")
ListOfferings = Action("ListOfferings")
ListProjects = Action("ListProjects")
ListRemoteAccessSessions = Action("ListRemoteAccessSessions")
ListRuns = Action("ListRuns")
ListSamples = Action("ListSamples")
ListSuites = Action("ListSuites")
ListTagsForResource = Action("ListTagsForResource")
ListTestGridProjects = Action("ListTestGridProjects")
ListTestGridSessionActions = Action("ListTestGridSessionActions")
ListTestGridSessionArtifacts = Action("ListTestGridSessionArtifacts")
ListTestGridSessions = Action("ListTestGridSessions")
ListTests = Action("ListTests")
ListUniqueProblems = Action("ListUniqueProblems")
ListUploads = Action("ListUploads")
ListVPCEConfigurations = Action("ListVPCEConfigurations")
PurchaseOffering = Action("PurchaseOffering")
RenewOffering = Action("RenewOffering")
ScheduleRun = Action("ScheduleRun")
StopJob = Action("StopJob")
StopRemoteAccessSession = Action("StopRemoteAccessSession")
StopRun = Action("StopRun")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateDeviceInstance = Action("UpdateDeviceInstance")
UpdateDevicePool = Action("UpdateDevicePool")
UpdateInstanceProfile = Action("UpdateInstanceProfile")
UpdateNetworkProfile = Action("UpdateNetworkProfile")
UpdateProject = Action("UpdateProject")
UpdateTestGridProject = Action("UpdateTestGridProject")
UpdateUpload = Action("UpdateUpload")
UpdateVPCEConfiguration = Action("UpdateVPCEConfiguration")
| 2.265625 | 2 |
tools/tube.py | fsanges/glTools | 165 | 12794006 | <filename>tools/tube.py<gh_stars>100-1000
import maya.cmds as mc
import glTools.tools.controlBuilder
import glTools.utils.attach
import glTools.utils.base
import glTools.utils.attribute
import glTools.utils.component
import glTools.utils.stringUtils
def buildProfile(radius=1,spans=8):
'''
Create tube profile curve (circle)
@param radius: Profile radius
@type radius: float
@param spans: Number of profile curve spans
@type spans: int
'''
crv = mc.circle(c=[0,0,0],nr=[0,0,1],sw=360,r=radius,s=spans,d=3,ch=False)
return crv
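# Usage sketch (assumes a running Maya session; names and values are illustrative):
#   profile = buildProfile(radius=0.5, spans=12)[0]   # mc.circle returns a list
#   offsetCrv = buildOffsetCurve('path_crv')          # 'path_crv' is a hypothetical curve name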
def buildOffsetCurve(crv):
'''
'''
prefix = glTools.utils.stringUtils.stripSuffix(crv)
offsetCrvShape = mc.createNode('nurbsCurve',n=prefix+'_offsetCrvShape')
offsetCrv = mc.listRelatives(offsetCrvShape,p=True,pa=True)[0]
mc.connectAttr(crv+'.worldSpace[0]',offsetCrvShape+'.create',f=True)
return offsetCrv
def buildSubCurveDetach(crv):
'''
'''
# Get Prefix
prefix = glTools.utils.stringUtils.stripSuffix(crv)
# Prep Curve
mc.rebuildCurve(crv,ch=False,rpo=True,rt=0,end=1,kr=0,kcp=1,kep=1,kt=0,s=0,d=3)
mc.delete(crv,ch=True)
# Detach Curve
detach = mc.detachCurve(crv,p=(0.001,0.999),k=(0,1,0),rpo=False)
detachCrv = detach[1]
detachNode = detach[-1]
mc.delete(detach[0],detach[2])
# Connect Detach Min/Max
	mc.addAttr(detachCrv,ln='min',min=0,max=0.999,dv=0,k=True)
	mc.addAttr(detachCrv,ln='max',min=0.001,max=1,dv=1,k=True)
	mc.addAttr(detachCrv,ln='offset',min=-1,max=1,dv=1,k=True)
	minAdd = mc.createNode('addDoubleLinear',n=prefix+'_minAdd_addDoubleLinear')
	maxAdd = mc.createNode('addDoubleLinear',n=prefix+'_maxAdd_addDoubleLinear')
	minMaxClamp = mc.createNode('clamp',n=prefix+'_minMax_clamp')
	mc.connectAttr(detachCrv+'.min',minAdd+'.input1',f=True)
	mc.connectAttr(detachCrv+'.offset',minAdd+'.input2',f=True)
	mc.connectAttr(detachCrv+'.max',maxAdd+'.input1',f=True)
	mc.connectAttr(detachCrv+'.offset',maxAdd+'.input2',f=True)
mc.connectAttr(minAdd+'.output',minMaxClamp+'.inputR',f=True)
mc.connectAttr(maxAdd+'.output',minMaxClamp+'.inputB',f=True)
mc.setAttr(minMaxClamp+'.min',0,0,0.0001)
mc.setAttr(minMaxClamp+'.max',0.9999,0,0)
mc.connectAttr(minMaxClamp+'.outputR',detachNode+'.parameter[0]',f=True)
mc.connectAttr(minMaxClamp+'.outputB',detachNode+'.parameter[1]',f=True)
# Return Result
return detachCrv
def buildCurveRig(crv):
'''
'''
# Get Prefix
prefix = glTools.utils.stringUtils.stripSuffix(crv)
# Build Joints
pts = glTools.utils.base.getPointArray(crv)
jnts = []
mc.select(cl=True)
for i in range(len(pts)):
ind = glTools.utils.stringUtils.alphaIndex(i)
jnt = mc.joint(p=pts[i],n=prefix+'_fk'+ind+'_jnt')
mc.joint()
mc.select(jnt)
# Orient Joints
# Build FK
# Build Offset
def buildSubCurve(crv):
'''
'''
# Build Sub Curve
prefix = glTools.utils.stringUtils.stripSuffix(crv)
subCrvShape = mc.createNode('nurbsCurve',n=prefix+'_subCrvShape')
subCrv = mc.listRelatives(subCrvShape,p=True,pa=True)[0]
subCrvNode = mc.createNode('subCurve',n=prefix+'_subCurve')
# Connect Sub Curve
mc.connectAttr(crv+'.worldSpace[0]',subCrvNode+'.inputCurve',f=True)
mc.connectAttr(subCrvNode+'.outputCurve',subCrvShape+'.create',f=True)
# Connect Sub Curve Min/Max
mc.addAttr(subCrv,ln='min',min=0,max=0.999,dv=0,k=True)
mc.addAttr(subCrv,ln='max',min=0.001,max=1,dv=1,k=True)
mc.connectAttr(subCrv+'.min',subCrvNode+'.minValue',f=True)
mc.connectAttr(subCrv+'.max',subCrvNode+'.maxValue',f=True)
mc.setAttr(subCrvNode+'.relative',1)
# Return Result
return subCrv
def resetCV(cvs):
'''
'''
# Check CVs
if not cvs: return None
cvList = mc.filterExpand(cvs,ex=True,sm=28)
# Reset CVs
for cv in cvList:
crv = mc.ls(cv,o=True)[0]
i = glTools.utils.component.index(cv)
mc.setAttr(crv+'.controlPoints['+i+'].xValue',0)
mc.setAttr(crv+'.controlPoints['+i+'].yValue',0)
mc.setAttr(crv+'.controlPoints['+i+'].zValue',0)
def attachCurve(base,crv,cleanup=True):
'''
'''
# Get Spans
spans = mc.getAttr(crv+'.spans')
mc.setAttr(base+'.spans',spans)
# Match Shape
shapeOrig = base+'ShapeOrig'
mc.setAttr(shapeOrig+'.intermediateObject',0)
mc.rebuildCurve(shapeOrig,ch=True,rpo=True,rt=0,end=1,kr=0,kcp=0,kep=1,kt=0,s=spans,d=3)
bs = mc.blendShape(crv,shapeOrig)[0]
mc.setAttr(bs+'.w[0]',1)
# Delete Orig
if cleanup:
mc.delete(shapeOrig,ch=True)
mc.delete(crv)
# Restore Intermediate Shape
mc.setAttr(shapeOrig+'.intermediateObject',1)
# Return Result
return
def attachToCurveParam(ctrl,crv):
'''
'''
grp = mc.listRelatives(ctrl,p=True,pa=True)[0]
param = mc.getAttr(ctrl+'.param')
glTools.utils.attach.attachToCurve(crv,grp,param,uAttr='param')
mc.connectAttr(ctrl+'.param',grp+'.param',f=True)
def addDropoffControls(locs,prefix):
'''
'''
ctrlBuilder = glTools.tools.controlBuilder.ControlBuilder()
for i in range(len(locs)):
pre = prefix+glTools.utils.stringUtils.stripSuffix(locs[i])
wire = mc.listConnections(locs[i]+'.param',s=False,d=True)[0]
param = mc.getAttr(locs[i]+'.param')
ind = glTools.utils.attribute.getConnectionIndex(locs[i]+'.param')
ctrl = ctrlBuilder.create('sphere',pre+'_ctrl')
grp = glTools.utils.base.group(ctrl)
mc.connectAttr(locs[i]+'.worldPosition[0]',grp+'.translate',f=True)
mc.addAttr(ctrl,ln='param',min=0,max=1,dv=param,k=True)
mc.addAttr(ctrl,ln='bulge',min=-1,dv=0,k=True)
mc.connectAttr(ctrl+'.param',locs[i]+'.param['+str(ind)+']',f=True)
mc.connectAttr(ctrl+'.bulge',wire+'.wireLocatorEnvelope['+str(ind)+']',f=True)
def buildTube(crv,profile=None,addCage=False,prefix=None):
'''
'''
	# Build a default profile curve if none was given (assumption: a unit circle profile is acceptable)
	if not profile:
		profile = buildProfile()[0]
	# Nurbs Tube - extrude the profile curve along the path curve
	nurbsTube = mc.extrude(
		profile,
		crv,
		ch = True,
		rn = False,
		po = 0,
		et = 2,
		ucp = 1,
		fpt = 1,
		upn = 1,
		rotation = 0,
		scale = 1,
		rsp = 1
	)
	# Polygon Tube
	polyTube = mc.extrude(
		profile,
		crv,
		ch = True,
		rn = False,
		po = 1,
		et = 2,
		ucp = 1,
		fpt = 1,
		upn = 1,
		rotation = 0,
		scale = 1,
		rsp = 1
	)
	# Return Result
	return nurbsTube,polyTube
| 2.09375 | 2 |
doba/structures/__init__.py | marverix/doba | 0 | 12794007 | from .BackupPath import BackupPath
from .Container import Container
from .DobaContainersConfig import DobaContainersConfig
from .Image import Image
from .Port import Port
from .Volume import Volume
| 1.03125 | 1 |
utilities/data_io.py | mahmoud-al-najar/DSPEB | 0 | 12794008 | <filename>utilities/data_io.py
import os
import time
import gdal
import pyproj
import tarfile
import warnings
import numpy as np
import xarray as xr
import pandas as pd
import netCDF4 as nc
import xml.etree.ElementTree as et
import datagen_config as cfg
from datetime import datetime, timedelta
from utilities.common import isin_tile
# from utilities.wrappers import Sentinel2Tile, Sentinel2Safe
def get_cloud_coverage(path):
"""
Find the cloud coverage of the S2-L1C image in the xml file
:param path:(str) path to the .SAFE repository of the image
:return: cloud_coverage (float)
"""
xml = os.path.join(path, 'MTD_MSIL1C.xml')
tree = et.parse(xml)
root = tree.getroot()
cloud_coverage = float(root[3][0].text)
return cloud_coverage
def get_top_left_corner_coordinates_for_image(path):
"""
Find the x, y coordinates of the top left corner of the S2-L1C image in the WGS84 coordinate system of the tile
and its epsg reference number
:param path:(str) path to the .SAFE repository of the image
:return: x_corner, y_corner, epsg (int, int, str)
"""
xml = os.path.join(path, 'GRANULE', os.listdir(os.path.join(path, 'GRANULE'))[0], 'MTD_TL.xml')
tree = et.parse(xml)
root = tree.getroot()
x_corner = int(root[1][0][5][0].text)
y_corner = int(root[1][0][5][1].text)
epsg = root[1][0][1].text
return x_corner, y_corner, epsg
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
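# Usage sketch (hypothetical paths):
#   make_tarfile('/tmp/logs.tar.gz', '/data/unity/logs_Mon_Jul_13_08_29_01_2020')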
def get_tidal_elevation_for_image(safe):
date = safe[11:19]
time = safe[20:26]
tidal_path = cfg.in_path_tidal
df = xr.open_dataset(tidal_path).to_dataframe()
tid = df[df['S2_fname'] == safe[0:len(safe) - 5]]
if tid['prediction_at_ERA5_point'].empty:
print(f' {date} - {time}: no tidal elevation data')
return None
else:
if np.isnan(tid['prediction_at_ERA5_point'].values[0]):
delta_t = timedelta(hours=1)
timing = datetime.strptime(date + time, '%Y%m%d%H%M%S')
timing_start = timing - delta_t
timing_end = timing + delta_t
timing_start = timing_start.strftime('%Y-%m-%d %H:%M:%S')
timing_end = timing_end.strftime('%Y-%m-%d %H:%M:%S')
mask = (df['S2_time'] > timing_start) & (df['S2_time'] < timing_end)
timing_search = df.loc[mask]
b = 1 # +/- 1 degree to search for the tidal information (around the actual tile center)
a = timing_search[(timing_search['S2_lon'] < (tid['S2_lon'].values[0] + b)) &
(timing_search['S2_lon'] > (tid['S2_lon'].values[0] - b)) &
(timing_search['S2_lat'] < (tid['S2_lat'].values[0] + b)) &
(timing_search['S2_lat'] > (tid['S2_lat'].values[0] - b))]
tidal = np.nanmean(a['prediction_at_ERA5_point'].values)
return tidal
else:
tidal = tid['prediction_at_ERA5_point'].values[0]
return tidal
# def get_geo_transform_for_image(s2_path, tile_id, safe_id):
# safe_path = os.path.join(s2_path, tile_id, safe_id)
# date = safe_id[11:19]
# t_time = safe_id[20:26]
# a = os.listdir(os.path.join(safe_path, 'GRANULE'))
# path = os.path.join(safe_path, 'GRANULE', a[0], 'IMG_DATA', f'T{tile_id}_{date}T{t_time}_')
# ds = gdal.Open(path + 'B04.jp2')
# return ds.GetGeoTransform()
def parse_sentinel2_imagesafe_metadata(safe_path):
from utilities.wrappers import Sentinel2Safe # TODO: UNCOMMENT MAIN IMPORT AND COME BACK TO THIS
safe_id = safe_path.split('/')[-1]
print(f'parse_sentinel2_imagesafe_metadata() safe_id: {safe_id}')
date = safe_id[11:19]
t_time = safe_id[20:26]
tidal = get_tidal_elevation_for_image(safe_id)
temp_safe = Sentinel2Safe()
temp_safe.date = date
temp_safe.time = t_time
temp_safe.s2_path = safe_path
x, y, epsg = get_top_left_corner_coordinates_for_image(safe_path)
temp_safe.corners = (x, y)
temp_safe.epsg = epsg
if tidal:
temp_safe.tidal_elevation = tidal
else:
warnings.warn(f'No tidal elevation data for safe: {safe_id}')
temp_safe.tidal_elevation = 0
return temp_safe
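# Usage sketch (hypothetical .SAFE path; real paths are built from cfg.in_path_s2):
#   safe = parse_sentinel2_imagesafe_metadata('/data/s2/31TCJ/S2A_MSIL1C_20200713T082901_T31TCJ.SAFE')
#   print(safe.date, safe.time, safe.corners, safe.epsg, safe.tidal_elevation)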
def parse_sentinel2_tiles_metadata():
"""
This function returns the info of the nb_max_date tiles with the smallest cloud coverage
:return: corners, paths, dates, epsgs: infos of the selected tiles
"""
from utilities.wrappers import Sentinel2Tile # TODO: UNCOMMENT MAIN IMPORT AND COME BACK TO THIS
sentinel2_tiles = []
i = -1
for tile in cfg.tiles:
temp_tile = Sentinel2Tile()
temp_tile.id = tile
print(f'--------------TILE: {temp_tile.id}')
n = 0
i += 1 # tile index
path_t = os.path.join(cfg.in_path_s2, tile)
safes = os.listdir(path_t)
temp = []
for safe in safes:
if safe.endswith('SAFE'):
temp.append(safe)
safes = temp
for safe in safes:
path_s = os.path.join(path_t, safe)
cloud_coverage = "{0:0.2f}".format(get_cloud_coverage(path_s))
if float(cloud_coverage) < cfg.max_cc and n < cfg.nb_max_date:
temp_safe = parse_sentinel2_imagesafe_metadata(path_s)
if temp_safe:
temp_tile.safes.append(temp_safe)
print(f'safe.corners: {temp_safe.corners}')
print(f'safe.s2_path: {temp_safe.s2_path}')
print(f'safe.date: {temp_safe.date}')
print(f'safe.time: {temp_safe.time}')
print(f'safe.epsg: {temp_safe.epsg}')
print(f'safe.tidal_elevation: {temp_safe.tidal_elevation}')
if temp_safe.epsg not in temp_tile.epsgs:
temp_tile.epsgs.append(temp_safe.epsg)
if len(temp_tile.epsgs) > 1:
warnings.warn(f'==================================== Tile {temp_tile.id}: multiple epsg\'s')
exit()
if temp_tile.corner['x'] and temp_tile.corner['y']:
if temp_safe.corners[0] != temp_tile.corner['x'] or temp_safe.corners[1] != temp_tile.corner['y']:
id = temp_tile.id
warnings.warn(
f'============================================ Tile {id}: multiple corners')
exit()
else:
pass # Different snapshots of the same tile should have the exact same corners
else:
temp_tile.corner['x'] = temp_safe.corners[0]
temp_tile.corner['y'] = temp_safe.corners[1]
n += 1
sentinel2_tiles.append(temp_tile)
return sentinel2_tiles
def parse_sentinel2_tiles_metadata_from_datalake():
"""
This function returns the info of the nb_max_date tiles with the smallest cloud coverage
:return: corners, paths, dates, epsgs: infos of the selected tiles
"""
from utilities.wrappers import Sentinel2Tile # TODO: UNCOMMENT MAIN IMPORT AND COME BACK TO THIS
sentinel2_tiles = []
with open(cfg.in_path_safes_list, 'r') as file:
safe_ids = file.read().splitlines()
for safe_id in safe_ids:
year = safe_id[11:15]
month = safe_id[15:17]
day = safe_id[17:19]
tile_id = safe_id[39:44]
temp_tile = Sentinel2Tile()
temp_tile.id = tile_id
if temp_tile in sentinel2_tiles:
in_tiles = True
temp_tile = sentinel2_tiles[sentinel2_tiles.index(temp_tile)]
else:
in_tiles = False
print(f'--------------TILE: {temp_tile.id}')
path_t = os.path.join(cfg.in_path_datalake_s2, temp_tile.id)
path_s = os.path.join(path_t, year, month, day, safe_id + '.SAFE')
cloud_coverage = "{0:0.2f}".format(get_cloud_coverage(path_s))
if float(cloud_coverage) < cfg.max_cc:
temp_safe = parse_sentinel2_imagesafe_metadata(path_s)
if temp_safe:
temp_tile.safes.append(temp_safe)
print(f'safe.corners: {temp_safe.corners}')
print(f'safe.s2_path: {temp_safe.s2_path}')
print(f'safe.date: {temp_safe.date}')
print(f'safe.time: {temp_safe.time}')
print(f'safe.epsg: {temp_safe.epsg}')
print(f'safe.tidal_elevation: {temp_safe.tidal_elevation}')
if temp_safe.epsg not in temp_tile.epsgs:
temp_tile.epsgs.append(temp_safe.epsg)
if len(temp_tile.epsgs) > 1:
warnings.warn(f'==================================== Tile {temp_tile.id}: multiple epsg\'s')
exit()
if temp_tile.corner['x'] and temp_tile.corner['y']:
if temp_safe.corners[0] != temp_tile.corner['x'] or temp_safe.corners[1] != temp_tile.corner['y']:
id = temp_tile.id
warnings.warn(
f'============================================ Tile {id}: multiple corners')
exit()
else:
pass # Different snapshots of the same tile should have the exact same corners
else:
temp_tile.corner['x'] = temp_safe.corners[0]
temp_tile.corner['y'] = temp_safe.corners[1]
if not in_tiles:
sentinel2_tiles.append(temp_tile)
return sentinel2_tiles
def datagen_get_bathy_xyz(sentinel2tile_list):
"""
This function returns the useful bathy points according to 2 criteria :
distance to each others (not to much redundancy) & not to close to tile borders & depth limited (+ & -)
:param path: path of the bathymetry
:param x: x coordinates of already kept bathy points
:param y: y coordinates of already kept bathy points
:param z: z coordinates of already kept bathy points
:return: (x, y, z): coordinates of the bathy points kept, appended to the previous ones
"""
precision = 10
nb_tiles = len(cfg.tiles)
# first bathy pts filtering
    # independent lists per tile (avoids the shared-reference pitfall of [[]] * n)
    x = [[] for _ in range(nb_tiles)]
    y = [[] for _ in range(nb_tiles)]
    z = [[] for _ in range(nb_tiles)]
    proj = [None] * nb_tiles
for i in range(nb_tiles):
tile = sentinel2tile_list[i]
if len(tile.epsgs) == 1:
proj[i] = pyproj.Proj(proj='utm', init=tile.epsgs[0], ellps='WGS84')
elif len(tile.epsgs) > 1:
warnings.warn(f'AGAIN =================================================== Tile {tile.id}: multiple epsg\'s')
exit()
else:
warnings.warn(
f'THIS SHOULD NEVER HAPPEN ====================================== didn\'t find tile {tile.id}\'s epsg?')
exit()
bathy_points = pd.DataFrame()
for directory in os.listdir(cfg.in_path_bathy):
path = f'{os.path.join(cfg.in_path_bathy, directory, directory)}.xyz'
df = pd.read_csv(path, header=2)
bathy_points = bathy_points.append(df, ignore_index=True)
bins = np.linspace(cfg.depth_lim_min, 100, 10)
max_depth = bathy_points['depth(m - down positive - LAT)'].max()
if max_depth > 100:
bins = np.append(bins, max_depth)
labels = np.linspace(0, len(bins) - 2, len(bins) - 1)
bathy_points['depth label'] = pd.cut(bathy_points['depth(m - down positive - LAT)'], bins=bins, labels=labels)
print(bathy_points['depth label'].value_counts(sort=False))
n_tot = 0
nb_tot = 0
for label in labels:
bathy_label_points = bathy_points[bathy_points['depth label'] == label]
if len(bathy_label_points) > 0:
n = 0
nb = 0
t = time.time()
while nb < cfg.nb_max_pt_per_tile and n < cfg.line_max_read:
n += 1
n_tot += 1
chosen_idx = np.random.choice(len(bathy_label_points))
random_point = bathy_label_points.iloc[chosen_idx]
if cfg.depth_lim_min <= random_point['depth(m - down positive - LAT)'] <= cfg.depth_lim_max:
if not len(sentinel2tile_list) == 0:
for i in range(nb_tiles):
tile = sentinel2tile_list[i]
if tile.epsgs[0] != 'EPSG:32628':
# proj to the good coordinate system & round to the tenth to fit on the sentinel 2 max precision
x_point, y_point = proj[i](random_point['long(DD)'], random_point['lat(DD)'])
else:
# bathy and s2 are already on the same coordinate system
x_point, y_point = random_point['long(DD)'], random_point['lat(DD)']
x_point = int(round(x_point, -1))
y_point = int(round(y_point, -1))
# get the indices of the points to close to the actual point
ind = np.where(np.abs(np.array(x[i], copy=False) - x_point) < precision)
ind = np.where(np.abs(np.array(y[i], copy=False)[ind] - y_point) < precision)
# keep the point only if it is not to close to others
if len(ind[0]) == 0:
if isin_tile(x_point, y_point, tile.corner['x'], tile.corner['y']):
nb += 1
nb_tot += 1
x[i] = np.append(x[i], [x_point])
y[i] = np.append(y[i], [y_point])
z[i] = np.append(z[i], random_point['depth(m - down positive - LAT)'])
if n % 1000 == 0:
print(n)
print('label :', label, nb, '/', n, time.time() - t)
else:
print(f'len(bathy_label_points) at label:{label} == 0')
print('nb of lines read/selected :', n_tot, '/', nb_tot)
return x, y, z
def __flip_bathymetry_y_axis(arr):
unique_values = np.unique(arr)
flipped = np.empty(np.array(arr).shape)
for i in range(len(arr)):
flipped[i] = np.flipud(unique_values)[np.where(unique_values == arr[i])]
return flipped
def read_nc_file(path_to_nc, projection_in=None, projection_out=None):
ncd = nc.Dataset(path_to_nc)
print(ncd)
n_x = len(ncd.variables['x'])
n_y = len(ncd.variables['y'])
n_k = len(ncd.variables['kKeep'])
n_t = len(ncd.variables['time'])
out_x = []
out_y = []
out_z = []
n_err = 0
n_good = 0
n_all = 0
n_dash = 0
for i_t in range(n_t):
for i_x in range(n_x):
for i_y in range(n_y):
ncd_time = ncd.variables['time'][i_t]
ncd_x = ncd.variables['x'][i_x]
ncd_y = ncd.variables['y'][i_y]
                z = None
                for i_k in range(n_k):
                    ncd_z = ncd['depth'][i_y, i_x, i_k, i_t]
                    n_all += 1
                    if ncd_z != '--':
                        if z is None:
                            z = ncd_z
                            n_good += 1
                        else:
                            # average duplicate depth estimates across the kKeep axis
                            z = (z + ncd_z) / 2
                            n_err += 1
                    else:
                        n_dash += 1
                # append one (possibly averaged) depth per grid point
                if z is not None:
                    out_x.append(ncd_x)
                    out_y.append(ncd_y)
                    out_z.append(z)
if n_all % 5000 == 0:
print(f'all: {n_all}, keep: {n_good}, errs: {n_err}, dash: {n_dash}')
fn = path_to_nc.split("/")[-1]
print(f'Filename: {fn}')
print(f' Total: {n_all}, 1k: {n_good}, nk: {n_err}, --: {n_dash}')
print(f' len(x): {len(out_x)}, len(y): {len(out_y)}, len(z): {len(out_z)}')
print(f' Creating CSV file for {fn}...')
out_y = __flip_bathymetry_y_axis(out_y)
return out_x, out_y, out_z
def read_fxyz_file(path_to_xyz, projection_in=None, projection_out=None):
df = pd.read_csv(path_to_xyz, header=0)
lng = np.array(df.lng)
lat = np.array(df.lat)
z = np.array(df.z)
if projection_in and projection_out:
proj = pyproj.Proj(proj='utm', init=projection_out, ellps=projection_in)
lng, lat = proj(lng, lat)
return lng, lat, z
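# Usage sketch (hypothetical file and EPSG code):
#   lng, lat, z = read_fxyz_file('bathy/points.xyz',
#                                projection_in='WGS84', projection_out='EPSG:32630')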
| 2.34375 | 2 |
photon/stats.py | logarithm/photon-python | 2 | 12794009 | <reponame>logarithm/photon-python<gh_stars>1-10
"""
Copyright 2015 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class TrafficStats:
def __init__(self):
self.packageHeaderSize = 0
self.reliableCommandCount = 0
self.unreliableCommandCount = 0
self.fragmentCommandCount = 0
self.controlCommandCount = 0
self.totalPacketCount = 0
self.totalCommandsInPackets = 0
self.reliableCommandBytes = 0
self.unreliableCommandBytes = 0
self.fragmentCommandBytes = 0
self.controlCommandBytes = 0
def total_command_count(self):
return self.reliableCommandCount + self.unreliableCommandCount + \
self.fragmentCommandCount + self.controlCommandCount
def total_command_bytes(self):
return self.reliableCommandBytes + self.unreliableCommandBytes + \
self.fragmentCommandBytes + self.controlCommandBytes
def total_packet_bytes(self):
return self.total_command_bytes() + self.totalPacketCount * self.packageHeaderSize
def count_control_command(self, size):
self.controlCommandBytes += size
self.controlCommandCount += 1
def count_reliable_op_command(self, size):
self.reliableCommandBytes += size
self.reliableCommandCount += 1
def count_unreliable_op_command(self, size):
self.unreliableCommandBytes += size
self.unreliableCommandCount += 1
def count_fragment_op_command(self, size):
self.fragmentCommandBytes += size
self.fragmentCommandCount += 1
def __str__(self, *args, **kwargs):
return "TotalPacketBytes: {}\nTotalCommandBytes: {}\nTotalPacketCount: {}\nTotalCommandsInPackets: {}" \
.format(self.total_packet_bytes(), self.total_command_bytes(),
self.totalPacketCount, self.totalCommandsInPackets) | 1.992188 | 2 |
11-20/11-20/_11_20.py | JuYoon-Kim/leaning_python | 0 | 12794010 | <gh_stars>0
num = {2, 1, 3}
for x in num:
    print(x, end="")
    for j in num:
        print(j, end="")
        for k in num:
            print(k, end="")
s = 'Never put off till tomorrow what you can do today.'
print(s.split())
print(s.isalpha())
print(s.isdigit())
print(s.upper())
print(s.lower())
print(s.replace('put','off'))
print(s.startswith('Never'))
a = input()
b = len(a)
c = ''
for i in range(b - 1, -1, -1):
c = c + a[i]
if c == a:
    print("It is a palindrome.")
else:
    print("It is not a palindrome.")
class Counter:
def reset(self):
self.count = 0
def incr(self):
self.count += 1
def get(self):
return self.count
a = Counter()
a.reset()
for i in range(5):
a.incr()
print(a.get()) | 3.53125 | 4 |
bosm2015/events/__init__.py | dvm-bitspilani/BITS-BOSM-2015 | 1 | 12794011 | default_app_config = 'events.apps.EventConfig' | 1.148438 | 1 |
pipda/operator.py | vishalbelsare/pipda | 21 | 12794012 | <gh_stars>10-100
"""Provide the Operator class"""
import operator
from enum import Enum
from functools import wraps
from typing import Any, Callable, Mapping, Tuple, ClassVar, Type
from .context import ContextAnnoType, ContextBase
from .function import Function
class Operator(Function):
"""Operator class, defining how the operators in verb/function arguments
should be evaluated
Args:
op: The operator
context: Should be None while initialization. It depends on the
verb or the function that uses it as an argument
args: The arguments of the operator
kwargs: The keyword arguments of the operator
datarg: Should be False. No data argument for the operator function.
Attributes:
REGISTERED: The registered Operator class. It's this class by default
Use `register_operator` as a decorator to register a operator class
"""
REGISTERED: ClassVar[Type["Operator"]] = None
def __init__(
self,
op: str,
args: Tuple,
kwargs: Mapping[str, Any],
datarg: bool = False,
) -> None:
self.op = op
self.data = None
op_func = self._get_op_func()
super().__init__(op_func, args, kwargs, datarg)
@staticmethod
def set_context(
context: ContextAnnoType,
extra_contexts: Mapping[str, ContextAnnoType] = None,
) -> Callable[[Callable], Callable]:
"""Set custom context for a operator method"""
def wrapper(func):
func.context = (
context.value if isinstance(context, Enum) else context
)
extra_contexts2 = extra_contexts or {}
func.extra_contexts = {
key: ctx.value if isinstance(ctx, Enum) else ctx
for key, ctx in extra_contexts2.items()
}
return func
return wrapper
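    # Usage sketch (hedged): a registered Operator subclass can pin an evaluation
    # context onto a single operator method, e.g.
    #
    #   class MyOperator(Operator):
    #       @Operator.set_context(context=Context.EVAL)  # Context enum assumed from pipda.context
    #       def _op_add(self, left, right):
    #           return left + right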
def _pipda_eval(
self, data: Any, context: ContextBase = None
) -> Any:
"""Evaluate the operator
No data passed to the operator function. It should be used to evaluate
the arguments.
"""
# set the context and data in case they need to be used
# inside the function.
self.data = data
return super()._pipda_eval(data, context)
def _get_op_func(self) -> Callable:
"""Get the operator function from the operator module by name"""
def _opfunc(opname: str) -> Callable:
self_op_name = f"_op_{opname}"
if hasattr(self.__class__, self_op_name):
return getattr(self, self_op_name)
return getattr(operator, opname, None)
op_func = _opfunc(self.op)
if op_func:
return op_func
if self.op[0] == 'r':
# if we get radd, swap left and right operands
op_func = _opfunc(self.op[1:])
if op_func:
@wraps(op_func)
def left_op_func(arg_a, arg_b, *args, **kwargs):
return op_func(arg_b, arg_a, *args, **kwargs)
return left_op_func
raise ValueError(
f"No operator function defined for {self.op!r}"
)
| 3.203125 | 3 |
run_pptx_tagging.py | kmkurn/uxtspwsd | 0 | 12794013 | <reponame>kmkurn/uxtspwsd<filename>run_pptx_tagging.py
#!/usr/bin/env python
# Copyright (c) 2021 <NAME>
from itertools import chain
from pathlib import Path
import math
import os
import pickle
from einops import rearrange
from gensim.models.keyedvectors import KeyedVectors
from rnnr import Event, Runner
from rnnr.attachments import EpochTimer, MeanReducer, ProgressBar, SumReducer
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.utils import apply_backspaces_and_linefeeds
from text2array import BucketIterator, ShuffleIterator
from tqdm import tqdm
import torch
from aatrn import compute_ambiguous_tag_pairs_mask
from callbacks import compute_l2_loss, log_grads, log_stats, save_state_dict, update_params
from crf import LinearCRF
from ingredients.corpus import ing as corpus_ing, read_tagging_samples
from serialization import dump, load
from utils import extend_word_embedding
ex = Experiment("xduft-pptx-tagging-testrun", ingredients=[corpus_ing])
ex.captured_out_filter = apply_backspaces_and_linefeeds
# Setup mongodb observer
mongo_url = os.getenv("SACRED_MONGO_URL")
db_name = os.getenv("SACRED_DB_NAME")
if None not in (mongo_url, db_name):
ex.observers.append(MongoObserver.create(url=mongo_url, db_name=db_name))
@ex.config
def default():
# directory to save finetuning artifacts
artifacts_dir = ""
# discard train/dev/test samples with length greater than these numbers
max_length = {"train": 60}
# load source models from these directories and parameters {key: (load_from, load_params)}
load_src = {}
# whether to treat keys in load_src as lang codes
src_key_as_lang = False
# the main source to start finetuning from
main_src = ""
# device to run on [cpu, cuda]
device = "cuda" if torch.cuda.is_available() else "cpu"
# path to word embedding in word2vec format
word_emb_path = "wiki.en.vec"
# cumulative prob threshold
thresh = 0.95
# batch size
batch_size = 80
# learning rate
lr = 1e-5
# coefficient of L2 regularization against initial parameters
l2_coef = 1.0
# max number of epochs
max_epoch = 10
# whether to save the final samples as an artifact
save_samples = False
# load samples from this file (*.pkl)
load_samples_from = ""
# how to combine PPTX charts
combine = "union"
# whether to evaluate on train set at every epoch end
eval_on_train = False
# load src2ws from this path
load_src2ws_from = ""
@ex.named_config
def prag():
l2_coef = 0.1
lr = 5.9e-5
combine = "union"
@ex.named_config
def prag_gmean():
l2_coef = 4.7e-3
lr = 2.6e-4
combine = "geom_mean"
@ex.named_config
def prag_lopw():
l2_coef = 0.062
lr = 4.7e-4
combine = "geom_mean"
@ex.named_config
def testrun():
seed = 12345
max_epoch = 2
corpus = dict(portion=0.05)
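# Usage sketch (assumed sacred-style CLI; option names other than the named
# configs above are illustrative):
#   python run_pptx_tagging.py with prag corpus.lang=id word_emb_path=wiki.id.vec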
@ex.capture
def run_eval(model, vocab, samples, device="cpu", batch_size=16, compute_loss=True):
runner = Runner()
SumReducer("corr", value="bcorr").attach_on(runner)
SumReducer("total", value="btotal").attach_on(runner)
ProgressBar(total=sum(len(s["words"]) for s in samples), unit="tok", leave=False).attach_on(
runner
)
if compute_loss:
MeanReducer("mean_pptx_loss", value="pptx_loss").attach_on(runner)
@runner.on(Event.BATCH)
def evaluate_batch(state):
batch = state["batch"].to_array()
words = torch.from_numpy(batch["words"]).to(device)
tags = torch.from_numpy(batch["tags"]).to(device)
mask = words != vocab["words"].index(vocab.PAD_TOKEN)
assert mask.all(), "must not have masking at test time"
model.eval()
scores = model(words)
ptags = LinearCRF(scores).argmax()
assert ptags.shape == tags.shape
if (tags[:, 0] == vocab["tags"].index("<s>")).all():
tags, ptags = tags[:, 1:], ptags[:, 1:]
if (tags[:, -1] == vocab["tags"].index("</s>")).all():
tags, ptags = tags[:, :-1], ptags[:, :-1]
state["bcorr"] = (ptags == tags).long().sum().item()
state["btotal"] = tags.numel()
state["n_items"] = words.numel()
if compute_loss:
state["scores"] = scores
state["pptx_mask"] = torch.from_numpy(batch["pptx_mask"]).to(device).bool()
@runner.on(Event.BATCH)
def maybe_compute_loss(state):
if not compute_loss:
return
masked_scores = state["scores"].masked_fill(~state["pptx_mask"], -1e9)
crf = LinearCRF(masked_scores.contiguous())
crf_z = LinearCRF(state["scores"].contiguous())
pptx_loss = -crf.log_partitions().sum() + crf_z.log_partitions().sum()
state["pptx_loss"] = pptx_loss.item()
state["size"] = masked_scores.size(0)
with torch.no_grad():
runner.run(BucketIterator(samples, lambda s: len(s["words"]), batch_size))
return runner.state["corr"] / runner.state["total"], runner.state.get("mean_pptx_loss")
@ex.automain
def finetune(
corpus,
_log,
_run,
_rnd,
max_length=None,
load_src=None,
main_src=None,
load_src2ws_from=None,
artifacts_dir=None,
load_samples_from=None,
word_emb_path="wiki.id.vec",
src_key_as_lang=False,
device="cpu",
combine="union",
thresh=0.95,
batch_size=16,
save_samples=False,
lr=1e-5,
l2_coef=1.0,
eval_on_train=False,
max_epoch=10,
):
"""Finetune/adapt a trained tagger with PPTX."""
if max_length is None:
max_length = {}
if load_src is None:
load_src = {"src": ("artifacts", "model.pth")}
main_src = "src"
elif main_src not in load_src:
raise ValueError(f"{main_src} not found in load_src")
if artifacts_dir:
artifacts_dir = Path(artifacts_dir)
if load_samples_from:
_log.info("Loading samples from %s", load_samples_from)
with open(load_samples_from, "rb") as f:
samples = pickle.load(f)
else:
samples = {
wh: list(read_tagging_samples(wh, max_length.get(wh)))
for wh in ["train", "dev", "test"]
}
for wh in samples:
n_toks = sum(len(s["words"]) - 2 for s in samples[wh]) # don't count BOS/EOS tokens
_log.info("Read %d %s samples and %d tokens", len(samples[wh]), wh, n_toks)
kv = KeyedVectors.load_word2vec_format(word_emb_path)
if load_samples_from:
_log.info("Skipping non-main src because samples are processed and loaded")
srcs = []
else:
srcs = [src for src in load_src if src != main_src]
if src_key_as_lang and corpus["lang"] in srcs:
_log.info("Removing %s from src parsers because it's the tgt", corpus["lang"])
srcs.remove(corpus["lang"])
srcs.append(main_src)
if load_src2ws_from:
_log.info("Loading src weights from %s", load_src2ws_from)
src2ws = load(Path(load_src2ws_from).read_text(encoding="utf8"))
if any(src not in src2ws for src in srcs):
_log.warning("Some srcs have no weights, will be set to zero")
if any(src not in srcs for src in src2ws):
_log.warning("Too many srcs in src2ws, weights won't sum to one")
_log.info("Sources: %s", list(srcs))
_log.info("Weights: %s", [src2ws[src] for src in srcs])
else:
src2ws = {src: 1 / len(srcs) for src in srcs}
for src_i, src in enumerate(srcs):
_log.info("Processing src %s [%d/%d]", src, src_i + 1, len(srcs))
load_from, load_params = load_src[src]
path = Path(load_from) / "vocab.yml"
_log.info("Loading %s vocabulary from %s", src, path)
vocab = load(path.read_text(encoding="utf8"))
for name in vocab:
_log.info("Found %d %s", len(vocab[name]), name)
_log.info("Extending %s vocabulary with target words", src)
vocab.extend(chain(*samples.values()), ["words"])
_log.info("Found %d words now", len(vocab["words"]))
samples_ = {wh: list(vocab.stoi(samples[wh])) for wh in samples}
path = Path(load_from) / "model.yml"
_log.info("Loading %s model from metadata %s", src, path)
model = load(path.read_text(encoding="utf8"))
path = Path(load_from) / load_params
_log.info("Loading %s model parameters from %s", src, path)
model.load_state_dict(torch.load(path, "cpu"))
_log.info("Creating %s extended word embedding layer", src)
assert model.word_emb.embedding_dim == kv.vector_size
with torch.no_grad():
model.word_emb = torch.nn.Embedding.from_pretrained(
extend_word_embedding(
model.word_emb.weight,
vocab["words"],
kv,
vocab["words"].index(vocab.UNK_TOKEN),
)
)
model.to(device)
for wh in ["train", "dev"]:
if load_samples_from:
assert all("pptx_mask" in s for s in samples[wh])
continue
for i, s in enumerate(samples_[wh]):
s["_id"] = i
runner = Runner()
runner.state["_ids"] = []
if combine == "geom_mean":
runner.state.update({"log_marginals": [], "pred_tags": []})
elif combine == "union":
runner.state["pptx_masks"] = []
else:
raise ValueError(f"unknown value for combine: {combine}")
@runner.on(Event.BATCH)
def compute_pptx_ambiguous_tag_pairs_mask(state):
batch = state["batch"].to_array()
words = torch.from_numpy(batch["words"]).to(device)
mask = words != vocab["words"].index(vocab.PAD_TOKEN)
assert mask.all(), "must not have masking at test time"
model.eval()
scores = model(words)
if combine == "geom_mean":
crf = LinearCRF(scores)
state["log_marginals"].extend((crf.marginals() + 1e-9).log())
state["pred_tags"].extend(crf.argmax())
else:
pptx_mask = compute_ambiguous_tag_pairs_mask(scores, thresh)
state["pptx_masks"].extend(pptx_mask)
state["_ids"].extend(batch["_id"].tolist())
state["n_items"] = words.numel()
n_toks = sum(len(s["words"]) for s in samples_[wh])
ProgressBar(total=n_toks, unit="tok").attach_on(runner)
if combine == "geom_mean":
_log.info(
"Computing marginals and best tags for %s set with source %s", wh, src
)
else:
_log.info(
"Computing PPTX ambiguous tag pairs mask for %s set with source %s", wh, src
)
with torch.no_grad():
runner.run(BucketIterator(samples_[wh], lambda s: len(s["words"]), batch_size))
for x in "pptx_masks log_marginals pred_tags".split():
assert x not in runner.state or len(runner.state[x]) == len(samples_[wh])
assert len(runner.state["_ids"]) == len(samples_[wh])
if "pptx_masks" in runner.state:
for i, pptx_mask in zip(runner.state["_ids"], runner.state["pptx_masks"]):
samples_[wh][i]["pptx_mask"] = pptx_mask.tolist()
else:
zips = [runner.state[x] for x in "log_marginals pred_tags".split()]
for i, lms, pts in zip(runner.state["_ids"], *zips):
samples_[wh][i]["log_marginals"] = lms
samples_[wh][i]["pred_tags"] = pts
if combine != "geom_mean":
_log.info("Computing median number of tag sequences in chart on %s set", wh)
log_ntags = []
for s in tqdm(samples_[wh], unit="sample", leave=False):
mask = torch.tensor(s["pptx_mask"]).unsqueeze(0)
cnt_scores = torch.zeros_like(mask).float().masked_fill(~mask, -1e9)
log_ntags.append(LinearCRF(cnt_scores).log_partitions().item())
log_ntags.sort()
mid = len(log_ntags) // 2
if len(log_ntags) % 2:
log_med = log_ntags[mid]
else:
max_ = max(log_ntags[mid - 1], log_ntags[mid])
log_med = (
max_
+ math.log(
math.exp(log_ntags[mid - 1] - max_)
+ math.exp(log_ntags[mid] - max_)
)
- math.log(2)
)
_log.info("Median number of tag sequences in chart: %.1e", math.exp(log_med))
assert len(samples_[wh]) == len(samples[wh])
if combine == "geom_mean":
_log.info("Combining the marginals")
for i in tqdm(range(len(samples_[wh])), unit="sample", leave=False):
lms = samples[wh][i].get("log_marginals", 0)
pts = samples[wh][i].get("pred_tags", [])
lms = torch.tensor(lms, device=device) + src2ws[src] * samples_[wh][i]["log_marginals"]
pts.append(samples_[wh][i]["pred_tags"].tolist())
samples[wh][i]["log_marginals"] = lms.tolist()
samples[wh][i]["pred_tags"] = pts
else:
_log.info("Combining the ambiguous tag pairs mask")
for i in tqdm(range(len(samples_[wh])), unit="sample", leave=False):
pptx_mask = torch.tensor(samples_[wh][i]["pptx_mask"])
assert pptx_mask.dim() == 3
if "pptx_mask" in samples[wh][i]:
old_mask = torch.tensor(samples[wh][i]["pptx_mask"])
else:
old_mask = torch.zeros(1, 1, 1).bool()
samples[wh][i]["pptx_mask"] = (old_mask | pptx_mask).tolist()
if not load_samples_from and combine == "geom_mean":
for wh in ["train", "dev"]:
_log.info("Computing the ambiguous tag pairs mask on %s set", wh)
for s in tqdm(samples[wh], unit="sample", leave=False):
lms = torch.tensor(s["log_marginals"])
assert lms.dim() == 3 and lms.size(1) == lms.size(2)
# Renormalise the marginal probabilities
lms = rearrange(lms, "slen nntags ntags -> slen (nntags ntags)")
lms = lms.log_softmax(dim=1)
lms = rearrange(
lms, "slen (nntags ntags) -> slen nntags ntags", ntags=len(vocab["tags"])
)
lms = lms.unsqueeze(0)
mask = compute_ambiguous_tag_pairs_mask(lms, thresh, is_log_marginals=True)
assert mask.shape == lms.shape
mask = mask.squeeze(0)
for pts in s["pred_tags"]:
for j in range(1, len(pts)):
mask[j - 1, pts[j], pts[j - 1]] = True
s["pptx_mask"] = mask.tolist()
for k in "log_marginals pred_tags".split():
s.pop(k)
assert src == main_src
_log.info("Main source is %s", src)
if artifacts_dir:
path = artifacts_dir / "vocab.yml"
_log.info("Saving vocabulary to %s", path)
path.write_text(dump(vocab), encoding="utf8")
path = artifacts_dir / "model.yml"
_log.info("Saving model metadata to %s", path)
path.write_text(dump(model), encoding="utf8")
if artifacts_dir and save_samples:
path = artifacts_dir / "samples.pkl"
_log.info("Saving samples to %s", path)
with open(path, "wb") as f:
pickle.dump(samples, f)
samples = {wh: list(vocab.stoi(samples[wh])) for wh in samples}
for wh in ["train", "dev"]:
_log.info("Computing median number of tag sequences in chart on %s set", wh)
log_ntags = []
for s in tqdm(samples[wh], unit="sample", leave=False):
mask = torch.tensor(s["pptx_mask"]).unsqueeze(0)
cnt_scores = torch.zeros_like(mask).float().masked_fill(~mask, -1e9)
log_ntags.append(LinearCRF(cnt_scores).log_partitions().item())
log_ntags.sort()
mid = len(log_ntags) // 2
if len(log_ntags) % 2:
log_med = log_ntags[mid]
else:
max_ = max(log_ntags[mid - 1], log_ntags[mid])
log_med = (
max_
+ math.log(
math.exp(log_ntags[mid - 1] - max_) + math.exp(log_ntags[mid] - max_)
)
- math.log(2)
)
_log.info("Median number of tag sequences in chart: %.1e", math.exp(log_med))
_log.info("Creating optimizer")
opt = torch.optim.Adam(model.parameters(), lr=lr)
finetuner = Runner()
EpochTimer().attach_on(finetuner)
ProgressBar(
stats="stats", total=sum(len(s["words"]) for s in samples["train"]), unit="tok"
).attach_on(finetuner)
origin_params = {name: p.clone().detach() for name, p in model.named_parameters()}
finetuner.on(Event.BATCH, compute_l2_loss(model, origin_params))
@finetuner.on(Event.BATCH)
def compute_loss(state):
batch = state["batch"].to_array()
words = torch.from_numpy(batch["words"]).to(device)
pptx_mask = torch.from_numpy(batch["pptx_mask"]).to(device).bool()
mask = words != vocab["words"].index(vocab.PAD_TOKEN)
model.train()
scores = model(words, mask)
bsz, slen = words.shape
assert scores.shape == (bsz, slen - 1, len(vocab["tags"]), len(vocab["tags"]))
assert pptx_mask.shape == scores.shape
masked_scores = scores.masked_fill(~pptx_mask, -1e9)
lengths = mask.long().sum(dim=1)
mask[torch.arange(bsz).to(mask.device), lengths - 1] = False # exclude last position
crf = LinearCRF(
masked_scores.contiguous(), mask[:, :-1]
) # exclude last position from mask
crf_z = LinearCRF(scores.contiguous(), mask[:, :-1]) # exclude last position from mask
pptx_loss = (-crf.log_partitions().sum() + crf_z.log_partitions().sum()) / bsz
loss = pptx_loss + l2_coef * state["l2_loss"]
state["loss"] = loss
state["stats"] = {
"pptx_loss": pptx_loss.item(),
"l2_loss": state["l2_loss"].item(),
}
state["extra_stats"] = {"loss": loss.item()}
state["n_items"] = lengths.sum().item()
finetuner.on(Event.BATCH, [update_params(opt), log_grads(_run, model), log_stats(_run)])
@finetuner.on(Event.EPOCH_FINISHED)
def maybe_eval_on_train(state):
if not eval_on_train:
return
_log.info("Evaluating on train")
acc, pptx_loss = run_eval(model, vocab, samples["train"])
_log.info("train_acc: %.1f%%", 100 * acc)
_run.log_scalar("train_acc", acc, step=state["n_iters"])
assert pptx_loss is not None
_log.info("train_pptx_loss: %.4f", pptx_loss)
_run.log_scalar("train_pptx_loss", pptx_loss, step=state["n_iters"])
@finetuner.on(Event.EPOCH_FINISHED)
def eval_on_dev(state):
_log.info("Evaluating on dev")
acc, pptx_loss = run_eval(model, vocab, samples["dev"])
_log.info("dev_acc: %.1f%%", 100 * acc)
_run.log_scalar("dev_acc", acc, step=state["n_iters"])
assert pptx_loss is not None
_log.info("dev_pptx_loss: %.4f", pptx_loss)
_run.log_scalar("dev_pptx_loss", pptx_loss, step=state["n_iters"])
state["dev_acc"] = acc
@finetuner.on(Event.EPOCH_FINISHED)
def maybe_eval_on_test(state):
if state["epoch"] != max_epoch:
return
_log.info("Evaluating on test")
acc, _ = run_eval(model, vocab, samples["test"], compute_loss=False)
_log.info("test_acc: %.1f%%", 100 * acc)
_run.log_scalar("test_acc", acc, step=state["n_iters"])
if artifacts_dir:
finetuner.on(Event.EPOCH_FINISHED, save_state_dict("model", model, under=artifacts_dir))
samples["train"].sort(key=lambda s: len(s["words"]))
trn_iter = ShuffleIterator(
BucketIterator(samples["train"], lambda s: len(s["words"]) // 10, batch_size), rng=_rnd
)
_log.info("Starting finetuning")
try:
finetuner.run(trn_iter, max_epoch)
except KeyboardInterrupt:
_log.info("Interrupt detected, training will abort")
else:
return finetuner.state.get("dev_acc")
| 1.898438 | 2 |
cfod/analysis/filterbank/sigproc.py | chime-frb-open-data/chime-frb-data | 6 | 12794014 | """Sigproc header definitions and tools. Slightly modified version of:
https://github.com/scottransom/presto/blob/master/lib/python/sigproc.py
by <NAME>.
<NAME>, <EMAIL>
"""
import struct
header_params = {
"HEADER_START": "flag",
"telescope_id": "i",
"machine_id": "i",
"data_type": "i",
"rawdatafile": "str",
"source_name": "str",
"barycentric": "i",
"pulsarcentric": "i",
"az_start": "d",
"za_start": "d",
"src_raj": "d",
"src_dej": "d",
"tstart": "d",
"tsamp": "d",
"nbits": "i",
"nsamples": "i",
"nbeams": "i",
"ibeam": "i",
"fch1": "d",
"foff": "d",
"FREQUENCY_START": "flag",
"fchannel": "d",
"FREQUENCY_END": "flag",
"nchans": "i",
"nifs": "i",
"refdm": "d",
"period": "d",
"npuls": "q",
"nbins": "i",
"HEADER_END": "flag",
}
def prep_string(string):
    # struct.pack returns bytes, so encode the string before concatenation (Python 3)
    if isinstance(string, str):
        string = string.encode("utf-8")
    return struct.pack("i", len(string)) + string
def prep_double(name, value):
return prep_string(name) + struct.pack("d", float(value))
def prep_int(name, value):
return prep_string(name) + struct.pack("i", int(value))
def addto_hdr(parameter, value):
"""Prepare parameter and value for writing to binary file."""
if header_params[parameter] == "d":
return prep_double(parameter, value)
elif header_params[parameter] == "i":
return prep_int(parameter, value)
elif header_params[parameter] == "str":
return prep_string(parameter) + prep_string(value)
elif header_params[parameter] == "flag":
return prep_string(parameter)
else:
print(f"WARNING key '{parameter}' is unknown!")
| 2.421875 | 2 |
dataloader.py | sha2nkt/dvs_super_slomo | 3 | 12794015 | <reponame>sha2nkt/dvs_super_slomo
import torch
import torch.utils.data as data
import numpy as np
import cv2
from PIL import Image
import glob
import os, pdb
import random
def populateTrainList(folderPath):
folderList_pre = [x[0] for x in os.walk(folderPath)]
folderList = []
trainList = []
for folder in folderList_pre:
if folder[-3:] == '240':
folderList.append(folder + "/" + folder.split("/")[-2])
for folder in folderList:
imageList = sorted(glob.glob(folder + '/' + '*.jpg'))
for i in range(0, len(imageList), 12):
tmp = imageList[i:i+12]
if len(tmp) == 12:
trainList.append(imageList[i:i+12])
return trainList
def populateTrainList2(rgbPath, dvsPath):
folderListRgb = [x[0] for x in os.walk(rgbPath)]
folderListDvs = [x[0] for x in os.walk(dvsPath)]
trainList_rgb = []
trainList_dvs = []
for im_folder, dvs_folder in zip(folderListRgb, folderListDvs):
imageList = sorted(glob.glob(im_folder + '/' + '*.jpg'))
dvsList = sorted(glob.glob(dvs_folder + '/' + '*.png'))
minLen = min(len(imageList), len(dvsList))
imageList = imageList[:minLen]
dvsList = dvsList[:minLen]
for i in range(0, len(imageList), 12):
tmp_rgb = imageList[i:i+12]
tmp_dvs = dvsList[i:i+12]
if len(tmp_rgb) == 12:
trainList_rgb.append(imageList[i:i+12])
trainList_dvs.append(dvsList[i:i+12])
return trainList_rgb, trainList_dvs
def populateTrainList2Rgb(folderPath):
folderList = [x[0] for x in os.walk(folderPath)]
trainList = []
for folder in folderList:
imageList = sorted(glob.glob(folder + '/' + '*.jpg'))
for i in range(0, len(imageList), 12):
tmp = imageList[i:i+12]
if len(tmp) == 12:
trainList.append(imageList[i:i+12])
return trainList
def populateValList2Rgb(folderPath):
folderList = [x[0] for x in os.walk(folderPath)]
trainList = []
for folder in folderList:
imageList = sorted(glob.glob(folder + '/' + '*.jpg'))
for i in range(0, len(imageList), 9):
tmp = imageList[i:i+9]
if len(tmp) == 9:
trainList.append(imageList[i:i+9])
return trainList
# def populateDvsList2(folderPath):
# folderList = [x[0] for x in os.walk(folderPath)]
# trainList = []
# for folder in folderList:
# imageList = sorted(glob.glob(folder + '/' + '*.jpg'))
# for i in range(0, len(imageList), 12):
# tmp = imageList[i:i+12]
# if len(tmp) == 12:
# trainList.append(imageList[i:i+12])
# return trainList
def populateTestList2(folderPath):
folderList = [x[0] for x in os.walk(folderPath)]
trainList = []
for folder in folderList:
imageList = sorted(glob.glob(folder + '/' + '*.jpg'))
for i in range(0, len(imageList), 2):
tmp = imageList[i:i+2]
if len(tmp) == 2:
trainList.append(imageList[i:i+2])
return trainList
def fixedCropOnList(image_list, output_size):
cropped_img_list = []
h,w = output_size
height, width, _ = image_list[0].shape
i = 100
j = 0
st_y = 0
ed_y = w
st_x = 0
ed_x = h
or_st_y = i
or_ed_y = i + w
or_st_x = j
or_ed_x = j + h
#print(st_x, ed_x, st_y, ed_y)
#print(or_st_x, or_ed_x, or_st_y, or_ed_y)
for img in image_list:
new_img = np.empty((h,w,3), dtype=np.float32)
new_img.fill(128)
new_img[st_y: ed_y, st_x: ed_x, :] = img[or_st_y: or_ed_y, or_st_x: or_ed_x, :].copy()
cropped_img_list.append(np.ascontiguousarray(new_img))
return cropped_img_list
def randomCropOnList(image_list, output_size):
cropped_img_list = []
h,w = output_size
height, width, _ = image_list[0].shape
#print(h,w,height,width)
i = random.randint(0, height - h)
j = random.randint(0, width - w)
st_y = 0
ed_y = w
st_x = 0
ed_x = h
or_st_y = i
or_ed_y = i + w
or_st_x = j
or_ed_x = j + h
#print(st_x, ed_x, st_y, ed_y)
#print(or_st_x, or_ed_x, or_st_y, or_ed_y)
for img in image_list:
new_img = np.empty((h,w,3), dtype=np.float32)
new_img.fill(128)
new_img[st_y: ed_y, st_x: ed_x, :] = img[or_st_y: or_ed_y, or_st_x: or_ed_x, :].copy()
cropped_img_list.append(np.ascontiguousarray(new_img))
return cropped_img_list
def randomCropOnListDvs(image_list, dvs_list, output_size):
cropped_img_list = []
cropped_dvs_list = []
h,w = output_size
height, width, _ = image_list[0].shape
dvs_h, dvs_w = dvs_list[0].shape
#print(h,w,height,width)
i = random.randint(0, height - h)
j = random.randint(0, width - w)
st_y = 0
ed_y = w
st_x = 0
ed_x = h
or_st_y = i
or_ed_y = i + w
or_st_x = j
or_ed_x = j + h
#print(st_x, ed_x, st_y, ed_y)
#print(or_st_x, or_ed_x, or_st_y, or_ed_y)
for img,dvs in zip(image_list, dvs_list):
new_img = np.empty((h,w,3), dtype=np.float32)
new_dvs = np.empty((h,w), dtype=np.float32)
new_img.fill(128)
new_img[st_y: ed_y, st_x: ed_x, :] = img[or_st_y: or_ed_y, or_st_x: or_ed_x, :].copy()
new_dvs[st_y: ed_y, st_x: ed_x] = dvs[or_st_y: or_ed_y, or_st_x: or_ed_x].copy()
cropped_img_list.append(np.ascontiguousarray(new_img))
cropped_dvs_list.append(np.ascontiguousarray(new_dvs))
return cropped_img_list, cropped_dvs_list
#print(len(populateTrainList('/home/user/data/nfs/')))
class expansionLoader(data.Dataset):
def __init__(self, folderPath, mode='train'):
self.trainList = populateTrainList2Rgb(folderPath)
self.mode = mode
print("# of training samples:", len(self.trainList))
def __getitem__(self, index):
img_path_list = self.trainList[index]
start = random.randint(0,3)
h,w,c = cv2.imread(img_path_list[0]).shape
image = cv2.imread(img_path_list[0])
#print(h,w,c)
if h > w:
scaleX = int(360*(h/w))
scaleY = 360
elif h <= w:
scaleX = 360
scaleY = int(360*(w/h))
img_list = []
flip = random.randint(0,1)
if flip:
for img_path in img_path_list[start:start+9]:
tmp = cv2.resize(cv2.imread(img_path), (scaleX,scaleY))[:,:,(2,1,0)]
img_list.append(np.array(cv2.flip(tmp,1), dtype=np.float32))
else:
for img_path in img_path_list[start:start+9]:
tmp = cv2.resize(cv2.imread(img_path), (scaleX, scaleY))[:,:,(2,1,0)]
img_list.append(np.array(tmp,dtype=np.float32))
#cv2.imshow("j",tmp)
#cv2.waitKey(0) & 0xff
#brak
for i in range(len(img_list)):
#print(img_list[i].shape)
#brak
img_list[i] /= 255
img_list[i][:,:,0] -= 0.485#(img_list[i]/127.5) - 1
img_list[i][:,:,1] -= 0.456
img_list[i][:,:,2] -= 0.406
img_list[i][:,:,0] /= 0.229
img_list[i][:,:,1] /= 0.224
img_list[i][:,:,2] /= 0.225
cropped_img_list = randomCropOnList(img_list,(352,352))
for i in range(len(cropped_img_list)):
cropped_img_list[i] = torch.from_numpy(cropped_img_list[i].transpose((2, 0, 1)))
return cropped_img_list
def __len__(self):
return len(self.trainList)
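# Usage sketch (hypothetical dataset path) -- wrap the loader in a PyTorch DataLoader:
#   train_set = expansionLoader('/data/adobe240fps/train')
#   train_loader = data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=4)
#   frames = next(iter(train_loader))   # list of 9 tensors, each of shape (B, 3, 352, 352)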
class testLoader(data.Dataset):
def __init__(self, folderPath, mode='test'):
self.trainList = populateTestList2(folderPath)
self.mode = mode
print("# of training samples:", len(self.trainList))
def __getitem__(self, index):
img_path_list = self.trainList[index]
start = 0
h,w,c = cv2.imread(img_path_list[0]).shape
image = cv2.imread(img_path_list[0])
#print(h,w,c)
if h > w:
scaleX = int(360*(h/w))
scaleY = 360
elif h <= w:
scaleX = 360
scaleY = int(360*(w/h))
img_list = []
for img_path in img_path_list[start:start+2]:
tmp = cv2.resize(cv2.imread(img_path), (scaleX, scaleY))[:,:,(2,1,0)]
img_list.append(np.array(tmp,dtype=np.float32))
#cv2.imshow("j",tmp)
#cv2.waitKey(0) & 0xff
#brak
for i in range(len(img_list)):
#print(img_list[i].shape)
#brak
img_list[i] /= 255
img_list[i][:,:,0] -= 0.485#(img_list[i]/127.5) - 1
img_list[i][:,:,1] -= 0.456
img_list[i][:,:,2] -= 0.406
img_list[i][:,:,0] /= 0.229
img_list[i][:,:,1] /= 0.224
img_list[i][:,:,2] /= 0.225
cropped_img_list = fixedCropOnList(img_list, (352, 352))
for i in range(len(cropped_img_list)):
cropped_img_list[i] = torch.from_numpy(cropped_img_list[i].transpose((2, 0, 1)))
return cropped_img_list
def __len__(self):
return len(self.trainList)
class valLoader(data.Dataset):
def __init__(self, folderPath, mode='test'):
self.trainList = populateValList2Rgb(folderPath)
self.mode = mode
print("# of training samples:", len(self.trainList))
def __getitem__(self, index):
img_path_list = self.trainList[index]
start = 0
h,w,c = cv2.imread(img_path_list[0]).shape
image = cv2.imread(img_path_list[0])
#print(h,w,c)
if h > w:
scaleX = int(360*(h/w))
scaleY = 360
elif h <= w:
scaleX = 360
scaleY = int(360*(w/h))
img_list = []
for img_path in img_path_list[start:start+9]:
tmp = cv2.resize(cv2.imread(img_path), (scaleX, scaleY))[:,:,(2,1,0)]
img_list.append(np.array(tmp,dtype=np.float32))
#cv2.imshow("j",tmp)
#cv2.waitKey(0) & 0xff
#brak
for i in range(len(img_list)):
#print(img_list[i].shape)
#brak
img_list[i] /= 255
img_list[i][:,:,0] -= 0.485#(img_list[i]/127.5) - 1
img_list[i][:,:,1] -= 0.456
img_list[i][:,:,2] -= 0.406
img_list[i][:,:,0] /= 0.229
img_list[i][:,:,1] /= 0.224
img_list[i][:,:,2] /= 0.225
cropped_img_list = fixedCropOnList(img_list, (352, 352))
for i in range(len(cropped_img_list)):
cropped_img_list[i] = torch.from_numpy(cropped_img_list[i].transpose((2, 0, 1)))
return cropped_img_list
def __len__(self):
return len(self.trainList)
class dvsLoader(data.Dataset):
def __init__(self, imFolderPath, dvsFolderPath, mode='train'):
self.trainList, self.dvsList = populateTrainList2(imFolderPath, dvsFolderPath)
# self.dvsList = populateTrainList2Dvs(dvsFolderPath)
self.mode = mode
print("# of training samples:", len(self.trainList))
print("# of dvs samples:", len(self.dvsList))
def __getitem__(self, index):
img_path_list = self.trainList[index]
dvs_path_list = self.dvsList[index]
start = random.randint(0,3)
h,w,c = cv2.imread(img_path_list[0]).shape
        # read DVS frames as single-channel (grayscale) images so .shape unpacks to (h, w)
        h,w = cv2.imread(dvs_path_list[0], cv2.IMREAD_GRAYSCALE).shape
        image = cv2.imread(img_path_list[0])
        dvs = cv2.imread(dvs_path_list[0], cv2.IMREAD_GRAYSCALE)
#print(h,w,c)
if h > w:
scaleX = int(360*(h/w))
scaleY = 360
elif h <= w:
scaleX = 360
scaleY = int(360*(w/h))
img_list = []
dvs_list = []
flip = random.randint(0,1)
if flip:
for img_path in img_path_list[start:start+9]:
tmp = cv2.resize(cv2.imread(img_path), (scaleX,scaleY))[:,:,(2,1,0)]
img_list.append(np.array(cv2.flip(tmp,1), dtype=np.float32))
for dvs_path in dvs_path_list[start:start+9]:
                tmp = cv2.resize(cv2.imread(dvs_path, cv2.IMREAD_GRAYSCALE), (scaleX,scaleY))
thresh = 127
tmp = cv2.threshold(tmp, thresh, 1, cv2.THRESH_BINARY)[1]
dvs_list.append(np.array(cv2.flip(tmp,1), dtype=np.float32))
else:
for img_path in img_path_list[start:start+9]:
tmp = cv2.resize(cv2.imread(img_path), (scaleX, scaleY))[:,:,(2,1,0)]
img_list.append(np.array(tmp,dtype=np.float32))
for dvs_path in dvs_path_list[start:start+9]:
                tmp = cv2.resize(cv2.imread(dvs_path, cv2.IMREAD_GRAYSCALE), (scaleX, scaleY))
thresh = 127
tmp = cv2.threshold(tmp, thresh, 1, cv2.THRESH_BINARY)[1]
dvs_list.append(np.array(tmp,dtype=np.float32))
#cv2.imshow("j",tmp)
#cv2.waitKey(0) & 0xff
#brak
for i in range(len(img_list)):
#print(img_list[i].shape)
#brak
img_list[i] /= 255
img_list[i][:,:,0] -= 0.485#(img_list[i]/127.5) - 1
img_list[i][:,:,1] -= 0.456
img_list[i][:,:,2] -= 0.406
img_list[i][:,:,0] /= 0.229
img_list[i][:,:,1] /= 0.224
img_list[i][:,:,2] /= 0.225
cropped_img_list, cropped_dvs_list = randomCropOnListDvs(img_list, dvs_list ,(352,352))
for i in range(len(cropped_img_list)):
cropped_img_list[i] = torch.from_numpy(cropped_img_list[i].transpose((2, 0, 1)))
for i in range(len(cropped_dvs_list)):
cropped_dvs_list[i] = torch.from_numpy(cropped_dvs_list[i])
return cropped_img_list, cropped_dvs_list
def __len__(self):
return len(self.trainList) | 2.328125 | 2 |
step2_train_rewarder.py | felixba93/summary-reward-no-reference-master | 0 | 12794016 | import sys
import torch
from torch.autograd import Variable
import numpy as np
import os
from os import path
import argparse
import random
import copy
from tqdm import tqdm
import pickle
from scorer.data_helper.json_reader import read_sorted_scores, read_pair_anno_scores, read_articles, \
read_processed_scores, read_scores
from scipy.stats import spearmanr, pearsonr, kendalltau
import math
from torchvision import models
from resources import MODEL_WEIGHT_DIR
from resources import OUTPUTS_DIR
from matplotlib import pyplot as plt
import csv
def parse_split_data(sorted_scores, train_percent, dev_percent, prompt='structure'):
train = {}
dev = {}
test = {}
all = {}
topic_count = 0
for article_id, scores_list in tqdm(sorted_scores.items()):
entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
entry['sys_summ' + repr(sid)] = [s['scores'][prompt] for s in scores_list if s['summ_id'] == sid][
0] # that can be done more efficiently, but who cares...
rand = random.random()
all[article_id] = entry
if rand < train_percent:
train[article_id] = entry
elif rand < train_percent + dev_percent:
dev[article_id] = entry
else:
test[article_id] = entry
topic_count += 1
print("topics in parse_split_data", topic_count)
return train, dev, test, all
def parse_split_data_balanced(sorted_scores, train_percent, dev_percent, prompt='structure'):
train = {}
dev = {}
test = {}
all = {}
topic_count = 0
article_ids = list(sorted_scores.keys())
random.shuffle(article_ids)
num_articles = len(article_ids)
train_ids = article_ids[0:int(train_percent * num_articles)]
dev_ids = article_ids[int(train_percent * num_articles):int((train_percent + dev_percent) * num_articles)]
# test_ids=article_ids[int((train_percent+dev_percent)*num_articles):]
for article_id, scores_list in tqdm(sorted_scores.items()):
entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
entry['sys_summ' + repr(sid)] = [s['scores'][prompt] for s in scores_list if s['summ_id'] == sid][
0] # that can be done more efficiently, but who cares...
# rand = random.random()
all[article_id] = entry
if article_id in train_ids:
train[article_id] = entry
elif article_id in dev_ids:
dev[article_id] = entry
else:
test[article_id] = entry
topic_count += 1
print("topics in parse_split_data", topic_count)
return train, dev, test, all
def build_model(model_type, vec_length, learn_rate=None):
if 'linear' in model_type:
deep_model = torch.nn.Sequential(
torch.nn.Linear(vec_length, 1),
)
else:
deep_model = torch.nn.Sequential(
torch.nn.Linear(vec_length, int(vec_length / 2)),
torch.nn.ReLU(),
torch.nn.Linear(int(vec_length / 2), 1),
)
if learn_rate is not None:
optimiser = torch.optim.Adam(deep_model.parameters(), lr=learn_rate)
return deep_model, optimiser
else:
return deep_model
def deep_pair_train(vec_list, target, deep_model, optimiser, device):
# print(np.array(vec_list).shape)
input = Variable(torch.from_numpy(np.array(vec_list)).float())
# print(input)
if 'gpu' in device:
input = input.to('cuda')
value_variables = deep_model(input)
# print(value_variables)
softmax_layer = torch.nn.Softmax(dim=1)
pred = softmax_layer(value_variables)
# print(pred)
# print(np.array(target).shape, np.array(target).reshape(-1, 2, 1).shape)
target_variables = Variable(torch.from_numpy(np.array(target)).float()).view(-1, 2, 1)
# print(target_variables)
if 'gpu' in device:
target_variables = target_variables.to('cuda')
loss_fn = torch.nn.BCELoss()
loss = loss_fn(pred, target_variables)
# print(loss)
optimiser.zero_grad()
loss.backward()
optimiser.step()
return loss.cpu().item()
def deep_pair_train_loss_only(vec_list, target, deep_model, optimiser, device):
# print(np.array(vec_list).shape)
input = Variable(torch.from_numpy(np.array(vec_list)).float())
# print(input)
if 'gpu' in device:
input = input.to('cuda')
value_variables = deep_model(input)
# print(value_variables)
softmax_layer = torch.nn.Softmax(dim=1)
pred = softmax_layer(value_variables)
# print(pred)
# print(np.array(target).shape, np.array(target).reshape(-1, 2, 1).shape)
target_variables = Variable(torch.from_numpy(np.array(target)).float()).view(-1, 2, 1)
# print(target_variables)
if 'gpu' in device:
target_variables = target_variables.to('cuda')
loss_fn = torch.nn.BCELoss()
loss = loss_fn(pred, target_variables)
# print(loss)
return loss.cpu().item()
def build_pairs(entries):
pair_list = []
topic_count = 0
summ_count = 0
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
        # Really iterate over all pairs. Previously j started from 1 to avoid the pair (0, 0), but that also meant pairs (x, 0) were never generated; the i == j case is skipped explicitly instead.
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
if i == j: continue
if entry[summ_ids[i]] > entry[summ_ids[j]]:
pref = [1, 0]
elif entry[summ_ids[i]] < entry[summ_ids[j]]:
pref = [0, 1]
else:
pref = [0.5, 0.5]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
# print(pair_list)
topic_count += 1
summ_count = summ_count + len(summ_ids)
print("topics", topic_count)
print("summ", summ_count)
return pair_list
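

# Hedged illustration (toy data, not from the real corpus) of what build_pairs
# produces: for each article, every ordered pair of distinct summaries yields
# [1, 0] if the first scores higher, [0, 1] if lower, and [0.5, 0.5] for ties.
def _build_pairs_example():
    """Sketch only; shows the expected output shape of build_pairs."""
    toy_entries = {'article_0': {'sys_summ0': 3, 'sys_summ1': 5}}
    pairs = build_pairs(toy_entries)
    # Expected pairs:
    #   ('article_0', 'sys_summ0', 'sys_summ1', [0, 1])
    #   ('article_0', 'sys_summ1', 'sys_summ0', [1, 0])
    return pairs
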
def build_anno_pairs(entries, pair_anno_scores):
pair_list = []
topic_count = 0
summ_count = 0
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
        # Really iterate over all pairs. Previously j started from 1 to avoid the pair (0, 0), but that also meant pairs (x, 0) were never generated; the i == j case is skipped explicitly instead.
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
if i == j: continue
# get keys from dictionary
entry_keys = list(entry.keys())
# get pair preference from pair_anno_scores
for pair in pair_anno_scores[article_id]:
if pair['summ_id_i'] == int(entry_keys[i][8]) and pair['summ_id_j'] == int(entry_keys[j][8]):
if pair['pref'] == 1:
pref = [1, 0]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
else:
pref = [0, 1]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
elif pair['summ_id_j'] == int(entry_keys[i][8]) and pair['summ_id_i'] == int(entry_keys[j][8]):
if pair['pref'] == 1:
pref = [0, 1]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
else:
pref = [1, 0]
pair_list.append((article_id, summ_ids[i], summ_ids[j], pref))
continue
topic_count += 1
summ_count = summ_count + len(summ_ids)
print("topics", topic_count)
print("summ", summ_count)
# print(pair_list)
return pair_list
def build_human_pair_scores(pair_list):
human_pair_scores = {}
for entry in pair_list:
article_id = str(entry[0])
sum_id_i = str(entry[1])
sum_id_j = str(entry[2])
pref = entry[3]
summ_entry = {}
if article_id in human_pair_scores:
if pref == [1, 0]:
if sum_id_i in human_pair_scores[article_id]:
                    human_pair_scores[article_id][sum_id_i] += 1
else:
human_pair_scores[article_id][sum_id_i] = 1
else:
if sum_id_j in human_pair_scores[article_id]:
                    human_pair_scores[article_id][sum_id_j] += 1
else:
human_pair_scores[article_id][sum_id_j] = 1
else:
if pref == [1, 0]:
summ_entry[sum_id_i] = 1
summ_entry[sum_id_j] = 0
else:
summ_entry[sum_id_i] = 0
summ_entry[sum_id_j] = 1
human_pair_scores[article_id] = summ_entry
return human_pair_scores
# randomize_pref_order and double_prefs are only relevant if the learning function learns f(s0,s1)=pref. in our case, we learn f(s0)=pref[0] and f(s1)=pref[1], so this should be set to False
def build_pairs_majority_preferences(entries, sorted_scores, target_type='graded', ignore_ties=False,
randomize_pref_order=False, double_prefs=False):
pair_list = []
topic_count = 0
anno_count = 0
summ_count = 0
entries_text = {}
# get summary text and matching id
for article_id, scores_list in tqdm(sorted_scores.items()):
temp_entry = {}
summ_ids = [s['summ_id'] for s in scores_list]
for sid in summ_ids:
# get summary text
s_text = [s['sys_summ'] for s in scores_list if s['summ_id'] == sid][0]
temp_entry['sys_summ' + repr(sid)] = s_text
# save in dictionary
entries_text[article_id] = temp_entry
for article_id in entries:
entry = entries[article_id]
summ_ids = list(entry.keys())
# mapping from summary text to last summary id with that text. that's the one we will use
summ2id = {entries_text[article_id][summ_id]: summ_id for summ_id in summ_ids}
# put here the prefs for this article
article_prefs = {}
# still run through all pairs
        # Really iterate over all pairs. Previously j started from 1 to avoid the pair (0, 0), but that also meant pairs (x, 0) were never generated; the i == j case is skipped explicitly instead.
for i in range(len(summ_ids)):
for j in range(len(summ_ids)):
# run through dictionary containing summ_ids and matching text
# for key, value in entries_text[article_id].items():
# get text for current summaries i and j
# if key == summ_ids[i]:
# text_i = value
# elif key == summ_ids[j]:
# text_j = value
text_i = entries_text[article_id][summ_ids[i]]
text_j = entries_text[article_id][summ_ids[j]]
# check if text is identical, if yes skip
if i == j or text_i == text_j:
# print("DUPLICATE FOUND: TEXT i", text_i, "TEXT j", text_i)
continue
# get the unique summ ids
unique_summ_id_pair = [summ2id[text_i], summ2id[text_j]]
# some debug output
# noinspection PyUnreachableCode
if False:
print("%s vs. %s (IDs %s vs. %s)" % (
summ_ids[i], summ_ids[j], unique_summ_id_pair[0], unique_summ_id_pair[1]))
full_entry = sorted_scores[article_id]
print(" system %s with score %s (%s) vs." % (
full_entry[i]['sys_name'], full_entry[i]['scores']['redundancy'], entry[summ_ids[i]]))
print(" system %s with score %s (%s)" % (
full_entry[j]['sys_name'], full_entry[j]['scores']['redundancy'], entry[summ_ids[j]]))
print(
" \"%s...\" vs. \"%s...\"" % (full_entry[i]['sys_summ'][:20], full_entry[j]['sys_summ'][:20]))
# unique_summ_id_pair.sort()
if entry[summ_ids[i]] > entry[summ_ids[j]]:
pref = [1, 0]
elif entry[summ_ids[i]] < entry[summ_ids[j]]:
pref = [0, 1]
else:
pref = [0.5, 0.5]
# if entry[unique_summ_id_pair[0]] > entry[unique_summ_id_pair[1]]:
# pref = [1, 0]
# elif entry[unique_summ_id_pair[0]] > entry[unique_summ_id_pair[1]]:
# pref = [0, 1]
# else:
                #     # TODO: we could completely ignore ties; it doesn't change much. Low priority.
# pref = [0.5, 0.5]
# sort the ids so that we get a unique key, so that (sys_summ0,sys_summ1) and (sys_summ1,sys_summ0) are the same
if unique_summ_id_pair[1] < unique_summ_id_pair[0]:
unique_summ_id_pair = unique_summ_id_pair[::-1]
pref = pref[::-1]
# convert to tuple, otherwise its not hashable for the dict
unique_summ_id_pair = tuple(unique_summ_id_pair)
# add up the pref to the total pref vector of the specific summary pair. create a new entry if not existing
article_prefs[unique_summ_id_pair] = article_prefs.get(unique_summ_id_pair,
np.array([0, 0])) + np.array(pref)
# transform to target
for unique_summ_id_pair, pref in article_prefs.items():
# depending on the mode, use binary target, or graded one
pref = (pref / (pref[0] + pref[1])).tolist()
if target_type == 'binary':
if pref[0] > pref[1]:
pref = [1, 0]
elif pref[0] < pref[1]:
                pref = [0, 1]  # fixed: this branch must prefer the second summary
else:
pref = [0.5, 0.5]
# skip if it is a tie and you want to ignore ties
if pref[0] != 0.5 or not ignore_ties:
# include the pref two times, once in one direction and once in the other direction
if double_prefs:
pair_list.append((article_id, unique_summ_id_pair[1], unique_summ_id_pair[0], pref[::-1]))
pair_list.append((article_id, unique_summ_id_pair[0], unique_summ_id_pair[1], pref))
else:
# include the pref in the reverse order by chance. this might be necessary if there is a bias in the distribution of the score, e.g. if they are ordered
if randomize_pref_order and bool(random.getrandbits(1)):
pair_list.append((article_id, unique_summ_id_pair[1], unique_summ_id_pair[0], pref[::-1]))
else:
pair_list.append((article_id, unique_summ_id_pair[0], unique_summ_id_pair[1], pref))
topic_count += 1
anno_count += len(summ_ids)
summ_count += len(summ2id)
print("topics", topic_count)
print("annotations", anno_count)
print("summ", summ_count)
print("summ pairs", len(pair_list))
return pair_list
def build_pair_vecs(vecs, pairs):
pair_vec_list = []
for aid, sid1, sid2, _ in pairs:
article_vec = list(vecs[aid]['article'])
s1_vec = list(vecs[aid][sid1])
s2_vec = list(vecs[aid][sid2])
pair_vec_list.append([article_vec + s1_vec, article_vec + s2_vec])
return pair_vec_list
def pair_train_rewarder(vec_dic, pairs, deep_model, optimiser, loss_only, batch_size=32, device='cpu'):
loss_list = []
shuffled_pairs = pairs[:]
np.random.shuffle(shuffled_pairs)
vec_pairs = build_pair_vecs(vec_dic, shuffled_pairs)
# print('total number of pairs built: {}'.format(len(vec_pairs)))
for pointer in range(int((len(
        pairs) - 1) / batch_size) + 1):  # there was a bug here: when len(pairs) was a multiple of 32, the last batch was [] and caused an exception
vec_batch = vec_pairs[pointer * batch_size:(pointer + 1) * batch_size]
target_batch = shuffled_pairs[pointer * batch_size:(pointer + 1) * batch_size]
target_batch = [ee[-1] for ee in target_batch]
if loss_only:
loss = deep_pair_train_loss_only(vec_batch, target_batch, deep_model, optimiser, device)
else:
loss = deep_pair_train(vec_batch, target_batch, deep_model, optimiser, device)
loss_list.append(loss)
return np.mean(loss_list)
def test_rewarder(vec_list, human_scores, model, device, plot_file=None):
results = {'rho': [], 'rho_p': [], 'pcc': [], 'pcc_p': [], 'tau': [], 'tau_p': [], 'rho_global': [],
'pcc_global': [], 'tau_global': []}
true_scores_all = []
pred_scores_all = np.array([])
# print(human_scores)
# pred_scores_all = []
for article_id in human_scores:
entry = human_scores[article_id]
summ_ids = list(entry.keys())
if len(summ_ids) < 2: continue
concat_vecs = []
true_scores = []
for i in range(len(summ_ids)):
article_vec = list(vec_list[article_id]['article'])
summ_vec = list(vec_list[article_id][summ_ids[i]])
# print(np.array(concat_vecs).shape, np.array(article_vec).shape, np.array(summ_vec).shape)
concat_vecs.append(article_vec + summ_vec)
# print(np.array(concat_vecs).shape)
# print(entry[summ_ids[i]])
true_scores.append(entry[summ_ids[i]])
true_scores_all += true_scores # add scores for topic to list of all scores
input = Variable(torch.from_numpy(np.array(concat_vecs)).float())
if 'gpu' in device:
input = input.to('cuda')
model.eval()
with torch.no_grad():
# print(true_scores)
# print(np.array(true_scores).shape)
# print(input)
# print(input.shape)
# print(model(input).data.cpu().numpy())
# print(model(input).data.cpu().numpy().shape)
pred_scores = model(input).data.cpu().numpy().reshape(1, -1)[0]
pred_scores_all = np.concatenate((pred_scores_all, pred_scores), axis=0)
# pred_scores_all += pred_scores.tolist()
rho, rho_p = spearmanr(true_scores, pred_scores)
pcc, pcc_p = pearsonr(true_scores, pred_scores)
tau, tau_p = kendalltau(true_scores, pred_scores)
if not (math.isnan(rho) or math.isnan(pcc) or math.isnan(tau)):
results['rho'].append(rho)
results['rho_p'].append(rho_p)
results['pcc'].append(pcc)
results['pcc_p'].append(pcc_p)
results['tau'].append(tau)
results['tau_p'].append(tau_p)
rho = spearmanr(true_scores_all, pred_scores_all)[0]
pcc = pearsonr(true_scores_all, pred_scores_all)[0]
tau = kendalltau(true_scores_all, pred_scores_all)[0]
if not (math.isnan(rho) or math.isnan(pcc) or math.isnan(tau)):
results['rho_global'].append(rho)
results['pcc_global'].append(pcc)
results['tau_global'].append(tau)
if plot_file is not None:
fig, ax = plt.subplots()
# true_scores_all=np.array(true_scores_all)
# pred_scores_all=np.array(pred_scores_all)
unique = np.sort(np.unique(true_scores_all))
data_to_plot = [pred_scores_all[true_score == true_scores_all] for true_score in unique]
# bw_methods determines how soft the distribution curve will be. lower values are more sharp
ax.violinplot(data_to_plot, showmeans=True, showmedians=True, bw_method=0.2)
ax.scatter(true_scores_all + np.random.normal(0, 0.1, pred_scores_all.shape[0]), pred_scores_all, marker=".",
s=3, alpha=0.5)
ax.set_title('Comparison and distributions of true values to predicted score')
ax.set_xlabel('true scores')
ax.set_ylabel('predicted scores')
xticklabels = true_scores_all
ax.set_xticks(true_scores_all)
print("violin plot written to: %s" % plot_file)
plt.savefig(plot_file)
return results
def parse_args(argv):
ap = argparse.ArgumentParser("arguments for summary sampler")
ap.add_argument('-e', '--epoch_num', type=int, default=50)
ap.add_argument('-b', '--batch_size', type=int, default=32)
ap.add_argument('-tt', '--train_type', type=str, help='pairwise or regression', default='pairwise')
ap.add_argument('-tp', '--train_percent', type=float, help='how many data used for training', default=.64)
ap.add_argument('-dp', '--dev_percent', type=float, help='how many data used for dev', default=.16)
ap.add_argument('-lr', '--learn_rate', type=float, help='learning rate', default=3e-4)
ap.add_argument('-mt', '--model_type', type=str, help='deep/linear', default='linear')
ap.add_argument('-dv', '--device', type=str, help='cpu/gpu', default='gpu')
ap.add_argument('-se', '--seed', type=int, help='random seed number', default='1')
ap.add_argument('-fn', '--file_name', type=str, help='file name for csv output',
default='BetterRewardsStatistics_test.csv')
args = ap.parse_args(argv)
return args.epoch_num, args.batch_size, args.train_type, args.train_percent, args.dev_percent, args.learn_rate, args.model_type, args.device, args.seed, args.file_name
def main(argv):
epoch_num, batch_size, train_type, train_percent, dev_percent, learn_rate, model_type, device, seed, file_name = parse_args(
argv[1:])
print('\n=====Arguments====')
print('epoch num {}'.format(epoch_num))
print('batch size {}'.format(batch_size))
print('train type {}'.format(train_type))
print('train percent {}'.format(train_percent))
print('dev percent {}'.format(dev_percent))
print('learn rate {}'.format(learn_rate))
print('model type {}'.format(model_type))
print('device {}'.format(device))
print('seed {}'.format(seed))
print('file name {}'.format(file_name))
print('=====Arguments====\n')
csv_column_names = ['seed', 'learn_rate', 'model_type', 'train_pairs', 'dev_pairs', 'test_pairs', 'epoch_num',
'loss_train', 'loss_dev', 'loss_test', 'rho_train', 'rho_p_train', 'pcc_train', 'pcc_p_train',
'tau_train', 'tau_p_train', 'rho_train_global', 'pcc_train_global', 'tau_train_global',
'rho_dev', 'rho_p_dev', 'pcc_dev', 'pcc_p_dev', 'tau_dev', 'tau_p_dev',
'rho_dev_global', 'pcc_dev_global', 'tau_dev_global', 'rho_test', 'rho_p_test', 'pcc_test',
'pcc_p_test', 'tau_test', 'tau_p_test', 'rho_test_global', 'pcc_test_global', 'tau_test_global']
# check if csv_file exists
if path.exists(file_name):
csv_exists = True
else:
csv_exists = False
with open(file_name, 'a', newline='') as csv_file:
writer = csv.writer(csv_file)
# if a new csv_file is generated, write column names
if csv_exists is False:
writer.writerow(csv_column_names)
np.random.seed(seed=seed)
random.seed(seed)
torch.random.manual_seed(seed)
torch.manual_seed(seed)
if train_percent + dev_percent >= 1.:
print('ERROR! Train data percentage plus dev data percentage is {}! Make sure the sum is below 1.0!'.format(
train_percent + dev_percent))
exit(1)
BERT_VEC_LENGTH = 1024 # change this to 768 if you use bert-base
deep_model, optimiser = build_model(model_type, BERT_VEC_LENGTH * 2, learn_rate)
if 'gpu' in device:
deep_model.to('cuda')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# read human scores and vectors for summaries/docs, and split the train/dev/test set
sorted_scores = read_sorted_scores()
# read pair anno scores
pair_anno_scores = read_pair_anno_scores()
# train, dev, test, all = parse_split_data(sorted_scores, train_percent, dev_percent)
train, dev, test, all = parse_split_data_balanced(sorted_scores, train_percent, dev_percent)
# without majority preferences
# train_pairs = build_pairs(train)
# dev_pairs = build_pairs(dev)
# test_pairs = build_pairs(test)
# without majority preferences but with pair anno
train_pairs = build_anno_pairs(train, pair_anno_scores)
dev_pairs = build_anno_pairs(dev, pair_anno_scores)
test_pairs = build_anno_pairs(test, pair_anno_scores)
# with majority preferences
# train_pairs = build_pairs_majority_preferences(train, sorted_scores)
# dev_pairs = build_pairs_majority_preferences(dev, sorted_scores)
# test_pairs = build_pairs_majority_preferences(test, sorted_scores)
# with majority preferences and pair anno
# train_pairs = build_anno_pairs_majority_preferences(train, sorted_scores, pair_anno_scores)
# dev_pairs = build_anno_pairs_majority_preferences(dev, sorted_scores, pair_anno_scores)
# test_pairs = build_anno_pairs_majority_preferences(test, sorted_scores, pair_anno_scores)
# build human pair scores for pairs
train_anno = build_human_pair_scores(train_pairs)
dev_anno = build_human_pair_scores(dev_pairs)
test_anno = build_human_pair_scores(test_pairs)
print(len(train_pairs), len(dev_pairs), len(test_pairs))
# read bert vectors
with open('data/doc_summ_bert_vectors.pkl', 'rb') as ff:
all_vec_dic = pickle.load(ff)
pcc_list = []
weights_list = []
for ii in range(epoch_num + 1):
print('\n=====EPOCH {}====='.format(ii))
if ii == 0:
# do not train in epoch 0, just evaluate the performance of the randomly initialized model (sanity check and baseline)
loss_train = pair_train_rewarder(all_vec_dic, train_pairs, deep_model, optimiser, True, batch_size,
device)
else:
            # from epoch 1 on, the model receives the data and learns from it; the reported loss is still the loss computed before this batch of training examples is applied
loss_train = pair_train_rewarder(all_vec_dic, train_pairs, deep_model, optimiser, False, batch_size,
device)
loss_dev = pair_train_rewarder(all_vec_dic, dev_pairs, deep_model, optimiser, True, batch_size, device)
loss_test = pair_train_rewarder(all_vec_dic, test_pairs, deep_model, optimiser, True, batch_size, device)
csv_row = [seed, learn_rate, model_type, len(train_pairs), len(dev_pairs), len(test_pairs), ii, loss_train,
loss_dev, loss_test]
print('--> losses (train,dev,test)', loss_train, loss_dev, loss_test)
# Train-Data only
print("==Train==")
# results_train = test_rewarder(all_vec_dic, train, deep_model, device)
results_train = test_rewarder(all_vec_dic, train_anno, deep_model, device)
for metric in results_train:
print('{}\t{}'.format(metric, np.mean(results_train[metric])))
csv_row.append(np.mean(results_train[metric]))
print("==Dev==")
# results = test_rewarder(all_vec_dic, dev, deep_model, device)
results = test_rewarder(all_vec_dic, dev_anno, deep_model, device)
for metric in results:
print('{}\t{}'.format(metric, np.mean(results[metric])))
csv_row.append(np.mean(results[metric]))
# Test-Data only
print("==Test==")
# results_test = test_rewarder(all_vec_dic, test, deep_model, device)
results_test = test_rewarder(all_vec_dic, test_anno, deep_model, device)
for metric in results_test:
print('{}\t{}'.format(metric, np.mean(results_test[metric])))
csv_row.append(np.mean(results_test[metric]))
writer.writerow(csv_row)
pcc_list.append(np.mean(results['pcc']))
weights_list.append(copy.deepcopy(deep_model.state_dict()))
idx = np.argmax(pcc_list)
best_result = pcc_list[idx]
print('\n======Best results come from epoch no. {}====='.format(idx))
deep_model.load_state_dict(weights_list[idx])
output_pattern = 'batch{}_{}_trainPercent{}_seed{}_lrate{}_{}_epoch{}'.format(
batch_size, train_type, train_percent, seed, learn_rate, model_type, epoch_num
)
test_results = test_rewarder(all_vec_dic, test, deep_model, device,
os.path.join(OUTPUTS_DIR, output_pattern + '_onTest.pdf'))
test_rewarder(all_vec_dic, train, deep_model, device,
os.path.join(OUTPUTS_DIR, output_pattern + '_onTrain.pdf'))
test_rewarder(all_vec_dic, dev, deep_model, device, os.path.join(OUTPUTS_DIR, output_pattern + '_onDev.pdf'))
print('Its performance on the test set is:')
for metric in test_results:
print('{}\t{}'.format(metric, np.mean(test_results[metric])))
model_weight_name = 'pcc{0:.4f}_'.format(np.mean(test_results['pcc']))
model_weight_name += 'seed{}_epoch{}_batch{}_{}_trainPercent{}_lrate{}_{}.model'.format(
seed, epoch_num, batch_size, train_type, train_percent, learn_rate, model_type
)
torch.save(weights_list[idx], os.path.join(MODEL_WEIGHT_DIR, model_weight_name))
print('\nbest model weight saved to: {}'.format(os.path.join(MODEL_WEIGHT_DIR, model_weight_name)))
if __name__ == '__main__':
main(sys.argv)
| 2.234375 | 2 |
config.py | koivunen/connvis | 0 | 12794017 | import argparse
parser = argparse.ArgumentParser(prog="connvis",description='Web-based conntrack that tries to simplify the data for privacy research')
parser.add_argument('--nodnsseed', help='do not seed domains from dnsmasq history',action='store_true')
parser.add_argument('--shell', help='Enable interactive shell',action='store_true')
args = parser.parse_args()
import ipaddress
homenetwork = ipaddress.ip_network('192.168.0.0/24')
homenetwork_router = ipaddress.ip_address('192.168.0.1')
aggregate_google=True # That is a lot of domains
ignored_domains=["osoite.local"] | 2.125 | 2 |
program_synthesis/karel/scripts/eval_refinement.py | kavigupta/program_synthesis | 123 | 12794018 | import collections
import cPickle as pickle
import glob
import itertools
import json
import operator
import os
import re
import sys
from program_synthesis.karel.dataset import dataset
from program_synthesis.karel.dataset import executor
from program_synthesis.karel.dataset.karel_runtime import KarelRuntime
from program_synthesis.karel.models import karel_model
from program_synthesis.common.tools.saver import restore_args
BASE_DIR = ""
with open(BASE_DIR + "text2code-models/karel-sgd-cl1-lr1-lds100k-ldr0.5/report-dev-00100100.jsonl") as f:
baseline_report = []
print(f.readline())
for line in f:
baseline_report.append(json.loads(line))
class Args(object):
model_dir = BASE_DIR + 'program_synthesis-models/karel-lgrl-ref-m123-sgd-cl1-lr0.1-lds100k-ldr0.5'
step = 250100
args = Args()
restore_args(args)
args.word_vocab = ',,/data/karel/word.vocab'
m = karel_model.KarelLGRLRefineModel(args)
batch_processor = m.batch_processor(for_eval=True)
m.args.max_beam_trees = 64
m.args.max_eval_trials = 64
i = 0
result = []
while i < len(baseline_report):
batch = []
while len(batch) < 32 and i < len(baseline_report):
if baseline_report[i]['code']['info']['trees_checked'] == 1:
i += 1
continue
e = dataset.KarelExample.from_dict(baseline_report[i]['example'])
ref_code_sequence = baseline_report[i]['code']['info']['candidates'][0]
e.ref_example = dataset.KarelExample(idx=None, guid=None, code_sequence=ref_code_sequence, input_tests=e.input_tests, tests=e.tests)
batch.append(e)
i += 1
print("Starting batch (%d)..." % i)
res = m.inference(batch_processor(batch))
for example, infer in zip(batch, res):
result.append((example, infer))
# if i > 100:
# break
print(len(result), len(baseline_report))
the_executor = executor.KarelExecutor()
stats = {'total': len(result), 'fixed': 0}
refinement_results = []
for example, infer in result:
if not infer.code_sequence:
continue
correct = True
for test in example.input_tests + example.tests:
try:
log = the_executor.execute(infer.code_sequence, None, test['input'])
if log.result != test['output']:
correct = False
break
except (executor.ExecutorRuntimeException, executor.ExecutorSyntaxException) as e:
correct = False
break
refinement_results.append(correct)
if correct:
stats['fixed'] += 1
print(float(stats['fixed']) / stats['total'], stats['fixed'], stats['total'])
| 1.953125 | 2 |
tests/unit_tests/test_CythonInterfaceMaps.py | HTenkanen/cykhash | 0 | 12794019 | import pyximport;
pyximport.install(setup_args = {"script_args" : ["--force"]},
language_level=3)
import unittest
import uttemplate
import cymapinterfacetester as cyt
from cykhash import Int64to64Map, Int32to32Map, Float64to64Map, Float32to32Map, PyObjectMap
AS_LIST = {'int64' : cyt.as_py_list_int64,
'int32' : cyt.as_py_list_int32,
'float64' : cyt.as_py_list_int64_float64,
'float32' : cyt.as_py_list_int32_float32,
'object' : cyt.as_py_list_pyobject,
}
USE_INT = {'int64' : cyt.use_int64,
'int32' : cyt.use_int32,
'float64' : cyt.use_int64_float64,
'float32' : cyt.use_int32_float32,
'object' : cyt.use_pyobject,
}
USE_FLOAT = {'int64' : cyt.use_float64,
'int32' : cyt.use_float32,
'float64' : cyt.use_float64_float64,
'float32' : cyt.use_float32_float32,
'object' : cyt.use_pyobject,
}
MAP = {'int64' : Int64to64Map,
'int32' : Int32to32Map,
'float64' : Float64to64Map,
'float32' : Float32to32Map,
'object' : PyObjectMap,
}
#just making sure the interface can be accessed:
@uttemplate.from_templates(['int64',
'int32',
'float64',
'float32',
])
class CyMypInterfaceTester(unittest.TestCase):
def template_cimport_use_int(self, map_type):
received=USE_INT[map_type]([1,2,3,4], [5,6,7,8], [2,3])
expected=[6,7]
self.assertEqual(received, expected)
def template_cimport_use_float(self, map_type):
received=USE_FLOAT[map_type]([1,2,3,4], [5.5,6.5,7.5,8.5], [2,3])
expected=[6.5,7.5]
self.assertEqual(received, expected)
def template_as_py_list(self, map_type):
cy_map = MAP[map_type]()
cy_map[3] = 20
lst = AS_LIST[map_type](cy_map)
self.assertEqual(lst, [3,20])
def template_as_py_list_2(self, map_type):
cy_map = MAP[map_type]()
cy_map[3] = 5
cy_map[4] = 6
lst = AS_LIST[map_type](cy_map)
self.assertEqual(set(lst), set([3,4,5,6]))
| 1.929688 | 2 |
application/data_entry_review.py | PaulineLc/OpenDoorData | 0 | 12794020 | <reponame>PaulineLc/OpenDoorData<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 27 17:41:41 2016
@author: <NAME>
"""
import models
import data_entry_functions
import linear_model
def main():
# insert tables into database
tables = ['room', 'User', 'module', 'wifi_log', 'timetable', 'survey', 'regressionModel']
data_entry_functions.createTables(models, tables)
# insert room data into room table in db
data_entry_functions.roomCap(models, 'room', 'room_num', 'room_cap', 'building', 2, 90, 'school of computer science')
data_entry_functions.roomCap(models, 'room', 'room_num', 'room_cap', 'building', 3, 90, 'school of computer science')
data_entry_functions.roomCap(models, 'room', 'room_num', 'room_cap', 'building', 4, 220, 'school of computer science')
# create admin user
data_entry_functions.createAdmin(models, 'User', 'username', 'password', 'email', 'first_name', 'last_name', 'admin', 'password', "<PASSWORD> <EMAIL>", 'Don', 'Blaine')
# set user password
data_entry_functions.setPassword(models, 'User', 'username', 'admin', 'password')
# insert data into database from csv file
file = "cleaned_data/timetable.csv"
data_entry_functions.insertModCode(file, models, 'module', 'module_code', 'instructor', 'user', 'username')
# setting weight to be linear model coef - create function ?
models.regressionModel.create(weight = linear_model.get_linear_coef())
# insert timetable data
data_entry_functions.insertTimetableData(file, models, 'timetable', 'room_id', 'building', 'mod_code', 'event_time', 'reg_stu', 'time')
# insert wifi log data
file = r"cleaned_data/full.csv"
data_entry_functions.insertWifiData(file, models, 'wifi_log', 'room_id', 'event_time', 'assoc_devices', 'auth_devices', 'time')
# insert survey data
file = r"cleaned_data/survey_data.csv"
data_entry_functions.insertSurveyData(file, models, 'survey', 'room_id', 'building', 'event_time', 'occupancy', 'reporter', 'time')
models.db.close()
print ("The database should now be available")
if __name__ == '__main__':
main() | 2.25 | 2 |
examples/roberta/test_load_allcorpus.py | linyongnan/fairseq | 0 | 12794021 | <filename>examples/roberta/test_load_allcorpus.py
from fairseq.data.infinibatch.infinibatch_dataset import extract_number_values, word_to_idx
from fairseq.data import Dictionary
from multiprocessing import cpu_count, Pool
import os
import re
import sys
import json
import glob
import math
import torch
from nltk import word_tokenize
import sentencepiece as spm
alias = ""
spm_tok_files_path = f"/home/{alias}/data/linyong/cu/res/allcorpus160g_num-norm_offline"
dictionary_path = f"/home/{alias}/data/linyong/cu/res/vocab/allcorpus160g_num-norm_64k/dict.txt"
spm_path = f"/home/{alias}/data/linyong/cu/res/vocab/allcorpus160g_num-norm_64k/sp.num-norm.bpe.model"
dictionary = Dictionary.load(dictionary_path)
mask_idx = dictionary.add_symbol('<mask>')
print('| Dictionary: {} types'.format(len(dictionary)))
tokenizer = spm.SentencePieceProcessor(model_file=spm_path)
def test_load_file(file_path):
file = open(file_path, 'r', encoding='utf8')
try:
lines = file.read().strip().split('\n')
except:
print(f"File: {file_path} failed to split into lines.")
file.close()
tokenized_tokens = [line.strip().split(' ') for line in lines]
tokenized_idx = [word_to_idx(dictionary, sent) for sent in tokenized_tokens]
orig_tokens = [torch.LongTensor(item) for item in tokenized_idx]
src_number_token_values = extract_number_values(orig_tokens, dictionary, tokenizer, send_log_value=False)
for sent_i, bon_pos, value in src_number_token_values:
if not math.isfinite(value):
print(f"[transformer_numlm] value {value} is not finite!")
tasks = [file_path for file_path in glob.glob(spm_tok_files_path+'/*.txt.*')]
pool = Pool(cpu_count() - 1)
pool.map(test_load_file, tasks) | 2.203125 | 2 |
Splice/markup_gtfs.py | agladstein/RNA-Seq-in-the-Cloud | 0 | 12794022 | <reponame>agladstein/RNA-Seq-in-the-Cloud
#!/usr/bin/env python3
import csv
import sys
import os
import re
infile = sys.argv[1]
outfile = sys.argv[2]
with open(infile, 'rt') as fi, open(outfile, 'wt') as fo:
tblin = csv.reader(fi, delimiter = '\t')
tblout = csv.writer(fo,
delimiter = '\t',
lineterminator = os.linesep,
quotechar = '\xb6' )
for line in tblin:
if not line[0].startswith('#'):
if 'reference_id' not in line[8]:
tx_id = re.search('transcript_id "STRG.(\d+).\d+"', line[8]).group(1)
if int(tx_id) % 3 == 0:
line[8] = line[8] + ' novel "yes";'
tblout.writerow(line)
| 2.359375 | 2 |
tests/node/test_zmq.py | eric-volz/defichainLibrary | 1 | 12794023 | import pytest
from tests.util import createNode
node = createNode()
@pytest.mark.query
def test_getzmqnotifications(): # 01
zmq = node.zmq.getzmqnotifications()
assert zmq or zmq == []
| 2.109375 | 2 |
digits.py | yogya-ch/pyprograms | 0 | 12794024 | # Prints the digits in a given number
n=int(input("give a number "))
l=[]
while n>0:
a=n%10
l.append(a)
n=n//10
l.reverse()
for i in l:
print(i) | 3.75 | 4 |
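# Alternative sketch (not part of the original exercise): the same digits could be
# printed by iterating over the string form of the number read from input(),
# e.g. `for digit in str(number): print(digit)`; the loop above does it
# arithmetically with % and // instead.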
src/004_join/04/answer.py | negi524/awesomebook | 0 | 12794025 | from pandas._config.config import reset_option
from preprocess.load_data.data_loader import load_hotel_reserve
import pandas as pd
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
def main():
"""全結合処理
顧客ごとに2017年1月〜2017年3月の月間合計利用金額を計算
利用がない日は0とする
日付はチェックイン日付を利用する
"""
customer_tb, hotel_tb, reserve_tb = load_hotel_reserve()
print(reserve_tb)
print(customer_tb)
    # Generate the date (month) master table
month_mst = pd.DataFrame({'year_month': [(date(2017, 1, 1) + relativedelta(months=x)).strftime("%Y%m") for x in range(0, 3)]})
# month_mst['year_month'] = pd.to_datetime(month_mst['year_month'])
    # Prepare a join key with the same value in every row so we can cross join
customer_tb['join_key'] = 0
month_mst['join_key'] = 0
    # Cross join the customer table with the month table
customer_mst = pd.merge(customer_tb[['customer_id', 'join_key']], month_mst, on='join_key')
customer_mst.info()
    # Prepare the year-month join key on the reservation table
reserve_tb['year_month'] = reserve_tb['checkin_date'] \
.apply(lambda x: pd.to_datetime(x).strftime("%Y%m"))
reserve_tb.info()
    # Join with the reservation records and compute the total reservation amount
    # TODO: the join does not take the year into account (it effectively joins on month only)
summary_result = pd.merge(
customer_mst,
reserve_tb[['customer_id', 'year_month', 'total_price']],
on=['customer_id', 'year_month'],
how='left'
).groupby(['customer_id', 'year_month'])['total_price'].sum().reset_index()
    # Convert the total from NaN to 0 for customers/months with no reservation records
summary_result.fillna(0, inplace=True)
print(summary_result.query('customer_id == "c_1"'))
print(reserve_tb.query('customer_id == "c_1"'))
if __name__ == '__main__':
main()
| 3.078125 | 3 |
app/module_load/__init__.py | B02902008/TaipeiWater | 0 | 12794026 | <gh_stars>0
from flask import Blueprint
blue_load = Blueprint('load', __name__)
from . import view | 1.164063 | 1 |
bpmn/utils/string_utils.py | marcelobbfonseca/SFDjango-BPMN | 1 | 12794027 | <reponame>marcelobbfonseca/SFDjango-BPMN<gh_stars>1-10
from re import sub
def snake_case(s):
return '_'.join(
sub('([A-Z][a-z]+)', r' \1',
sub('([A-Z]+)', r' \1',
s.replace('-', ' '))).split()).lower()
| 2.734375 | 3 |
adaboost.py | haidawyl/Mnist | 12 | 12794028 | <filename>adaboost.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.decomposition import PCA
from time import time
import numpy as np
import pandas as pd
import mnist
import roc
if __name__ == "__main__":
    # Load the MNIST dataset and test the AdaBoost classification model
mnistSet = mnist.loadLecunMnistSet()
train_X, train_Y, test_X, test_Y = mnistSet[0], mnistSet[1], mnistSet[2], mnistSet[3]
m, n = np.shape(train_X)
idx = range(m)
np.random.shuffle(idx)
    # Use PCA for dimensionality reduction
# num = 30000
# pca = PCA(n_components=0.9, whiten=True, random_state=0)
# for i in range(int(np.ceil(1.0 * m / num))):
# minEnd = min((i + 1) * num, m)
# sub_idx = idx[i * num:minEnd]
# train_pca_X = pca.fit_transform(train_X[sub_idx])
# print np.shape(train_pca_X)
print "\n**********测试AdaBoostClassifier类**********"
t = time()
# model = GridSearchCV(
# estimator=AdaBoostClassifier(
# base_estimator=DecisionTreeClassifier(splitter='random', max_features=90, max_depth=50, min_samples_split=6,
# min_samples_leaf=3), learning_rate=0.1),
# param_grid={"n_estimators": range(500, 1501, 100)}, cv=3)
    # # Fit the training set
    # model.fit(train_X, train_Y)
    # print "Best parameters: %s, score: %0.2f" % (model.best_params_, model.best_score_)
model = AdaBoostClassifier(
base_estimator=DecisionTreeClassifier(splitter='random', max_features=90, max_depth=50, min_samples_split=6,
min_samples_leaf=3), n_estimators=1200, learning_rate=0.005)
    # Fit the training set
    model.fit(train_X, train_Y)
    # Predict on the training set
    train_Y_hat = model.predict(train_X[idx])
    print "Training set accuracy: ", accuracy_score(train_Y[idx], train_Y_hat)
    # Predict on the test set
    test_Y_hat = model.predict(test_X)
    print "Test set accuracy: ", accuracy_score(test_Y, test_Y_hat)
    print "Total time:", time() - t, "seconds"
    # Plot the ROC curve
n_class = len(np.unique(train_Y))
roc.drawROC(n_class, test_Y, test_Y_hat)
    # Load the CCPP dataset and test the AdaBoost regression model
    data = pd.read_excel("data/CCPP/Folds5x2_pp.xlsx")
    # AT: temperature, V: pressure, AP: humidity, RH: pressure, PE: electrical power output
    # Sample features X
    X = data[['AT', 'V', 'AP', 'RH']]
    # Normalize the data
    X = StandardScaler().fit_transform(X)
    # Sample target Y
    Y = data[['PE']]
    # Split into training and test sets: 70% of the data for training, 30% for testing
train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=0.3, random_state=1)
m, n = np.shape(train_X)
idx = range(m)
np.random.shuffle(idx)
print "\n**********测试AdaBoostRegressor类**********"
t = time()
# model = GridSearchCV(DecisionTreeRegressor(splitter='random'),
# param_grid={'max_depth': range(10, 30, 2), 'min_samples_split': range(11, 31, 2),
# 'min_samples_leaf': range(2, 8, 1)}, cv=5)
# model = GridSearchCV(AdaBoostRegressor(
# base_estimator=DecisionTreeRegressor(splitter='random', max_depth=28, min_samples_split=11, min_samples_leaf=3),
# learning_rate=0.1), param_grid={"n_estimators": range(100, 1001, 100)}, cv=3)
    # # Fit the training set
    # model.fit(train_X, train_Y.values.ravel())
    # print("Best parameters: %s, score: %0.2f" % (model.best_params_, model.best_score_))
model = AdaBoostRegressor(
base_estimator=DecisionTreeRegressor(splitter='random', max_depth=20, min_samples_split=5, min_samples_leaf=3),
n_estimators=800, learning_rate=0.1)
    # Fit the training set
    model.fit(train_X, train_Y.values.ravel())
    # Predict on the test set
    test_Y_pred = model.predict(test_X)
    print "Test set MSE:", mean_squared_error(test_Y, test_Y_pred)
    print "Test set RMSE:", np.sqrt(mean_squared_error(test_Y, test_Y_pred))
    print "Total time:", time() - t, "seconds"
| 2.625 | 3 |
splunk_add_on_ucc_framework/uccrestbuilder/builder.py | artemrys/addonfactory-ucc-generator | 16 | 12794029 | #
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import os.path as op
from .rest_conf import RestmapConf, WebConf
__all__ = ["RestBuilderError", "RestBuilder"]
class RestBuilderError(Exception):
pass
class _RestBuilderOutput:
readme = "README"
default = "default"
bin = "bin"
def __init__(self, path, product):
self._path = path
self._product = product
self._root_path = op.abspath(self._path)
if not op.isdir(self._root_path):
os.makedirs(self._root_path)
self._content = {}
def put(self, subpath, file_name, content):
path = op.join(self._root_path, subpath)
if not op.isdir(path):
os.makedirs(path)
full_name = op.join(path, file_name)
if full_name not in self._content:
self._content[full_name] = []
self._content[full_name].append(content)
def save(self):
for full_name, contents in list(self._content.items()):
full_content = "\n\n".join(contents)
with open(full_name, "w") as f:
f.writelines(full_content)
class RestBuilder:
def __init__(self, schema, handler, output_path, *args, **kwargs):
"""
:param schema:
:param schema: RestSchema
:param handler:
:param output_path:
:param args:
:param kwargs:
"""
self._schema = schema
self._handler = handler
self._output_path = output_path
self._args = args
self._kwargs = kwargs
self.output = _RestBuilderOutput(
self._output_path,
self._schema.product,
)
@property
def restmap_admin(self):
return self._schema.namespace
@property
def restmap_admin_externals(self):
return RestmapConf.admin_externals(self._schema.endpoints)
def build(self):
for endpoint in self._schema.endpoints:
# If the endpoint is oauth, which is for getting accesstoken. Conf file entries should not get created.
if endpoint._name != "oauth":
if endpoint._name == "settings":
self.output.put(
self.output.default,
endpoint.conf_name + ".conf",
endpoint.generate_default_conf(),
)
self.output.put(
self.output.readme,
endpoint.conf_name + ".conf.spec",
endpoint.generate_spec(),
)
# Add data input of self defined conf to inputs.conf.spec
if endpoint._entities[0] and endpoint._entities[0]._conf_name:
lines = [
"[" + endpoint._name + "://<name>]",
"placeholder = placeholder",
]
self.output.put(
self.output.readme, "inputs.conf.spec", "\n".join(lines)
)
self.output.put(
self.output.bin,
endpoint.rh_name + ".py",
endpoint.generate_rh(self._handler),
)
self.output.put(
self.output.default,
"restmap.conf",
RestmapConf.build(
self._schema.endpoints,
self._schema.namespace,
self._schema.admin_match,
),
)
self.output.put(
self.output.default,
"web.conf",
WebConf.build(self._schema.endpoints),
)
self.output.save()
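

# Hedged usage sketch: `schema` must be a RestSchema-like object and `handler`
# the handler module path expected by endpoint.generate_rh(); both are
# placeholders here, not defined in this module.
def _example_build(schema, handler, output_path="./output"):
    """Illustrative only: writes README/default/bin artifacts under output_path."""
    builder = RestBuilder(schema, handler, output_path)
    builder.build()
    return builder.restmap_admin, builder.restmap_admin_externals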
| 2.078125 | 2 |
bluechannel/customers/urls.py | davemerwin/blue-channel | 4 | 12794030 | from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^$', 'customers.views.subscribe', name='subscribe'),
)
| 1.203125 | 1 |
ineedtiles.py | IrupeCanet/psychic-goggles | 0 | 12794031 | <filename>ineedtiles.py
# This program calculates how many tiles you will need
# when tilling a wall or floor (in m2).
length = 3.5
width = 2.3
area = length * width
needed = area * 1.05
print("You need", needed, "tiles in metres squared")
| 3.765625 | 4 |
custom_components/solark/config_flow.py | poldim/HA-solark-PV | 11 | 12794032 | <reponame>poldim/HA-solark-PV<filename>custom_components/solark/config_flow.py<gh_stars>10-100
import re
from urllib.parse import urlparse
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant, callback
import voluptuous as vol
from .const import DEFAULT_NAME, DEFAULT_PORT, DEFAULT_SCAN_INTERVAL, DOMAIN
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME, default=DEFAULT_NAME): str,
vol.Required(CONF_HOST, default='localhost'): str,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): int,
}
)
@callback
def solark_modbus_entries(hass: HomeAssistant):
"""Return the hosts already configured."""
return {
entry.data[CONF_HOST] for entry in hass.config_entries.async_entries(DOMAIN)
}
def host_valid(netloc):
parsed=urlparse(f'//{netloc}')
try:
        # If it is not a URL it might be a serial port (e.g. a COM device name).
if (parsed.port is None) and ((parsed.hostname is None) or (parsed.hostname[0:3] == "com" )):
return True
        # The hostname made no sense; fall through to the error return below.
except:
return False
return True
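

# Brief illustrative checks (hypothetical host strings) for host_valid above:
# a host:port netloc and a serial-style name pass, while an unparseable port fails.
def _host_valid_examples():
    assert host_valid("192.168.1.20:502")       # host:port form
    assert host_valid("com3")                   # serial-port style name
    assert not host_valid("myhost:notaport")    # invalid port raises -> False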
class SolArkModbusConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""SolArk Modbus configflow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def _host_in_configuration_exists(self, host) -> bool:
"""Return True if host exists in configuration."""
if host in solark_modbus_entries(self.hass):
return True
return False
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
host = user_input[CONF_HOST]
if self._host_in_configuration_exists(host):
errors[CONF_HOST] = "already_configured"
elif not host_valid(host):
errors[CONF_HOST] = "invalid host IP"
else:
await self.async_set_unique_id(user_input[CONF_HOST])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
| 2.453125 | 2 |
pacote dawload/projetos progama em Python/desafio071 Simulador de caixa eletronico.py | wagnersistemalima/Mundo-2-Python-Curso-em-Video | 1 | 12794033 | #Desafio Simulador de caixa eletronico
# Crie um progama que simule o funcionamento de um caixaeletrônico. No inicio, pergunte ao usuario
# qual será o valor a ser sacado (número inteiro) e o progama vai informar quantas cedulas de cada valor
# serão entregues. OBS: Considere o caixa possui cedulas de R$50,00 / R$20,00 / R$10,00 / R$1,00
print('{:-^40}'.format('ATM'))
saque = int(input('What amount do you want to withdraw? R$'))
montante = saque  # amount still to be dispensed
maior_cedula = 50  # current bill denomination
cont_cedulas = 0  # number of bills used for the current denomination
status = True
while status:
    if montante >= maior_cedula:  # the remaining amount still covers this bill; keep dispensing it while possible
montante = montante - maior_cedula
cont_cedulas = cont_cedulas + 1
    else:  # move from 50s down to 20s (and so on): when no more of this bill fits, the remainder is dispensed with the next denomination
        if cont_cedulas > 0:  # skip denominations with zero bills
            print(f'{cont_cedulas} bill(s) of R${maior_cedula}')
if maior_cedula == 50:
maior_cedula = 20
elif maior_cedula == 20:
maior_cedula = 10
elif maior_cedula == 10:
maior_cedula = 1
        cont_cedulas = 0  # reset the bill counter before moving on to the next denomination
    if montante == 0:  # when the remaining amount reaches zero, stop
status = False
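

# Alternative sketch (not part of the original challenge): the same greedy
# breakdown expressed with divmod over the available denominations.
def bill_breakdown(amount, denominations=(50, 20, 10, 1)):
    """Return a {denomination: count} dict for a greedy withdrawal of `amount`."""
    result = {}
    for bill in denominations:
        count, amount = divmod(amount, bill)
        if count:
            result[bill] = count
    return result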
| 3.953125 | 4 |
src/scruples/scripts/analyze/resource/human_performance.py | allenai/scruples | 29 | 12794034 | """Estimate human performance for the scruples resource."""
import json
import logging
import click
from ....baselines.metrics import METRICS
logger = logging.getLogger(__name__)
# main function
@click.command()
@click.argument(
'split_path',
type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument(
'output_path',
type=click.Path(exists=False, file_okay=True, dir_okay=False))
def human_performance(
split_path: str,
output_path: str
) -> None:
"""Estimate human performance on the scruples resource.
Read in the split from SPLIT_PATH, then estimate human performance
metrics and write them to OUTPUT_PATH.
Human performance is computed by comparing the majority vote label
of the human performance annotators to the majority vote label of
the gold annotators.
"""
logger.info('Computing human performance.')
human_preds = []
gold_labels = []
with click.open_file(split_path, 'r') as split_file:
for ln in split_file:
row = json.loads(ln)
human_preds.append(row['human_perf_label'])
gold_labels.append(row['gold_label'])
with open(output_path, 'w') as metrics_file:
json.dump({
key: metric(
y_true=gold_labels,
y_pred=human_preds)
for key, (_, metric, scorer_kwargs) in METRICS.items()
if not scorer_kwargs['needs_proba']
}, metrics_file)
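

# Usage sketch (hypothetical paths; the actual CLI wiring depends on the package's
# entry points). The click command can also be invoked programmatically:
#
#     human_performance(["dev.scruples.jsonl", "human-perf-metrics.json"],
#                       standalone_mode=False)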
| 2.765625 | 3 |
tests/unit/resources/test_stream_utils.py | ebit-wise/pipelinewise-target-bigquery | 0 | 12794035 | import unittest
from datetime import datetime
from target_bigquery import stream_utils
class TestStreamUtils(unittest.TestCase):
"""
Unit Tests
"""
def test_add_metadata_values_to_record(self):
"""Test adding metadata"""
dt = "2017-11-20T16:45:33.000Z"
record = { "type": "RECORD", "stream": "foo", "time_extracted": dt, "record": {"id": "2"} }
result = stream_utils.add_metadata_values_to_record(record)
self.assertEqual(result.get("id"), "2")
self.assertEqual(result.get("_sdc_extracted_at"), datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S.%fZ'))
extra_attrs = ['_sdc_batched_at', '_sdc_deleted_at']
for attr in extra_attrs:
self.assertTrue(attr in result)
def test_add_metadata_values_to_record_when_no_time_extracted(self):
"""Test adding metadata when there's no time extracted in the record message """
record = { "type": "RECORD", "stream": "foo", "record": {"id": "2"} }
dt = datetime.now()
result = stream_utils.add_metadata_values_to_record(record)
self.assertEqual(result.get("id"), "2")
self.assertGreaterEqual(result.get("_sdc_extracted_at"), dt)
extra_attrs = ['_sdc_extracted_at', '_sdc_batched_at', '_sdc_deleted_at']
for attr in extra_attrs:
self.assertTrue(attr in result)
| 2.734375 | 3 |
settings.py | GlycerinLOL/Hwa_autobuy | 0 | 12794036 | <reponame>GlycerinLOL/Hwa_autobuy<filename>settings.py
URL = "https://www.huahuacomputer.com.tw/products/gigabyte-%E6%8A%80%E5%98%89-aorus-15g-yb-%E6%A9%9F%E6%A2%B0%E8%BB%B8%E9%9B%BB%E7%AB%B6%E7%AD%86%E9%9B%BB-1"
DRIVER_PATH = "chromedriver.exe"
CHROME_PATH = r"--user-data-dir=C:\\Users\brian\AppData\\Local\\Google\\Chrome\\User Data\Default" # 可透過網址列輸入 chrome://version/ 找到
### Only for Mac ###
# DRIVER_PATH = "/usr/local/bin/chromedriver"
### Only for Mac (End) ###
# Note: the values below are confidential personal credentials; handle them carefully and do not upload them to any public platform
ACC = "<EMAIL>"
PWD = "<PASSWORD>"
| 1.976563 | 2 |
release/stubs.min/Autodesk/Revit/DB/__init___parts/ElementStructuralTypeFilter.py | YKato521/ironpython-stubs | 0 | 12794037 | <reponame>YKato521/ironpython-stubs<filename>release/stubs.min/Autodesk/Revit/DB/__init___parts/ElementStructuralTypeFilter.py
class ElementStructuralTypeFilter(ElementQuickFilter, IDisposable):
"""
A filter used to find elements matching a structural type.
ElementStructuralTypeFilter(structuralType: StructuralType,inverted: bool)
ElementStructuralTypeFilter(structuralType: StructuralType)
"""
def Dispose(self):
""" Dispose(self: ElementFilter,A_0: bool) """
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: ElementFilter,disposing: bool) """
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, structuralType, inverted=None):
"""
__new__(cls: type,structuralType: StructuralType,inverted: bool)
__new__(cls: type,structuralType: StructuralType)
"""
pass
StructuralType = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The structural type.
Get: StructuralType(self: ElementStructuralTypeFilter) -> StructuralType
"""
| 2.015625 | 2 |
crossbox/tests/test_session.py | acascarla/crossbox | 0 | 12794038 | <reponame>acascarla/crossbox
import datetime
from http import HTTPStatus
from freezegun import freeze_time
from django.urls import reverse
from crossbox.tests.mixins import BaseTestCase
from crossbox.tests.tools import with_login, create_session
from crossbox.models.day import Day
from crossbox.models.hour import Hour
from crossbox.models.track import Track
from crossbox.models.session import Session
from crossbox.models.session_type import SessionType
from crossbox.models.week_template import WeekTemplate
from crossbox.models.capacity_limit import CapacityLimit
from crossbox.models.session_template import SessionTemplate
from crossbox.admin.session import SessionAdmin, SessionAdminFilter
class SessionsCase(BaseTestCase):
fixtures = [
'users', 'capacity_limits', 'session_types', 'tracks', 'hours', 'days',
'week_templates',
]
@with_login()
def test_change_session_type(self):
hour = Hour(hour=datetime.time(0, 0))
hour.save()
day = datetime.date(year=2019, month=1, day=1)
session = Session(
date=day,
hour=hour,
session_type=SessionType.objects.get(pk=1),
capacity_limit=CapacityLimit.objects.get(pk=1),
track=Track.objects.get(pk=1),
)
session.save()
self._session_view_test(
session_id=session.id,
status_code_expected=HTTPStatus.OK,
result_expected={'pk': 2, 'name': 'OPEN'},
)
self._session_view_test(
session_id=session.id,
status_code_expected=HTTPStatus.OK,
result_expected={'pk': 3, 'name': 'ESTIRAMIENTOS'},
)
self._session_view_test(
session_id=session.id,
status_code_expected=HTTPStatus.OK,
result_expected={'pk': 1, 'name': 'WOD'},
)
self._session_view_test(
session_id=session.id,
status_code_expected=HTTPStatus.OK,
result_expected={'pk': 2, 'name': 'OPEN'},
)
@with_login()
@freeze_time('2020-01-1')
def test_session_template_view_context_data_hours(self):
pass # TODO
@with_login()
@freeze_time('2020-01-1')
def test_session_template_view_context_data_days(self):
pass # TODO
@with_login()
@freeze_time('2020-01-1')
def test_session_template_view_context_data_weeks(self):
response = self.client.get(path=reverse('session-template'))
weeks = response.context_data['weeks']
self.assertEqual(len(weeks), 52)
self.assertEqual(weeks[0], 'Lunes 30/12/2019 - Semana 1 (actual)')
self.assertEqual(weeks[51], 'Lunes 21/12/2020 - Semana 52')
@with_login()
@freeze_time('2020-01-1')
def test_session_template_view_context_data_week_templates(self):
pass # TODO
@with_login()
@freeze_time('2020-01-1')
def test_session_template_view_context_data_current_week_template(self):
pass # TODO
@with_login()
@freeze_time('2020-01-1')
def test_session_template_view_context_data_tracks(self):
pass # TODO
@with_login()
@freeze_time('2020-01-1')
def test_session_template_view_context_data_current_track(self):
pass # TODO
@with_login()
@freeze_time('2020-01-1')
def test_session_template_view_context_data_capacity_limits(self):
response = self.client.get(path=reverse('session-template'))
capacity_limits = response.context_data['capacity_limits']
self.assertEqual(
{cl.pk for cl in capacity_limits},
{cl.pk for cl in CapacityLimit.objects.all()}
)
@with_login()
def test_change_session_type_no_session(self):
response = self.client.put(
path=reverse('change_session_type', args=[13371337]))
self.assertEquals(response.status_code, HTTPStatus.NOT_FOUND)
@with_login()
@freeze_time('2020-01-01')
def test_change_session_type_db_types(self):
"""Check types are taken from database"""
SessionType.objects.create(name='new_type')
session = create_session() # session_type WOD
path = reverse('change_session_type', args=[session.pk])
_ = self.client.put(path=path) # session_type OPEN
_ = self.client.put(path=path) # session_type ESTIRAMIENTOS
response = self.client.put(path=path)
self.assertEquals(
response.json()['session_type'], {'pk': 4, 'name': 'new_type'}
)
@with_login()
def test_gen_sessions_invalid_post_params(self):
path = reverse('generate-sessions')
kwargs = {
'page': None,
'week_template': 1,
'track': 1,
'capacity_limit': 1,
}
with self.assertRaises(Exception):
self.client.post(path=path, data=kwargs)
kwargs['page'] = 1
kwargs['week_template'] = None
with self.assertRaises(Exception):
self.client.post(path=path, data=kwargs)
kwargs['week_template'] = 1
kwargs['track'] = None
with self.assertRaises(Exception):
self.client.post(path=path, data=kwargs)
kwargs['track'] = 1
kwargs['capacity_limit'] = None
with self.assertRaises(Exception):
self.client.post(path=path, data=kwargs)
kwargs['capacity_limit'] = 1
self.client.post(path=path, data=kwargs) # no raise
@with_login()
@freeze_time('2020-01-01')
def test_gen_sessions_delete_same_track_and_same_week_sessions(self):
track = Track.objects.get(pk=1)
other_track = Track.objects.get(pk=2)
session_same_week = Session.objects.create(
date=datetime.date(2020, 1, 1),
hour=Hour.objects.get(pk=1),
session_type=SessionType.objects.get(pk=1),
capacity_limit=CapacityLimit.objects.get(pk=1),
track=track,
)
Session.objects.create( # other week, same track
date=datetime.date(2020, 1, 10),
hour=Hour.objects.get(pk=1),
session_type=SessionType.objects.get(pk=1),
capacity_limit=CapacityLimit.objects.get(pk=1),
track=track,
)
Session.objects.create( # same week, other track
date=datetime.date(2020, 1, 1),
hour=Hour.objects.get(pk=1),
session_type=SessionType.objects.get(pk=1),
capacity_limit=CapacityLimit.objects.get(pk=1),
track=other_track,
)
self.assertEquals(Session.objects.count(), 3)
path = reverse('generate-sessions')
kwargs = {
'page': 0,
'week_template': 1,
'track': track.pk,
'capacity_limit': 1,
}
self.client.post(path=path, data=kwargs)
self.assertEquals(Session.objects.count(), 2)
with self.assertRaises(Session.DoesNotExist):
Session.objects.get(pk=session_same_week.pk)
@with_login()
@freeze_time('2020-01-01')
def test_gen_sessions_new_sessions_for_that_week(self):
week_template = WeekTemplate.objects.get(pk=1)
session = create_session()
SessionTemplate.objects.create(
day=Day.objects.get(pk=session.date.weekday() + 1),
hour=session.hour,
week_template=week_template,
capacity_limit=session.capacity_limit,
)
path = reverse('generate-sessions')
kwargs = {
'page': 0,
'week_template': week_template.pk,
'track': session.track.pk,
'capacity_limit': session.capacity_limit.pk,
}
self.assertEquals(Session.objects.count(), 1)
self.client.post(path=path, data=kwargs)
self.assertEquals(Session.objects.count(), 1)
with self.assertRaises(Session.DoesNotExist):
Session.objects.get(pk=session.pk)
new_session = Session.objects.all()[0]
self.assertEquals(new_session.date, session.date)
self.assertEquals(new_session.hour, session.hour)
self.assertEquals(new_session.session_type, session.session_type)
self.assertEquals(new_session.capacity_limit, session.capacity_limit)
self.assertEquals(new_session.track, session.track)
@with_login()
def test_gen_sessions_redirect_created_week_page(self):
page = 5
path = reverse('generate-sessions')
kwargs = {
'page': page,
'week_template': 1,
'track': 1,
'capacity_limit': 1,
}
response = self.client.post(path=path, data=kwargs)
self.assertEquals(response.url, f'/reservation/?page={page}')
self.assertEquals(response.status_code, 302)
def _session_view_test(
self, session_id, status_code_expected, result_expected
):
response = self.client.put(
path=reverse('change_session_type', args=[session_id]))
self.assertEquals(response.status_code, status_code_expected)
self.assertEquals(response.json()['session_type'], result_expected)
class SessionAdminFilterCase(BaseTestCase):
fixtures = ['capacity_limits', 'session_types', 'tracks']
@freeze_time('2020-02-1')
def test_queryset_depending_on_filter_selected(self):
hour = Hour(hour=datetime.time(0, 0))
hour.save()
kwargs = {
'hour': hour,
'session_type': SessionType.objects.get(pk=1),
'capacity_limit': CapacityLimit.objects.get(pk=1),
'track': Track.objects.get(pk=1),
}
day_jan = datetime.date(year=2020, month=1, day=1)
day_feb = datetime.date(year=2020, month=2, day=1)
day_mar = datetime.date(year=2020, month=3, day=1)
Session.objects.bulk_create([
Session(date=day_jan, **kwargs),
Session(date=day_feb, **kwargs),
Session(date=day_mar, **kwargs),
])
session_filter = SessionAdminFilter(None, {}, Session, SessionAdmin)
session_filter.used_parameters['filter'] = None
from_this_week_sessions = session_filter.queryset(
None, Session.objects.all())
self.assertEquals(from_this_week_sessions[0].date, day_feb)
self.assertEquals(from_this_week_sessions[1].date, day_mar)
self.assertEquals(from_this_week_sessions.count(), 2)
session_filter.used_parameters['filter'] = 'past'
past_sessions = session_filter.queryset(None, Session.objects.all())
self.assertEquals(past_sessions[0].date, day_jan)
self.assertEquals(past_sessions.count(), 1)
session_filter.used_parameters['filter'] = 'all_desc'
all_desc_sessions = session_filter.queryset(
None, Session.objects.all())
self.assertEquals(all_desc_sessions[0].date, day_mar)
self.assertEquals(all_desc_sessions[1].date, day_feb)
self.assertEquals(all_desc_sessions[2].date, day_jan)
session_filter.used_parameters['filter'] = 'all_asc'
all_asc_sessions = session_filter.queryset(None, Session.objects.all())
self.assertEquals(all_asc_sessions[0].date, day_jan)
self.assertEquals(all_asc_sessions[1].date, day_feb)
self.assertEquals(all_asc_sessions[2].date, day_mar)
| 2.03125 | 2 |
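For orientation, the behaviour exercised by the tests above (a PUT to change_session_type advances the session to the next SessionType by primary key, wraps around, and returns 404 for unknown ids) could be implemented roughly as in the sketch below. This is illustrative only, not the actual crossbox view; the import path for Session and SessionType is assumed.

# Illustrative sketch only; not the real crossbox implementation.
from http import HTTPStatus
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
# from crossbox.models import Session, SessionType   # import path assumed

def change_session_type(request, pk):
    session = get_object_or_404(Session, pk=pk)        # unknown ids -> 404, as the tests expect
    types = list(SessionType.objects.order_by('pk'))   # taken from the DB, so newly created types join the cycle
    nxt = types[(types.index(session.session_type) + 1) % len(types)]
    session.session_type = nxt
    session.save()
    return JsonResponse(
        {'session_type': {'pk': nxt.pk, 'name': nxt.name}},
        status=HTTPStatus.OK,
    )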
TOV_mmdetection/mmdet/models/point/dense_heads/__init__.py | ucas-vg/PointTinyBenchmark | 10 | 12794039 | from .cpr_head import CPRHead
from .p2p_head import P2PHead
__all__ = [
'CPRHead', 'P2PHead',
]
| 1.078125 | 1 |
channels/hokutonoken.py | sodicarus/channels | 0 | 12794040 | # -*- coding: utf-8 -*-
# StreamOnDemand Community Edition - Kodi Addon
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel for <NAME>
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ------------------------------------------------------------
import re
from core import httptools
from platformcode import logger
from core import scrapertools
from core import servertools
from core.item import Item
__channel__ = "hokutonoken"
def mainlist(item):
logger.info("[hokutonoken.py] mainlist")
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Hokuto no Ken - Prima Serie[/COLOR]",
action="episodi",
url="http://pastebin.com/BUqD13hb",
thumbnail="http://i.imgur.com/MGkqu7c.jpg",
fanart="http://fullhdwp.com/images/wallpapers/Group-fist-of-the-north-star-wallpaper-.jpg"),
Item(channel=__channel__,
title="[COLOR azure]Hokuto no Ken - Seconda Serie[/COLOR]",
action="episodi",
url="http://pastebin.com/mHXQRBxZ",
thumbnail="http://i159.photobucket.com/albums/t123/Janthem/hnk2.jpg",
fanart="http://fullhdwp.com/images/wallpapers/Group-fist-of-the-north-star-wallpaper-.jpg")]
return itemlist
def episodi(item):
logger.info("hokutonoken.py episodi")
itemlist = []
# Downloads page
data = httptools.downloadpage(item.url).data
# Extracts the entries
patron = '><br>(.*?)<a href="(.*?)" target="_blank">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(
Item(channel=__channel__,
action="findvid",
title=scrapedtitle,
thumbnail=item.thumbnail,
url=scrapedurl))
return itemlist
def findvid(item):
logger.info("[pastebin.py] findvideos")
# Downloads page
data = item.url
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
return itemlist
| 2.03125 | 2 |
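As a standalone illustration of the scraping step in episodi(), the regex can be exercised against a made-up HTML fragment; the fragment below is invented for the example, not taken from the real pastebin pages.

import re

sample = '><br>Episodio 01 <a href="http://example.com/ep01" target="_blank">link</a>'
patron = '><br>(.*?)<a href="(.*?)" target="_blank">'
for scrapedtitle, scrapedurl in re.compile(patron, re.DOTALL).findall(sample):
    print(scrapedtitle.strip(), scrapedurl)   # Episodio 01 http://example.com/ep01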
sns_lambda_update_ssl_rule/functions/update_ssl_rule.py | GMADLA/terraform-aws-ecs-web-app | 0 | 12794041 | import os
import json, boto3
def lambda_handler(event, context):
print("Trigger Event: ")
print(event)
region = os.environ['REGION']
elbv2_client = boto3.client('elbv2', region_name=region)
available_target_groups = os.environ['AVAILABLE_TARGET_GROUPS']
arr_available_target_groups = available_target_groups.split(',')
# Get HTTP Target Group.
http_listener_arn = os.environ['HTTP_LISTENER_ARN']
http_listener = elbv2_client.describe_rules( ListenerArn=http_listener_arn)
http_target_group_arn = get_current_http_target_group(http_listener['Rules'], arr_available_target_groups)
if http_target_group_arn==False:
print("Could not identify the target arn")
return False
print("Current HTTP target group: ")
print(http_target_group_arn)
# Get HTTPS listener rules.
https_listener_arn = os.environ['SSL_LISTENER_ARN']
https_listener = elbv2_client.describe_rules(ListenerArn=https_listener_arn)
https_listener_rules = https_listener['Rules']
print("Current HTTPS target group: ")
https_target_group_arn = get_current_http_target_group(https_listener['Rules'], arr_available_target_groups)
print(https_target_group_arn)
results = {}
i = 0
while i < len(https_listener_rules):
# Skip default rule
if https_listener_rules[i]['IsDefault']==True:
i +=1
continue
actions = https_listener_rules[i]['Actions']
actions, modify = process_actions(actions, http_target_group_arn, arr_available_target_groups)
if modify==1:
print("Updating SSL listener rules..")
rule_arn = https_listener_rules[i]['RuleArn']
results[rule_arn] = modify_rules(elbv2_client, rule_arn, actions)
i +=1
# For ECS After Allow Test Traffic hook
print(results)
send_codedeploy_validation_status(event, results)
return results
# Returns the current B/G target group from a list of lister rules.
def get_current_http_target_group(http_listener_rules, arr_available_target_groups):
i=0
while i < len(http_listener_rules):
# Continue if default listener rule.
if http_listener_rules[i]['IsDefault']==True:
i +=1
continue
actions = http_listener_rules[i]['Actions']
n=0
while n<len(actions):
try:
for tg in actions[n]['ForwardConfig']['TargetGroups']:
                    if tg['TargetGroupArn'] in arr_available_target_groups and (tg['Weight'] == 100 or tg['Weight'] == 1):
return tg['TargetGroupArn']
except Exception as e:
print(e)
n +=1
i +=1
return False
def process_actions(actions, http_target_group_arn, arr_available_target_groups):
modify = 0
for ak, action in enumerate(actions):
try:
if action['Type'] == "forward" and check_target_update(action['TargetGroupArn'], arr_available_target_groups):
actions[ak]['TargetGroupArn']=http_target_group_arn
for tgk, target_group in enumerate(action['ForwardConfig']['TargetGroups']):
if check_target_update(target_group['TargetGroupArn'], arr_available_target_groups):
actions[ak]['ForwardConfig']['TargetGroups'][tgk]['TargetGroupArn']=http_target_group_arn
modify=1
except Exception as e:
print(e)
return (actions), modify
# Check whether the old target group is one of the available (blue/green) target groups.
# Note: the listener rule may already have been updated at the initial Ready stage,
# so do not compare old and new targets; always update to match the HTTP listener
# when the target is one of the available groups.
def check_target_update(old_target_group, arr_available_target_groups):
return old_target_group in arr_available_target_groups
# Sends notification to CodeDeploy on hook status...
def send_codedeploy_validation_status(event, results):
region = os.environ['REGION']
codedeploy_client = boto3.client('codedeploy', region_name=region)
status = ('Failed', 'Succeeded')[len(results) > 0]
print(status)
try:
return codedeploy_client.put_lifecycle_event_hook_execution_status(
deploymentId=event['DeploymentId'],
lifecycleEventHookExecutionId=event['LifecycleEventHookExecutionId'],
status=status
)
except Exception as e:
print("Recoverable Exception: ")
print(e)
return False
def modify_rules(elbv2_client, arn, actions):
try:
return elbv2_client.modify_rule(
RuleArn=arn,
Actions=actions
)
except Exception as e:
print(e)
| 2.203125 | 2 |
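A hypothetical local smoke test for the handler above is sketched below. The ARNs, region and deployment ids are placeholders; in production the event comes from the CodeDeploy AfterAllowTestTraffic lifecycle hook and the boto3 calls reach real AWS resources, so the final call is left commented out.

import os

os.environ.update({
    'REGION': 'eu-west-1',                                   # placeholder region
    'AVAILABLE_TARGET_GROUPS': 'arn:tg-blue,arn:tg-green',   # placeholder blue/green target group ARNs
    'HTTP_LISTENER_ARN': 'arn:http-listener',                # placeholder listener ARNs
    'SSL_LISTENER_ARN': 'arn:https-listener',
})

fake_event = {
    'DeploymentId': 'd-XXXXXXXXX',                           # placeholder CodeDeploy deployment id
    'LifecycleEventHookExecutionId': 'hook-execution-id',    # placeholder hook execution id
}

# lambda_handler(fake_event, None)   # would hit real ELBv2/CodeDeploy APIs; run only against a test account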
src/get_demographics.py | pthangaraj/Stroke-Phenotyping | 0 | 12794042 | #By <NAME> (<EMAIL>), <NAME> Lab at Columbia University Irving Medical Center
#Part of manuscript: "Comparative analysis, applications, and interpretation of electronic health record-based stroke phenotyping methods"
#This script prints out the demographics of all of the models' training sets
import numpy as np
import sys
import scipy as sp
from scipy.sparse import csr_matrix
from collections import defaultdict
demo_counts=defaultdict(lambda: defaultdict(list))
demo_freqs=defaultdict(lambda: defaultdict(list))
cases=['G','G','G','G','G','T','T','T','T','T','C','C','C','C','C']
controls=['N','I','C','CI','R','N','I','C','CI','R','N','I','C','CI','R']
for i in range(0,15):
case=cases[i]
control=controls[i]
filename_labels={training_set labels filename}+case+control+'.npy'
labels=np.load(filename_labels)
filename_e2i={events2cols filename}+case+control+'.npy'
e2c=np.load(filename_e2i)
e2c=e2c[()]
filename_all={demographics events filenames} + case + control + '.npy'
demo_events=np.load(filename_all)
filename_mtrain={training_set sparse matrix filename} + case + control + '.npz'
matrix=sp.sparse.load_npz(filename_mtrain)
demos_count=defaultdict(lambda: defaultdict(int))
case_len=np.sum(labels)
ctrl_len=len(labels)-case_len
print case,control,case_len,ctrl_len
for t in range(0,len(labels)):
label=labels[t]
for d in demo_events:
col=e2c[d]
if d=='American Indian' or d=='Pacific Islander' or d=='Asian':
demos_count[label]['other']+=matrix[t,col]
elif d=='U':
demos_count[label]['Unknown']+=matrix[t,col]
else:
demos_count[label][d]+=matrix[t,col]
for d in demos_count[1].keys():
demo_counts[case+'ca'][d].append(demos_count[1][d])
demo_counts[control+'co'][d].append(demos_count[0][d])
demo_freqs[case+'ca'][d].append(demos_count[1][d]/float(case_len))
demo_freqs[control+'co'][d].append(demos_count[0][d]/float(ctrl_len))
for c in demo_counts.keys():
for d in demo_counts[c]:
print c,d,demo_counts[c][d],demo_freqs[c][d]
| 2.046875 | 2 |
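The counting pattern in the loop above (summing sparse-matrix columns per demographic, split by case/control label) can be reproduced on a toy example; the data below is invented purely for illustration.

import numpy as np
from scipy.sparse import csr_matrix
from collections import defaultdict

labels = np.array([1, 0, 1])                     # 1 = case, 0 = control
e2c = {'F': 0, 'M': 1}                           # demographic event -> column index
matrix = csr_matrix(np.array([[1, 0],
                              [0, 1],
                              [1, 0]]))

demos_count = defaultdict(lambda: defaultdict(int))
for row, label in enumerate(labels):
    for demo, col in e2c.items():
        demos_count[label][demo] += matrix[row, col]

print(dict(demos_count[1]))                      # cases:    {'F': 2, 'M': 0}
print(dict(demos_count[0]))                      # controls: {'F': 0, 'M': 1}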
apis/worldtime.py | CantSayIHave/OllieBotCore_v3 | 4 | 12794043 | # worldtime module by CantSayIHave
# Created 2018/01/12
#
# Fetch time and date from a location
# Uses Google Geocoding API and Google Time Zone API
import aiohttp
import time
from datetime import datetime
API_GEOCODE = 'https://maps.googleapis.com/maps/api/geocode/json?'
API_TIMEZONE = 'https://maps.googleapis.com/maps/api/timezone/json?'
allowed_chars = [',', '%', '+', '-']
class Location:
def __init__(self, address: str, lat: int = None, long: int = None):
self.address = address
self.latitude = lat
self.longitude = long
class Time:
def __init__(self, time: datetime, timezone_id: str, timezone_name: str):
self.time = time
self.timezone_id = timezone_id
self.timezone_name = timezone_name
class WorldTime:
def __init__(self, key: str):
self.key = key
async def get_location(self, query: str) -> Location:
args = {'address': self.query_encode(query),
'key': self.key}
url = API_GEOCODE + self.param_encode(args)
search = await self.api_get(url)
if search:
try:
result = search['results'][0]
location = Location(address=result['formatted_address'],
lat=result['geometry']['location']['lat'],
long=result['geometry']['location']['lng'])
return location
except KeyError:
print('WorldTime Location Key Error')
raise
else:
return search
async def get_time(self, location: Location) -> Time:
unix_now = int(time.time())
args = {'location': '{},{}'.format(location.latitude, location.longitude),
'timestamp': unix_now,
'key': self.key}
url = API_TIMEZONE + self.param_encode(args)
search = await self.api_get(url)
if search:
try:
location_time = unix_now + search['rawOffset'] + search['dstOffset']
return Time(time=datetime.fromtimestamp(location_time),
timezone_id=search['timeZoneId'],
timezone_name=search['timeZoneName'])
except KeyError:
print('WorldTime Time Key Error')
raise
else:
return search
@staticmethod
async def api_get(url: str) -> dict:
        async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
if resp.status == 200:
j = await resp.json()
if j['status'] == 'OK':
return j
elif j['status'] == 'ZERO_RESULTS':
return None
return False
@staticmethod
def query_encode(text: str) -> str:
text = ' '.join(text.split())
text = text.replace(' ', '+')
for c in text:
if c not in allowed_chars and not c.isalnum():
text = text.replace(c, '%' + hex(ord(c))[2:])
return text
@staticmethod
def param_encode(options: dict) -> str:
out = ''
for k, v in options.items():
out += '{}={}&'.format(k, v)
out = out[:-1]
return out
| 3.296875 | 3 |
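A minimal usage sketch for the WorldTime helper above; the API key is a placeholder and real calls require a Google Maps Platform key with the Geocoding and Time Zone APIs enabled, so the run line is left commented.

import asyncio

async def demo():
    wt = WorldTime(key='YOUR_GOOGLE_API_KEY')     # placeholder key
    location = await wt.get_location('Tokyo, Japan')
    if location:
        when = await wt.get_time(location)
        print(location.address, when.time, when.timezone_name)

# asyncio.get_event_loop().run_until_complete(demo())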
generateData.py | NumericalMax/Linear-Regression | 2 | 12794044 | import numpy as np
from matplotlib import pyplot as plt
n = 100
x = list(range(0, n))
y = list(range(0, n))
for k in range(0, n):
y[k] = y[k] + 3*np.random.randn() + 100
plt.figure(figsize=(20,10))
plt.scatter(x, y)
plt.savefig("./images/rawData.png")
X = np.zeros([n,1])
target = np.zeros([n,1])
X[:,0] = x
target[:,0] = y
np.savetxt("X.txt", X, delimiter=",", fmt='%f')
np.savetxt("y.txt", target, delimiter=",", fmt='%f')
| 3.234375 | 3 |
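A possible follow-up, not part of the original script: load the files it writes and fit the line with ordinary least squares, which should recover a slope near 1 and an intercept near 100.

import numpy as np

X = np.loadtxt("X.txt", delimiter=",")
y = np.loadtxt("y.txt", delimiter=",")
A = np.column_stack([X, np.ones_like(X)])            # design matrix [x, 1]
(slope, intercept), *_ = np.linalg.lstsq(A, y, rcond=None)
print(slope, intercept)                              # expected: ~1.0, ~100.0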
django_typograf/admin.py | movermeyer/django-typograf | 1 | 12794045 | <reponame>movermeyer/django-typograf
from django.contrib import admin
from django_typograf.utils import get_typograf_field_name, get_typograf_hash_field_name
class TypografAdmin(admin.ModelAdmin):
""" Admin class for hide typograf fields from admin site """
def _exclude(self, obj=None):
""" Mark typograf fields as exclude """
exclude = ()
if obj:
exclude += tuple((get_typograf_field_name(field) for field in obj._meta.typografed_fields))
exclude += tuple((get_typograf_hash_field_name(field) for field in obj._meta.typografed_fields))
return exclude
def get_form(self, request, obj=None, **kwargs):
exclude = self.exclude or ()
exclude += self._exclude(obj)
kwargs.update(dict(exclude=exclude))
return super().get_form(request, obj, **kwargs)
| 2.015625 | 2 |
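A hypothetical registration sketch for TypografAdmin; Article is an invented model name and the import path is assumed, so the lines are kept as comments.

# from django.contrib import admin
# from myapp.models import Article            # any model with typografed fields
#
# @admin.register(Article)
# class ArticleAdmin(TypografAdmin):
#     list_display = ('title',)               # typograf/hash fields stay hidden in the form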
Server/Graph-Tests/generationpiechart.py | danielvanpaass/Demonstrator | 0 | 12794046 | <gh_stars>0
import plotly.graph_objects as go
data = {'pv':[1,4,1,2,4,2],
'wind':[1,2,5,3,2,0],
'net':[10,2,5,0,2,0]}
tot_net = sum(data['net'])
tot_pv = sum(data['pv']) + tot_net * 0.05
tot_wind = sum(data['wind']) * 0.08
tot_gas = tot_net * 0.45
tot_coal = tot_net * 0.32
tot_oil = tot_net * 0.04
tot_nuclear = tot_net * 0.03
tot_other = tot_net * 0.03
labels = ['PV', 'Wind', 'Natural Gas', 'Coal', 'Oil', 'Nuclear', 'Other']
share = [tot_pv, tot_wind, tot_gas, tot_coal, tot_oil, tot_nuclear, tot_other]
fig = go.Figure(data=[go.Pie(labels=labels, values=share)])
fig.show()
#print(power_share)
#data.update({'share': })
| 2.9375 | 3 |
datetime/main.py | minister19/Python_snippets | 0 | 12794047 | from time import timezone
import pandas as pd
from datetime import datetime
# print(datetime.fromtimestamp(1603209600))
# print(datetime.fromtimestamp(1612868324294/1000))
# print(datetime.fromtimestamp(1613283396746//1000))
print(datetime.fromtimestamp(1640851200))
print(datetime.fromtimestamp(1640649600))
print(datetime.fromtimestamp(1640617020000//1000))
a = datetime.now()
b = pd.Timestamp(ts_input=a, tzinfo=a.tzinfo)
c = b.floor(freq='T')
d = b.ceil(freq='T')
e = d.timestamp()
f = int(e)
g = datetime.fromtimestamp(f)
print(a, c, d, g)
delta = datetime.now() - datetime.utcnow()
print(delta.seconds / 3600)
| 2.765625 | 3 |
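An alternative sketch, not in the original snippet, that computes the local UTC offset with timezone-aware objects instead of subtracting now() and utcnow():

from datetime import datetime, timezone

offset = datetime.now(timezone.utc).astimezone().utcoffset()
print(offset.total_seconds() / 3600)   # local offset from UTC, in hours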
第5章/program/Chapter_5_xpath_special.py | kingname/SourceCodeOfBook | 274 | 12794048 | import lxml.html
html1 = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<div id="test-1-k">需要的内容1</div>
<div id="test-2-k">需要的内容2</div>
<div id="testfault-k">需要的内容3</div>
<div id="useless">这是我不需要的内容</div>
</body>
</html>
'''
# selector = lxml.html.fromstring(html1)
# content = selector.xpath('//div[ends-with(@id, "-k")]/text()')
# for each in content:
# print(each)
html2 = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<div id="abc-key-x">需要的内容1</div>
<div id="123-key-000">需要的内容2</div>
<div id="haha-key">需要的内容3</div>
<div id="useless">这是我不需要的内容</div>
</body>
</html>
'''
# selector = lxml.html.fromstring(html2)
# content = selector.xpath('//div[contains(@id, "-key")]/text()')
# for each in content:
# print(each)
html3 = '''
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<title></title>
</head>
<body>
<div id="test3">
我左青龙,
<span id="tiger">
右白虎,
<ul>上朱雀,
<li>下玄武。</li>
</ul>
老牛在当中,
</span>
龙头在胸口。
</div>
</body>
</html>
'''
# If you use the ordinary approach, the data you get back will be incomplete
selector = lxml.html.fromstring(html3)
# content_1 = selector.xpath('//div[@id="test3"]/text()')
# for each in content_1:
# print(each)
# Using string(.) retrieves the data in full
data = selector.xpath('//div[@id="test3"]')[0]
info = data.xpath('string(.)')
print(info)
| 3.5625 | 4 |
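A side note not taken from the original chapter: lxml implements XPath 1.0, which has no ends-with(), so the commented-out query on html1 would fail; the same selection can be written with substring() and string-length():

selector = lxml.html.fromstring(html1)
content = selector.xpath('//div[substring(@id, string-length(@id) - 1) = "-k"]/text()')
for each in content:
    print(each)   # matches the three divs whose id ends in "-k"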
tests/conftest.py | williamjmorenor/now-lms | 0 | 12794049 | import pytest
from now_lms import init_app, lms_app
lms_app.app_context().push()
@pytest.fixture(scope="package", autouse=True)
def setup_database():
init_app()
| 1.515625 | 2 |
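A hypothetical test that would rely on the package-scoped fixture above; it assumes lms_app is a Flask application (suggested by app_context().push()) and only checks that the root URL does not error out.

from now_lms import lms_app

def test_root_route_smoke():
    client = lms_app.test_client()       # Flask test client, assuming lms_app is a Flask app
    response = client.get('/')
    assert response.status_code < 500    # smoke check only; real routes are project-specific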
week 2/datatype.py | marksikaundi/Computer-Programming-with-Python | 2 | 12794050 | <filename>week 2/datatype.py
print("this is dataype") | 1.492188 | 1 |