max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
django_pages/language/__init__.py | lunemec/django-pages | 3 | 12794551 | <filename>django_pages/language/__init__.py
# -*- encoding: utf-8 -*-
from django.http import Http404
from ..common.errors import ConfigurationError
from .models import Language
def get_language(url_data):
"""
checks for language in data from url parsing
@param url_data: dict
@return Language object
"""
if not url_data['country_code']:
language = Language.objects.get(default=True)
if not language.active:
raise ConfigurationError('There is no default language active, please activate it in admin')
else:
try:
language = Language.objects.get(country_code=url_data['country_code'])
except Language.DoesNotExist:
raise Http404
return language
def get_languages():
"""
    returns a list of active Language objects when more than one is active, otherwise ()
"""
languages = list(Language.objects.filter(active=True))
if len(languages) > 1:
return languages
return tuple()
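# Usage sketch (assumed call shape, inferred from the docstrings above; the
# 'en' country code is illustrative only):
#   language = get_language({'country_code': 'en'})  # Http404 if no Language matches 'en'
#   language = get_language({'country_code': ''})    # falls back to the default Language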
| 2.484375 | 2 |
scan/contours.py | Akumatic/ExamScan | 0 | 12794552 | <reponame>Akumatic/ExamScan
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Akumatic
import cv2, imutils, numpy
from . import utils
######################
# Contour operations #
######################
def find_contours (
image: numpy.ndarray,
mode: int = cv2.RETR_LIST,
method: int = cv2.CHAIN_APPROX_SIMPLE
) -> list:
""" Find all contours of the filtered image
Args:
image (ndarray):
the filtered image
Returns:
A list containing contour data
"""
cnts = cv2.findContours(image.copy(), mode, method)
return imutils.grab_contours(cnts)
def find_boxes (
contours: list,
thres_area: int = 500,
pad_ratio: float = 0.05
) -> list:
""" Find contours that resemble a box
    Args:
        contours (list):
            a list containing contour data
        thres_area (int):
            minimum contour area for a contour to count as a box
        pad_ratio (float):
            padding ratio (currently unused by this function)
    Returns:
        A list containing the box contours
"""
boxes = list()
for c in contours:
area = cv2.contourArea(c)
perimeter = cv2.arcLength(c, True)
shape_factor = utils.circularity(area, perimeter)
if 0.7 < shape_factor < 0.85 and area >= thres_area:
boxes.append(c)
return boxes
def find_center (
contours: list
) -> list:
""" Find the center coordinates of all given contours.
Args:
contours (list):
A list containing contour data
Returns:
A list containing the center coordinates and the contour as tuples
(x, y, contour).
"""
centers = []
for contour in contours:
m = cv2.moments(contour)
try:
x = int(m["m10"] / m["m00"])
y = int(m["m01"] / m["m00"])
centers.append((x, y, contour))
except ZeroDivisionError:
pass
return centers
def filter_centers (
coords: list,
radius: int
):
""" Removes all but one entry in circles given by coordinates and radius
Args:
coords (list):
a list containing tuples of coordinates (x, y)
radius (float):
the radius around a center where no other center should be
"""
a = 0
while a < len(coords):
b = a + 1
while b < len(coords):
if utils.distance(coords[a][:2], coords[b][:2]) <= radius:
del coords[b]
else:
b += 1
a += 1
def dist_center_topleft (
contour: numpy.ndarray,
center: tuple
) -> float:
""" Calculates the distance from the center of a given contour to it's
top left corner
Args:
contour (ndarray):
The contour data
center (tuple):
A tuple containing the center coordinates (x, y)
Returns:
A float with the distance from center to the top left corner as value
"""
x, y, _, _ = cv2.boundingRect(contour)
return utils.distance((x, y), center[:2])
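# Minimal pipeline sketch (illustrative; the file name, threshold and radius are
# assumptions, not values from the original project). Run as a module, e.g.
# `python -m scan.contours`, so the relative imports resolve.
if __name__ == "__main__":
    image = cv2.imread("filtered_sheet.png", cv2.IMREAD_GRAYSCALE)
    if image is not None:
        contours = find_contours(image)
        boxes = find_boxes(contours, thres_area=500)
        centers = find_center(boxes)
        filter_centers(centers, radius=20)
        print("Found {} candidate boxes".format(len(centers)))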
| 2.78125 | 3 |
setup.py | aerocyber/ShortLink | 0 | 12794553 | <reponame>aerocyber/ShortLink
# Copyright 2021 aditya
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / 'README.md').read_text(encoding='utf-8')
setup(
name='ShortLink',
version='0.1.0',
    description='Python library that acts as a standalone link-shortening utility.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/aerocyber/ShortLink',
author='<NAME>',
author_email='<EMAIL>',
classifiers=[
        'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
],
package_dir={'': 'ShortLink'},
packages=find_packages(where='ShortLink'),
python_requires='>=3.6, <4',
install_requires=['validators'],
project_urls={
'Bug Reports': 'https://github.com/aerocyber/ShortLink/issues',
'Source': 'https://github.com/aerocyber/ShortLink',
},
)
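# Packaging usage sketch (assumed standard setuptools workflow, not part of the
# original file):
#   pip install .        # installs using the metadata declared above
#   python -m build      # builds sdist/wheel if the 'build' package is installed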
| 1.71875 | 2 |
argos/cluster.py | gyplus/comparing-trajectory-clustering-methods | 92 | 12794554 | <reponame>gyplus/comparing-trajectory-clustering-methods
from scipy.sparse import lil_matrix
from scipy.spatial.distance import directed_hausdorff
import numpy as np
import math
def calculate_distance_matrix(traj_list, threshold):
def hausdorf(traj1, traj2):
d = max(directed_hausdorff(traj1, traj2)[0], directed_hausdorff(traj2, traj1)[0])
return d
size = len(traj_list)
for i in range(size):
traj_list[i] = np.array(traj_list[i])
D = lil_matrix((size, size))
for i in range(size):
for j in range(i + 1, size):
distance = hausdorf(traj_list[i], traj_list[j])
if distance < threshold:
D[i, j] = distance
D[j, i] = distance
return D
def calculate_dense_distance_matrix(traj_list):
def hausdorf(traj1, traj2):
d = max(directed_hausdorff(traj1, traj2)[0], directed_hausdorff(traj2, traj1)[0])
return d
size = len(traj_list)
for i in range(size):
traj_list[i] = np.array(traj_list[i])
    D = np.zeros((size, size))  # zeros, so the never-assigned diagonal stays 0 instead of uninitialized memory
for i in range(size):
for j in range(i + 1, size):
distance = hausdorf(traj_list[i], traj_list[j])
D[i, j] = distance
D[j, i] = distance
return D
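# Minimal usage sketch (illustrative only; the toy trajectories below are an
# assumption, not data from the original project):
def example_cluster_trajectories():
    """Cluster three toy 2-D trajectories into two groups via Hausdorff distances + k-medoids."""
    trajs = [
        [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]],
        [[0.0, 0.5], [1.0, 1.5], [2.0, 2.5]],
        [[10.0, 10.0], [11.0, 11.0], [12.0, 12.0]],
    ]
    D = calculate_dense_distance_matrix(trajs)
    medoids, clusters = kMedoids(D, k=2)  # kMedoids is defined below; it is resolved at call time
    return medoids, clusters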
def kMedoids(D, k, tmax=100):
# determine dimensions of distance matrix D
m, n = D.shape
#D = D.todense()
#D[D == 0] = math.inf
if k > n:
raise Exception('too many medoids')
# randomly initialize an array of k medoid indices
M = np.arange(n)
np.random.shuffle(M)
M = np.sort(M[:k])
# create a copy of the array of medoid indices
Mnew = np.copy(M)
# initialize a dictionary to represent clusters
C = {}
for t in range(tmax):
# determine clusters, i. e. arrays of data indices
J = np.argmin(D[:,M], axis=1)
for kappa in range(k):
C[kappa] = np.where(J==kappa)[0]
# update cluster medoids
for kappa in range(k):
J = np.mean(D[np.ix_(C[kappa],C[kappa])],axis=1)
j = np.argmin(J)
Mnew[kappa] = C[kappa][j]
        Mnew = np.sort(Mnew)  # np.sort returns a sorted copy, so assign it back
# check for convergence
if np.array_equal(M, Mnew):
break
M = np.copy(Mnew)
else:
# final update of cluster memberships
J = np.argmin(D[:,M], axis=1)
for kappa in range(k):
C[kappa] = np.where(J==kappa)[0]
# return results
    return M, C
| 2.578125 | 3 |
tools/pretty-printers/gdb/cista_vector.py | mayhemheroes/cista | 0 | 12794555 | <reponame>mayhemheroes/cista
import re
import gdb.xmethod
def is_cista_vector(gdb_type):
return str(gdb_type.strip_typedefs().unqualified()).startswith("cista::basic_vector")
def is_raw_vector(gdb_type):
return not str(gdb_type.strip_typedefs().template_argument(1)).startswith("cista::offset_ptr")
class CistaVector:
def __init__(self, val):
self.val = val
self.size = val['used_size_']
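        # OffsetPointer is not defined in this file; it is presumably provided by a
        # companion offset_ptr pretty-printer script loaded alongside this one.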
self.el = val['el_'] if is_raw_vector(val.type) else OffsetPointer(val['el_'])
def __len__(self):
return self.size
def __getitem__(self, idx):
return (self.el + idx).dereference()
def at(self, idx):
        if idx >= self.size:
print("Accessing vector out of bounds")
return None
return self[idx]
class CistaVectorPrinter:
def __init__(self, val):
self.val = CistaVector(val)
def children(self):
for idx in range(len(self.val)):
yield '[' + str(idx) + ']', self.val[idx]
def to_string(self):
return str(self.val)
def my_pp_func(val):
if not is_cista_vector(val.type):
return
return CistaVectorPrinter(val)
### XMethod cista::vector::at
class CistaVectorWorker_at(gdb.xmethod.XMethodWorker):
def get_arg_types(self):
return gdb.lookup_type('unsigned long int')
def get_result_type(self, obj):
return obj.type.strip_typedefs().template_argument(0)
def __call__(self, this, idx):
vec = CistaVector(this.dereference())
return vec.at(idx)
class CistaVector_at(gdb.xmethod.XMethod):
def __init__(self):
gdb.xmethod.XMethod.__init__(self, 'at')
def get_worker(self, method_name):
if method_name == 'at':
return CistaVectorWorker_at()
### XMethod cista::vector::operator[]
class CistaVectorWorker_operator_brackets(gdb.xmethod.XMethodWorker):
def get_arg_types(self):
return gdb.lookup_type('unsigned long int')
def get_result_type(self, obj):
return obj.type.strip_typedefs().template_argument(0)
def __call__(self, this, idx):
vec = CistaVector(this.dereference())
return vec[idx]
class CistaVector_operator_brackets(gdb.xmethod.XMethod):
def __init__(self):
gdb.xmethod.XMethod.__init__(self, 'operator[]')
def get_worker(self, method_name):
if method_name == 'operator[]':
return CistaVectorWorker_operator_brackets()
class CistaVectorMatcher(gdb.xmethod.XMethodMatcher):
def __init__(self):
gdb.xmethod.XMethodMatcher.__init__(self, 'CistaVectorMatcher')
# List of methods 'managed' by this matcher
self.methods = [CistaVector_at(), CistaVector_operator_brackets()]
def match(self, class_type, method_name):
if not is_cista_vector(class_type):
return None
workers = []
for method in self.methods:
if method.enabled:
worker = method.get_worker(method_name)
if worker:
workers.append(worker)
return workers
gdb.pretty_printers.append(my_pp_func)
gdb.xmethod.register_xmethod_matcher(None, CistaVectorMatcher())
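# Usage sketch (assumed workflow, not from the original repository): sourcing this
# script inside gdb registers the printer and xmethods via the two calls above, e.g.
#   (gdb) source cista_vector.py
#   (gdb) print my_vector          # pretty-printed via CistaVectorPrinter
#   (gdb) print my_vector.at(3)    # answered by CistaVectorWorker_at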
| 2.328125 | 2 |
var/spack/repos/builtin/packages/mrcpp/package.py | xiki-tempula/spack | 1 | 12794556 | <reponame>xiki-tempula/spack
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mrcpp(CMakePackage):
"""The MultiResolution Computation Program Package (MRCPP) is a general purpose
numerical mathematics library based on multiresolution analysis and the
multiwavelet basis which provide low-scaling algorithms as well as rigorous
error control in numerical computations."""
homepage = "https://mrcpp.readthedocs.io/en/latest/"
url = "https://github.com/MRChemSoft/mrcpp/archive/v1.1.0.tar.gz"
maintainers = ["robertodr", "stigrj", "ilfreddy"]
version('1.2.0-alpha2',
sha256='8f4df594751a5b7e76b09a62450c6c4956b1974876afa143cc9b5703156ccd40')
version('1.1.0',
sha256='e9ffb87eccbd45305f822a0b46b875788b70386b3c1d38add6540dc4e0327ab2',
preferred=True)
version('1.0.2',
sha256='d2b26f7d7b16fa67f16788119abc0f6c7562cb37ece9ba075c116463dcf19df3')
version('1.0.1',
sha256='b4d7120545da3531bc7aa0a4cb4eb579fdbe1f8e5d32b1fd1086976583e3e27c')
version('1.0.0',
sha256='0858146141d3a60232e8874380390f9e9fa0b1bd6e67099d5833704478213efd')
variant("openmp", default=True, description="Enable OpenMP support.")
variant("mpi", default=True, description="Enable MPI support")
depends_on("mpi", when="+mpi")
depends_on("[email protected]:", type="build")
depends_on("eigen")
def cmake_args(self):
args = [
"-DENABLE_OPENMP={0}".format("ON" if "+openmp" in
self.spec else "OFF"),
"-DENABLE_MPI={0}".format("ON" if "+mpi" in self.spec else "OFF"),
]
return args
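# Usage sketch (assumed, not part of the recipe): once this package sits in a Spack
# repository, the variants above map onto spec syntax such as
#   spack install mrcpp+mpi+openmp
#   spack install mrcpp~mpi@1.1.0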
| 1.210938 | 1 |
rplugin/python3/tmuxdir/test_dirmngr.py | viniarck/tmuxdir.nvim | 23 | 12794557 | import os
from tmuxdir.dirmngr import ConfigHandler, DirMngr
import pytest
@pytest.fixture
def dir_mngr() -> DirMngr:
folder_name = "/tmp/tmuxdirtest/"
os.makedirs(folder_name, exist_ok=True)
cfg_handler = ConfigHandler(folder_name=folder_name)
yield DirMngr([], [".git"], cfg_handler=cfg_handler)
try:
os.remove(str(cfg_handler._full_path))
except FileNotFoundError:
pass
os.removedirs(folder_name)
@pytest.fixture
def cfg_handler() -> ConfigHandler:
folder_name = "/tmp/tmuxdirtest/"
os.makedirs(folder_name, exist_ok=True)
config_handler = ConfigHandler(folder_name=folder_name)
yield config_handler
try:
os.remove(str(config_handler._full_path))
except FileNotFoundError:
pass
os.removedirs(folder_name)
@pytest.fixture
def tmp_git_folder() -> str:
folder_name = "/tmp/repo/.git"
os.makedirs(folder_name, exist_ok=True)
yield "/".join(folder_name.split("/")[:-1])
os.removedirs(folder_name)
class TestDirManager:
def test_first_save(self, cfg_handler: ConfigHandler):
data = {"dirs": {"/tmp": "/tmp"}, "ignored_dirs": {}}
cfg_handler.save(data)
loaded = cfg_handler.load()
assert loaded == data
def test_add_dir(self, dir_mngr: DirMngr, tmp_git_folder: str):
assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder
assert dir_mngr.add("/tmp/foo") == []
assert tmp_git_folder in dir_mngr.dirs
def test_add_dir_list(self, dir_mngr: DirMngr, tmp_git_folder: str):
folder = "/tmp/pit/"
assert dir_mngr.add(folder) == []
def test_add_clear(self, dir_mngr: DirMngr, tmp_git_folder: str):
assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder]
assert dir_mngr.clear_added_dir(tmp_git_folder)
assert dir_mngr.list_dirs() == []
assert not dir_mngr.clear_added_dir("/tmp/random/")
assert tmp_git_folder not in dir_mngr.dirs
def test_clear_added_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str):
assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder
assert dir_mngr.clear_added_dir(tmp_git_folder)
assert dir_mngr.dirs == {}
assert dir_mngr.cfg_handler.load()[dir_mngr._DIRS_KEY] == {}
def test_add_ignore(self, dir_mngr: DirMngr, tmp_git_folder: str):
assert dir_mngr.add(tmp_git_folder) == [tmp_git_folder]
assert dir_mngr.ignore(tmp_git_folder)
assert dir_mngr.add(tmp_git_folder) == []
assert tmp_git_folder not in dir_mngr.list_dirs()
def test_clear_ignored_dirs(self, dir_mngr: DirMngr, tmp_git_folder: str):
assert dir_mngr.add(tmp_git_folder)[0] == tmp_git_folder
assert dir_mngr.ignore(tmp_git_folder)
assert dir_mngr.clear_ignored_dirs()
assert dir_mngr.ignored_dirs == {}
assert dir_mngr.cfg_handler.load()[dir_mngr._IGNORED_DIRS_KEY] == {}
@pytest.mark.skipif(
not os.path.isdir(os.path.expanduser("~/b/repos"))
or not os.environ.get("TMUXDIR_BENCH", False),
reason="~/b/repos doesn't exist",
)
def test_find_projects_v10_eager(self, benchmark, dir_mngr: DirMngr) -> None:
benchmark(dir_mngr.find_projects, "~/b/repos", [".git"], 3, True)
@pytest.mark.skipif(
not os.path.isdir(os.path.expanduser("~/b/repos"))
or not os.environ.get("TMUXDIR_BENCH", False),
reason="~/b/repos doesn't exist",
)
def test_find_projects_v11_not_eager(self, benchmark, dir_mngr: DirMngr) -> None:
benchmark(dir_mngr.find_projects, "~/b/repos", [".git"], 3, False)
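# Usage sketch (assumption): the benchmark tests above only run when ~/b/repos
# exists and TMUXDIR_BENCH is set, e.g.
#   TMUXDIR_BENCH=1 pytest test_dirmngr.py --benchmark-only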
| 2.28125 | 2 |
kubegen/__main__.py | razaqK/kubegen | 1 | 12794558 | <gh_stars>1-10
from .kubepolicygen import KubePolicyGen
from .util import log, validate_yaml
import json
import click
@click.command()
@click.option('--kind', '-k',
help='what kind of k8s policy file are you trying to create. support type includes deployment, ingress and svc.')
@click.option('--data', '-d',
help='Supply payload for the policy file in jsonstring format e.g {"name": "app-1", "version": "v1"} ')
def main(kind, data):
log('K8s yaml policy file generator', 'blue')
kube_policy_gen = KubePolicyGen(kind, data)
response = kube_policy_gen.populate_config()
if response['status'] == 'error':
return log('Error occurred: ->' + json.dumps(response['error']), 'red')
is_valid, policy = validate_yaml(response['data'])
if not is_valid:
return log('Error occurred: ->' + policy, 'red')
filename = kube_policy_gen.kind + '.yaml'
build_policy = open(filename, 'w')
build_policy.write(policy)
build_policy.close()
    click.echo('successfully generated policy file')
return log('Success: ->' + json.dumps(response['data']), 'green')
if __name__ == '__main__':
main(prog_name="kubegen")
| 2.421875 | 2 |
test/test_ude/communication/test_grpc_auth.py | aws-deepracer/ude | 0 | 12794559 | <reponame>aws-deepracer/ude
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
from unittest import mock, TestCase
from unittest.mock import patch, MagicMock
from ude.communication.grpc_auth import GrpcAuth
class GrpcAuthTest(TestCase):
def setUp(self) -> None:
pass
def test_initialize(self):
key = "hello"
auth = GrpcAuth(key=key)
assert key == auth._key
def test_call(self):
key = "hello"
auth = GrpcAuth(key=key)
context_mock = MagicMock()
callback_mock = MagicMock()
auth(context=context_mock,
callback=callback_mock)
callback_mock.assert_called_with((('rpc-auth-header', key),), None)
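# Usage sketch (assumption, not part of this test): in application code the plugin
# would typically be wrapped as gRPC call credentials, e.g.
#   call_creds = grpc.metadata_call_credentials(GrpcAuth("hello"))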
| 2.265625 | 2 |
testing/adios2/bindings/python/TestBPSelectSteps_nompi.py | taniabanerjee/ADIOS2 | 190 | 12794560 | <filename>testing/adios2/bindings/python/TestBPSelectSteps_nompi.py
#!/usr/bin/env python
#
# Distributed under the OSI-approved Apache License, Version 2.0. See
# accompanying file Copyright.txt for details.
#
# TestBPSelectSteps_nompi.py: test step selection by reading in Python
# in ADIOS2 File Write
# Created on: Jan 29, 2021
# Author: <NAME> <EMAIL>
import unittest
import shutil
import numpy as np
import adios2
TESTDATA_FILENAME = "steps_int32.bp"
class TestAdiosSelectSteps(unittest.TestCase):
def setUp(self):
total_steps = 10
with adios2.open(TESTDATA_FILENAME, "w") as fh:
for i in range(total_steps):
fh.write("step", np.array([i], dtype=np.int32), [1], [0], [1])
fh.end_step()
def tearDown(self):
shutil.rmtree(TESTDATA_FILENAME)
def test_select_steps_reading_fullAPI(self):
selected_steps = [3, 5, 7]
param_string = ",".join([str(i) for i in selected_steps])
adios = adios2.ADIOS()
ioReadBP = adios.DeclareIO("hellopy")
ioReadBP.SetParameter(TESTDATA_FILENAME, param_string)
fh = ioReadBP.Open(TESTDATA_FILENAME, adios2.Mode.Read)
var = ioReadBP.InquireVariable("step")
var.SetStepSelection([0, len(selected_steps)])
data = np.zeros(len(selected_steps), dtype=np.int32)
fh.Get(var, data, adios2.Mode.Sync)
self.assertTrue(all([data[i] == selected_steps[i] for i in
range(len(selected_steps))]))
if __name__ == '__main__':
unittest.main()
| 2.1875 | 2 |
SF-home-price-prediction/src/learning.py | apthomas/SF-home-price-prediction | 0 | 12794561 | <reponame>apthomas/SF-home-price-prediction<filename>SF-home-price-prediction/src/learning.py<gh_stars>0
import pandas as pd
import numpy as np
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler, RobustScaler, QuantileTransformer
from sklearn import ensemble, datasets, metrics
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor, AdaBoostRegressor
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
from sklearn.pipeline import Pipeline
import statsmodels.api as sm  # array-based OLS lives in statsmodels.api, not statsmodels.formula.api
from datetime import datetime
def load_processed_ipo_data(datafile, drop_nan_columns, drop_columns):
'''
Import Final IPO csv that was created in wrangling.ipynb. Here we have every IPO in Silicon Valley,
and each zip code in a 10 mile radius from the IPO Zipcode, the demographics of each of those zipcodes,
economic data of the zipcode and the home prices at the Date Filed Time, the Lockup Date, 1 Year after
the Date is Filed and 2 years after the date is filed.
'''
ipo_final_df = pd.read_csv(datafile, encoding="ISO-8859-1")
ipo_final_df = ipo_final_df.dropna(axis=0, subset=drop_nan_columns) # remove row where if there is any 'NaN' value in column 'A'
#ipo_final_df = ipo_final_df.drop(columns=drop_columns)
return ipo_final_df
def normalize_ipo(df_ipo, min_max_list, quantile_scaler_list):
scaler_min_max = MinMaxScaler()
df_ipo[min_max_list] = scaler_min_max.fit_transform(
df_ipo[min_max_list])
scaler_quantile = QuantileTransformer(output_distribution='normal')
df_ipo[quantile_scaler_list] = scaler_quantile.fit_transform(df_ipo[quantile_scaler_list])
df_ipo[quantile_scaler_list] = scaler_min_max.fit_transform(df_ipo[quantile_scaler_list])
return df_ipo
def create_test_train_set(df_ipo, label_attr, ratio_label, ratio_divisor):
# Predicting Median Price of All Homes in a Zipcode, and strucuturing data to do so.
df_ipo[ratio_label] = df_ipo[label_attr] / df_ipo[ratio_divisor]
# dataset that does not have 'All Homes 2 Years After Date Filed'
df_test_set_2_years = df_ipo[df_ipo[label_attr].isna()]
# dataset that I will use to train the model because it does have 'All Homes 2 Years After Date Filed'
df_train_set_2_years = df_ipo[df_ipo[label_attr].notna()]
return df_train_set_2_years, df_test_set_2_years
def create_historical_encoded_df(df, date_field, location_field, time_window, feature_cols, ipo_cols):
'''
:param df: dataframe with ipo data
:param date_field: field that will be used to create time windows
:param location_field: field that denotes the zipcode demographic and economic data. Within radius of 10 miles of IPO
:param time_window: time window used for encoding and prediction. Likely 2 years.
Decisions: weighted average of encoded historical data --> either I can define it or learn it, but here I am defining it.
weighted average is by time differential from beginning of window to the end
:return:
'''
encoded_data = []
df[date_field] = pd.to_datetime(df[date_field], format='%Y-%m-%d')
for index, row in df.iterrows():
dict = row.filter(feature_cols).to_dict()
filtered_rows = df[(df[date_field] > row[date_field]) & (df[date_field] < row[date_field] + np.timedelta64(time_window, 'Y'))]
filtered_rows = filtered_rows[filtered_rows[location_field] == row[location_field]]
filtered_rows.index = filtered_rows.index.map(str)
filtered_rows['date_test'] = (filtered_rows[date_field] -row[date_field])
filtered_rows["time_weight"] = 1.0-(filtered_rows['date_test']/np.timedelta64(time_window, 'Y'))
filtered_rows = filtered_rows.replace(['--'], [1], regex=True)
filtered_rows['Number of Employees'] = pd.to_numeric(filtered_rows['Number of Employees'])
for i in range(0, len(ipo_cols)):
dict[ipo_cols[i] + '_weighted'] = filtered_rows["time_weight"].dot(filtered_rows[ipo_cols[i]])
encoded_data.append(dict)
ipo_final_ecoded_df = pd.DataFrame(encoded_data)
return ipo_final_ecoded_df
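# Worked illustration of the weighting above (inferred from time_weight = 1 - dt / window):
# with a 2-year window, an IPO filed 0.5 years after the row's filing date gets weight
# 1 - 0.5/2 = 0.75, so its 'Offer Amount' contributes at 75% strength to the weighted dot product.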
def show_correlations_matrix(df, drop_columns, label_attr,correlation_threshold):
train_corr = df.select_dtypes(include=[np.number])
train_corr = train_corr.drop(columns=drop_columns)
train_corr.shape
# Correlation plot
corr = train_corr.corr()
plt.subplots(figsize=(20, 9))
sns.heatmap(corr, annot=True)
plt.show()
top_feature = corr.index[abs(corr[label_attr] > correlation_threshold)]
plt.subplots(figsize=(12, 8))
top_corr = df[top_feature].corr()
sns.heatmap(top_corr, annot=True)
plt.title('Correlation between features');
plt.show()
def view_feature_distributions(df):
# histograms
df.hist(bins=25, figsize=(25, 20), grid=False);
def view_residual_feature_plots(df, label_attr, feature_list):
plt.figure(figsize=(25, 60))
# i: index
for i, col in enumerate(feature_list):
# 3 plots here hence 1, 3
plt.subplot(10, 6, i + 1)
x = df[col]
y = df[label_attr]
plt.plot(x, y, 'o')
# Create regression line
plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)))
plt.title(col)
plt.xlabel(col)
plt.ylabel('prices')
plt.show()
def prep_train_validation_test_data(df_train, df_test, label_attr, feature_list):
# Split-out validation dataset
X = df_train.loc[:, feature_list]
y = df_train[label_attr]
x_pred_test = df_test.loc[:, feature_list]
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.2, random_state=42)
return X_train, X_validation, Y_train, Y_validation, x_pred_test
def plot_single_variable_distribution_and_prob_plot(df, attr):
plt.subplots(figsize=(10, 9))
sns.distplot(df[attr], fit=stats.norm)
# Get the fitted parameters used by the function
(mu, sigma) = stats.norm.fit(df[attr])
# plot with the distribution
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)], loc='best')
plt.ylabel('Frequency')
# Probablity plot
fig = plt.figure()
stats.probplot(df[attr], plot=plt)
plt.show()
def run_ordinary_least_squares(df_x, df_y):
model = sm.OLS(df_y, df_x)
results = model.fit()
print(results.summary())
plt.figure(figsize=(8, 5))
p = plt.scatter(x=results.fittedvalues, y=results.resid, edgecolor='k')
xmin = min(results.fittedvalues)
xmax = max(results.fittedvalues)
plt.hlines(y=0, xmin=xmin * 0.9, xmax=xmax * 1.1, color='red', linestyle='--', lw=3)
plt.xlabel("Fitted values", fontsize=15)
plt.ylabel("Residuals", fontsize=15)
plt.title("Fitted vs. residuals plot", fontsize=18)
plt.grid(True)
#plt.show()
def run_k_folds(num_folds, algs_to_test, df_train_x, df_train_y):
    # Test options and evaluation metric (negative mean squared error)
seed = 7
RMS = 'neg_mean_squared_error'
pipelines = []
for i in range(0, len(algs_to_test)):
pipelines.append((algs_to_test[i][0], Pipeline([('Scaler', MinMaxScaler()), algs_to_test[i][1]])))
results = []
names = []
for name, model in pipelines:
kfold = KFold(n_splits=num_folds, random_state=seed)
cv_results = cross_val_score(model, df_train_x, df_train_y, cv=kfold, scoring=RMS)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
def build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, seed):
# prepare the model
model = ExtraTreesRegressor(random_state=seed, n_estimators=100)
model.fit(df_train_x, df_train_y)
# transform the validation dataset
predictions = model.predict(df_validation_x)
#print(predictions)
#print(df_test_y)
print(mean_squared_error(df_validation_y, predictions))
print("Accuracy --> ", model.score(df_validation_x, df_validation_y) * 100)
# prepare the model
model_rf = RandomForestRegressor(random_state=seed, n_estimators=100)
model_rf.fit(df_train_x, df_train_y)
# transform the validation dataset
predictions_rf = model_rf.predict(df_validation_x)
print(mean_squared_error(df_validation_y, predictions_rf))
print("Accuracy --> ", model.score(df_validation_x, df_validation_y) * 100)
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
model_gb = ensemble.GradientBoostingRegressor(**params)
model_gb.fit(df_train_x, df_train_y)
# transform the validation dataset
predictions_gb = model_gb.predict(df_validation_x)
print(mean_squared_error(df_validation_y, predictions_gb))
print("Accuracy --> ", model.score(df_validation_x, df_validation_y) * 100)
return [model, model_rf, model_gb]
def make_predictions_model(models, df_test_x):
# prepare the model
predictions = models[0].predict(df_test_x)
predictions_rf = models[1].predict(df_test_x)
predictions_gb = models[2].predict(df_test_x)
return [predictions, predictions_rf, predictions_gb]
def create_predictions(predictions, df_x, label_divider):
df_x["Pred House Price ET"] = predictions[0]
df_x["Pred House Price RF"] = predictions[1]
df_x["Pred House Price GB"] = predictions[2]
df_x["Pred House Price ET Change"] = predictions[0] / df_x[label_divider] - 1
df_x["Pred House Price RF Change"] = predictions[1] / df_x[label_divider] - 1
df_x["Pred House Price GB Change"] = predictions[2] /df_x[label_divider] - 1
return df_x
def main_build_predictions():
ipo_final_with_date_filed_home = load_processed_ipo_data('../data/processed/df_ipo_encoded_test.csv', ['All Homes Date Filed','Number of Employees_weighted'], ['Unnamed: 0', 'CIK', 'Company Name'])
min_max_normalization_list = ['Found_weighted', 'Median Age',
'Percent of People under 18 years of age',
'Percent of People 65 years and over',
'Percent of Males',
'Percent of Females',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People who are Black or African American',
'Percent of People who are Asian',
'Unemployment Rate',
'Mean Travel Time to Work Estimate (minutes)',
'Percent of Households with Income Greater than $200,000',
'Median Household Income Estimate (dollars)',
'Mean Household Income Estimate (dollars)',
'Per Capita Income Estimate (dollars)',
'Percent of Population with no Health Insurance Coverage',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Households With Income Less Than $24,999', 'Distance to IPO_weighted']
quantile_scaler_normalization_list = ['Offer Amount_weighted', 'Number of Employees_weighted']
ipo_final_with_date_filed_home = normalize_ipo(ipo_final_with_date_filed_home, min_max_normalization_list, quantile_scaler_normalization_list)
print(ipo_final_with_date_filed_home.isnull().sum(axis = 0))
df_train, df_test = create_test_train_set(ipo_final_with_date_filed_home, 'All Homes 2 Years After Date Filed', '2 Year Home Value ratio', 'All Homes Date Filed')
#show_correlations_matrix(df_train, ['All Homes 1 Year After Date Filed', 'All Homes Lockup Expiration Date'], 'All Homes 2 Years After Date Filed', 0.5)
#view_feature_distributions(df_train)
feature_cols = [
'Distance to IPO_weighted', 'Found_weighted',
'Mean Household Income Estimate (dollars)',
'Mean Travel Time to Work Estimate (minutes)', 'Median Age',
'Median Household Income Estimate (dollars)', 'Offer Amount_weighted',
'Per Capita Income Estimate (dollars)', 'Percent of Females',
'Percent of Households With Income Less Than $24,999',
'Percent of Households with Income Greater than $200,000',
'Percent of Males', 'Percent of People 65 years and over',
'Percent of People under 18 years of age',
'Percent of People who are Asian',
'Percent of People who are Black or African American',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Population with no Health Insurance Coverage',
'Unemployment Rate', 'All Homes Date Filed','All Homes 1 Year Before Date Filed', 'Zipcode for Distance', 'Number of Employees_weighted']
#view_residual_feature_plots(df_train, 'All Homes 2 Years After Date Filed', feature_cols)
#plot_single_variable_distribution_and_prob_plot(df_train,'All Homes 2 Years After Date Filed')
df_train_x, df_validation_x, df_train_y, df_validation_y, df_test_x = prep_train_validation_test_data(df_train, df_test, 'All Homes 2 Years After Date Filed', feature_cols)
#run_ordinary_least_squares(df_train_x, df_train_y)
#k_folds_algorithms =[['ScaledLR', ('LR', LinearRegression())],['ScaledAB', ('AB', AdaBoostRegressor())],['ScaledGBM', ('GBM', GradientBoostingRegressor())],['ScaledRF', ('RF', RandomForestRegressor(n_estimators=100))]]
#run_k_folds(20, k_folds_algorithms,df_train_x, df_train_y)
models = build_models(df_train_x, df_train_y,df_validation_x, df_validation_y, 7)
predictions = make_predictions_model(models, df_test_x)
df_test_x_with_pred = create_predictions(predictions, df_test_x, 'All Homes Date Filed')
df_test_x_with_pred.to_csv("../data/processed/Test_Predictions_encoded.csv", index=False)
def create_encoding_historical_zipcode_data(data):
feature_cols = [
'Mean Household Income Estimate (dollars)',
'Mean Travel Time to Work Estimate (minutes)', 'Median Age',
'Median Household Income Estimate (dollars)',
'Per Capita Income Estimate (dollars)', 'Percent of Females',
'Percent of Households With Income Less Than $24,999',
'Percent of Households with Income Greater than $200,000',
'Percent of Males', 'Percent of People 65 years and over',
'Percent of People under 18 years of age',
'Percent of People who are Asian',
'Percent of People who are Black or African American',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Population with no Health Insurance Coverage',
'Unemployment Rate', 'All Homes Date Filed','All Homes 1 Year Before Date Filed', 'All Homes 2 Years After Date Filed', 'Date Filed', 'Zipcode for Distance']
ipo_cols = ['Offer Amount', 'Number of Employees', 'Found', 'Distance to IPO']
drop_columns = ['Unnamed: 0', 'CIK', 'Company Name']
ipo_final_with_date_filed_home = load_processed_ipo_data(data, ['All Homes Date Filed','Number of Employees'], drop_columns)
#ipo_final_with_date_filed_home['Date Filed'] = pd.to_datetime(ipo_final_with_date_filed_home['Date Filed'], errors='coerce', format='%Y-%m-%d')
ipo_final_ecoded_df = create_historical_encoded_df(ipo_final_with_date_filed_home, 'Date Filed', 'Zipcode for Distance', 2, feature_cols, ipo_cols)
ipo_final_ecoded_df.to_csv("../data/processed/df_ipo_encoded_test.csv", index=False)
if __name__ == "__main__":
print("we are learning")
create_encoding_historical_zipcode_data('../data/processed/df_ipo_all.csv')
#main_build_predictions()
| 2.578125 | 3 |
web-server.py | filhit/dsoulstest-server | 0 | 12794562 | <reponame>filhit/dsoulstest-server
from http.server import HTTPServer, BaseHTTPRequestHandler
from datetime import datetime
import json
import ssl
last_update = None
last_players = None
class HTTPRequestHandler(BaseHTTPRequestHandler):
def end_headers (self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-Type', 'application/json')
BaseHTTPRequestHandler.end_headers(self)
def create_response(self):
response = {}
if last_update is None:
response['status'] = 'Error'
else:
response['last_update'] = last_update.isoformat()
if last_players is not None:
response['players'] = last_players
return response
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps(self.create_response()).encode())
def do_POST(self):
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
request = json.loads(body.decode())
global last_players
last_players = request['players']
self.send_response(200)
self.end_headers()
self.wfile.write('{}'.encode())
global last_update
last_update = datetime.now()
httpd = HTTPServer(('', 30001), HTTPRequestHandler)
httpd.socket = ssl.wrap_socket (httpd.socket,
keyfile="/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/privkey.pem",
certfile='/etc/letsencrypt/live/minetest.westeurope.cloudapp.azure.com/fullchain.pem',
server_side=True)
httpd.serve_forever()
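# Usage sketch (assumed endpoints, matching the handlers above):
#   curl -k https://localhost:30001/                                # JSON with last_update/players
#   curl -k -X POST -d '{"players": ["alice", "bob"]}' https://localhost:30001/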
| 2.359375 | 2 |
python scripts/detect_winning_team.py | Geoffry-Skionfinschii/Datapack_SurvivalGames | 10 | 12794563 | <reponame>Geoffry-Skionfinschii/Datapack_SurvivalGames
def main():
print("# Generated by python script")
for i in range(1, 22):
print("execute as @r[team=TEAM_{0}, tag=InGame] run tag @a[team=TEAM_{0}] add Winner".format(i))
print("execute as @r[team=TEAM_{0}, tag=InGame] run scoreboard players add @a[team=TEAM_{0}] Wins 1".format(i))
main()
| 2.75 | 3 |
server.py | MensahDev/assignment1-im | 0 | 12794564 | <gh_stars>0
# coding: utf-8
import socketserver
# Copyright 2013 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
import mimetypes
from http import HTTPStatus
import re
import os
CWD = os.getcwd()
HOME = os.path.abspath('./www')
class MyWebServer(socketserver.BaseRequestHandler):
def handle(self):
self.data = self.request.recv(1024).strip()
print ("Got a request of: %s\n" % self.data)
self.data = self.data.decode('utf-8')
self.status = HTTPStatus.OK
self.mimetype = mimetypes.types_map['.a'] #unknown type is application/octet-stream
self.content = None
if not self.data.startswith('GET'):
# only handling get for now
print('server doesnt serve non-GET requests yet')
self.status = HTTPStatus.METHOD_NOT_ALLOWED
self.respond()
return
self.handle_get()
self.respond()
def respond(self):
response = 'HTTP/1.1 ' + str(self.status.value) + ' ' + self.status.phrase
response += '\r\nContent-Type: ' + self.mimetype
        response += '\r\n\r\n'
        if self.content:
            response += self.content
self.request.sendall(response.encode('utf-8'))
def handle_get(self):
# reference: extracting URL from HTTP request via a regex
# https://stackoverflow.com/questions/35555427/how-to-extract-url-from-get-http-request
url_pattern = re.compile("^GET (.*)[ ].*")
url = url_pattern.match(self.data).group(1)
if url.endswith('/'):
url += 'index.html'
if url == '/deep':
# 301 TODO is this hardcoding? If yes, whats the correct way?
self.status = HTTPStatus.MOVED_PERMANENTLY
return
if not os.path.isfile(HOME+url):
self.status = HTTPStatus.NOT_FOUND
return
if os.path.abspath(HOME+url).find(CWD+"/www/") == -1:
# /www/ should be in the current path
self.status = HTTPStatus.NOT_FOUND
return
f = open(HOME+url)
self.content = f.read()
f.close()
if url.endswith('.html'):
self.mimetype = mimetypes.types_map['.html']
elif url.endswith('.css'):
self.mimetype = mimetypes.types_map['.css']
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
| 2.828125 | 3 |
loggingUnitTests/LoggingConfigUnitTest.py | jonreding2010/PythonLogging | 0 | 12794565 | import os
import unittest
from baseLogger.ConsoleLogger import ConsoleLogger
from baseLogger.FileLogger import FileLogger
from baseLogger.LoggingConfig import LoggingConfig
from baseLogger.constants.LoggingEnabled import LoggingEnabled
from baseLogger.constants.MessageType import MessageType
from utilities.Config import Config
from utilities.StringProcessor import StringProcessor
# Logging Configuration unit test class.
# Tests running in serial.
# @Test(singleThreaded = true)
class LoggingConfigUnitTest(unittest.TestCase):
# Test getting Logging Enabled Setting.
# Override Config to 'YES'
def test_getLoggingEnabledSettingTest(self):
new_value_map = {"log": "YES"}
config = Config()
config.add_general_test_setting_values(new_value_map, True)
self.assertEquals(LoggingConfig(config).get_logging_enabled_setting(Config()), LoggingEnabled.YES.name,
"Expected Logging Enabled Setting YES.")
# Test getting Logging Enabled Setting.
# Override Config to 'ONFAIL'
def test_getLoggingEnabledOnFailSettingTest(self):
new_value_map = {"Log": "ONFAIL"}
config = Config()
config.add_general_test_setting_values(new_value_map, True)
self.assertEquals(LoggingConfig().get_logging_enabled_setting(config), LoggingEnabled.ONFAIL.name,
"Expected Logging Enabled Setting ONFAIL.")
# Test getting Logging Enabled Setting.
# Override Config to 'NO'
def test_getLoggingDisabledSettingTest(self):
new_value_map = {"Log": "NO"}
Config().add_general_test_setting_values(new_value_map, True)
self.assertEquals(LoggingConfig().get_logging_enabled_setting(), LoggingEnabled.NO.name,
"Expected Logging Enabled Setting NO.")
# Test getting Logging Enabled Setting with an Illegal Argument
# Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException
def test_getLoggingSettingIllegalArgumentTest(self):
with self.assertRaises(NotImplementedError):
new_value_map = {"Log": "INVALIDVALUE"}
Config().add_general_test_setting_values(new_value_map, True)
LoggingConfig().get_logging_enabled_setting()
# Test getting Logging Level Setting.
# Override Config to 'VERBOSE'
def test_getLoggingLevelVerboseSettingTest(self):
new_value_map = {"LogLevel": "VERBOSE"}
Config().add_general_test_setting_values(new_value_map, True)
self.assertEquals(MessageType.VERBOSE.name, LoggingConfig().get_logging_level_setting(),
"Expected Logging Level Setting VERBOSE.")
# Test getting Logging Level Setting.
# Override Config to 'INFORMATION'
def test_getLoggingLevelInformationSettingTest(self):
new_value_map = {"LogLevel": "INFORMATION"}
Config().add_general_test_setting_values(new_value_map, True)
self.assertEquals(MessageType.INFORMATION.name, LoggingConfig().get_logging_level_setting(),
"Expected Logging Level Setting INFORMATION.")
# Test getting Logging Level Setting.
# Override Config to 'GENERIC'
def test_getLoggingLevelGenericSettingTest(self):
new_value_map = {"LogLevel": "GENERIC"}
Config().add_general_test_setting_values(new_value_map, True)
self.assertEquals(MessageType.GENERIC.name, LoggingConfig().get_logging_level_setting(),
"Expected Logging Level Setting GENERIC.")
# Test getting Logging Level Setting.
# Override Config to 'SUCCESS'
def test_getLoggingLevelSuccessSettingTest(self):
new_value_map = {"LogLevel": "SUCCESS"}
Config().add_general_test_setting_values(new_value_map, True)
self.assertEquals(MessageType.SUCCESS.name, LoggingConfig().get_logging_level_setting(),
"Expected Logging Level Setting SUCCESS.")
# Test getting Logging Level Setting.
# Override Config to 'WARNING'
def test_getLoggingLevelWarningSettingTest(self):
new_value_map = {"LogLevel": "WARNING"}
Config().add_general_test_setting_values(new_value_map, True)
self.assertEquals(MessageType.WARNING.name, LoggingConfig().get_logging_level_setting(),
"Expected Logging Level Setting WARNING.")
# Test getting Logging Level Setting.
# Override Config to 'ERROR'
def test_getLoggingLevelErrorSettingTest(self):
new_value_map = {"LogLevel": "ERROR"}
Config().add_general_test_setting_values(new_value_map, True)
self.assertEquals(MessageType.ERROR.name, LoggingConfig().get_logging_level_setting(),
"Expected Logging Level Setting ERROR.")
# Test getting Logging Level Setting.
# Override Config to 'SUSPENDED'
def test_getLoggingLevelSuspendedSettingTest(self):
new_value_map = {"LogLevel": "SUSPENDED"}
Config().add_general_test_setting_values(new_value_map, True)
self.assertEquals(MessageType.SUSPENDED.name, LoggingConfig().get_logging_level_setting(),
"Expected Logging Level Setting SUSPENDED.")
# Test getting Logging Level Setting with Illegal Argument.
# Override Config to 'INVALIDVALUE' - Expect IllegalArgumentException
def test_getLoggingLevelIllegalArgumentTest(self):
with self.assertRaises(AttributeError):
new_value_map = {"LogLevel": "INVALIDVALUE"}
config = Config().add_general_test_setting_values(new_value_map, True)
LoggingConfig(config).get_logging_level_setting()
# Test getting File Logger.
# Override Config LogType to 'TXT' which creates FileLogger.
def test_getFileLoggerTest(self):
new_value_map = {"LogType": "TXT", "Log": "YES"}
config = Config().add_general_test_setting_values(new_value_map, True)
file_name = "TestLog.txt"
logging_config = LoggingConfig(config).get_logger(file_name)
self.assertTrue(isinstance(logging_config, FileLogger), "Expected Logger to be of Type FileLogger.")
# Test getting File Logger.
# Override Config LogType to 'CONSOLE' which creates ConsoleLogger.
def test_getConsoleLoggerTest(self):
new_value_map = {"LogType": "CONSOLE", "Log": "YES"}
logging_config = LoggingConfig()
logging_config.add_general_test_setting_values(new_value_map, True)
file_name = "TestLog.txt"
logger = logging_config.get_logger(file_name)
instance = isinstance(logger, ConsoleLogger)
self.assertTrue(instance, "Expected Logger to be of Type ConsoleLogger.")
# Test getting File Logger.
# Override Config Log to 'NO' which creates ConsoleLogger by default.
def test_getConsoleLoggerLoggingDisabledTest(self):
new_value_map = {"Log": "NO"}
Config().add_general_test_setting_values(new_value_map, True)
file_name = "TestLog.txt"
logging_config = LoggingConfig().get_logger(file_name)
instance = isinstance(logging_config, ConsoleLogger)
self.assertTrue(instance, "Expected Logger to be of Type ConsoleLogger.")
# Test getting Log Directory.
def test_getLogDirectoryTest(self):
default_path = os.path.abspath(os.path.dirname(__file__)) + "\\Logs"
self.assertEquals(LoggingConfig().get_log_directory(), default_path,
StringProcessor.safe_formatter("Expected Default Path '{}'.", default_path))
| 2.546875 | 3 |
fuzzytest.py | skylers27/heartsound | 0 | 12794566 | #fuzzytest.py
#<NAME>
#<NAME>
#fuzzy clustering for testFun.dat
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import skfuzzy as fuzz
colors = ['b', 'orange', 'g', 'r', 'c', 'm', 'y', 'k', 'Brown', 'ForestGreen']
# Insert his test data instead !!!!
# Then our data
# Collect Test Data
with open("testFun.dat") as textFile:
y = [line.split() for line in textFile]
y = np.array(y)
X = np.zeros(shape=(200,2))
# stores test data as number in array X (converts from strings)
for i in range(0,len(y)): # num rows
for j in range(0,len(y[0])): # num columns
X[i,j] = float(y[i,j])
xpts = np.zeros(len(y))
ypts = np.zeros(len(y))
labels = np.zeros(len(y)) # no labels
# xpts = x[all rows][0]
for i in range (0, len(y)):
xpts[i] = X[i][0]
# ypts = x[all rows][1]
for i in range (0, len(y)):
ypts[i] = X[i][1]
# Visualize the test data
fig0, ax0 = plt.subplots()
for label in range(2): # need 2 different kinds of labels, but only have 1 because the points are unlabeled
ax0.plot(xpts[labels == label], ypts[labels == label], '.',
color=colors[label])
ax0.set_title('Test data: 200 points x2 clusters.')
plt.show()
# Set up the loop and plot
fig1, axes1 = plt.subplots(2, 1, figsize=(8, 8)) #number of figures
alldata = np.vstack((xpts, ypts))
fpcs = []
for ncenters, ax in enumerate(axes1.reshape(-1), 2):
cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(
alldata, ncenters, 2, error=0.005, maxiter=1000, init=None)
print("Centers = ", str(ncenters), "\n") # u0 is the array of the memberiship functions
for i in range (len(y)): # columns
print ("Data point: ",xpts[i], ",", ypts[i]) #data point
print("Membership: ")
for j in range(ncenters): #number of clusters
print("Cluster: ", j, "\n", u0[j][i]) #membership for cluster
print()
# Store fpc values for later
fpcs.append(fpc)
# Plot assigned clusters, for each data point in training set
cluster_membership = np.argmax(u, axis=0)
for j in range(ncenters):
ax.plot(xpts[cluster_membership == j],
ypts[cluster_membership == j], '.', color=colors[j])
# Mark the center of each fuzzy cluster
for pt in cntr:
ax.plot(pt[0], pt[1], 'rs')
ax.set_title('Centers = {0}; FPC = {1:.2f}'.format(ncenters, fpc))
ax.axis('off')
fig1.tight_layout()
plt.show()
| 3.171875 | 3 |
Rest-Api/app.py | jigarkcj/Booyah- | 0 | 12794567 | import os
import json
from os import path as pth
from pytube import YouTube
from pydub import AudioSegment
from dejavu import Dejavu
from dejavu.recognize import FileRecognizer
from flask import Flask, request, abort, jsonify
from werkzeug.utils import secure_filename
from celery import Celery, states
from celery.exceptions import Ignore
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.mysql import MEDIUMINT
from marshmallow import Schema, fields, ValidationError, pre_dump
from flask_jwt_extended import create_access_token, get_jwt_identity, jwt_required, JWTManager
TMP_DOWNLOAD_FOLDER = '.tmp-download/'
TMP_UPLOAD_FOLDER = '.tmp-upload/'
DOWNLOAD_AUDIO_FORMAT = 'audio/webm'
ALLOWED_EXTENSIONS = set(['mp3', 'webm', '3gp', 'ogg'])
MEDIA_TYPES = ['television', 'movie', 'music']
def init_config(configpath):
"""
Load config from a JSON file
"""
try:
with open(configpath) as f:
config = json.load(f)
except IOError as err:
print("Cannot open configuration: %s. Exiting" % (str(err)))
return config
config = init_config("CONFIG.json")
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = TMP_UPLOAD_FOLDER
app.config['CELERY_BROKER_URL'] = 'amqp://{0}:{1}@localhost:5672/vbooyah'.format(config['rabbitmq']['user'], config['rabbitmq']['passwd'])
app.config['CELERY_RESULT_BACKEND'] = 'db+mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host'])
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{0}:{1}@{2}/dejavu'.format(config['database']['user'], config['database']['passwd'], config['database']['host'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['JWT_SECRET_KEY'] = 'super-secret' #TODO: Generate using os
db = SQLAlchemy(app)
djv = Dejavu(config)
jwt = JWTManager(app)
clry = Celery(app.name, backend=app.config['CELERY_RESULT_BACKEND'], broker=app.config['CELERY_BROKER_URL'])
clry.conf.update(app.config)
# create upload folders on app load
if (pth.isdir(TMP_UPLOAD_FOLDER) == False):
print "Creating upload folder"
os.mkdir(TMP_UPLOAD_FOLDER)
# SQLAlchemy models
class Users(db.Model):
"""
Users model.
"""
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
signature = db.Column(db.String(255), primary_key=True)
def __init__(self, signature):
"""
Initialize class.
"""
self.signature = signature
class IndexedMedia(db.Model):
"""
Map existing songs table to a db Model.
"""
table = db.Table("songs", db.metadata, autoload=True, autoload_with=db.engine)
__table__ = table
id = table.c.song_id
name = table.c.song_name
class Media(db.Model):
"""
Media model.
"""
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(255), nullable=False)
duration = db.Column(db.BigInteger, nullable=False)
author = db.Column(db.String(255), nullable=False)
mtype = db.Column(db.String(255), nullable=False)
sid = db.Column(MEDIUMINT(unsigned=True), db.ForeignKey('songs.song_id'))
def __init__(self, name, duration, author, mtype, sid):
"""
Initialize class.
"""
self.name = name
self.duration = duration
self.author = author
self.mtype = mtype
self.sid = sid
class Likes(db.Model):
"""
Likes model.
"""
media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req
user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
seconds = db.Column(db.JSON, nullable=False)
def __init__(self, user, media, seconds):
"""
Initialize class.
"""
self.user = user
self.media = media
self.seconds = seconds
class Dislikes(db.Model):
"""
Dislikes model.
"""
media = db.Column(db.Integer, db.ForeignKey('media.id'), primary_key=True) #circumvent int primary key req
user = db.Column(db.Integer, db.ForeignKey('users.id'), primary_key=True)
seconds = db.Column(db.JSON, nullable=False)
def __init__(self, user, media, seconds):
"""
Initialize class.
"""
self.user = user
self.media = media
self.seconds = seconds
db.create_all()
# marshmallow schemas
def userSignatureValidator(data):
"""
Validate user signature.
"""
user = Users.query.filter_by(signature=data).first()
if user != None:
raise ValidationError('Please provide another signature.')
def mediaTypeValidator(data):
"""
Validate media type.
"""
if data and data.lower() not in MEDIA_TYPES:
raise ValidationError('Mtype is invalid.')
def emptyLikesValidator(data):
"""
Ensure likes is not empty.
"""
if not data or len(data) == 0:
raise ValidationError('Seconds cannot be empty.')
class UserSchema(Schema):
"""
User serialization/deserialization schema.
"""
signature = fields.Str(required=True, load_only=True, validate=userSignatureValidator)
class MediaSchema(Schema):
"""
Media serialization/deserialization schema.
"""
id = fields.Int(required=True, dump_only=True)
name = fields.Str(required=True)
author = fields.Str(required=True)
duration = fields.Int(default=0, missing=0)
mtype = fields.Str(required=True, validate=mediaTypeValidator)
url = fields.Url(load_only=True)
indexed = fields.Method('check_indexed', dump_only=True)
def check_indexed(self, media):
"""
Return Boolean indicator if media is indexed.
"""
return not media.sid == None
class LikesDislikesSchema(Schema):
"""
Likes & dislikes serialization/deserialization schema.
"""
#Discard seconds out of timer window
user = fields.Int(required=True, dump_only=True)
media = fields.Int(required=True, dump_only=True)
seconds = fields.List(fields.Int(), required=True, validate=emptyLikesValidator)
@pre_dump
def process_json(self, data):
"""
Convert json string to array before
passing it to dump().
"""
data.seconds = json.loads(data.seconds)
return data
user_schema = UserSchema()
media_schema = MediaSchema()
media_list_schema = MediaSchema(many=True)
user_likes_schema = LikesDislikesSchema()
media_likes_schema = LikesDislikesSchema(many=True)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
#TODO: Increase no. workers
@clry.task
def testInstall():
"""
Test installation.
"""
return "Hello " + get_jwt_identity()
@clry.task(bind=True)
def fingerprintMedia(self, media):
"""
Fingerprint and add a given media.
"""
url = media.get("url", None)
sid = None
if url != None: #fingerprint
try:
yt = YouTube(url)
except Exception as err:
return {"data":{"msg":"Media unavailable."}, "code": 500}
media['duration'] = int(yt.length)
stream_list = yt.streams.filter(only_audio=True).all()
stream = None
for i in xrange(0, len(stream_list)):
if stream_list[i].mime_type == DOWNLOAD_AUDIO_FORMAT:
stream = stream_list[i]
break;
if stream == None:
return {"data":{"msg":"Media stream unavailable."}, "code": 500}
if (pth.isdir(TMP_DOWNLOAD_FOLDER) == False):
os.mkdir(TMP_DOWNLOAD_FOLDER)
try:
filepath = stream.download(TMP_DOWNLOAD_FOLDER)
sid = djv.fingerprint_file(filepath)
#os.remove(filepath) # rmv file after use
except Exception as err:
return {"data":{"msg":"Unable to index media."}, "code": 500}
if sid <= 0:
return {"data":{"msg":"Media already exists."}, "code": 409}
row = Media(name=media['name'], duration=media['duration'], author=media['author'], mtype=media['mtype'], sid=sid)
db.session.add(row)
db.session.commit()
db.session.refresh(row)
return {"data": media_schema.dump(row), "code": 201}
@clry.task(bind=True)
def recognizeMedia(self, filepath):
#TODO: Use sth better than filenames
result = {}
try:
song = djv.recognize(FileRecognizer, filepath)
media = Media.query.filter_by(sid=song['song_id']).first()
if media:
print song['song_id']
result = {
"id": media.id,
"offset": song['offset_seconds'],
"duration": media.duration,
"match_time": song['match_time']
}
except Exception as e:
return {"data":{"msg":"Recognition failed."}, "code": 500}
if not song:
return {"data":{"msg":"Media not found."}, "code": 404}
return {"data":result, "code": 200}
@app.route('/hello', methods=['GET'])
@jwt_required
def helloApi():
"""
Installation test.
"""
asynctask = testInstall.apply()
if asynctask.ready() and asynctask.successful():
return jsonify({"msg": "Success!"})
abort("Bad installation", 500)
@app.route('/register', methods=['POST'])
def registerApi():
"""
Add a user to the database.
"""
if not request.is_json or request.get_json() == None:
abort(400, "Json data not provided.")
json_data = request.get_json()
try:
data = user_schema.load(json_data)
except ValidationError as err:
return jsonify(err.messages), 400
user = Users(signature=data['signature'])
db.session.add(user)
db.session.commit()
db.session.refresh(user)
token = create_access_token(identity=data['signature'], expires_delta=False)
return jsonify({"uid":user.id, "access_token":token})
@app.route('/media', methods=['GET','POST'])
@jwt_required
def mediaApi():
"""
Add & retrieve media.
"""
if request.method == 'GET':
media_list = Media.query.order_by(Media.name).all()
data = media_list_schema.dump(media_list)
return jsonify(data), 200
elif request.method == 'POST':
if not request.is_json or request.get_json() == None:
abort(400, "Json data not provided.")
json_data = request.get_json()
try:
data = media_schema.load(json_data)
except ValidationError as err:
return jsonify(err.messages), 400
        asynctask = fingerprintMedia.delay(data) #TODO: Ensure celery always receives the task before returning
return jsonify({"uuid": asynctask.task_id}), 202
@app.route('/media/status/<uuid:sid>', methods=['GET'])
@jwt_required
def fingerprintStatusApi(sid):
"""
Retrieve the status of a fingerprinting task.
"""
fingerprinter = fingerprintMedia.AsyncResult(sid) #TODO: Handle sids that don't exist
if fingerprinter.ready():
if fingerprinter.successful():
result = fingerprinter.get()
return jsonify(result['data']), result['code']
if fingerprinter.failed():
return abort(500, "Error indexing media.")
return jsonify({"uuid": str(sid)}), 202
@app.route('/media/<int:mid>', methods=['GET'])
def mediaItemApi(mid):
"""
Retrieve the details for the media mid.
"""
media = Media.query.get(mid)
if not media:
abort(404, "Media not found.")
return jsonify(media_schema.dump(media))
@app.route('/media/recognize', methods=['POST'])
@jwt_required
def mediaRecognitionApi():
"""
Retrieve the resource id, name, author
and time index of a sampled media.
"""
#TODO: Improve recognition
if 'file' not in request.files:
abort(400, "No file.")
file = request.files['file']
if file.filename == '':
abort(400, "No selected file")
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
filepath = pth.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
asynctask = recognizeMedia.delay(filepath)
return jsonify({"uuid": asynctask.task_id}), 202
abort(400, "Bad request")
@app.route('/media/recognize/status/<uuid:sid>', methods=['GET'])
@jwt_required
def recognitionStatusApi(sid):
"""
    Retrieve the status of a recognition activity.
"""
recognizer = recognizeMedia.AsyncResult(sid)
if recognizer.ready():
if recognizer.successful():
result = recognizer.get()
return jsonify(result['data']), result['code']
if recognizer.failed():
return abort(500, "Error recognizing media.")
return jsonify({"uuid": str(sid)}), 202
@app.route('/media/<int:mid>/likes', methods=['GET'])
@app.route('/media/<int:mid>/dislikes', methods=['GET'])
@jwt_required
def mediaLikesApi(mid):
"""
Retrieve list of user likes for a media.
"""
try:
if Media.query.get(mid) == None:
abort(404, "Media not found.")
except Exception as e:
abort(404, "Ratings not found.")
likes = (str(request.url_rule).split("/")[-1] == "likes")
if likes:
rating = Likes.query.filter_by(media=mid).order_by(Likes.user).all()
else:
rating = Dislikes.query.filter_by(media=mid).order_by(Dislikes.user).all()
    if not rating:
        return jsonify([])
return jsonify(media_likes_schema.dump(rating))
@app.route('/media/<int:mid>/likes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@app.route('/media/<int:mid>/dislikes/<int:uid>', methods=['GET', 'POST', 'PUT', 'DELETE'])
@jwt_required
def userLikesApi(mid, uid):
"""
Retrieve, add & modify the user likes
for a particular media.
"""
try:
user = Users.query.filter_by(signature=get_jwt_identity()).first()
if user == None or user.id != uid:
raise Exception
except Exception as e:
abort(401)
try:
if Media.query.get(mid) == None:
raise Exception
except Exception as e:
abort(404, "Media not found.")
likes = (str(request.url_rule).split("/")[-2] == "likes")
if likes:
qresult = Likes.query.filter_by(user=uid, media=mid)
else:
qresult = Dislikes.query.filter_by(user=uid, media=mid)
existingRatings = qresult.first()
if request.method == 'GET':
if not existingRatings:
return jsonify({})
return jsonify(user_likes_schema.dump(existingRatings))
elif request.method == 'DELETE':
if not existingRatings:
abort(404, "Ratings not found.")
qresult.delete()
db.session.commit()
return jsonify({"success": True})
else:
if not request.is_json or request.get_json() == None:
abort(400, "Json data not provided.")
json_data = request.get_json()
try:
data = user_likes_schema.load(json_data)
except ValidationError as err:
return jsonify(err.messages), 400
if request.method == 'POST':
if existingRatings:
abort(409, "User ratings exists for media.")
else: #create
if likes:
newRatings = Likes(user=uid, media=mid, seconds=json.dumps(data["seconds"]))
else:
newRatings = Dislikes(user=uid, media=mid, seconds=json.dumps(data["seconds"]))
db.session.add(newRatings)
db.session.commit()
return jsonify({"user": uid, "media": mid, "seconds": data["seconds"]}), 201
elif request.method == 'PUT':
if not existingRatings:
abort(404, "Ratings not found.")
else: #modify
existingRatings.seconds = json.dumps(data["seconds"])
db.session.commit()
return jsonify({"user": uid, "media": mid, "seconds": data["seconds"]}), 200 | 1.960938 | 2 |
social_graph/models.py | suselrd/django-social-graph | 0 | 12794568 | # coding=utf-8
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.sites.managers import CurrentSiteManager
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from .fields import JSONField
from .consistency_enforcers import *
class EdgeTypeManager(models.Manager):
# Cache to avoid re-looking up EdgeType objects all over the place.
_cache = {}
def get(self, *args, **kwargs):
et = None
if 'id' in kwargs:
try:
et = self.__class__._cache[self.db][kwargs['id']]
except KeyError:
pass
elif 'pk' in kwargs:
try:
et = self.__class__._cache[self.db][kwargs['pk']]
except KeyError:
pass
elif 'name' in kwargs:
try:
et = self.__class__._cache[self.db][kwargs['name']]
except KeyError:
pass
if et is None:
et = super(EdgeTypeManager, self).get(*args, **kwargs)
self._add_to_cache(self.db, et)
return et
def _add_to_cache(self, using, et):
self.__class__._cache.setdefault(using, {})[et.id] = et
self.__class__._cache.setdefault(using, {})[et.name] = et
def rem_from_cache(self, using, et):
try:
del self.__class__._cache.setdefault(using, {})[et.id]
del self.__class__._cache.setdefault(using, {})[et.name]
except KeyError:
pass
def clear_cache(self):
"""
Clear out the edge-type cache.
"""
self.__class__._cache.clear()
class EdgeType(models.Model):
name = models.CharField(_(u'name'), max_length=100, unique=True)
read_as = models.CharField(_(u'read as'), max_length=100)
objects = EdgeTypeManager()
class Meta(object):
ordering = ['name']
verbose_name = _(u'Edge type')
verbose_name_plural = _(u'Edge types')
def __unicode__(self):
return u'%s' % self.name
def setting_name(self):
return self.name.upper()
def delete(self, using=None):
self.__class__.objects.rem_from_cache(using, self)
super(EdgeType, self).delete(using)
class EdgeTypeAssociationManager(models.Manager):
# Cache to avoid re-looking up EdgeTypeAssociation objects all over the place.
_cache = {}
_direct_cache = {}
_inverse_cache = {}
def get(self, *args, **kwargs):
eta = None
if 'id' in kwargs:
try:
eta = self.__class__._cache[self.db][kwargs['id']]
except KeyError:
pass
elif 'pk' in kwargs:
try:
eta = self.__class__._cache[self.db][kwargs['pk']]
except KeyError:
pass
if eta is None:
eta = super(EdgeTypeAssociationManager, self).get(*args, **kwargs)
self._add_to_cache(self.db, eta)
return eta
def get_for_direct_edge_type(self, et):
try:
eta = self.__class__._direct_cache[self.db][et.id]
except KeyError:
eta = self.get(direct=et)
self._add_to_cache(self.db, eta)
return eta
def get_for_inverse_edge_type(self, et):
try:
eta = self.__class__._inverse_cache[self.db][et.id]
except KeyError:
eta = self.get(inverse=et)
self._add_to_cache(self.db, eta)
return eta
def _add_to_cache(self, using, eta):
self.__class__._cache.setdefault(using, {})[eta.id] = eta
self.__class__._direct_cache.setdefault(using, {})[eta.direct.id] = eta
self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id] = eta
def rem_from_cache(self, using, eta):
try:
del self.__class__._cache.setdefault(using, {})[eta.id]
del self.__class__._direct_cache.setdefault(using, {})[eta.direct.id]
del self.__class__._inverse_cache.setdefault(using, {})[eta.inverse.id]
except KeyError:
pass
def clear_cache(self):
"""
Clear out the edge-type-association cache.
"""
self.__class__._cache.clear()
class EdgeTypeAssociation(models.Model):
direct = models.ForeignKey(EdgeType, unique=True, related_name='is_direct_in')
inverse = models.ForeignKey(EdgeType, unique=True, related_name='is_inverse_in')
objects = EdgeTypeAssociationManager()
def __unicode__(self):
return u"%(direct)s <-> %(inverse)s" % {
'direct': self.direct,
'inverse': self.inverse
}
def delete(self, using=None):
self.__class__.objects.rem_from_cache(using, self)
super(EdgeTypeAssociation, self).delete(using)
class Edge(models.Model):
# fromNode field
fromNode_type = models.ForeignKey(ContentType,
verbose_name=_(u'from node type'),
related_name="from_node_type_set_for_%(class)s")
fromNode_pk = models.TextField(_(u'fromNode ID'))
fromNode = generic.GenericForeignKey(ct_field="fromNode_type", fk_field="fromNode_pk")
# toNode field
toNode_type = models.ForeignKey(ContentType,
verbose_name=_(u'to node type'),
related_name="to_node_type_set_for_%(class)s")
toNode_pk = models.TextField(_(u'toNode ID'))
toNode = generic.GenericForeignKey(ct_field="toNode_type", fk_field="toNode_pk")
# edge attributes
type = models.ForeignKey(EdgeType)
attributes = JSONField(_(u'attributes'), default='{}')
# edge metadata
time = models.DateTimeField(_(u'time'), auto_now_add=True)
site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edges')
auto = models.BooleanField(_(u'auto created'), default=False)
objects = models.Manager()
on_site = CurrentSiteManager()
class Meta(object):
unique_together = ['fromNode_type', 'fromNode_pk', 'toNode_type', 'toNode_pk', 'type', 'site']
ordering = ['-time']
def __unicode__(self):
return (
_(u'%(from)s %(verb)s %(to)s') % {
'from': self.fromNode if self.fromNode else '',
'verb': self.type.read_as,
'to': self.toNode if self.toNode else ''
}
)
@receiver(models.signals.pre_save, sender=Edge, dispatch_uid='pre_save_edge')
def pre_save_handler(instance, **kwargs):
if not instance.site_id:
instance.site = getattr(instance.fromNode, 'site', getattr(instance.toNode, 'site', Site.objects.get_current()))
class EdgeCount(models.Model):
# fromNode field
fromNode_type = models.ForeignKey(ContentType,
verbose_name=_(u'from node type'))
fromNode_pk = models.TextField(_(u'fromNode ID'))
fromNode = generic.GenericForeignKey(ct_field="fromNode_type", fk_field="fromNode_pk")
# edge attributes
type = models.ForeignKey(EdgeType)
# count
count = models.IntegerField(_(u'count'), default=0)
site = models.ForeignKey(Site, verbose_name=_(u'site'), related_name='edge_counters')
objects = models.Manager()
on_site = CurrentSiteManager()
def __unicode__(self):
return (
_(u'%(from)s has %(count)d %(type)s edge(s)') % {
'from': self.fromNode if self.fromNode else '',
'count': self.count,
'type': self.type
}
)
class Meta(object):
unique_together = ['fromNode_type', 'fromNode_pk', 'type', 'site']
@receiver(models.signals.pre_save, sender=EdgeCount, dispatch_uid='pre_save_edge_count')
def pre_save_count_handler(instance, **kwargs):
if not instance.site_id:
instance.site = getattr(instance.fromNode, 'site', Site.objects.get_current())
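# Illustrative usage sketch (the edge type name and the user objects below are
# hypothetical examples, not part of this app):
#
#   follows = EdgeType.objects.create(name='follows', read_as='follows')
#   Edge.objects.create(fromNode=user_a, toNode=user_b, type=follows)
#
# The signal handlers connected below then keep symmetric edges and EdgeCount
# rows consistent whenever edges are created or deleted.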
# CONNECT LISTENERS TO ENFORCE GRAPH CONSISTENCY
models.signals.post_save.connect(
SymmetricEdgeManager.create_symmetric_edge,
sender=Edge,
dispatch_uid='create_symmetric_edge'
)
models.signals.post_delete.connect(
SymmetricEdgeManager.delete_symmetric_edge,
sender=Edge,
dispatch_uid='delete_symmetric_edge'
)
models.signals.post_save.connect(
SymmetricEdgeTypeAssociationManager.create_symmetric_association,
sender=EdgeTypeAssociation,
dispatch_uid='create_symmetric_edge_type_association'
)
models.signals.post_delete.connect(
SymmetricEdgeTypeAssociationManager.delete_symmetric_association,
sender=EdgeTypeAssociation,
dispatch_uid='delete_symmetric_edge_type_association'
)
models.signals.post_save.connect(
EdgeCounter.increase_count,
sender=Edge,
dispatch_uid='increase_edge_count'
)
models.signals.post_delete.connect(
EdgeCounter.decrease_count,
sender=Edge,
dispatch_uid='decrease_edge_count'
)
models.signals.pre_delete.connect(
EdgeCleaner.clean_edges,
dispatch_uid='clean_edges'
)
# Clear the EdgeType cache
EdgeType.objects.clear_cache()
| 1.867188 | 2 |
typish/classes/_subscriptable_type.py | georgeharker/typish | 16 | 12794569 | <reponame>georgeharker/typish<gh_stars>10-100
class _SubscribedType(type):
"""
This class is a placeholder to let the IDE know the attributes of the
returned type after a __getitem__.
"""
__origin__ = None
__args__ = None
class SubscriptableType(type):
"""
This metaclass will allow a type to become subscriptable.
>>> class SomeType(metaclass=SubscriptableType):
... pass
>>> SomeTypeSub = SomeType['some args']
>>> SomeTypeSub.__args__
'some args'
>>> SomeTypeSub.__origin__.__name__
'SomeType'
"""
def __init_subclass__(mcs, **kwargs):
mcs._hash = None
mcs.__args__ = None
mcs.__origin__ = None
def __getitem__(self, item) -> _SubscribedType:
body = {
**self.__dict__,
'__args__': item,
'__origin__': self,
}
bases = self, *self.__bases__
result = type(self.__name__, bases, body)
if hasattr(result, '_after_subscription'):
# TODO check if _after_subscription is static
result._after_subscription(item)
return result
def __eq__(self, other):
self_args = getattr(self, '__args__', None)
self_origin = getattr(self, '__origin__', None)
other_args = getattr(other, '__args__', None)
other_origin = getattr(other, '__origin__', None)
return self_args == other_args and self_origin == other_origin
def __hash__(self):
if not getattr(self, '_hash', None):
self._hash = hash('{}{}'.format(self.__origin__, self.__args__))
return self._hash
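# Sketch of the optional _after_subscription hook consumed in __getitem__ above
# (ArgStore is a hypothetical example class, not part of this module):
#
#   class ArgStore(metaclass=SubscriptableType):
#       @classmethod
#       def _after_subscription(cls, item):
#           cls.stored = item
#
#   assert ArgStore[int].stored is int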
| 2.46875 | 2 |
09_road_limit/model.py | yeodongbin/2020AIChallengeCode | 0 | 12794570 | <reponame>yeodongbin/2020AIChallengeCode<gh_stars>0
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from efficientnet_pytorch import EfficientNet
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection import MaskRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from custom_model.faster_rcnn import fasterrcnn_resnet50_fpn
from custom_model.mask_rcnn import maskrcnn_resnet50_fpn
def get_model_instance_segmentation_custom0(num_classes):
model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
print("fasterrcnn_resnet50_fpn custom call - 41,755,286 (resnet50) / 28,730,006 (resnet18) / 28,730,006 resnet / 22,463,126 / 오잉..light resnet : 22,468,758/ 19,333,398 / custom resent (64 쭉..) 17,664,662")
return model
def get_model_instance_segmentation0(num_classes):
model = fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
print("fasterrcnn_resnet50_fpn custom call - 41,755,286 / ")
return model
def get_model_instance_segmentation(num_classes):
    # Load an instance segmentation model pre-trained on COCO
#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
#backbone = torchvision.models.mobilenet_v2(pretrained=False).features
#backbone.out_channels = 1280
#anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
# aspect_ratios=((0.5, 1.0, 2.0),))
#roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
# output_size=1,
# sampling_ratio=2)
#model = FasterRCNN(backbone,
# num_classes=num_classes,
# rpn_anchor_generator=anchor_generator,
# box_roi_pool=roi_pooler)
print("fasterrcnn_resnet50_fpn call - 41,401,661 / 41,532,886")
    # Get the number of input features for the classifier
#in_features = model.roi_heads.box_predictor.cls_score.in_features
    # Replace the pre-trained head with a new one
#model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
#in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
#hidden_layer = 1
# and replace the mask predictor with a new one
#model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
# hidden_layer,
# num_classes)
return model
def get_model_instance_segmentation_custom1(num_classes):
    # Load an instance segmentation model pre-trained on COCO
model = maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
#model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
#backbone = torchvision.models.mobilenet_v2(pretrained=False).features
#backbone.out_channels = 1280
#anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
# aspect_ratios=((0.5, 1.0, 2.0),))
#roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
# output_size=1,
# sampling_ratio=2)
#model = FasterRCNN(backbone,
# num_classes=num_classes,
# rpn_anchor_generator=anchor_generator,
# box_roi_pool=roi_pooler)
print("maskrcnn_resnet50_fpn custom call1 - resnet : 24,743,507 mobilenet : 87,366,291 squeezenet : 33,161,683 densnet : 43,702,739, resnet basicblock 3*3 -> 1*1 : 20,549,203 / basic : 20,543,571 / basicblock con1 : 20,195,411 / 채널 : 강제로 128 지정시 13,033,555 / 128 all 변경 : 9,465,555 ")
    # Get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
    # Replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 128
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
hidden_layer,
num_classes)
return model
def get_model_instance_segmentation2(num_classes):
    # Load an instance segmentation model pre-trained on COCO
#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
#model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
backbone = torchvision.models.mobilenet_v2(pretrained=False).features
#backbone.out_channels = 1
backbone.out_channels = 1280
anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
aspect_ratios=((0.5, 1.0, 2.0),))
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
output_size=1,
sampling_ratio=2)
model = FasterRCNN(backbone,
num_classes=num_classes,
rpn_anchor_generator=anchor_generator,
box_roi_pool=roi_pooler)
print("mobilenet_v2 call2 - out_channels :1280, 19,540,921")
    # Get the number of input features for the classifier
#in_features = backbone
    # Replace the pre-trained head with a new one
#model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
#in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
#hidden_layer = 1
# and replace the mask predictor with a new one
#model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
# hidden_layer,
# num_classes)
return model
def get_model_instance_segmentation4(num_classes):
    # Load an instance segmentation model pre-trained on COCO
#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
#model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
backbone = torchvision.models.squeezenet1_1(pretrained=False).features
#backbone.out_channels = 1
backbone.out_channels = 512
anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
aspect_ratios=((0.5, 1.0, 2.0),))
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
output_size=7,
sampling_ratio=2)
mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],
output_size=14,
sampling_ratio=2)
model = MaskRCNN(backbone,
num_classes=num_classes,
box_roi_pool =roi_pooler,
mask_roi_pool = mask_roi_pooler
)
#print("squeezenet1_0 call2 - out_channels :1280, 18,052,473 / 72M")
#print("squeezenet1_0 call2 - out_channels :516, 4,862,777 / 19.5M")
#print("squeezenet1_1 call2 - out_channels :516, 4,849,849 4,862,777 / 19.5M")
print("squeezenet1_1 call2 - out_channels :256, 2,757,369 / 11M (15,000,000 / 15,000,000)")
print("squeezenet1_1 call2 - out_channels :512, 4,808,441 / 19.2M (15,000,000)")
print("squeezenet1_1 call2 - out_channels :512, 33,192,463 33,161,683 / 172M (15,000,000)")
#
    # Get the number of input features for the classifier
#in_features = backbone
    # Replace the pre-trained head with a new one
#model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
#in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
#hidden_layer = 1
# and replace the mask predictor with a new one
#model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
# hidden_layer,
# num_classes)
return model
def get_model_instance_segmentation5(num_classes):
    # Load an instance segmentation model pre-trained on COCO
#model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
#model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
backbone = torchvision.models.densenet161(pretrained=False).features
#backbone.out_channels = 1
backbone.out_channels = 256
anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
aspect_ratios=((0.5, 1.0, 2.0),))
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
output_size=1,
sampling_ratio=2)
model = FasterRCNN(backbone,
num_classes=num_classes,
rpn_anchor_generator=anchor_generator,
box_roi_pool=roi_pooler)
print("densenet161 call2 - out_channels :256, 28,506,873 / 150M")
    # Get the number of input features for the classifier
#in_features = backbone
    # Replace the pre-trained head with a new one
#model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
#in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
#hidden_layer = 1
# and replace the mask predictor with a new one
#model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
# hidden_layer,
# num_classes)
return model
def get_model_instance_segmentation6(num_classes):
backbone = torchvision.models.squeezenet1_1(pretrained=False).features
backbone.out_channels = 512
anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
aspect_ratios=((0.5, 1.0, 2.0),))
roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
output_size=1,
sampling_ratio=2)
model = FasterRCNN(backbone,
num_classes=num_classes,
rpn_anchor_generator=anchor_generator,
box_roi_pool=roi_pooler)
print("get_model_instance_segmentation6 call6 - out_channels :512, 4,808,441 / (15,000,000) ")
return model
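# The parameter counts quoted in the print statements above can be reproduced with
# a small helper like this (illustrative sketch; num_classes=4 is an arbitrary
# example value, not prescribed by this module):
#
#   def count_parameters(model):
#       return sum(p.numel() for p in model.parameters() if p.requires_grad)
#
#   print(count_parameters(get_model_instance_segmentation6(num_classes=4)))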
| 2.125 | 2 |
steamprofile/__init__.py | aaronlyy/steamprofile | 0 | 12794571 | <gh_stars>0
from .steamprofile import * | 1.203125 | 1 |
sygicmaps/client.py | Sygic/sygic-maps-services-python | 2 | 12794572 | <gh_stars>1-10
import json
import time
import requests
from sygicmaps.input import Input
SERVICES_URL = "https://{}-geocoding.api.sygic.com"
GEOCODE_URL_PATH = "/{}/api/geocode"
GEOCODE_BATCH_URL_PATH = "/{}/api/batch/geocode"
REVERSE_GEOCODE_URL_PATH = "/{}/api/reversegeocode"
REVERSE_GEOCODE_BATCH_URL_PATH = "/{}/api/batch/reversegeocode"
class Client(object):
def __init__(self, key=None, region='eu', custom_url=None, version='v0'):
if not key:
raise ValueError("API key is not set.")
self.custom_url = custom_url
self.services_url = SERVICES_URL.format(region)
self.version = version
self.session = requests.Session()
self.key = key
@staticmethod
def __to_inputs_data(input):
if type(input) is str:
return Input(input)
return input
@staticmethod
def __remove_nulls(d):
return {k: v for k, v in d.items() if v is not None}
def __get_services_url_geocode(self):
return self.services_url + GEOCODE_URL_PATH.format(self.version)
def __get_services_url_reverse_geocode_batch(self):
return self.services_url + REVERSE_GEOCODE_BATCH_URL_PATH.format(self.version)
def __get_services_url_geocode_batch(self):
return self.services_url + GEOCODE_BATCH_URL_PATH.format(self.version)
def __get_services_url_reverse_geocode(self):
return self.services_url + REVERSE_GEOCODE_URL_PATH.format(self.version)
@staticmethod
def __make_coords_dict_helper(line_of_coords):
lat, lon = line_of_coords.split(',')
return dict(lat=lat, lon=lon)
def geocode(self, location=None, country=None, city=None, suburb=None, street=None, house_number=None,
zip=None, admin_level_1=None):
params = {"key": self.key}
if location:
params["location"] = location
if country:
params["country"] = country
if city:
params["city"] = city
if suburb:
params["suburb"] = suburb
if street:
params["street"] = street
if house_number:
params["house_number"] = house_number
if zip:
params["zip"] = zip
if admin_level_1:
params["admin_level_1"] = admin_level_1
requests_method = self.session.get
url = self.__get_services_url_geocode()
response = requests_method(url, params=params)
body = response.json()
api_status = body["status"]
if api_status == "OK" or api_status == "NO_RESULTS":
return body.get("results", [])
def reverse_geocode(self, location=None):
params = {"key": self.key}
if location:
params["location"] = location
requests_method = self.session.get
url = self.__get_services_url_reverse_geocode()
response = requests_method(url, params=params)
body = response.json()
api_status = body["status"]
if api_status == "OK" or api_status == "NO_RESULTS":
return body.get("results", [])
def __geocode_batch_base(self, post_data, services_url):
url = services_url
params = {"key": self.key}
post_body = json.dumps(post_data)
r = requests.post(url, data=post_body, params=params, headers={'Content-type': 'application/json'})
results_url = r.headers.get('location')
r = requests.get(results_url)
while True:
retry_after = r.headers.get('retry-after')
if retry_after is not None:
time.sleep(int(retry_after))
r = requests.get(results_url)
continue
break
body = r.json()
api_status = body["status"]
if api_status == "OK" or api_status == "NO_RESULTS":
return body.get("results", [])
def reverse_geocode_batch(self, locations: list):
inputs = locations
if type(inputs) is str:
inputs = [inputs]
if len(inputs) == 0:
raise ValueError("Param locations has to contain some items.")
if len(inputs) >= 10000:
raise ValueError("Param locations has to be less than 10000.")
if ',' not in inputs[0]:
raise ValueError("No comma delimiter found, please verify that location input format is list of LAT,LON")
inputs = list(map(lambda line_of_coords: self.__make_coords_dict_helper(line_of_coords), inputs))
json_string = json.dumps(inputs)
post_data = list(json.loads(json_string))
services_url = self.__get_services_url_reverse_geocode_batch()
return self.__geocode_batch_base(post_data, services_url)
def geocode_batch(self, locations: list):
inputs = locations
if type(inputs) is str:
inputs = [inputs]
if len(inputs) == 0:
raise ValueError("Param locations has to contain some items.")
if len(inputs) >= 10000:
raise ValueError("Param locations has to be less than 10000.")
inputs_data = list(map(self.__to_inputs_data, inputs))
json_string = json.dumps(inputs_data, default=lambda x: x.__dict__)
post_data = list(json.loads(json_string))
post_data = list(map(self.__remove_nulls, post_data))
services_url = self.__get_services_url_geocode_batch()
return self.__geocode_batch_base(post_data, services_url)
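# Illustrative usage sketch (the API key and addresses below are placeholders):
#
#   client = Client(key="YOUR_API_KEY")
#   single = client.geocode(location="Mlynske Nivy 16, Bratislava")
#   batch = client.geocode_batch(["Mlynske Nivy 16, Bratislava",
#                                 "Pribinova 20, Bratislava"])
#   reverse = client.reverse_geocode_batch(["48.1486,17.1077"])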
| 2.59375 | 3 |
tests/integration/conftest.py | eruvanos/warehouse14 | 2 | 12794573 | from uuid import uuid4
import boto3
import pytest
from moto import mock_dynamodb2, mock_s3
from tests.local_login import MockAuthenticator
from warehouse14 import DBBackend, PackageStorage
from warehouse14.repos_dynamo import DynamoDBBackend
from warehouse14.storage import S3Storage
@pytest.fixture
def bucket():
"""Pytest fixture that creates the bucket in
the fake moto AWS account
"""
with mock_s3():
s3 = boto3.resource("s3", region_name="us-east-1")
bucket = s3.Bucket(str(uuid4()))
bucket.create()
yield bucket
@pytest.fixture
def table():
"""Pytest fixture that creates the table in
the fake moto AWS account
"""
with mock_dynamodb2():
dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
yield dynamodb.create_table(
TableName=str(uuid4()),
AttributeDefinitions=[
{"AttributeName": "pk", "AttributeType": "S"},
{"AttributeName": "sk", "AttributeType": "S"},
],
KeySchema=[
{"AttributeName": "pk", "KeyType": "HASH"},
{"AttributeName": "sk", "KeyType": "RANGE"},
],
BillingMode="PAY_PER_REQUEST",
GlobalSecondaryIndexes=[
{
"IndexName": "sk_gsi",
"KeySchema": [
{"AttributeName": "sk", "KeyType": "HASH"},
{"AttributeName": "pk", "KeyType": "RANGE"},
],
"Projection": {
"ProjectionType": "ALL",
},
}
],
)
@pytest.fixture
def authenticator():
return MockAuthenticator()
@pytest.fixture
def db(table) -> DBBackend:
return DynamoDBBackend(table)
@pytest.fixture
def storage(bucket) -> PackageStorage:
return S3Storage(bucket)
@pytest.fixture
async def page():
from pyppeteer import launch
browser = await launch({"headless": True})
yield (await browser.pages())[0]
await browser.close()
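# Illustrative example of a test consuming the moto-backed fixtures above
# (sketch only; it exercises the fake S3 bucket directly, not warehouse14 itself):
#
#   def test_bucket_roundtrip(bucket):
#       bucket.put_object(Key="hello.txt", Body=b"hi")
#       assert [o.key for o in bucket.objects.all()] == ["hello.txt"]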
| 2.109375 | 2 |
admin_enhancer/tests/admin.py | JanOosting/django-admin-enhancer | 1 | 12794574 | from django.contrib import admin
from .. import admin as enhanced_admin
from .models import Author, Book, Character, Theme
class EnhancedModelAdmin(enhanced_admin.EnhancedModelAdminMixin,
admin.ModelAdmin):
pass
class CharacterInline(enhanced_admin.EnhancedAdminMixin,
admin.TabularInline):
model = Character
class BookAdmin(EnhancedModelAdmin):
inlines = (CharacterInline,)
filter_horizontal = ('themes',)
admin.site.register(Author, EnhancedModelAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Theme, EnhancedModelAdmin)
| 1.78125 | 2 |
eulerproject.py | xiangyuwu/euler | 0 | 12794575 | <filename>eulerproject.py<gh_stars>0
import math
class PrimeNumbers(object):
def __init__(self):
pass
def determinePrime(self, testnumber):
primestate = True
list1 = range(3, int(math.sqrt(testnumber)) + 1)
divisor_list = [element for element in list1 if element % 2 != 0]
divisor_list.insert(0, 2)
for divisor in divisor_list:
modulus = testnumber % divisor
if modulus == 0:
primestate = False
                break
return primestate
def primeList(self, index_limit):
listofprimes = [2]
test_integer = 3
index_j = 0
while index_j < index_limit:
state = self.determinePrime(test_integer)
if state is True:
listofprimes.append(test_integer)
else:
pass
test_integer += 1
index_j = len(listofprimes)
return listofprimes
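# Example usage (illustrative):
#
#   pn = PrimeNumbers()
#   pn.determinePrime(13)   # -> True
#   pn.primeList(5)         # -> [2, 3, 5, 7, 11]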
| 3.59375 | 4 |
zardoz/zroll.py | kpow-jp/zardoz | 1 | 12794576 | import typing
import discord
from discord.ext import commands
from .database import fetch_guild_db
from .logging import LoggingMixin
from .rolls import (RollHandler, QuietRollHandler, SekretRollHandler,
RollList, DiceDelta)
from .utils import handle_http_exception
class RollCommands(commands.Cog, LoggingMixin):
def __init__(self, bot, db):
self.bot = bot
self.db = db
super().__init__()
@commands.command(name='z', help='Evaluate a dice roll.')
@fetch_guild_db
@handle_http_exception
async def zardoz_roll(self, ctx, *, args):
try:
roll = RollHandler(ctx, self.log, ctx.variables, args,
game_mode=ctx.game_mode)
except ValueError as e:
self.log.error(f'Roll handling failed: {e}')
await ctx.message.reply(f'You fucked up your roll, {ctx.author}. {e}')
else:
await roll.add_to_db(ctx.guild_db)
await ctx.message.reply(roll.msg())
@commands.command(name='zq', help='Evaluate a dice roll, quietly.')
@fetch_guild_db
@handle_http_exception
async def zardoz_quiet_roll(self, ctx, *, args):
try:
roll = QuietRollHandler(ctx, self.log, ctx.variables, args,
game_mode=ctx.game_mode)
except ValueError as e:
self.log.error(f'Roll handling failed: {e}')
await ctx.message.reply(f'You fucked up your roll, {ctx.author}. {e}')
else:
await roll.add_to_db(ctx.guild_db)
await ctx.message.reply(roll.msg())
@commands.command(name='zs', help='Make a secret roll and DM to member.')
@fetch_guild_db
@handle_http_exception
async def zardoz_secret_roll(self, ctx, member: typing.Optional[discord.Member], *, args):
if member is None:
member = ctx.author
try:
roll = SekretRollHandler(ctx, self.log, ctx.variables, args,
game_mode=ctx.game_mode, require_tag=True)
except ValueError as e:
self.log.error(f'Roll handling failed: {e}')
await ctx.author.send(f'You fucked up your roll, {ctx.author}. {e}')
else:
await roll.add_to_db(ctx.guild_db)
await member.send(roll.msg())
@commands.command(name='zr', help='Reroll previous roll')
@fetch_guild_db
@handle_http_exception
async def zroll_reroll(self, ctx, member: typing.Optional[discord.Member]):
if member is None:
member = ctx.author
saved = await ctx.guild_db.get_last_user_roll(member.id)
if saved is None:
await ctx.message.reply(f'Ope, no roll history for {member}.')
else:
cmd = saved['roll']
roll = RollHandler(ctx, self.log, ctx.variables, cmd,
game_mode=ctx.game_mode)
await roll.add_to_db(ctx.guild_db)
await ctx.message.reply(f'Reroll {roll.msg()}')
| 2.359375 | 2 |
setup.py | rth/py-speedyfx | 4 | 12794577 | <reponame>rth/py-speedyfx
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import re
from setuptools import setup, find_packages, Extension
from Cython.Distutils import build_ext
#from distutils.core import setup
#from distutils.extension import Extension
import numpy as np
import Cython.Compiler.Options
Cython.Compiler.Options.annotate = True
# define the version string inside the package
# see https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
VERSIONFILE="speedyfx/_version.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
version = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
if sys.platform != 'win32':
compile_args = dict( extra_compile_args=['-O2', '-march=native', '-mtune=native'],
extra_link_args=['-O2', '-march=native', '-mtune=native'])
else:
compile_args = {}
libraries = []
if os.name == 'posix':
libraries.append('m')
include_dirs= [ np.get_include() ]
ext_modules=[
Extension("speedyfx._hashing",
["speedyfx/_hashing.pyx"],
libraries=libraries,
**compile_args),
]
setup(name='speedyfx',
version=version,
description='',
author='',
cmdclass= {'build_ext': build_ext},
ext_modules= ext_modules,
include_dirs=include_dirs,
packages=find_packages(),
include_package_data=True,
)
# conda create -n speedyfx-nskl-env setuptools six cython scipy numpy pytest python=3.5
# conda env remove -n speedyfx-nskl-env
| 1.835938 | 2 |
pybarreleye/barrele_agent.py | LiXi-storage/barreleye | 10 | 12794578 | <reponame>LiXi-storage/barreleye
"""
Library for Barreleye agent.
Barreleye is a performance monitoring system for Lustre.
"""
import json
from http import HTTPStatus
from pycoral import utils
from pycoral import lustre_version
from pycoral import ssh_host
from pybarreleye import barrele_collectd
class BarreleAgent():
"""
Each agent has an object of this type
"""
# pylint: disable=too-few-public-methods,too-many-instance-attributes
def __init__(self, host, barreleye_server,
enable_disk=False, enable_lustre_oss=True,
enable_lustre_mds=True, enable_lustre_client=False,
enable_infiniband=False):
        # Barreleye server, of type BarreleServer
self.bea_barreleye_server = barreleye_server
# Host to run commands.
self.bea_host = host
# Whether to collect disk metrics from this agent.
self.bea_enable_disk = enable_disk
# Whether to collect Lustre OSS metrics from this agent.
self.bea_enable_lustre_oss = enable_lustre_oss
# Whether to collect Lustre MDS metrics from this agent.
self.bea_enable_lustre_mds = enable_lustre_mds
# Whether to collect Lustre client metrics from this agent.
self.bea_enable_lustre_client = enable_lustre_client
# Whether to collect Infiniband metrics from this agent.
self.bea_enable_infiniband = enable_infiniband
# Lustre version on this host.
self.bea_lustre_version = None
# Collectd RPMs needed to be installed in this agent.
self.bea_needed_collectd_rpm_types = \
[barrele_collectd.LIBCOLLECTDCLIENT_TYPE_NAME,
barrele_collectd.COLLECTD_TYPE_NAME]
# The last timestamp when a measurement has been found to be updated.
self.bea_influxdb_update_time = None
# Collectd config for test. Type: CollectdConfig
self.bea_collectd_config_for_test = None
# Collectd config for production. Type: CollectdConfig
self.bea_collectd_config_for_production = None
def _bea_check_connection_with_server(self, log):
        # The client might have problems accessing the Barreleye server, so find
        # the problem as early as possible.
barreleye_server = self.bea_barreleye_server
command = ("ping -c 1 %s" % barreleye_server.bes_server_host.sh_hostname)
retval = self.bea_host.sh_run(log, command)
if retval.cr_exit_status:
log.cl_error("failed to run command [%s] on host [%s], "
"ret = [%d], stdout = [%s], stderr = [%s]",
command,
self.bea_host.sh_hostname,
retval.cr_exit_status,
retval.cr_stdout,
retval.cr_stderr)
return -1
return 0
def _bea_sanity_check(self, log):
"""
Sanity check of the host before installation
"""
ret = self._bea_check_connection_with_server(log)
if ret:
log.cl_error("failed to check the connection of Barreleye agent "
"[%s] with server",
self.bea_host.sh_hostname)
return -1
distro = self.bea_host.sh_distro(log)
if distro not in [ssh_host.DISTRO_RHEL7, ssh_host.DISTRO_RHEL8]:
log.cl_error("host [%s] has unsupported distro [%s]",
self.bea_host.sh_hostname, distro)
return -1
cpu_target = self.bea_host.sh_target_cpu(log)
if cpu_target is None:
log.cl_error("failed to get target cpu on host [%s]",
self.bea_host.sh_hostname)
return -1
if cpu_target != "x86_64":
log.cl_error("host [%s] has unsupported CPU type [%s]",
self.bea_host.sh_hostname, cpu_target)
return -1
command = ("hostname")
retval = self.bea_host.sh_run(log, command)
if retval.cr_exit_status:
log.cl_error("failed to run command [%s] on host [%s], "
"ret = [%d], stdout = [%s], stderr = [%s]",
command,
self.bea_host.sh_hostname,
retval.cr_exit_status,
retval.cr_stdout,
retval.cr_stderr)
return -1
# If the hostname is inconsistent with the configured hostname,
# fqdn tag of the data points will be unexpected.
hostname = retval.cr_stdout.strip()
if hostname != self.bea_host.sh_hostname:
log.cl_error("inconsistent hostname [%s] of Barreleye agent "
"host [%s]", hostname, self.bea_host.sh_hostname)
return -1
return 0
def _bea_check_lustre_version(self, log, lustre_fallback_version):
"""
Check the Lustre version according to the installed RPMs
"""
# pylint: disable=too-many-return-statements,too-many-branches
        # Old Lustre kernel RPMs might not be uninstalled yet, so ignore
# kernel RPMs.
command = ("rpm -qa | grep lustre | grep -v kernel")
retval = self.bea_host.sh_run(log, command)
if (retval.cr_exit_status == 1 and retval.cr_stdout == "" and
retval.cr_stderr == ""):
log.cl_info("Lustre RPM is not installed on host [%s], "
"using default [%s]",
self.bea_host.sh_hostname,
lustre_fallback_version.lv_name)
self.bea_lustre_version = lustre_fallback_version
return 0
if retval.cr_exit_status:
log.cl_error("failed to run command [%s] on host [%s], "
"ret = [%d], stdout = [%s], stderr = [%s]",
command,
self.bea_host.sh_hostname,
retval.cr_exit_status,
retval.cr_stdout,
retval.cr_stderr)
return -1
rpm_names = retval.cr_stdout.split()
rpm_fnames = []
for rpm_name in rpm_names:
rpm_fnames.append(rpm_name + ".rpm")
version, _ = lustre_version.match_lustre_version_from_rpms(log,
rpm_fnames,
skip_kernel=True,
skip_test=True)
if version is None:
log.cl_warning("failed to match Lustre version according to RPM "
"names on host [%s], using default [%s]",
self.bea_host.sh_hostname,
lustre_fallback_version.lv_name)
self.bea_lustre_version = lustre_fallback_version
else:
log.cl_info("detected Lustre version [%s] on host [%s]",
version.lv_name,
self.bea_host.sh_hostname)
self.bea_lustre_version = version
return 0
def _bea_generate_collectd_config(self, log, barreleye_instance,
collectd_test=False):
"""
Generate Collectd config
"""
if collectd_test:
interval = barrele_collectd.COLLECTD_INTERVAL_TEST
else:
interval = barreleye_instance.bei_collect_interval
collectd_config = \
barrele_collectd.CollectdConfig(self, interval,
barreleye_instance.bei_jobstat_pattern)
if (self.bea_enable_lustre_oss or self.bea_enable_lustre_mds or
self.bea_enable_lustre_client):
ret = collectd_config.cdc_plugin_lustre(log,
self.bea_lustre_version,
enable_lustre_oss=self.bea_enable_lustre_oss,
enable_lustre_mds=self.bea_enable_lustre_mds,
enable_lustre_client=self.bea_enable_lustre_client,
enable_lustre_exp_ost=barreleye_instance.bei_enable_lustre_exp_ost,
enable_lustre_exp_mdt=barreleye_instance.bei_enable_lustre_exp_mdt)
if ret:
log.cl_error("failed to config Lustre plugin of Collectd")
return None
if self.bea_enable_infiniband:
collectd_config.cdc_plugin_infiniband()
return collectd_config
def bea_generate_configs(self, log, barreleye_instance):
"""
Steps before configuring Barreleye agent
"""
ret = self._bea_sanity_check(log)
if ret:
log.cl_error("Barreleye agent host [%s] is insane",
self.bea_host.sh_hostname)
return -1
ret = self._bea_check_lustre_version(log,
barreleye_instance.bei_lustre_fallback_version)
if ret:
log.cl_error("failed to check the Lustre version on Barreleye "
"agent [%s]",
self.bea_host.sh_hostname)
return -1
collectd_config = self._bea_generate_collectd_config(log, barreleye_instance,
collectd_test=True)
if collectd_config is None:
log.cl_error("failed to generate Collectd config for test")
return -1
self.bea_collectd_config_for_test = collectd_config
collectd_config = self._bea_generate_collectd_config(log, barreleye_instance,
collectd_test=False)
if collectd_config is None:
log.cl_error("failed to generate Collectd config for production "
"usage")
return -1
self.bea_collectd_config_for_production = collectd_config
# Check that needed collectd RPMs are installed
for rpm_type in self.bea_needed_collectd_rpm_types:
if rpm_type not in barreleye_instance.bei_collectd_rpm_type_dict:
log.cl_error("needed Collectd RPM [%s] of agent [%s] does not "
"exist",
rpm_type, self.bea_host.sh_hostname)
return -1
return 0
def _bea_influxdb_measurement_check(self, log, measurement_name, tags):
# pylint: disable=bare-except,too-many-return-statements
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
"""
        Check whether the datapoint is received by InfluxDB
"""
tag_string = ""
for key, value in tags.items():
if tag_string != "":
tag_string += " AND"
else:
tag_string = " WHERE"
tag_string += (" %s = '%s'" % (key, value))
query = ('SELECT * FROM "%s"%s ORDER BY time DESC LIMIT 1;' %
(measurement_name, tag_string))
influxdb_client = self.bea_barreleye_server.bes_influxdb_client
response = influxdb_client.bic_query(log, query, epoch="s")
if response is None:
log.cl_debug("failed to with query Influxdb with query [%s]",
query)
return -1
if response.status_code != HTTPStatus.OK:
log.cl_debug("got InfluxDB status [%d] with query [%s]",
response.status_code, query)
return -1
data = response.json()
json_string = json.dumps(data, indent=4, separators=(',', ': '))
log.cl_debug("data: [%s]", json_string)
if "results" not in data:
log.cl_debug("got wrong InfluxDB data [%s], no [results]",
json_string)
return -1
results = data["results"]
if len(results) != 1:
log.cl_debug("got wrong InfluxDB data [%s], [results] is not a "
"array with only one element", json_string)
return -1
result = results[0]
if "series" not in result:
log.cl_debug("got wrong InfluxDB data [%s], no [series] in one "
"of the result", json_string)
return -1
series = result["series"]
if len(series) != 1:
log.cl_debug("got wrong InfluxDB data [%s], [series] is not a "
"array with only one element", json_string)
return -1
serie = series[0]
if "columns" not in serie:
log.cl_debug("got wrong InfluxDB data [%s], no [columns] in one "
"of the series", json_string)
return -1
columns = serie["columns"]
if "values" not in serie:
log.cl_debug("got wrong InfluxDB data [%s], no [values] in one "
"of the series", json_string)
return -1
serie_values = serie["values"]
if len(serie_values) != 1:
log.cl_debug("got wrong InfluxDB data [%s], [values] is not a "
"array with only one element", json_string)
return -1
value = serie_values[0]
time_index = -1
i = 0
for column in columns:
if column == "time":
time_index = i
break
i += 1
if time_index == -1:
log.cl_debug("got wrong InfluxDB data [%s], no [time] in "
"the columns", json_string)
return -1
timestamp = int(value[time_index])
if self.bea_influxdb_update_time is None:
self.bea_influxdb_update_time = timestamp
elif timestamp > self.bea_influxdb_update_time:
return 0
log.cl_debug("timestamp [%d] is not updated with query [%s]",
timestamp, query)
return -1
def bea_influxdb_measurement_check(self, log, measurement_name, **tags):
"""
Check whether influxdb has datapoint
"""
if "fqdn" not in tags:
tags["fqdn"] = self.bea_host.sh_hostname
ret = utils.wait_condition(log, self._bea_influxdb_measurement_check,
(measurement_name, tags))
if ret:
log.cl_error("Influxdb gets no data point for measurement [%s] "
"from agent [%s]", measurement_name,
self.bea_host.sh_hostname)
return -1
return 0
def bea_collectd_send_config(self, log, barreleye_instance,
test_config=False):
"""
Dump and send the collectd.conf to the agent host
"""
host = self.bea_host
fpath = barreleye_instance.bei_workspace + "/"
if test_config:
fpath += barrele_collectd.COLLECTD_CONFIG_TEST_FNAME
collectd_config = self.bea_collectd_config_for_test
else:
fpath += barrele_collectd.COLLECTD_CONFIG_FINAL_FNAME
collectd_config = self.bea_collectd_config_for_production
fpath += "." + host.sh_hostname
collectd_config.cdc_dump(fpath)
etc_path = "/etc/collectd.conf"
ret = host.sh_send_file(log, fpath, etc_path)
if ret:
log.cl_error("failed to send file [%s] on local host [%s] to "
"directory [%s] on host [%s]",
fpath, etc_path,
barreleye_instance.bei_local_host.sh_hostname,
host.sh_hostname)
return -1
return 0
def bea_config_agent(self, log, barreleye_instance):
"""
Configure agent
"""
host = self.bea_host
log.cl_info("configuring Collectd on host [%s]",
host.sh_hostname)
ret = self.bea_collectd_send_config(log, barreleye_instance,
test_config=True)
if ret:
log.cl_error("failed to send test config to Barreleye agent "
"on host [%s]",
self.bea_host.sh_hostname)
return -1
service_name = "collectd"
ret = host.sh_service_restart(log, service_name)
if ret:
log.cl_error("failed to restart Collectd service on host [%s]",
host.sh_hostname)
return -1
log.cl_info("checking whether Influxdb can get data points from "
"agent [%s]", host.sh_hostname)
ret = self.bea_collectd_config_for_test.cdc_check(log)
if ret:
log.cl_error("Influxdb doesn't have expected data points from "
"agent [%s]",
host.sh_hostname)
return -1
ret = self.bea_collectd_send_config(log, barreleye_instance,
test_config=False)
if ret:
log.cl_error("failed to send final Collectd config to Barreleye "
"agent on host [%s]",
host.sh_hostname)
return -1
ret = host.sh_service_restart(log, service_name)
if ret:
log.cl_error("failed to restart Barreleye agent on host [%s]",
host.sh_hostname)
return -1
ret = host.sh_service_enable(log, service_name)
if ret:
log.cl_error("failed to enable service [%s] on host [%s]",
service_name, host.sh_hostname)
return -1
return 0
def bea_collectd_running(self, log):
"""
Check whether the Collectd is running.
Return 1 if running. Return -1 if failure.
"""
command = "systemctl is-active collectd"
retval = self.bea_host.sh_run(log, command)
if retval.cr_stdout == "active\n":
return 1
if retval.cr_stdout == "unknown\n":
return 0
if retval.cr_stdout == "inactive\n":
return 0
log.cl_error("unexpected stdout of command [%s] on host [%s], "
"ret = [%d], stdout = [%s], stderr = [%s]",
command,
self.bea_host.sh_hostname,
retval.cr_exit_status,
retval.cr_stdout,
retval.cr_stderr)
return -1
def bea_collectd_stop(self, log):
"""
Stop Collectd service.
"""
service_name = "collectd"
host = self.bea_host
ret = host.sh_service_stop(log, service_name)
if ret:
log.cl_error("failed to stop [%s] service on agent host [%s]",
service_name, host.sh_hostname)
return -1
return 0
def bea_collectd_start(self, log):
"""
Start Collectd service.
"""
service_name = "collectd"
host = self.bea_host
ret = host.sh_service_start(log, service_name)
if ret:
log.cl_error("failed to start [%s] service on agent host [%s]",
service_name, host.sh_hostname)
return -1
return 0
def bea_collectd_version(self, log):
"""
Return the Collectd version, e.g. 5.12.0.barreleye0-1.el7.x86_64
"""
host = self.bea_host
version = host.sh_rpm_version(log, "collectd-")
if version is None:
log.cl_error("failed to get the Collectd RPM version on host [%s]",
host.sh_hostname)
return version
| 2.3125 | 2 |
triptodos/bootstrap_etcd.py | mikalstill/productivity | 0 | 12794579 | #!/usr/bin/python2.7
# This is a simple script which copies old todoist auth details into etcd.
# If you've never run the scraper, you don't need to run this script.
# $1 is a job tag, which identifies the todoist user authentication details
# in etcd.
import json
import os
import sys
import etcd
etcd_path = '/todoist/%s' % sys.argv[1]
etcd_client = etcd.Client(host='192.168.50.1', port=2379)
# Copy across our auth details
with open(os.path.expanduser('~/.todoist')) as f:
etcd_client.write('%s/auth' % etcd_path, f.read())
# Dump the finished state
def dumpdir(path):
dir = etcd_client.get(path)
for result in dir.children:
if result.dir:
dumpdir(result.key)
else:
print('%s: %s' %(result.key, result.value))
dumpdir(etcd_path)
| 2.65625 | 3 |
DQN/AtariBreakoutEnv.py | viv92/wildML_RLimplementations | 1 | 12794580 | <reponame>viv92/wildML_RLimplementations
import gym
import numpy as np
from matplotlib import pyplot as plt
env = gym.envs.make("Breakout-v0")
print("Action space size: {}".format(env.action_space.n))
#print(env.get_action_meanings())
observation = env.reset()
print("Observation space shape: {}".format(observation.shape))
plt.figure()
plt.imshow(env.render(mode='rgb_array'))
for ep in range(100):
print "ep: ", ep
state = env.reset()
while True:
action = np.random.choice(np.arange(env.action_space.n))
_, _, done, _ = env.step(action)
#plt.figure()
plt.imshow(env.render(mode='rgb_array'))
if done:
break
env.render(close=True)
| 2.71875 | 3 |
cnn_model.py | bibofeng/CNN-TA-1 | 6 | 12794581 | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras import Model
class CNN(Model):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = Conv2D(32, 3, padding='same', activation='relu')
self.conv2 = Conv2D(64, 3, padding='same', activation='relu')
self.pool1 = MaxPooling2D(pool_size=(2, 2), padding='same')
self.dropout1 = Dropout(0.25)
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.dropout2 = Dropout(0.5)
self.d2 = tf.keras.layers.Dense(3, activation='softmax')
def call(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.pool1(x)
x = self.dropout1(x)
x = self.flatten(x)
x = self.d1(x)
x = self.dropout2(x)
x = self.d2(x)
return x
def model(self):
x = keras.Input(shape=(15, 15, 1))
return Model(inputs=[x], outputs=self.call(x))
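# Illustrative usage sketch (optimizer and loss are example choices, not prescribed
# by this module; the input shape follows keras.Input(shape=(15, 15, 1)) above):
#
#   cnn = CNN()
#   cnn.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
#               metrics=['accuracy'])
#   cnn.model().summary()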
| 3.265625 | 3 |
wbepi/parameter_estimation.py | Song921012/2021Waste_Water_Project | 0 | 12794582 | <reponame>Song921012/2021Waste_Water_Project
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from wbepi import basic_models as md
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lmfit import Parameters, minimize, report_fit
import pandas as pd
# Method One: Nonlinear Least Squares Method
## (optional) test_data generation
para_test_model = md.SIR(beta=0.2, gamma=0.1, t0=0, dt=5, tend=150)
test_data = para_test_model.ode_sol()
plt.figure(1)
sns.set_theme(style="darkgrid")
plt.plot(test_data["tspan"], test_data["solution"][:, 1])
plt.show()
## parameter estimation by using lmfit
para_estimated = Parameters() #https://lmfit.github.io/lmfit-py/parameters.html
para_estimated.add('beta', value=0.01, min=0, max=1)
para_estimated.add('gamma', value=0.02, min=0, max=1)
"""
from lmfit import Parameters, minimize, report_fit
para_test=Parameters()
para_test.add_many(('amp', 10, True, None, None, None, None),
('cen', 4, True, 0.0, None, None, None),
('wid', 1, False, None, None, None, None),
('frac', 0.5))
A1={"te":1}.items()
A2=para_test.valuesdict().items()
print(type(A2)==type(A1))
para={key:value for key, value in A2}
print(para)
"""
# define error function
def error(para):
para_model = md.SIR(beta=para["beta"], gamma=para["gamma"], t0=0, dt=5, tend=150)
model_data = para_model.ode_sol()
mse = model_data["solution"][:, 1] - test_data["solution"][:, 1] # only data-data needed
return mse
out = minimize(error, para_estimated)
report_fit(out.params)
print(error(out.params))
# Show fitting results
result_model = md.SIR(beta=out.params["beta"], gamma=out.params["gamma"], t0=0, dt=1, tend=150)
result_data = result_model.ode_sol()
plt.figure(2)
sns.set_theme(style="darkgrid")
plt.plot(test_data["tspan"], test_data["solution"][:, 1], "o")
plt.plot(result_data["tspan"], result_data["solution"][:, 1])
plt.show()
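# The fitted parameters can also be read back directly from the result
# (illustrative; for the SIR model the basic reproduction number is beta/gamma):
#
#   beta_fit = out.params["beta"].value
#   gamma_fit = out.params["gamma"].value
#   print("estimated R0:", beta_fit / gamma_fit)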
| 2.78125 | 3 |
radiopadre/__init__.py | ratt-ru/radiopadre | 9 | 12794583 | <reponame>ratt-ru/radiopadre<filename>radiopadre/__init__.py<gh_stars>1-10
import json
import nbformat
import os
import pkg_resources
import radiopadre_kernel
from IPython.display import display, HTML, Javascript
from radiopadre_utils.notebook_utils import scrub_cell
from radiopadre import settings_manager
from radiopadre.render import render_error, show_exception, TransientMessage, render_status_message, render_table
# this stuff is setup by the kernel, pull from it
from radiopadre_kernel import SESSION_ID, VERBOSE, HOSTNAME, \
LOGFILE, ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, \
SHADOW_BASEDIR, SHADOW_ROOTDIR, SHADOW_URL_PREFIX, \
FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \
SESSION_DIR, SESSION_URL, NBCONVERT
# init settings
settings = settings_manager.RadiopadreSettingsManager()
try:
__version__ = pkg_resources.require("radiopadre")[0].version
except pkg_resources.DistributionNotFound:
__version__ = "development"
## various notebook-related init
try:
import astropy
astropy.log.setLevel('ERROR')
except ImportError:
radiopadre_kernel.log.warning("Failed to import astropy")
# NONE OF THE DIR NAMES ABOVE SHALL HAVE A TRAILING SLASH!!!
def _strip_slash(path):
return path if path == "/" or path is None else path.rstrip("/")
def _is_subdir(subdir, parent):
return subdir == parent or subdir.startswith(parent+"/")
from radiopadre_kernel import _make_symlink
def display_status():
# setup status
data = [ ("cwd", os.getcwd()) ]
for varname in """SESSION_ID ROOTDIR ABSROOTDIR DISPLAY_ROOTDIR SHADOW_HOME
SERVER_BASEDIR SHADOW_BASEDIR SHADOW_ROOTDIR
SHADOW_URL_PREFIX FILE_URL_ROOT CACHE_URL_BASE CACHE_URL_ROOT
SESSION_DIR SESSION_URL""".split():
data.append((varname, globals()[varname]))
data += [("", "startup log follows:")]
data += radiopadre_kernel.log_handler.get_records()
from IPython.display import HTML
display(HTML(render_table(data, ["", ""], numbering=False)))
def display_log(debug=False):
from IPython.display import HTML
data = radiopadre_kernel.log_handler.get_records("DEBUG" if debug else "INFO")
display(HTML(render_table(data, ["", ""], numbering=False)))
show_status = display_status
show_log = display_log
def get_cache_dir(path, subdir=None):
"""
Creates directory for caching radiopadre stuff associated with the given file.
Returns tuple of (real_path, url_path). The former is the (shadow) filesystem location of the directory.
The latter is the URL to this directory.
"""
if ABSROOTDIR is None:
raise RuntimeError("radiopadre.init() must be called first")
basedir = _strip_slash(os.path.abspath(os.path.dirname(path)))
if _is_subdir(basedir, ABSROOTDIR):
# if in a subdirectory off the root, this becomes the relative path to it, else ""
reldir = basedir[len(ABSROOTDIR):]
elif _is_subdir(basedir, SHADOW_HOME+ABSROOTDIR):
reldir = basedir[len(SHADOW_HOME)+len(ABSROOTDIR):]
else:
raise RuntimeError("Trying to access {}, which is outside the {} hierarchy".format(basedir, ABSROOTDIR))
cacheurl = CACHE_URL_ROOT + reldir + "/.radiopadre"
shadowdir = SHADOW_HOME + basedir
cachedir = None
# if we can write to the basedir, make a .radiopadre dir within, and make a symlink to it in the shadow tree.
if os.access(basedir, os.W_OK):
cachedir = basedir + "/.radiopadre"
if not os.path.exists(cachedir):
os.mkdir(cachedir)
if os.access(cachedir, os.W_OK):
if not os.path.exists(shadowdir):
os.system("mkdir -p {}".format(shadowdir))
shadowdir += "/.radiopadre"
_make_symlink(cachedir, shadowdir)
cachedir = shadowdir
else:
cachedir = None
# if cachedir remains None, we weren't able to make a writeable one in the main tree -- use shadow tree
# if this fails, we're stuck, so may as well bomb out
if cachedir is None:
if not SHADOW_URL_PREFIX:
raise RuntimeError("Trying to view non-writeable directory, but access to the shadow tree is not set up. This is a bug.")
cachedir = shadowdir + "/.radiopadre"
if not os.path.exists(cachedir):
os.system("mkdir -p {}".format(cachedir))
if not os.access(cachedir, os.W_OK):
raise RuntimeError("Cache directory {} not user-writeable. Try removing it?".format(cachedir))
# make a cache subdir, if so required
if subdir:
cacheurl += "/" + subdir
cachedir += "/" + subdir
if not os.path.exists(cachedir):
os.mkdir(cachedir)
return cachedir, cacheurl
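# Illustrative use of get_cache_dir (hypothetical path): callers typically do something like
#   cachedir, cacheurl = get_cache_dir("/data/run/image.fits", subdir="thumbnails")
# and then write derived products into `cachedir` while embedding `cacheurl` in rendered HTML.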
_init_js_side_done = None
def _display_reset():
display(Javascript("document.radiopadre.reset_display_settings();"))
def _init_js_side():
"""Checks that Javascript components of radiopadre are initialized, does various other init"""
global _init_js_side_done
if _init_js_side_done:
print("init_js_side already done")
return
_init_js_side_done = True
try:
get_ipython
except:
print("get_ipython not found")
return None
get_ipython().magic("matplotlib inline")
settings.display.reset = _display_reset, settings_manager.DocString("call this to reset sizes explicitly")
html = """<script type='text/javascript'>
document.radiopadre.register_user('{}');
document.radiopadre.reset_display_settings();
</script>
""".format(os.environ['USER'])
# reload styles -- loaded from radiopadre-kernel.js already, but reloading is useful for debugging
styles_file = os.path.join(os.path.dirname(__file__), "html/radiopadre.css")
html += f"""<style>
{open(styles_file).read()}
</style>"""
html += """<DIV onload=radiopadre.document.reset_display_settings></DIV>"""
from radiopadre import layouts
html += layouts.init_html
from radiopadre_kernel import js9
if not js9.JS9_ERROR:
html += js9.JS9_INIT_HTML_DYNAMIC
# get buttons from various modules
if not NBCONVERT:
from . import fitsfile
html += fitsfile.add_general_buttons()
# get list of warnings and errors from init
errors = radiopadre_kernel.log_handler.get_records('WARNING')
if errors:
html += render_table(errors, ["", ""], numbering=False)
display(HTML(html))
def hide_cell_code(hide=True):
display(Javascript(f"document.radiopadre.set_show_code({int(not hide)});"))
def set_window_sizes(cell_width, window_width, window_height):
if settings.display.auto_reset:
settings.display.cell_width, settings.display.window_width, settings.display.window_height = \
cell_width, window_width, window_height
# def protect(author=None):
# """Makes current notebook protected with the given author name. Protected notebooks won't be saved
# unless the user matches the author."""
# author = author or os.environ['USER']
# display(Javascript("document.radiopadre.protect('%s')" % author))
# display(HTML(render_status_message("""This notebook is now protected, author is "%s".
# All other users will have to treat this notebook as read-only.""" % author)))
#
#
# def unprotect():
# """Makes current notebook unprotected."""
# display(Javascript("document.radiopadre.unprotect()"))
# display(HTML(render_status_message("""This notebook is now unprotected.
# All users can treat it as read-write.""")))
#
def copy_current_notebook(oldpath, newpath, cell=0, copy_dirs='dirs', copy_root='root'):
# read notebook data
data = open(oldpath).read()
version = json.loads(data)['nbformat']
nbdata = nbformat.reads(data, version)
nbdata.keys()
# convert to current format
current_version = nbformat.current_nbformat
nbdata = nbformat.convert(nbdata, current_version)
current_format = getattr(nbformat, 'v' + str(current_version))
# accommodate worksheets, if available
if hasattr(nbdata, 'worksheets'):
        raise RuntimeError("copy_current_notebook: not compatible with worksheets")
metadata = nbdata['metadata']
cells = nbdata['cells']
# strip out all cells up to and including indicated one
del cells[:cell + 1]
# scrub cell output
for c in cells:
scrub_cell(c)
# insert boilerplate code
code = "import radiopadre\n" + \
"%s = radiopadre.DirList('.')" % copy_dirs
if copy_root:
code += "\n%s = %s[0]" % (copy_root, copy_dirs)
code += "\n%s.show()" % copy_dirs
# insert output
output = current_format.new_output("display_data", data={
"text/html": ["<b style='color: red'>Please select Cell|Run all from the menu to render this notebook.</b>"]
})
cells.insert(0, current_format.new_code_cell(code, outputs=[output]))
# insert markdown
cells.insert(0, current_format.new_markdown_cell("""# %s\nThis
radiopadre notebook was automatically generated from ``%s``
using the 'copy notebook' feature. Please select "Cell|Run all"
from the menu to render this notebook.
""" % (newpath, oldpath),
))
# cleanup metadata
metadata['radiopadre_notebook_protect'] = 0
metadata['radiopadre_notebook_scrub'] = 0
if 'signature' in metadata:
metadata['signature'] = ""
# save
nbformat.write(nbdata, open(newpath, 'w'), version)
return newpath
__init = False
# print("importing radiopadre")
if not __init:
from radiopadre_kernel import casacore_tables
radiopadre_kernel.log.info("initializing radiopadre JS side")
# print("initializing radiopadre")
_init_js_side()
__init = True
# import stuff
from .file import autodetect_file_type
from .datadir import DataDir, ls, lsR, lst, lsrt
from .filelist import FileList
from .fitsfile import FITSFile
from .imagefile import ImageFile
from .casatable import CasaTable
from .htmlfile import HTMLFile, URL
from .table import tabulate
from .render import render_table, render_preamble, render_refresh_button, render_status_message, rich_string, render_url, render_title
| 2 | 2 |
code/141.Linked-List-Cycle-v2.py | Aden-Q/leetcode | 1 | 12794584 | from collections import Counter
from collections import deque
from typing import List
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
wordList = set(wordList)
if endWord not in wordList:
return 0
q = deque([beginWord])
step = 0
wordList.discard(beginWord)
while len(q) != 0:
sz = len(q)
step += 1
for _ in range(sz):
cur_node = q.popleft()
if cur_node == endWord:
return step
for i in range(len(cur_node)):
for c in 'abcdefghijklmnopqrstuvwxyz':
next_node = cur_node[:i] + c + cur_node[i+1:]
if next_node in wordList:
wordList.remove(next_node)
q.append(next_node)
return 0 | 3.453125 | 3 |
main.py | mrprompt/CriptosIOT | 2 | 12794585 | <filename>main.py
from screen import oled
from config import CRIPTOS
import criptos
import time
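# Main loop: cycle through the configured coins and show their buy/sell quotes
# ("Compra"/"Venda") on the OLED display, refreshing each coin every 5 seconds.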
while True:
oled.fill(0)
oled.text("Consultando", 1, 1, 1)
oled.show()
for moeda in CRIPTOS:
cotacao = criptos.do_criptos(moeda)
oled.fill(0)
oled.text("Moeda: {}".format(cotacao['moeda']) , 1, 1, 1)
oled.text("Compra: {}".format(cotacao['compra']), 1, 10, 1)
oled.text("Venda: {}".format(cotacao['venda']), 1, 19, 1)
oled.show()
time.sleep(5)
| 2.609375 | 3 |
saticl/trainer/base.py | edornd/multimodal-icl | 6 | 12794586 | <reponame>edornd/multimodal-icl
from __future__ import annotations
import time
from collections import OrderedDict, defaultdict
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Iterable, List
import numpy as np
import torch
from accelerate import Accelerator
from torch import nn
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from saticl.logging import BaseLogger
from saticl.logging.empty import EmptyLogger
from saticl.losses.regularization import MultiModalScaling
from saticl.metrics import Metric
from saticl.models.encoders import MultiEncoder
from saticl.tasks import Task
from saticl.utils.common import get_logger, progressbar
from saticl.utils.decorators import get_rank
if TYPE_CHECKING:
from saticl.trainer.callbacks import BaseCallback
LOG = get_logger(__name__)
class TrainerStage(str, Enum):
train = "train"
val = "val"
test = "test"
class Trainer:
def __init__(self,
accelerator: Accelerator,
task: Task,
new_model: nn.Module,
old_model: nn.Module,
optimizer: Optimizer,
scheduler: Any,
old_classes: Dict[int, str],
new_classes: Dict[int, str],
seg_criterion: nn.Module,
kdd_criterion: nn.Module,
kde_criterion: nn.Module = None,
kdd_lambda: float = 0.0,
kde_lambda: float = 0.0,
train_metrics: Dict[str, Metric] = None,
val_metrics: Dict[str, Metric] = None,
logger: BaseLogger = None,
samples: int = None,
stage: str = "train",
debug: bool = False) -> None:
assert task.step == 0 or old_model is not None or stage == "test", "ICL steps require the old model for KD"
self.accelerator = accelerator
self.debug = debug
self.model = new_model
self.old_model = old_model
self.criterion = seg_criterion
# knowledge distillation: KDD = KD on decoder, KDE = KD on encoder
self.criterion_kdd = kdd_criterion
self.criterion_kde = kde_criterion
self.kdd_lambda = kdd_lambda
self.kde_lambda = kde_lambda
self.multimodal = isinstance(new_model.encoder, MultiEncoder)
self.criterion_mmd = MultiModalScaling()
# optimizer, scheduler and logger, scaler for AMP
self.optimizer = optimizer
self.scheduler = scheduler
self.logger = logger or EmptyLogger()
# setup metrics, if any
self.metrics = dict()
if train_metrics is not None:
self.add_metrics(stage=TrainerStage.train, metrics=train_metrics)
if val_metrics is not None:
self.add_metrics(stage=TrainerStage.val, metrics=val_metrics)
# ICL information for loggers
self.task = task
self.old_classes = old_classes
self.new_classes = new_classes
self.all_classes = OrderedDict(list(old_classes.items()) + list(new_classes.items()))
# internal state
self.rank = get_rank()
self.is_main = self.rank == 0
self.current_epoch = -1
self.current_loss = None
self.global_step = -1
# internal monitoring
self.current_scores = {TrainerStage.train.value: dict(), TrainerStage.val.value: dict()}
self.best_epoch = None
self.best_score = None
self.best_state_dict = None
self.sample_batches = samples
self.sample_content = list()
        self.callbacks: List[BaseCallback] = list()
def _prepare(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None) -> None:
self.model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer)
train_dataloader = self.accelerator.prepare(train_dataloader)
if val_dataloader is not None:
val_dataloader = self.accelerator.prepare(val_dataloader)
# we need to do this here, because of the prepare
# we swap an integer of num samples with a list of indices with same length
if self.sample_batches is not None and self.sample_batches > 0:
self.sample_batches = np.random.choice(len(val_dataloader), self.sample_batches, replace=False)
else:
self.sample_batches = np.array([])
return train_dataloader, val_dataloader
def _update_metrics(self,
y_true: torch.Tensor,
y_pred: torch.Tensor,
stage: TrainerStage = TrainerStage.train) -> None:
with torch.no_grad():
for metric in self.metrics[stage.value].values():
metric(y_true, y_pred)
def _compute_metrics(self, stage: TrainerStage = TrainerStage.train) -> None:
result = dict()
with torch.no_grad():
for name, metric in self.metrics[stage.value].items():
result[name] = metric.compute()
self.current_scores[stage.value] = result
def _reset_metrics(self, stage: TrainerStage = TrainerStage.train) -> None:
for metric in self.metrics[stage.value].values():
metric.reset()
def _log_metrics(self, stage: TrainerStage = TrainerStage.train, exclude: Iterable[str] = None) -> None:
log_strings = []
exclude = exclude or []
scores = self.current_scores[stage.value]
classwise = dict()
# first log scalars
for metric_name, score in scores.items():
if metric_name in exclude:
continue
if score.ndim > 0:
# store for later
classwise[metric_name] = score
continue
self.logger.log_scalar(f"{stage.value}/{metric_name}", score)
log_strings.append(f"{stage.value}/{metric_name}: {score:.4f}")
# log the full string once completed
LOG.info(", ".join(log_strings))
# then log class-wise results in a single table
if classwise:
LOG.debug("Classwise: %s", str(classwise))
header = list(self.all_classes.values())
self.logger.log_results(f"{stage.value}/results", headers=header, results=classwise)
def _debug_training(self, **kwargs: dict) -> None:
LOG.debug("[Epoch %2d] - iteration: %d", self.current_epoch, self.global_step)
for name, item in kwargs.items():
LOG.debug("%8s: %s", name, str(item))
def _store_samples(self, images: torch.Tensor, outputs: torch.Tensor, targets: torch.Tensor) -> None:
for i in range(images.size(0)):
image = images[i].detach().cpu()
true_mask = targets[i].detach().cpu()
pred_mask = outputs[i].detach().cpu()
self.sample_content.append((image, true_mask, pred_mask))
def add_callback(self, callback: BaseCallback) -> Trainer:
self.callbacks.append(callback)
return self
def setup_callbacks(self) -> None:
for callback in self.callbacks:
callback.setup(self)
def dispose_callbacks(self) -> None:
for callback in self.callbacks:
callback.dispose(self)
def add_metrics(self, stage: TrainerStage, metrics: Dict[str, Metric]) -> Trainer:
assert stage.value not in self.metrics, "stage already present in metrics"
self.metrics[stage.value] = metrics
def step(self) -> None:
self.global_step += 1
self.logger.step()
def train_epoch_start(self):
self._reset_metrics(stage=TrainerStage.train)
def train_batch(self, batch: Any) -> torch.Tensor:
# init losses and retrieve x, y
x, y = batch
# forward and loss on segmentation task
with self.accelerator.autocast():
new_out, _ = self.model(x)
seg_loss = self.criterion(new_out, y)
# this only has effect from step 1 onwards
kdd_loss = torch.tensor(0, device=seg_loss.device, dtype=seg_loss.dtype)
if self.task.step > 0:
old_out, _ = self.old_model(x)
kdd_loss = self.criterion_kdd(new_out, old_out)
# sum up losses
total = seg_loss + self.kdd_lambda * kdd_loss
# gather and update metrics
# we group only the 'standard' images, not the rotated ones
y_true = self.accelerator.gather(y)
y_pred = self.accelerator.gather(new_out)
self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.train)
# debug if active
if self.debug:
self._debug_training(x=x.dtype, y=y.dtype, pred=new_out.dtype, seg_loss=seg_loss, kdd_loss=kdd_loss)
return {"tot_loss": total, "seg_loss": seg_loss, "kdd_loss": kdd_loss}
def train_epoch(self, epoch: int, train_dataloader: DataLoader) -> Any:
timings = []
losses = defaultdict(list)
train_tqdm = progressbar(train_dataloader,
epoch=epoch,
stage=TrainerStage.train.value,
disable=not self.is_main)
self.model.train()
for batch in train_tqdm:
start = time.time()
self.optimizer.zero_grad()
data = self.train_batch(batch=batch)
loss = data["tot_loss"]
# backward pass
self.accelerator.backward(loss)
self.optimizer.step()
# measure elapsed time
elapsed = (time.time() - start)
# store training info
self.current_loss = loss.mean()
loss_val = loss.mean().item()
train_tqdm.set_postfix({"loss": f"{loss_val:.4f}"})
self.logger.log_scalar("train/loss_iter", loss_val)
self.logger.log_scalar("train/lr", self.optimizer.param_groups[0]["lr"])
self.logger.log_scalar("train/time_iter", elapsed)
# store results
for name, val in data.items():
losses[name].append(val.mean().item())
timings.append(elapsed)
# step the logger
self.step()
return losses, timings
def train_epoch_end(self, train_losses: dict, train_times: list):
with torch.no_grad():
self._compute_metrics(stage=TrainerStage.train)
for name, values in train_losses.items():
self.logger.log_scalar(f"train/{name}", np.mean(values))
self.logger.log_scalar("train/time", np.mean(train_times))
self._log_metrics(stage=TrainerStage.train)
def validation_epoch_start(self):
self.sample_content.clear()
self._reset_metrics(stage=TrainerStage.val)
def validation_batch(self, batch: Any, batch_index: int):
# init losses and retrieve x, y
x, y = batch
seg_loss, kdd_loss = torch.tensor(0.0), torch.tensor(0.0)
# forward and loss on main task, using AMP
with self.accelerator.autocast():
new_out, new_features = self.model(x)
seg_loss = self.criterion(new_out, y)
# forward and loss for KD
if self.task.step > 0:
old_out, old_features = self.old_model(x)
kdd_loss = self.criterion_kdd(new_out, old_out)
total = seg_loss + self.kdd_lambda * kdd_loss
y_true = self.accelerator.gather(y)
y_pred = self.accelerator.gather(new_out)
# store samples for visualization, if present. Requires a plot callback
# better to unpack now, so that we don't have to deal with the batch size later
# also, we take just the first one, a lil bit hardcoded i know
if self.sample_batches is not None and batch_index in self.sample_batches:
images = self.accelerator.gather(x)
self._store_samples(images[:1], y_pred[:1], y_true[:1])
# update metrics and return losses
self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.val)
return {"tot_loss": total, "seg_loss": seg_loss, "kdd_loss": kdd_loss}
def validation_epoch(self, epoch: int, val_dataloader: DataLoader) -> Any:
val_tqdm = progressbar(val_dataloader, epoch=epoch, stage=TrainerStage.val.value, disable=not self.is_main)
timings = []
losses = defaultdict(list)
with torch.no_grad():
self.model.eval()
for i, batch in enumerate(val_tqdm):
start = time.time()
data = self.validation_batch(batch=batch, batch_index=i)
loss = data["tot_loss"]
elapsed = (time.time() - start)
# gather info
loss_val = loss.mean().item()
val_tqdm.set_postfix({"loss": f"{loss_val:.4f}"})
# we do not log 'iter' versions for loss and timings, since we do not advance the logger step
# during validation (also, it's kind of useless)
# store results
for name, val in data.items():
losses[name].append(val.mean().item())
timings.append(elapsed)
return losses, timings
def validation_epoch_end(self, val_losses: list, val_times: list):
with torch.no_grad():
self._compute_metrics(stage=TrainerStage.val)
for name, values in val_losses.items():
self.logger.log_scalar(f"val/{name}", np.mean(values))
self.logger.log_scalar("val/time", np.mean(val_times))
self._log_metrics(stage=TrainerStage.val)
def fit(self, train_dataloader: DataLoader, val_dataloader: DataLoader = None, max_epochs: int = 100):
train_dataloader, val_dataloader = self._prepare(train_dataloader, val_dataloader)
self.best_state_dict = self.model.state_dict()
self.setup_callbacks()
self.global_step = 0
for curr_epoch in range(max_epochs):
self.current_epoch = curr_epoch
LOG.info(f"[Epoch {self.current_epoch:>2d}]")
try:
self.train_epoch_start()
t_losses, t_times = self.train_epoch(epoch=self.current_epoch, train_dataloader=train_dataloader)
# not the best place to call it, but it's best to call it every epoch instead of iteration
self.scheduler.step()
self.train_epoch_end(t_losses, t_times)
if val_dataloader is not None:
self.validation_epoch_start()
v_losses, v_times = self.validation_epoch(epoch=self.current_epoch, val_dataloader=val_dataloader)
self.validation_epoch_end(v_losses, v_times)
for callback in self.callbacks:
callback(self)
except KeyboardInterrupt:
LOG.info("[Epoch %2d] Interrupting training", curr_epoch)
break
self.dispose_callbacks()
return self
def test_batch(self, batch: Any, batch_index: int):
x, y = batch
x = x.to(self.accelerator.device)
y = y.to(self.accelerator.device)
# forward and loss on main task, using AMP
with self.accelerator.autocast():
preds, _ = self.model(x)
loss = self.criterion(preds, y)
# gather info
images = self.accelerator.gather(x)
y_true = self.accelerator.gather(y)
y_pred = self.accelerator.gather(preds)
# store samples for visualization, if present. Requires a plot callback
# better to unpack now, so that we don't have to deal with the batch size later
if self.sample_batches is not None and batch_index in self.sample_batches:
self._store_samples(images, y_pred, y_true)
# update metrics and return losses
self._update_metrics(y_true=y_true, y_pred=y_pred, stage=TrainerStage.test)
return loss, (images.cpu(), y_true.cpu(), torch.argmax(y_pred, dim=1).cpu())
def predict(self,
test_dataloader: DataLoader,
metrics: Dict[str, Metric],
logger_exclude: Iterable[str] = None,
return_preds: bool = False):
logger_exclude = logger_exclude or []
self.metrics[TrainerStage.test.value] = metrics
self._reset_metrics(stage=TrainerStage.test)
test_tqdm = progressbar(test_dataloader, stage=TrainerStage.test.value, disable=not self.is_main)
losses, timings, results = [], [], []
# prepare model and loader, pass as val loader to store num samples
_, test_dataloader = self._prepare(train_dataloader=None, val_dataloader=test_dataloader)
with torch.no_grad():
self.model.eval()
for i, batch in enumerate(test_tqdm):
start = time.time()
loss, data = self.test_batch(batch=batch, batch_index=i)
elapsed = (time.time() - start)
loss_value = loss.item()
test_tqdm.set_postfix({"loss": f"{loss_value:.4f}"})
# we do not log 'iter' versions, as for validation
losses.append(loss_value)
timings.append(elapsed)
if return_preds:
results.append(data)
self.logger.log_scalar("test/loss", np.mean(losses))
self.logger.log_scalar("test/time", np.mean(timings))
self._compute_metrics(stage=TrainerStage.test)
self._log_metrics(stage=TrainerStage.test, exclude=logger_exclude)
# iteration on callbacks for the test set (e.g. display images)
for callback in self.callbacks:
callback(self)
return losses, results
| 1.820313 | 2 |
python/woofpicbot/main.py | intothevoid/bikeshed | 0 | 12794587 | from telegram import update
from telegram.ext import Updater, CommandHandler
import requests
import re
import os
URL = "https://random.dog/woof.json"
def get_url():
contents = requests.get(URL).json()
url = contents["url"]
return url
def woof(update, context):
url = get_url()
chat_id = update.message.chat_id
context.bot.send_photo(chat_id=chat_id, photo=url)
def main():
    token_id = os.environ.get("TELEGRAM_WOOF_TOKEN") or "NA"
updater = Updater(token=token_id, use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler("woof", woof))
updater.start_polling()
updater.idle()
if __name__ == "__main__":
main()
| 2.578125 | 3 |
opt/program/src/features/build_features.py | fbomb111/full-stack-mlops | 0 | 12794588 | <reponame>fbomb111/full-stack-mlops
import pandas as pd
import numpy as np
import keras
import os
prefix = '/' if "IS_CONTAINER" in os.environ else './'
data_path = os.path.join(prefix, 'opt/ml/input/data')
train_path = os.path.join(data_path, 'processed')
def main():
print(os.getcwd())
train = pd.read_csv(os.path.join(data_path, 'external/train.csv'))
test = pd.read_csv(os.path.join(data_path, 'external/test.csv'))
# Convert df to values
train_values = train.values[:, 1:]
test_values = test.values
# Reshape and normalize training data
X_train = reshapeAndNormalizeXValues(train_values)
X_test = reshapeAndNormalizeXValues(test_values)
# one hot encoding
number_of_classes = 10
y_train = train.values[:,0]
y_train = keras.utils.to_categorical(y_train, number_of_classes)
np.save(os.path.join(train_path, 'X_train.npy'), X_train)
np.save(os.path.join(train_path, 'X_test.npy'), X_test)
np.save(os.path.join(train_path, 'y_train.npy'), y_train)
def reshapeAndNormalizeXValues(array):
array = array.reshape(array.shape[0], 28, 28, 1)
array = array.astype( 'float32' )
array = array / 255.0
return array
if __name__ == "__main__":
main() | 2.9375 | 3 |
test_utilities/src/d1_test/instance_generator/format_id.py | DataONEorg/d1_python | 15 | 12794589 | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate random formatId."""
import random
import d1_common.const
import d1_test.test_files
class Generate(object):
def __init__(self):
self._format_id_list = None
def __call__(self):
if self._format_id_list is None:
format_id_set = {
o.formatId
for o in d1_test.test_files.load_xml_to_pyxb(
"objectFormatList_v2_0.xml"
).objectFormat
}
# Remove the formatIds for object types that are parsed by GMN
format_id_set.remove(d1_common.const.ORE_FORMAT_ID)
format_id_set -= set(
d1_test.test_files.load_json("scimeta_format_id_list.json")
)
self._format_id_list = sorted(format_id_set)
return random.choice(self._format_id_list)
generate = Generate()
| 1.96875 | 2 |
scripts/task_space_force_control.py | MatthiasDR96/industrial_robotics_simulator | 1 | 12794590 | import math
from src.Simulator import *
from src.arms import *
from src.controllers.task_space_force_controller import *
""" This script uses force control and simulates contact with a wall at x=1.1m."""
# Create robot
q_init = np.array([[math.pi / 2], [-math.pi / 2]])
robot = TwoDofArm()
robot.set_q_init(q_init)
# Create controller
controller = Control(robot)
# Create desired force in task space (3N in positive x-direction)
f_des = np.array([[3.0], [0.0], [0.0], [0.0], [0.0], [0.0]])
controller.set_force_target(f_des)
# Run animation
joint_of_interest = 1
sim = Simulator(robot, controller, joint_of_interest)
sim.simulate()
| 2.5 | 2 |
Homework/Homework III/src/SpectralClustering.py | SS47816/3D-PointCloud | 1 | 12794591 | <filename>Homework/Homework III/src/SpectralClustering.py<gh_stars>1-10
# Purpose of this file: implement the Spectral Clustering algorithm
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
class SpectralClustering(object):
"""
SpectralClustering
Parameters
----------
n_clusters: int
Number of clusters
Attributes
----------
"""
    # k is the number of clusters; tolerance is the centroid-shift threshold; max_iter is the maximum number of iterations
def __init__(self, n_clusters=2, **kwargs):
self.__K = n_clusters
self.__labels = None
def fit(self, data):
"""
Estimate the K centroids
Parameters
----------
data: numpy.ndarray
Training set as N-by-D numpy.ndarray
Returns
----------
None
"""
# TODO 01: implement SpectralClustering fit
from sklearn.neighbors import kneighbors_graph
from sklearn.metrics import pairwise_distances
from scipy.sparse import csgraph
from scipy.sparse import linalg
N, _ = data.shape
# create affinity matrix -- kNN for connectivity:
A = pairwise_distances(data)
# TODO: use better gamma estimation
gamma = np.var(A)/4
A = np.exp(-A**2/(2*gamma**2))
# get laplacian matrix:
L = csgraph.laplacian(A, normed=True)
# spectral decomposition:
eigval, eigvec = np.linalg.eig(L)
# get features:
idx_k_smallest = np.where(eigval < np.partition(eigval, self.__K)[self.__K])
features = np.hstack([eigvec[:, i] for i in idx_k_smallest])
# cluster using KMeans++
k_means = KMeans(init='k-means++', n_clusters=self.__K, tol=1e-6)
k_means.fit(features)
# get cluster ids:
self.__labels = k_means.labels_
def predict(self, data):
"""
Get cluster labels
Parameters
----------
data: numpy.ndarray
Testing set as N-by-D numpy.ndarray
Returns
----------
result: numpy.ndarray
data labels as (N, ) numpy.ndarray
"""
return np.copy(self.__labels)
def generate_dataset(N=300, noise=0.07, random_state=42, visualize=False):
"""
Generate dataset for spectral clustering
Parameters
----------
    N: int
        Number of samples in the generated two-moons dataset
    noise: float
        Standard deviation of the Gaussian noise added to the samples
    random_state: int
        Random seed passed to make_moons for reproducibility
    visualize: boolean
        Whether to visualize the generated data
"""
from sklearn.datasets import make_moons
X, y = make_moons(N, noise=noise, random_state=random_state)
if visualize:
fig, ax = plt.subplots(figsize=(16,9))
ax.set_title('Test Dataset for Spectral Clustering', fontsize=18, fontweight='demi')
ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='viridis')
plt.show()
return X
if __name__ == '__main__':
# create dataset:
K = 2
X = generate_dataset(visualize=False)
# spectral clustering estimation:
sc = SpectralClustering(n_clusters=K)
sc.fit(X)
category = sc.predict(X)
# visualize:
color = ['red','blue','green','cyan','magenta']
labels = [f'Cluster{k:02d}' for k in range(K)]
for k in range(K):
plt.scatter(X[category == k][:,0], X[category == k][:,1], c=color[k], label=labels[k])
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.title('Spectral Clustering Testcase')
plt.show()
| 3.640625 | 4 |
piccolo/utils/sync.py | telerytech/piccolo | 1 | 12794592 | <gh_stars>1-10
from __future__ import annotations
import asyncio
import typing as t
from concurrent.futures import ThreadPoolExecutor
def run_sync(coroutine: t.Coroutine):
"""
Run the coroutine synchronously - trying to accommodate as many edge cases
as possible.
1. When called within a coroutine.
2. When called from ``python -m asyncio``, or iPython with %autoawait
enabled, which means an event loop may already be running in the
current thread.
"""
try:
# We try this first, as in most situations this will work.
return asyncio.run(coroutine)
except RuntimeError:
# An event loop already exists.
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(asyncio.run, coroutine)
return future.result()
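# Minimal usage sketch (hypothetical coroutine):
#
#     async def fetch_rows():
#         ...
#
#     rows = run_sync(fetch_rows())
#
# This works both from plain synchronous code and from environments where an
# event loop is already running (e.g. `python -m asyncio`), in which case the
# coroutine is executed on a separate worker thread.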
| 3.59375 | 4 |
Fun Excercise/rovarskparket.py | NirmalSilwal/Python- | 32 | 12794593 | <filename>Fun Excercise/rovarskparket.py
sen=input('enter text: ')
def translate(txt):
result=[]
vowel=['a','e','i','o','u']
for value in txt.lower():
        if value in vowel or not value.isalpha():
result.append(value)
else:
result.append(value)
result.append('o')
result.append(value)
return result
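# e.g. translate("hello") yields the characters of "hohelollolo":
# vowels (and non-letters) pass through unchanged, each consonant c becomes "c" + "o" + "c".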
print("".join(translate(sen)))
# print(translate(sen))
| 3.640625 | 4 |
day07/puzzle2.py | spgill/AdventOfCode2019 | 0 | 12794594 | <gh_stars>0
# stdlib imports
import enum
import itertools
# vendor imports
import click
class IntCodeMachineState(enum.Enum):
CLEAN = enum.auto()
HALTED = enum.auto()
WAITING_FOR_INPUT = enum.auto()
class IntCodeMachine:
def __init__(self, instructions):
# Machine starts in a clean state
self.state = IntCodeMachineState.CLEAN
# Convert the comma-delimited string of numbers into a list of ints
self.memory = list(
map(lambda op: int(op), instructions.strip().split(","))
)
# Input value is None
self.inputValue = None
# Empty list to capture output values
self.outputValues = []
# We will start reading the opcodes at position 0
self.position = 0
# Recursive function to split a decimal into digits
def getDigits(self, n):
if n < 10:
return [n]
else:
r = self.getDigits(n // 10)
return r + [n % 10]
def splitInstruction(self, n):
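        # Example: instruction 1002 decodes to opcode 2 with parameter modes
        # (0, 1, 0), i.e. first param by position, second immediate, third by position.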
# Split the instruction into digits and reverse them
digits = list(reversed(self.getDigits(n)))
# Zero fill the digits array
for i in range(5 - len(digits)):
digits.append(0)
# Consolidate the ones and tens place into an opcode
opcode = digits[0] + 10 * digits[1]
# Return the opcode and param modes
return (opcode, digits[2], digits[3], digits[4])
def resolveValue(self, memory, param, paramMode):
# If in immediate mode, return the value directly
if paramMode == 1:
return param
# Else, treat it as a pointer
else:
return memory[param]
def execute(self):
        # Make sure the machine isn't already halted
if self.state is IntCodeMachineState.HALTED:
raise RuntimeError("Machine is already halted")
# Loop infinitely until we reach the termination instruction
while True:
# Get the code at the current read position
instruction = self.memory[self.position]
# Split the opcode and params apart
opcode, paramModeA, paramModeB, paramModeC = self.splitInstruction(
instruction
)
# Code 99 means immediate termination
if opcode == 99:
self.state = IntCodeMachineState.HALTED
break
# Code 1 is addition
elif opcode == 1:
# Get memory values
paramA = self.resolveValue(
self.memory, self.memory[self.position + 1], paramModeA
)
paramB = self.resolveValue(
self.memory, self.memory[self.position + 2], paramModeB
)
sumPointer = self.memory[self.position + 3]
# print("ADD", paramA, paramB, "->", sumPointer)
# Perform the addition
self.memory[sumPointer] = paramA + paramB
# Advance the code position by 4
self.position += 4
# Code 2 is multiplication
elif opcode == 2:
# Get memory values
paramA = self.resolveValue(
self.memory, self.memory[self.position + 1], paramModeA
)
paramB = self.resolveValue(
self.memory, self.memory[self.position + 2], paramModeB
)
productPointer = self.memory[self.position + 3]
                # Perform the multiplication
self.memory[productPointer] = paramA * paramB
# Advance the code position by 4
self.position += 4
# Code 3 is input
elif opcode == 3:
# If input is not available, stop execution
if self.inputValue is None:
self.state = IntCodeMachineState.WAITING_FOR_INPUT
break
# Store the value at the indicated pointer position
outPointer = self.memory[self.position + 1]
self.memory[outPointer] = self.inputValue
# Zero out the input value
self.inputValue = None
# Advance the code position
self.position += 2
# Code 4 is output
elif opcode == 4:
                # Determine the value and record it as output
value = self.resolveValue(
self.memory, self.memory[self.position + 1], paramModeA
)
self.outputValues.append(value)
# Advance the code position
self.position += 2
# Code 5 and 6 are conditional jumps
elif opcode in [5, 6]:
# Get memory values
paramA = self.resolveValue(
self.memory, self.memory[self.position + 1], paramModeA
)
paramB = self.resolveValue(
self.memory, self.memory[self.position + 2], paramModeB
)
# If non-zero, set the position pointer
if (opcode == 5 and paramA != 0) or (
opcode == 6 and paramA == 0
):
self.position = paramB
# Else, do nothing and advance the position naturally
else:
self.position += 3
# Code 7 and 8 are comparison
elif opcode in [7, 8]:
# Get memory values
paramA = self.resolveValue(
self.memory, self.memory[self.position + 1], paramModeA
)
paramB = self.resolveValue(
self.memory, self.memory[self.position + 2], paramModeB
)
outputPointer = self.memory[self.position + 3]
# Determine the value based on the opcode
if opcode == 7:
flag = paramA < paramB
elif opcode == 8:
flag = paramA == paramB
# Write the value to memory
self.memory[outputPointer] = int(flag)
# Advance the code position by 4
self.position += 4
# Unknown opcode means there was an error
else:
raise RuntimeError(
f"Unknown opcode {opcode} ({instruction}) at position {self.position}"
)
@click.command()
@click.argument("input_file", type=click.File("r"))
def main(input_file):
"""Put your puzzle execution code here"""
# Load the amplifier software instructions
amplifierSoftware = input_file.read().strip()
# List to catch all output signals
outputSignals = []
# Iterate through all permutations of phase signals
for permutation in itertools.permutations(range(5, 10)):
amplifiers = []
# Use the phase value to create an intcode machine
for phase in permutation:
machine = IntCodeMachine(amplifierSoftware)
# First execution is for the phase setting
machine.inputValue = phase
machine.execute()
amplifiers.append(machine)
# Loop through each machine in order until execution halts on the last
previousValue = 0
while True:
for i, machine in enumerate(amplifiers):
machine.inputValue = previousValue
machine.execute()
previousValue = machine.outputValues.pop(0)
# When the last amp halts, that's the end
if i == 4 and machine.state is IntCodeMachineState.HALTED:
break
else:
continue
break
outputSignals.append(previousValue)
# Result is the highest output signal
print("RESULT:", max(outputSignals))
# Execute cli function on main
if __name__ == "__main__":
main()
| 3.25 | 3 |
test_tifffolder.py | tlambert03/tifffolder | 7 | 12794595 | <gh_stars>1-10
import os
from itertools import product
import numpy as np
import pytest
import tifffile
from tifffolder import LLSFolder
ADD_REL = 423345
@pytest.fixture
def lls_folder(tmp_path):
template = "cell1_ch{c}_stack{t:04}_{w}nm_{ab:07}msec_{rel:010}msecAbs.tif"
wave = [488, 560]
time = range(10)
i = 100
im = np.random.rand(16, 16, 16)
for w, t in product(wave, time):
fname = template.format(
c=wave.index(w), w=w, t=t, ab=t * i, rel=t * i + ADD_REL
)
tifffile.imsave(tmp_path / fname, im)
return tmp_path
def test_a(lls_folder):
names = LLSFolder(lls_folder).select_filenames(t=0)
assert [os.path.basename(i) for i in names] == [
f"cell1_ch0_stack0000_488nm_0000000msec_{ADD_REL:010}msecAbs.tif",
f"cell1_ch1_stack0000_560nm_0000000msec_{ADD_REL:010}msecAbs.tif",
]
def test_b(lls_folder):
tf = LLSFolder(lls_folder)
assert tf.asarray().shape == (10, 2, 16, 16, 16)
assert tf.asarray(t=0).shape == (2, 16, 16, 16)
assert tf.asarray(t=0, c=0).shape == (16, 16, 16)
assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16)
assert tf.asarray(t=0, c=0, z=range(0, 16, 2)).shape == (8, 16, 16)
assert tf[0].shape == (2, 16, 16, 16)
assert tf[0, 0].shape == (16, 16, 16)
assert tf[0, 0, :16:2].shape == (8, 16, 16)
np.testing.assert_allclose(tf.asarray(t=0), tf[0])
np.testing.assert_allclose(tf.asarray(t=0, c=0), tf[0, 0])
np.testing.assert_allclose(tf.asarray(t=0, c=0, z=range(0, 16, 2)), tf[0, 0, :16:2])
| 1.828125 | 2 |
rlutils/gym/envs/reset_obs/__init__.py | vermouth1992/rl-util | 0 | 12794596 | from .ant import AntEnv as AntResetObsEnv
from .half_cheetah import HalfCheetahEnv as HalfCheetahResetObsEnv
from .hopper import HopperEnv as HopperResetObsEnv
from .inverted_pendulum import InvertedPendulumEnv as InvertedPendulumResetObsEnv
from .pendulum import PendulumEnv as PendulumResetObsEnv
from .swimmer import SwimmerEnv as SwimmerResetObsEnv
from .walker2d import Walker2dEnv as Walker2dResetObsEnv
| 1.117188 | 1 |
Prognostic/bin/train.py | wangxiaodong1021/HCC_Prognostic | 13 | 12794597 | import argparse
import os
import sys
import torch
import torch.utils.data
from tensorboardX import SummaryWriter
import torch.backends.cudnn as cudnn
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from utils.Survival_Aanlysis import SurvivalAnalysis
from utils.RiskLayer import cox_cost
from Prognostic.data.image_producer import ImageDataset
from Prognostic.model import MODELS
from lifelines.utils import concordance_index
from utils.LaycaOptimizer import MinimalLaycaSGD, LaycaSGD
parser = argparse.ArgumentParser(description='Predicting survival time')
parser.add_argument('--data_path', '-d_p', default='./data/patch_prognostic', type=str,
help='data path')
parser.add_argument('--use_cuda', '-use_cuda', default='True', type=bool, help='use cuda')
parser.add_argument('--lr', '-lr', default='1e-4', type=float, help='learning rate')
parser.add_argument('--momentum', '-mom', default='0.9', type=float, help='SGD momentum')
parser.add_argument('--batch_size', '-b', default='5', type=int, help='batch size')
parser.add_argument('--num_worker', '-nw', default='2', type=int, help='num_worker')
parser.add_argument('--start', '-s', default='0', type=int, help='start epoch')
parser.add_argument('--end', '-e', default='10000', type=int, help='end epoch')
parser.add_argument('--experiment_id', '-eid', default='0', help='experiment id')
parser.add_argument('--experiment_name', '-name', default='prognostic_res_101_mixup', help='experiment name')
parser.add_argument('--ckpt_path_save', '-ckpt_s', default='./model/', help='checkpoint path to save')
parser.add_argument('--log_path', '-lp', default='./log/', help='log path to save')
parser.add_argument('--ckpt', '-ckpt', default='./', help='checkpoint path to load')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--way', '-way', default='10', type=str, help='train way, 40 10 or combinate')
parser.add_argument('--load_pth_train', '-lpth_t', default='./tensor_path', help='train tensor path to load')
parser.add_argument('--load_pth_valid', '-lpth_v', default='./tensor_path', help='valid tensor path to load')
parser.add_argument('--alpha', '-a', default='1.0', type=float, help='mixup alpha')
parser.add_argument('--device_ids', default='0,1,2,3,4', type=str, help='comma separated indices of GPU to use,'
' e.g. 0,1 for using GPU_0'
' and GPU_1, default 0.')
parser.add_argument('--drop_group', '-drop_group', default='3,4', help='drop groups')
parser.add_argument('--drop_prob', '-drop_prob', default='0.1', type=float, help='drop prob')
parser.add_argument('--freeze', '-f', action='store_true', help='Freeze convolutional layer parameters')
parser.add_argument('--type-key', '-type-key', default='tumor', type=str, help='tumor or tumor_beside or fibrous_tissue')
parser.add_argument('--experimentway', '-eway', default='prognosis', type=str, help='prognosis or replase')
parser.add_argument('--use_std', '-std', default='use', type=str, help='use std as feature, u:use, o:only, n:not use ')
parser.add_argument('--optimizer', '-o', default='a', type=str, help='choose optimizer:a(adam), s(sgd), '
'Adadelta(Adadelta), m(MinimalLaycaSGD) '
'or l(LaycaSGD)')
args = parser.parse_args()
cudnn.benchmark = True
log_path = os.path.join(args.log_path, args.experiment_name + "_" + str(args.experiment_id))
if not os.path.isdir(log_path):
os.mkdir(log_path)
ckpt_path_save = os.path.join(args.ckpt_path_save, args.experiment_name + "_" + str(args.experiment_id))
if not os.path.exists(ckpt_path_save):
os.mkdir(ckpt_path_save)
os.environ["CUDA_VISIBLE_DEVICES"] = args.device_ids
device = torch.device("cuda" if args.use_cuda else "cpu")
num_GPU = len(args.device_ids.split(','))
batch_size_train = args.batch_size * num_GPU
batch_size_valid = args.batch_size * num_GPU
print("batch_size:",batch_size_train)
num_workers = args.num_worker * num_GPU
SA = SurvivalAnalysis()
def load_checkpoint(args, net):
print("Use ckpt: ", args.ckpt)
assert len(args.ckpt) != 0, "Please input a valid ckpt_path"
checkpoint = torch.load(args.ckpt)
pretrained_dict = checkpoint['state_dict']
net.load_state_dict(pretrained_dict)
return net
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""decrease the learning rate at 200 and 300 epoch"""
lr = args.lr
if epoch >= 20:
lr /= 10
if epoch >= 40:
lr /= 10
if epoch >= 80:
lr /= 10
'''warmup'''
if epoch < 5:
lr = lr * float(1 + step + epoch * len_epoch) / (5. * len_epoch)
print('epoch = {}, step = {}, lr = {}'.format(epoch, step, lr))
elif step == 0:
print('epoch = {}, lr={}'.format(epoch, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
drop_prob = [0.] * 4
if args.drop_group:
drop_probs = args.drop_prob
drop_group = [int(x) for x in args.drop_group.split(',')]
for block_group in drop_group:
if block_group < 1 or block_group > 4:
raise ValueError(
'drop_group should be a comma separated list of integers'
'between 1 and 4(drop_group:{}).'.format(args.drop_group)
)
drop_prob[block_group - 1] = drop_probs / 4.0 ** (4 - block_group)
if args.freeze:
net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob, require_grad=False).to(device)
for param in net.fc.parameters():
param.requires_grad = True
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
lr=args.lr, weight_decay=1e-2)
else:
net = MODELS[('resnet50')](factor=args.way, drop_prob=drop_prob).to(device)
if args.optimizer == 'a':
print('use adam')
optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=1e-4)
if args.optimizer == 's':
print('use SGD')
optimizer = torch.optim.SGD(net.parameters(), momentum=0.9, lr=args.lr, weight_decay=5e-4)
if args.optimizer == 'l':
print('use LaycaSGD')
optimizer = LaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True)
if args.optimizer == 'm':
print('use MinimalLaycaSGD')
optimizer = MinimalLaycaSGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=1e-4, nesterov=True)
if args.optimizer == 'Adadelta':
print('use Adadelta')
optimizer = torch.optim.Adadelta(net.parameters(), lr=args.lr, rho=0.9, eps=1e-06, weight_decay=1e-4)
net = torch.nn.DataParallel(net, device_ids=None)
if args.resume:
net = load_checkpoint(args, net)
def train(epoch, dataloader, summary):
loss_sum = 0
acc_sum = 0
net.train()
pth = ""
length = len(dataloader)
Prediction = torch.Tensor().to(device)
Survival = torch.Tensor().to(device)
Observed = torch.Tensor().to(device)
for idx, (img, T, O, _, count) in enumerate(dataloader):
if O.sum() == 0:
continue
N = O.shape[0]
print('T:', T)
print('O:', O)
if args.optimizer != 'Adadelta':
lr = adjust_learning_rate(optimizer, epoch, idx, len(dataloader))
img = img.to(device)
output = net(img)
output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O)
print('ties:', ties)
T = T.to(device)
O = O.to(device)
loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties)
loss.register_hook(lambda g: print(g))
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 5)
optimizer.step()
Prediction = torch.cat((Prediction, output))
Survival = torch.cat((Survival, T.float()))
Observed = torch.cat((Observed, O.float()))
Prediction, Survival, Observed, at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu())
CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(),
Observed.cpu().detach().numpy())
loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties)
print("loss:", loss.item(), "CI:", CI.item())
summary['loss'] = loss.item()
summary['CI'] = CI.item()
summary['lr'] = optimizer.param_groups[0]['lr']
return summary
def valid(dataloader, summary):
net.eval()
length = len(dataloader)
Prediction = torch.Tensor().to(device)
Survival = torch.Tensor().to(device)
Observed = torch.Tensor().to(device)
with torch.no_grad():
for idx, (img, T, O, _, count) in enumerate(dataloader):
N = O.shape[0]
print('T:', T)
print('O:', O)
img = img.to(device)
output = net(img)
output, T, O, at_risk, failures, ties, _ = SA.calc_at_risk(output, T, O)
T = T.to(device)
O = O.to(device)
loss = cox_cost(output, at_risk, O.reshape((N, 1)), failures, ties)
print("loss:", loss.item())
Prediction = torch.cat((Prediction, output))
Survival = torch.cat((Survival, T.float()))
Observed = torch.cat((Observed, O.float()))
Prediction, Survival, Observed, at_risk, failures, ties, _ = SA.calc_at_risk(Prediction, Survival.cpu(), Observed.cpu())
CI = concordance_index(Survival.cpu().detach().numpy(), -Prediction.cpu().detach().numpy(),
Observed.cpu().detach().numpy())
loss = cox_cost(Prediction, at_risk, Observed.reshape((Observed.shape[0],1)).to(device), failures, ties)
print("loss:", loss.item(), "CI:", CI.item())
summary['loss'] = loss.item()
summary['CI'] = CI.item()
return summary
d_pth = args.data_path
sp = ckpt_path_save + '/' + str(args.way)
if not os.path.exists(sp):
os.mkdir(sp)
print(d_pth)
train_data = ImageDataset(d_pth, factor=args.way, val=False, type_key=args.type_key,
ExperimentWay=args.experimentway)
valid_data = ImageDataset(d_pth, way="valid", factor=args.way, val=False, type_key=args.type_key,
ExperimentWay=args.experimentway)
print(len(train_data))
print(len(valid_data))
train_dataloader = torch.utils.data.DataLoader(train_data,
batch_size=batch_size_train,
num_workers=num_workers,
drop_last=True,
shuffle=True)
valid_dataloader = torch.utils.data.DataLoader(valid_data,
batch_size=batch_size_valid,
num_workers=num_workers,
drop_last=False,
shuffle=False)
print("length:", len(train_dataloader))
summary_train = {'epoch': 0, 'fp': 0, 'tp': 0, 'Neg': 0, 'Pos': 0}
summary_valid = {'loss': float('inf'), 'acc': 0}
summary_writer = SummaryWriter(log_path)
loss_valid_best = float('inf')
for epoch in range(args.start, args.end):
summary_train = train(epoch, train_dataloader, summary_train)
summary_writer.add_scalar(
'train/loss', summary_train['loss'], epoch)
summary_writer.add_scalar(
'train/CI', summary_train['CI'], epoch)
if epoch % 1 == 0:
torch.save({'epoch': summary_train['epoch'],
'state_dict': net.state_dict()},
(sp + '/' + str(epoch) + '.ckpt'))
summary_valid = valid(valid_dataloader, summary_valid)
summary_writer.add_scalar(
'valid/loss', summary_valid['loss'], epoch)
summary_writer.add_scalar(
'valid/CI', summary_valid['CI'], epoch)
summary_writer.add_scalar(
'learning_rate', summary_train['lr'], epoch
)
print('train/loss', summary_train['loss'], epoch)
print('train/CI', summary_train['CI'], epoch)
print('valid/loss', float(summary_valid['loss']), epoch)
print('valid/CI', summary_valid['CI'], epoch)
if summary_valid['loss'] < loss_valid_best:
        loss_valid_best = summary_valid['loss']
torch.save({'epoch': summary_train['epoch'],
'optimizer': optimizer.state_dict(),
'state_dict': net.state_dict()},
os.path.join(sp, 'best.ckpt'))
summary_writer.close() | 1.773438 | 2 |
lpjguesstools/__init__.py | lukasbaumbach/lpjguesstools | 2 | 12794598 | import logging
from logging.handlers import RotatingFileHandler
import numpy as np
import sys
try: # python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
# TODO: create sublogger for different scripts
logPath = '.'
fileName = 'lpjguesstools'
class MultiLineFormatter(logging.Formatter):
""" A custom multi-line logging formatter """
def format(self, record):
str = logging.Formatter.format(self, record)
header, footer = str.split(record.message)
str = str.replace('\n', '\n' + ' ' * len(header))
return str
# optional colored console logger (nice!)
try:
import colorlog
class MultiLineFormatterColor(colorlog.ColoredFormatter):
def format(self, record):
record.__dict__.update(colorlog.escape_codes)
record.log_color = self.color(self.log_colors, record.levelname)
str = logging.Formatter.format(self, record)
header, footer = str.split(record.message)
str = str.replace('\n', '\n' + ' ' * len(header))
return str
CONS_FORMAT = "[%(log_color)s%(levelname)-8s%(reset)s] %(log_color)s%(message)s%(reset)s"
except ImportError:
# both formatters should use the default (non-color)
MultiLineFormatterColor = MultiLineFormatter
CONS_FORMAT = "[%(levelname)-8s] %(message)s"
FILE_FORMAT = "%(asctime)s [%(levelname)-8s] %(message)s (%(filename)s:%(lineno)s)"
lfCons = MultiLineFormatterColor(CONS_FORMAT, datefmt='%Y-%m-%d %H:%M:%S')
lfFile = MultiLineFormatter(FILE_FORMAT, datefmt='%Y-%m-%d %H:%M:%S')
rootLogger = logging.getLogger(__name__)
rootLogger.setLevel(logging.DEBUG)
hCons = logging.StreamHandler()
hCons.setFormatter(lfCons)
hCons.setLevel(logging.DEBUG)
rootLogger.addHandler(hCons)
hFile = RotatingFileHandler("{0}/{1}.log".format(logPath, fileName), maxBytes=10000)
hFile.setFormatter(lfFile)
hFile.setLevel(logging.DEBUG)
rootLogger.addHandler(hFile)
EPILOG = """<NAME>, SENCKENBERG Biodiversity and Climate Research Centre (BiK-F)
email: <EMAIL>
2017/09/26"""
| 2.8125 | 3 |
tests/bundles/security/_app/config.py | achiang/flask-unchained | 0 | 12794599 | from flask_unchained import AppBundleConfig
class Config(AppBundleConfig):
SECRET_KEY = 'not-secret-key'
SECURITY_SEND_REGISTER_EMAIL = True
SECURITY_SEND_PASSWORD_CHANGED_EMAIL = True
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL = True
| 1.507813 | 2 |
utils/prepare_for_fasttext.py | pdufter/staticlama | 11 | 12794600 | import argparse
from transformers import BertTokenizer
from tqdm import tqdm
def main(args):
"""Tokenize a corpus and write one sentence per line in order to be able to train
fastText on it.
Args:
args (TYPE)
"""
tok = BertTokenizer.from_pretrained(args.vocab)
with open(args.corpus, "r") as fin, open(args.outfile, "w") as feng:
for line in tqdm(fin):
tokenized = tok.tokenize(line.strip())
feng.write(" ".join([args.prefix + x for x in tokenized]) + "\n")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--corpus", default=None, type=str, required=True, help="")
parser.add_argument("--vocab", default=None, type=str, required=True, help="")
parser.add_argument("--prefix", default=None, type=str, required=True, help="")
parser.add_argument("--outfile", default=None, type=str, required=True, help="")
args = parser.parse_args()
main(args)
| 3.125 | 3 |
notepad.py | jakubiszon26/simple-notepad | 1 | 12794601 | <gh_stars>1-10
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
builder = Gtk.Builder();
builder.add_from_file("UI.glade")
handlers = {
"onDestroy": Gtk.main_quit,
"onExitActivate": Gtk.main_quit
}
builder.connect_signals(handlers)
window = builder.get_object("MainWindow")
window.show_all()
Gtk.main() | 1.757813 | 2 |
source/config.sample.py | Stegoo/ogame-caller | 1 | 12794602 | <filename>source/config.sample.py<gh_stars>1-10
og_login = 'your_login'
og_password = '<PASSWORD>'
og_domain = 'fr'
og_universe = '10'
og_id_mother_planet = '1234567'
og_planets = {'340001', '340002', '340003'} #every planets except the target one
og_main_planet_coordonates = ['1', '300', '6']
og_metal_threshold = 1000000
og_large_cargo_to_send = '100'
mobile_number = '0607080910'
default_message = 'Hello Alex. Your empire is under attack. Log in as soon as possible!'
message_file = '/tmp/message'
call_file = '/tmp/ogame_attack_warning.call'
asterisk_outgoing = '/var/spool/asterisk/outgoing/'
| 1.875 | 2 |
_erwin/main.py | der2b2/erwin | 1 | 12794603 | from _erwin import build
from _erwin import serve
from _erwin import clean
from _erwin import initialize
def run(argv):
if argv[0] == "clean" or argv[0] == "c":
print("Cleaning output folder")
clean.run_clean()
elif argv[0] == "build" or argv[0] == "b":
print("Build")
build.main()
elif argv[0] == "serve" or argv[0] == "s":
print("Serve")
serve.run_server()
elif argv[0] == "init" or argv[0] == "i":
print("Initialize")
print("")
read = input("Initialize will override templates, sure you want to proceed? [Y|n] ")
if read == "Y":
initialize.run_init()
else:
print("Aborted")
else:
print("usage: python erwin.py build|serve|clean|init b|s|c|i")
| 2.609375 | 3 |
project_name/settings/apps/system.py | thnee/django-template | 1 | 12794604 | <reponame>thnee/django-template
from ..base import * # noqa
INSTALLED_APPS += [
'django.contrib.admin',
'apps.admin_site',
'apps.backoffice',
'apps.frontoffice',
]
ROOT_URLCONF = 'apps.system.urls'
| 1.234375 | 1 |
test/preprocess_tests/error_correct_intbcs_to_whitelist_test.py | YosefLab/SingleCellLineageTracing | 52 | 12794605 | import os
import unittest
import numpy as np
import pandas as pd
import cassiopeia
class TestErrorCorrectIntBCstoWhitelist(unittest.TestCase):
def setUp(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
test_files_path = os.path.join(dir_path, "test_files")
self.whitelist_fp = os.path.join(test_files_path, "intbc_whitelist.txt")
self.whitelist = ["ACTT", "TAAG"]
self.multi_case = pd.DataFrame.from_dict(
{
"cellBC": [
"A",
"A",
"A",
"B",
"B",
"C",
"C",
"C",
"C",
"D",
"D",
],
"UMI": [
"AACCT",
"AACCG",
"AACCC",
"AACCT",
"AACCG",
"AACCT",
"AACCG",
"AAGGA",
"AACCT",
"AACCT",
"AAGGG",
],
"readCount": [20, 30, 30, 40, 50, 10, 10, 15, 10, 10, 10],
"Seq": [
"AACCTTGG",
"AACCTTGG",
"AACCTTCC",
"AACCTTGG",
"AACCTTGC",
"AACCTTCC",
"AACCTTCG",
"AACCTCAG",
"AACCTTGG",
"AACCTTGG",
"AACCTAAA",
],
"intBC": [
"ACTT",
"AAGG",
"ACTA",
"AAGN",
"TACT",
"TAAG",
"TNNG",
"ANNN",
"GCTT",
"NNNN",
"AAAA",
],
"r1": ["1", "1", "1", "1", "1", "1", "1", "1", "1", "1", "1"],
"r2": ["2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2"],
"r3": ["3", "3", "3", "3", "3", "3", "3", "3", "3", "3", "3"],
"AlignmentScore": [
"20",
"20",
"20",
"20",
"20",
"20",
"20",
"20",
"20",
"20",
"20",
],
"CIGAR": [
"NA",
"NA",
"NA",
"NA",
"NA",
"NA",
"NA",
"NA",
"NA",
"NA",
"NA",
],
}
)
self.multi_case["readName"] = self.multi_case.apply(
lambda x: "_".join([x.cellBC, x.UMI, str(x.readCount)]), axis=1
)
self.multi_case["allele"] = self.multi_case.apply(
lambda x: "_".join([x.r1, x.r2, x.r3]), axis=1
)
self.corrections = {
"ACTT": "ACTT",
"TAAG": "TAAG",
"ACTA": "ACTT",
"TNNG": "TAAG",
"ANNN": "ACTT",
}
def test_correct(self):
df = cassiopeia.pp.error_correct_intbcs_to_whitelist(
self.multi_case, self.whitelist_fp, intbc_dist_thresh=1
)
expected_df = self.multi_case.copy()
expected_df["intBC"] = expected_df["intBC"].map(self.corrections)
expected_df.dropna(subset=["intBC"], inplace=True)
pd.testing.assert_frame_equal(df, expected_df)
def test_correct_whitelist_list(self):
df = cassiopeia.pp.error_correct_intbcs_to_whitelist(
self.multi_case, self.whitelist, intbc_dist_thresh=1
)
expected_df = self.multi_case.copy()
expected_df["intBC"] = expected_df["intBC"].map(self.corrections)
expected_df.dropna(subset=["intBC"], inplace=True)
pd.testing.assert_frame_equal(df, expected_df)
if __name__ == "__main__":
unittest.main()
| 2.515625 | 3 |
tests/test_views.py | yezyilomo/drf-pretty-put | 28 | 12794606 | from django.urls import reverse
from rest_framework.test import APITestCase
from tests.testapp.models import Book, Course, Student, Phone
class ViewTests(APITestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Advanced Data Structures", author="S.Mobit")
self.book2 = Book.objects.create(title="Basic Data Structures", author="S.Mobit")
self.course1 = Course.objects.create(
name="Data Structures", code="CS210"
)
self.course2 = Course.objects.create(
name="Programming", code="CS150"
)
self.course1.books.set([self.book1, self.book2])
self.course2.books.set([self.book1])
self.student = Student.objects.create(
name="Yezy", age=24, course=self.course1
)
self.phone1 = Phone.objects.create(number="076711110", type="Office", student=self.student)
self.phone2 = Phone.objects.create(number="073008880", type="Home", student=self.student)
def tearDown(self):
Book.objects.all().delete()
Course.objects.all().delete()
Student.objects.all().delete()
# **************** POST Tests ********************* #
def test_post_on_pk_nested_foreignkey_related_field(self):
url = reverse("rstudent-list")
data = {
"name": "yezy",
"age": 33,
"course": 2
}
response = self.client.post(url, data, format="json")
self.assertEqual(
response.data,
{
'name': 'yezy',
'age': 33,
'course': {
'name': 'Programming',
'code': 'CS150',
'books': [
{"title": "Advanced Data Structures", "author": "S.Mobit"}
]
},
'phone_numbers': []
}
)
def test_post_on_writable_nested_foreignkey_related_field(self):
url = reverse("wstudent-list")
data = {
"name": "yezy",
"age": 33,
"course": {"name": "Programming", "code": "CS50"},
}
response = self.client.post(url, data, format="json")
self.assertEqual(
response.data,
{
'name': 'yezy',
'age': 33,
'course': {
'name': 'Programming',
'code': 'CS50',
'books': []
},
'phone_numbers': []
}
)
def test_post_with_add_operation(self):
url = reverse("rcourse-list")
data = {
"name": "Data Structures",
"code": "CS310",
"books": {"add":[1,2]}
}
response = self.client.post(url, data, format="json")
self.assertEqual(
response.data,
{
"name": "Data Structures",
"code": "CS310",
"books": [
{'title': 'Advanced Data Structures', 'author': 'S.Mobit'},
{'title': 'Basic Data Structures', 'author': 'S.Mobit'}
]
}
)
def test_post_with_create_operation(self):
data = {
"name": "Data Structures",
"code": "CS310",
"books": {"create": [
{"title": "Linear Math", "author": "Me"},
{"title": "Algebra Three", "author": "Me"}
]}
}
url = reverse("wcourse-list")
response = self.client.post(url, data, format="json")
self.assertEqual(
response.data,
{
"name": "Data Structures",
"code": "CS310",
"books": [
{"title": "Linear Math", "author": "Me"},
{"title": "Algebra Three", "author": "Me"}
]
}
)
def test_post_on_deep_nested_fields(self):
url = reverse("wstudent-list")
data = {
"name": "yezy",
"age": 33,
"course": {
"name": "Programming",
"code": "CS50",
"books": {"create": [
{"title": "Python Tricks", "author": "<NAME>"}
]}
}
}
response = self.client.post(url, data, format="json")
self.assertEqual(
response.data,
{
'name': 'yezy',
'age': 33,
'course': {
'name': 'Programming',
'code': 'CS50',
'books': [
{"title": "Python Tricks", "author": "<NAME>"}
]
},
'phone_numbers': []
}
)
def test_post_on_many_2_one_relation(self):
url = reverse("wstudent-list")
data = {
"name": "yezy",
"age": 33,
"course": {"name": "Programming", "code": "CS50"},
"phone_numbers": {
'create': [
{'number': '076750000', 'type': 'office'}
]
}
}
response = self.client.post(url, data, format="json")
self.assertEqual(
response.data,
{
'name': 'yezy',
'age': 33,
'course': {
'name': 'Programming',
'code': 'CS50',
'books': []
},
'phone_numbers': [
{'number': '076750000', 'type': 'office', 'student': 2}
]
}
)
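    # (Added summary, inferred from the tests in this module.)
    # For to-many relations, the nested-writes API under test accepts an
    # operations object instead of a plain list:
    #   {"add": [pk, ...]}        attach existing objects (pk-based nested fields)
    #   {"remove": [pk, ...]}     detach objects (pk-based nested fields)
    #   {"create": [{...}, ...]}  create and attach new objects (writable nested fields)
    #   {"update": {pk: {...}}}   update attached objects in place (writable nested fields)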
# **************** PUT Tests ********************* #
def test_put_on_pk_nested_foreignkey_related_field(self):
url = reverse("rstudent-detail", args=[self.student.id])
data = {
"name": "yezy",
"age": 33,
"course": 2
}
response = self.client.put(url, data, format="json")
self.assertEqual(
response.data,
{
'name': 'yezy', 'age': 33,
'course': {
'name': 'Programming', 'code': 'CS150',
'books': [
{"title": "Advanced Data Structures", "author": "S.Mobit"}
]
},
'phone_numbers': [
{'number': '076711110', 'type': 'Office', 'student': 1},
{'number': '073008880', 'type': 'Home', 'student': 1}
]
}
)
def test_put_on_writable_nested_foreignkey_related_field(self):
url = reverse("wstudent-detail", args=[self.student.id])
data = {
"name": "yezy",
"age": 33,
"course": {"name": "Programming", "code": "CS50"}
}
response = self.client.put(url, data, format="json")
self.assertEqual(
response.data,
{
'name': 'yezy', 'age': 33,
'course': {
'name': 'Programming', 'code': 'CS50',
'books': [
{'title': 'Advanced Data Structures', 'author': 'S.Mobit'},
{'title': 'Basic Data Structures', 'author': 'S.Mobit'}
]
},
'phone_numbers': [
{'number': '076711110', 'type': 'Office', 'student': 1},
{'number': '073008880', 'type': 'Home', 'student': 1}
]
}
)
def test_put_with_add_operation(self):
url = reverse("rcourse-detail", args=[self.course2.id])
data = {
"name": "Data Structures",
"code": "CS410",
"books": {
"add": [2]
}
}
response = self.client.put(url, data, format="json")
self.assertEqual(
response.data,
{
"name": "Data Structures",
"code": "CS410",
"books": [
{'title': 'Advanced Data Structures', 'author': 'S.Mobit'},
{'title': 'Basic Data Structures', 'author': 'S.Mobit'}
]
}
)
def test_put_with_remove_operation(self):
url = reverse("rcourse-detail", args=[self.course2.id])
data = {
"name": "Data Structures",
"code": "CS410",
"books": {
"remove": [1]
}
}
response = self.client.put(url, data, format="json")
self.assertEqual(
response.data,
{
"name": "Data Structures",
"code": "CS410",
"books": []
}
)
def test_put_with_create_operation(self):
url = reverse("wcourse-detail", args=[self.course2.id])
data = {
"name": "Data Structures",
"code": "CS310",
"books": {
"create": [
{"title": "Primitive Data Types", "author": "S.Mobit"}
]
}
}
response = self.client.put(url, data, format="json")
self.assertEqual(
response.data,
{
"name": "Data Structures",
"code": "CS310",
"books": [
{'title': 'Advanced Data Structures', 'author': 'S.Mobit'},
{"title": "Primitive Data Types", "author": "S.Mobit"}
]
}
)
def test_put_with_update_operation(self):
url = reverse("wcourse-detail", args=[self.course2.id])
data = {
"name": "Data Structures",
"code": "CS310",
"books": {
"update": {
1: {"title": "React Programming", "author": "M.Json"}
}
}
}
response = self.client.put(url, data, format="json")
self.assertEqual(
response.data,
{
"name": "Data Structures",
"code": "CS310",
"books": [
{"title": "React Programming", "author": "M.Json"}
]
}
)
def test_put_on_deep_nested_fields(self):
url = reverse("wstudent-detail", args=[self.student.id])
data = {
"name": "yezy",
"age": 33,
"course": {
"name": "Programming",
"code": "CS50",
"books": {
"remove": [1]
}
}
}
response = self.client.put(url, data, format="json")
self.assertEqual(
response.data,
{
'name': 'yezy', 'age': 33,
'course': {
'name': 'Programming', 'code': 'CS50',
'books': [
{'title': 'Basic Data Structures', 'author': 'S.Mobit'}
]
},
'phone_numbers': [
{'number': '076711110', 'type': 'Office', 'student': 1},
{'number': '073008880', 'type': 'Home', 'student': 1}
]
}
)
def test_put_on_many_2_one_relation(self):
url = reverse("wstudent-detail", args=[self.student.id])
data = {
"name": "yezy",
"age": 33,
"course": {"name": "Programming", "code": "CS50"},
"phone_numbers": {
'update': {
1: {'number': '073008811', 'type': 'office'}
},
'create': [
{'number': '076750000', 'type': 'office'}
]
}
}
response = self.client.put(url, data, format="json")
self.assertEqual(
response.data,
{
'name': 'yezy', 'age': 33,
'course': {
'name': 'Programming', 'code': 'CS50',
'books': [
{'title': 'Advanced Data Structures', 'author': 'S.Mobit'},
{'title': 'Basic Data Structures', 'author': 'S.Mobit'}
]
},
'phone_numbers': [
{'number': '073008811', 'type': 'office', 'student': 1},
{'number': '073008880', 'type': 'Home', 'student': 1},
{'number': '076750000', 'type': 'office', 'student': 1}
]
}
) | 2.4375 | 2 |
src/trader/analyser.py | edse/bl3ptrader | 1 | 12794607 | <filename>src/trader/analyser.py
from collections import namedtuple
from django.conf import settings
from .storage import Storage
from .base import * # noqa
TrendResult = namedtuple('Trend', ['trend', 'current'])
class Analyser(object):
@staticmethod
def checkTrend():
"""
Check the last 2 records from the last 30m grouped by 1m
Returns:
int(-10): when the trending is down and a sell action is required
int(-1): when the trending is down
int(0): when in no trend or no enough data
int(1): when the trending is up
int(10): when the trending is up and a sell action is required
"""
trend = 0
state = 'No trend'
influx_client = Storage.get_client()
q = """SELECT mean("diff") as diff
FROM "MA1_MA2_DIFF"
WHERE time > now() - 30m
GROUP BY time(1m) fill(previous)"""
rs = influx_client.query(q)
if len(list(rs.get_points(measurement='MA1_MA2_DIFF'))) < 2:
            return 0  # not enough data
d1 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-2]
d2 = list(rs.get_points(measurement='MA1_MA2_DIFF'))[-1]
if 'diff' in d1 and 'diff' in d2:
d1 = d1['diff']
d2 = d2['diff']
if d2 > d1:
# up trend
if d1 <= 0 and d2 > 0:
trend = 10 # buy action
state = 'buy'
else:
trend = 1
state = 'up'
elif d2 < d1:
# shrinking
if d2 <= 0 and d1 > 0:
trend = -10 # sell action
state = 'sell'
else:
trend = -1
state = 'down'
Storage.store([{
'measurement': 'TREND',
'tags': {
'state': state,
},
'fields': {
'trend': trend
}
}])
return trend
@staticmethod
def analyse(data):
logger.setLevel(logging.INFO)
# logger.debug('Analysing...')
range = settings.BOT_DATA_SAMPLE_RANGE # 3h
group = settings.BOT_DATA_SAMPLE_GROUP # 1m
ma1 = settings.BOT_DATA_SAMPLE_MA1 # 10
ma2 = settings.BOT_DATA_SAMPLE_MA2 # 20
influx_client = Storage.get_client()
pair = data['measurement']
# tweet = None
# position = ''
current = {
'time': None,
'price': None,
'ma1': None,
'ma2': None,
}
#
# TODO: Replace 3 queries by 1
#
q = """SELECT mean("price") as price
FROM "BTC_EUR"
WHERE time > now() - {range}
GROUP BY time({group}) fill(previous)""".format(
range=range,
group=group
)
rs = influx_client.query(q)
r = list(rs.get_points(measurement=pair))[-1]
if 'price' in r:
current['price'] = r['price']
current['time'] = r['time']
q = """SELECT moving_average(mean("price"), {ma1}) as ma1
FROM "BTC_EUR"
WHERE time > now() - {range}
GROUP BY time({group}) fill(linear)""".format(
ma1=ma1,
range=range,
group=group
)
rs = influx_client.query(q)
r = list(rs.get_points(measurement=pair))[-1]
current['ma1'] = r['ma1']
if 'ma1' in r:
current['ma1'] = r['ma1']
q = """SELECT moving_average(mean("price"), {ma2}) as ma2
FROM "BTC_EUR"
WHERE time > now() - {range}
GROUP BY time({group}) fill(linear)""".format(
ma2=ma2,
range=range,
group=group
)
rs = influx_client.query(q)
r = list(rs.get_points(measurement=pair))[-1]
current['ma2'] = r['ma2']
if 'ma2' in r:
current['ma2'] = r['ma2']
# logger.info(current)
if current['time'] and current['price'] and current['ma1'] and current['ma2']:
# diff
diff = current['ma1'] - current['ma2']
# logger.info('%s MAs diff: %s', pair, diff)
Storage.store([{
'measurement': 'MA1_MA2_DIFF',
'tags': {
'asset': 'MA1',
'currency': 'MA2'
},
'fields': {
'timestamp': current['time'],
'diff': diff,
'ma1': current['ma1'],
'ma2': current['ma2'],
}
}])
trend = Analyser.checkTrend()
logger.info(trend)
return TrendResult(trend, current)
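# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal, hypothetical example of how a caller might react to the trend
# codes documented in ``Analyser.checkTrend`` (-10 sell, -1 down, 0 no trend,
# 1 up, 10 buy). The ``place_buy``/``place_sell`` callables are assumptions
# for illustration only and do not exist in this project.
def _example_act_on_trend(trend, place_buy=None, place_sell=None):
    if trend == 10 and place_buy is not None:
        return place_buy()   # MAs crossed upwards -> buy signal
    if trend == -10 and place_sell is not None:
        return place_sell()  # MAs crossed downwards -> sell signal
    return None              # -1, 0, 1: direction hint only, no action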
| 2.4375 | 2 |
bruhat/render/doc/run_tests.py | punkdit/bruhat | 3 | 12794608 | <reponame>punkdit/bruhat
#!/usr/bin/env python3
"""
Note: use mkdoc.py to rebuild all docs & images.
"""
import os
import collections
import bruhat.render.doc
from bruhat.render.front import Canvas, Scale, Base
from bruhat.render.box import Box
class TestRun(Base):
def __init__(self, func, start=None, end=None, img=None, result=None):
self.func = func
self.start = start
self.end = end
self.img = img
self.result = result
all_names = set()
counter = 0
def run_test(func, dummy=False):
global counter
items = func()
if not isinstance(items, collections.Iterator):
yield TestRun(func, func.__code__.co_firstlineno, result=items)
return
start = items.gi_frame.f_lineno # index
while 1:
try:
box = None
cvs = None
name = None
result = items.__next__()
if isinstance(result, tuple):
result, name = result
if isinstance(result, Box):
box = result
elif isinstance(result, Canvas):
cvs = result
else:
assert 0, "%r not understood" % (result,)
if not name:
name = "output-%d"%counter
counter += 1
assert name not in all_names, "name dup: %r"%name
all_names.add(name)
svgname = "images/%s.svg"%name
pdfname = "images/%s.pdf"%name
end = items.gi_frame.f_lineno-1 # index
test = TestRun(func, start, end, svgname)
yield test
start = end+1
if dummy:
svgname = "/dev/null"
pdfname = "/dev/null"
try:
print("run_tests: rendering", name, func)
if cvs is None:
cvs = Canvas()
cvs.append(Scale(2.0))
box.render(cvs)
else:
cvs = Canvas([Scale(2.0), cvs])
cvs.writeSVGfile(svgname)
cvs.writePDFfile(pdfname)
print()
except:
print("run_tests: render failed for",
name, func.__name__, "line", end)
raise
except StopIteration:
break
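# (Added illustration.) Based on how run_test() consumes test functions above,
# a doc test in bruhat.render.doc is assumed to look roughly like this:
#
#     def test_example():
#         cvs = Canvas()
#         # ... draw on cvs ...
#         yield cvs, "example-picture"   # optional explicit image name
#
# Each yielded Canvas (or Box) becomes one rendered SVG/PDF image; a plain
# (non-generator) return value is recorded without rendering.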
def harvest(path, name, dummy=False):
print("run_tests.harvest", name)
assert name.endswith(".py")
stem = name[:-len(".py")]
desc = "bruhat.render.doc."+stem
__import__(desc)
m = getattr(bruhat.render.doc, stem)
funcs = []
for attr in dir(m):
value = getattr(m, attr)
if attr.startswith("test_") and isinstance(value, collections.Callable):
funcs.append(value)
funcs.sort(key = lambda f : (f.__module__, f.__code__.co_firstlineno))
for func in funcs:
for test in run_test(func, dummy=dummy):
yield test
def run():
path = os.path.dirname(__file__)
names = os.listdir(path)
names = [name for name in names
if name.endswith(".py") and name.startswith("test_")]
names.sort()
for name in names:
for test in harvest(path, name, True):
yield test
def main():
for test in run():
pass
print("run_tests.main: finished")
if __name__ == "__main__":
main()
| 2.3125 | 2 |
machine-learning/QiWei-Python-Chinese/function/function_03.py | yw-fang/MLreadingnotes | 2 | 12794609 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__maintainer__ = "<NAME>"
__email__ = '<EMAIL>'
__license__ = 'Apache License 2.0'
__creation_date__= 'Dec. 25, 2018'
"""
This example shows how to gently
make use of functions
"""
def add(x, y):
return(x+y)
"""
In general, we call this function in the usual way
"""
sum_01 = add(2, 3)
"""
Alternatively, we can unpack a tuple directly into the function arguments
"""
tuple_value = (3, 4)
sum_02 = add(*tuple_value)
"""or we can dict to pass the parameters"""
values = {"x":3, "y":5}
sum_03 = add(**values)
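"""
(added illustration, not part of the original note)
Both unpacking styles can be combined in a single call,
as long as each parameter receives exactly one value
"""
sum_04 = add(*(7,), **{"y": 8})  # same as add(7, 8)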
if __name__ == '__main__':
print('sum_01 is', sum_01)
print('sum_02 is', sum_02)
print('sum_03 is', sum_03)
| 3.78125 | 4 |
scripts/batchOfflineExperiment.py | e73898ms/ForgettingData | 0 | 12794610 | <filename>scripts/batchOfflineExperiment.py
import sys
import os
import subprocess
import shutil
java = "Java-12 Path"
jar = "Path to jar file"
repository = "Path to input ontologies"
outRep = "Output directory"
experimentScript = "Path to the single experiment script"
coverage=100
for i in range(1, 91):
name = "experiment" + str(i)
outDir = outRep + name
try:
os.makedirs(outDir)
except FileExistsError:
print ("Output directory exists. Directory will be emptied and recycled")
for file in os.listdir(outDir):
file_path = os.path.join(outDir, file)
try:
if os.path.isfile(file_path):
if file != "output.log":
os.unlink(file_path)
elif os.path.isdir(file_path): shutil.rmtree(file_path)
except Exception as e:
print(e)
print(name + " started")
with open(outDir + '/output.log', 'w') as out:
executorCmd = "python {script} -n {name} -c {coverage}}".format(script=experimentScript, name=name,coverage=coverage)
subprocess.call(executorCmd, stdout=out, stderr=out)
print(name + " complete")
summaryPath = outRep + "summary.csv"
if os.path.isfile(summaryPath):
os.unlink(summaryPath)
# print("Summary file exists. Statistics will be appended to file.")
summaryFile = open(summaryPath, "w+")
summaryFile.write("Experiment,O Size,Sig Size,Different Result?,Semantic View Time,Reduction Time,Excluded Clauses,Lethe Time")
summaryFile.close()
summaryCmd = '{java} -cp {jar} {prog} -repo {repo} -outFile {outFile}'.format(java=java,
jar=jar,
prog="uk.ac.man.OfflineSummaryGenerator",
repo=outRep,
outFile=summaryPath)
print(summaryCmd, flush=True)
returned_value = subprocess.call(summaryCmd,shell=True)
| 2.171875 | 2 |
tests/core/test_make_requests.py | WandyYing/mussel | 0 | 12794611 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@email: <EMAIL>
@time: 2021/12/17 20:25
"""
from unittest import mock
import pytest
from mussel.core.make_requests import MakeRequest
from mussel.scheme.api import Interface
class TestMakeAPIRequests:
def test_can_be_instantiated(self):
mr = MakeRequest()
assert isinstance(mr, MakeRequest)
@pytest.mark.parametrize(
"interface",
[
Interface("delete", "url"),
Interface("get", "url"),
Interface("head", "url"),
Interface("options", "url"),
Interface("patch", "url"),
Interface("post", "url"),
Interface("put", "url"),
],
)
@mock.patch("mussel.core.make_requests.Session")
def test_http_method_calls_correct_session_method(self, mocked_session, interface):
mar = MakeRequest()
mar.send(interface)
getattr(mar.session, interface.method).assert_called_once()
| 2.328125 | 2 |
examples/pursuit/cosamp_step_by_step.py | carnot-shailesh/cr-sparse | 42 | 12794612 | <reponame>carnot-shailesh/cr-sparse
"""
CoSaMP step by step
==========================
This example explains the step by step development of
CoSaMP (Compressive Sensing Matching Pursuit) algorithm
for sparse recovery. It then shows how to use the
official implementation of CoSaMP in ``CR-Sparse``.
The CoSaMP algorithm has following inputs:
* A sensing matrix or dictionary ``Phi`` which has been used for data measurements.
* A measurement vector ``y``.
* The sparsity level ``K``.
The objective of the algorithm is to estimate a K-sparse solution ``x``
such that ``y`` is approximately equal to ``Phi x``.
A key quantity in the algorithm is the residual ``r = y - Phi x``. Each
iteration of the algorithm successively improves the estimate ``x`` so
that the energy of the residual ``r`` reduces.
The algorithm proceeds as follows:
* Initialize the solution ``x`` with zero.
* Maintain an index set ``I`` (initially empty) of atoms selected as part of the solution.
* While the residual energy is above a threshold:
* **Match**: Compute the inner product of each atom in ``Phi`` with the current residual ``r``.
* **Identify**: Select the indices of 2K atoms from ``Phi`` with the largest correlation with the residual.
* **Merge**: merge these 2K indices with currently selected indices in ``I`` to form ``I_sub``.
* **LS**: Compute the least squares solution of ``Phi[:, I_sub] z = y``
* **Prune**: Pick the largest K entries from this least square solution and keep them in ``I``.
* **Update residual**: Compute ``r = y - Phi_I x_I``.
It is time to see the algorithm in action.
"""
# %%
# Let's import necessary libraries
import jax
from jax import random
import jax.numpy as jnp
# Some keys for generating random numbers
key = random.PRNGKey(0)
keys = random.split(key, 4)
# For plotting diagrams
import matplotlib.pyplot as plt
# CR-Sparse modules
import cr.sparse as crs
import cr.sparse.dict as crdict
import cr.sparse.data as crdata
# %%
# Problem Setup
# ------------------
# Number of measurements
M = 128
# Ambient dimension
N = 256
# Sparsity level
K = 8
# %%
# The Sparsifying Basis
# ''''''''''''''''''''''''''
Phi = crdict.gaussian_mtx(key, M,N)
print(Phi.shape)
# %%
# Coherence of atoms in the sensing matrix
print(crdict.coherence(Phi))
# %%
# A sparse model vector
# ''''''''''''''''''''''''''
x0, omega = crdata.sparse_normal_representations(key, N, K)
plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')
plt.plot(x0)
# %%
# ``omega`` contains the set of indices at which x is nonzero (support of ``x``)
print(omega)
# %%
# Compressive measurements
# ''''''''''''''''''''''''''
y = Phi @ x0
plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')
plt.plot(y)
# %%
# Development of CoSaMP algorithm
# ---------------------------------
# In the following, we walk through the steps of CoSaMP algorithm.
# Since we have access to ``x0`` and ``omega``, we can measure the
# progress made by the algorithm steps by comparing the estimates
# with actual ``x0`` and ``omega``. However, note that in the
# real implementation of the algorithm, no access to original model
# vector is there.
#
# Initialization
# ''''''''''''''''''''''''''''''''''''''''''''
# %%
# We assume the initial solution to be zero and
# the residual ``r = y - Phi x`` to equal the measurements ``y``
r = y
# %%
# Squared norm/energy of the residual
y_norm_sqr = float(y.T @ y)
r_norm_sqr = y_norm_sqr
print(f"{r_norm_sqr=}")
# %%
# A boolean array to track the indices selected for least squares steps
flags = jnp.zeros(N, dtype=bool)
# %%
# During the matching steps, 2K atoms will be picked.
K2 = 2*K
# %%
# At any time, up to 3K atoms may be selected (after the merge step).
K3 = K + K2
# %%
# Number of iterations completed so far
iterations = 0
# %%
# A limit on the maximum tolerance for residual norm
res_norm_rtol = 1e-3
max_r_norm_sqr = y_norm_sqr * (res_norm_rtol ** 2)
print(f"{max_r_norm_sqr=:.2e}")
# %%
# First iteration
# ''''''''''''''''''''''''''''''''''''''''''''
print("First iteration:")
# %%
# Match the current residual with the atoms in ``Phi``
h = Phi.T @ r
# %%
# Pick the indices of 3K atoms with largest matches with the residual
I_sub = crs.largest_indices(h, K3)
# Update the flags array
flags = flags.at[I_sub].set(True)
# Sort the ``I_sub`` array with the help of flags array
I_sub, = jnp.where(flags)
# Since no atoms have been selected so far, we can be more aggressive
# and pick 3K atoms in first iteration.
print(f"{I_sub=}")
# %%
# Check which indices from ``omega`` are there in ``I_sub``.
print(jnp.intersect1d(omega, I_sub))
# %%
# Select the subdictionary of ``Phi`` consisting of atoms indexed by I_sub
Phi_sub = Phi[:, flags]
# %%
# Compute the least squares solution of ``y`` over this subdictionary
x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y)
# Pick the indices of K largest entries in ``x_sub``
Ia = crs.largest_indices(x_sub, K)
print(f"{Ia=}")
# %%
# We need to map the indices in ``Ia`` to the actual indices of atoms in ``Phi``
I = I_sub[Ia]
print(f"{I=}")
# %%
# Select the corresponding values from the LS solution
x_I = x_sub[Ia]
# %%
# We now have our first estimate of the solution
x = jnp.zeros(N).at[I].set(x_I)
plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')
plt.plot(x0, label="Original vector")
plt.plot(x, '--', label="Estimated solution")
plt.legend()
# %%
# We can check how good we were in picking the correct indices from the actual support of the signal
found = jnp.intersect1d(omega, I)
print("Found indices: ", found)
# %%
# We found 6 out of 8 indices in the support. Here are the remaining.
missing = jnp.setdiff1d(omega, I)
print("Missing indices: ", missing)
# %%
# It is time to compute the residual after the first iteration
Phi_I = Phi[:, I]
r = y - Phi_I @ x_I
# %%
# Compute the residual and verify that it is still larger than the allowed tolerance
r_norm_sqr = float(r.T @ r)
print(f"{r_norm_sqr=:.2e} > {max_r_norm_sqr=:.2e}")
# %%
# Store the selected K indices in the flags array
flags = flags.at[:].set(False)
flags = flags.at[I].set(True)
print(jnp.where(flags))
# %%
# Mark the completion of the iteration
iterations += 1
# %%
# Second iteration
# ''''''''''''''''''''''''''''''''''''''''''''
print("Second iteration:")
# %%
# Match the current residual with the atoms in ``Phi``
h = Phi.T @ r
# %%
# Pick the indices of 2K atoms with largest matches with the residual
I_2k = crs.largest_indices(h, K2 if iterations else K3)
# We can check if these include the atoms missed out in first iteration.
print(jnp.intersect1d(omega, I_2k))
# %%
# Merge (union) the set of previous K indices with the new 2K indices
flags = flags.at[I_2k].set(True)
I_sub, = jnp.where(flags)
print(f"{I_sub=}")
# %%
# We can check if we found all the actual atoms
print("Found in I_sub: ", jnp.intersect1d(omega, I_sub))
# %%
# Indeed we did. The set difference is empty.
print("Missing in I_sub: ", jnp.setdiff1d(omega, I_sub))
# %%
# Select the subdictionary of ``Phi`` consisting of atoms indexed by ``I_sub``
Phi_sub = Phi[:, flags]
# %%
# Compute the least squares solution of ``y`` over this subdictionary
x_sub, r_sub_norms, rank_sub, s_sub = jnp.linalg.lstsq(Phi_sub, y)
# Pick the indices of K largest entries in ``x_sub``
Ia = crs.largest_indices(x_sub, K)
print(Ia)
# %%
# We need to map the indices in ``Ia`` to the actual indices of atoms in ``Phi``
I = I_sub[Ia]
print(I)
# %%
# Check if the final K indices in ``I`` include all the indices in ``omega``
jnp.setdiff1d(omega, I)
# %%
# Select the corresponding values from the LS solution
x_I = x_sub[Ia]
# %%
# Here is our updated estimate of the solution
x = jnp.zeros(N).at[I].set(x_I)
plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')
plt.plot(x0, label="Original vector")
plt.plot(x, '--', label="Estimated solution")
plt.legend()
# %%
# The algorithm has no direct way of knowing that it indeed found the solution
# It is time to compute the residual after the second iteration
Phi_I = Phi[:, I]
r = y - Phi_I @ x_I
# %%
# Compute the residual and verify that it is now below the allowed tolerance
r_norm_sqr = float(r.T @ r)
# It turns out that it is now below the tolerance threshold
print(f"{r_norm_sqr=:.2e} < {max_r_norm_sqr=:.2e}")
# %%
# We have completed the signal recovery. We can stop iterating now.
iterations += 1
# %%
# CR-Sparse official implementation
# ----------------------------------------
# The JIT compiled version of this algorithm is available in
# ``cr.sparse.pursuit.cosamp`` module.
# %%
# Import the module
from cr.sparse.pursuit import cosamp
# %%
# Run the solver
solution = cosamp.matrix_solve_jit(Phi, y, K)
# The support for the sparse solution
I = solution.I
print(I)
# %%
# The non-zero values on the support
x_I = solution.x_I
print(x_I)
# %%
# Verify that we successfully recovered the support
print(jnp.setdiff1d(omega, I))
# %%
# Print the residual energy and the number of iterations when the algorithm converged.
print(solution.r_norm_sqr, solution.iterations)
# %%
# Let's plot the solution
x = jnp.zeros(N).at[I].set(x_I)
plt.figure(figsize=(8,6), dpi= 100, facecolor='w', edgecolor='k')
plt.plot(x0, label="Original vector")
plt.plot(x, '--', label="Estimated solution")
plt.legend()
| 2.734375 | 3 |
evalTestSet.py | mandoway/dfp | 0 | 12794613 | import pickle
from pathlib import Path
from datetime import datetime
from tqdm import tqdm
from dfp_main import patch, PatchStats, setVerbose
TEST_SET_PATH = "testSet"
# How many test file pairs should be examined; None examines all of them
LIMIT = None
def evaluateTestSet():
testFiles = list(Path(TEST_SET_PATH).iterdir())
testPairs = [(testFiles[i], testFiles[i + 1]) for i in range(0, len(testFiles), 2)]
all_stats = []
for dockerfile, violationFile in tqdm(testPairs[:LIMIT]):
stats = patch(str(dockerfile), str(violationFile), "hadolint.exe", quiet=True)
all_stats.append(stats)
for s in all_stats:
print(s)
with open(f"evalStats_{datetime.now().strftime('%d%m%Y_%H%M')}.pkl", "wb") as f:
pickle.dump(all_stats, f, protocol=pickle.HIGHEST_PROTOCOL)
times = list(map(lambda it: it.time, all_stats))
avg_time = sum(times) / len(times)
total = sum(map(lambda it: it.total, all_stats))
fixed = sum(map(lambda it: it.fixed, all_stats))
unfixed = sum(map(lambda it: it.unfixed, all_stats))
verified_patches = [p for stat in all_stats for p in stat.patches]
position_dist = {}
rule_dist = {}
for p in verified_patches:
if p.position not in position_dist:
position_dist[p.position] = 0
position_dist[p.position] += 1
if p.rule not in rule_dist:
rule_dist[p.rule] = 0
rule_dist[p.rule] += 1
setVerbose(True)
PatchStats(total, fixed, unfixed).print()
print(f"Average time: {avg_time}s")
print(f"Position distribution: {position_dist}")
print(f"Rule distribution: {rule_dist}")
if __name__ == "__main__":
evaluateTestSet()
| 2.375 | 2 |
tools/conan/conans/client/generators/env.py | aversiveplusplus/aversiveplusplus | 29 | 12794614 | from conans.model import Generator
from conans.paths import CONANENV
class ConanEnvGenerator(Generator):
@property
def filename(self):
return CONANENV
@property
def content(self):
return self.deps_env_info.dumps()
| 1.8125 | 2 |
Classwork/format.py | rhiggins2308/G00364712-problemSet | 0 | 12794615 | <reponame>rhiggins2308/G00364712-problemSet<filename>Classwork/format.py
for i in range(1, 11):
print('{:2d} {:3d} {:4d} {:5d}'.format(i, i**2, i**3, i**4)) | 2.96875 | 3 |
build/lib/pyconfluent/kafka_streams/processor/serialization/_bytes.py | newellp2019/pyconfluent | 330 | 12794616 | <reponame>newellp2019/pyconfluent
from .deserializer import Deserializer
from .serializer import Serializer
class BytesSerializer(Serializer[bytes]):
def serialize(self, topic: str, data: bytes) -> bytes:
return data
def configure(self, configs, is_key):
pass
def close(self):
pass
class BytesDeserializer(Deserializer[bytes]):
def deserialize(self, topic: str, data: bytes) -> bytes:
return data
def configure(self, configs, is_key):
pass
def close(self):
pass
| 2.5 | 2 |
kulkunen/apps.py | HotStew/respa | 49 | 12794617 | from django.apps import AppConfig
from .signal_handlers import install_signal_handlers
class KulkunenConfig(AppConfig):
name = 'kulkunen'
verbose_name = 'Kulkunen'
def ready(self):
install_signal_handlers()
| 1.28125 | 1 |
task 1.py | Rugvedkaikamwar/result | 0 | 12794618 | <reponame>Rugvedkaikamwar/result
X=input("input the radius of the circle : ")
r=float(X)
a=3.14*r**2
print("area of circle : ",a)
| 3.96875 | 4 |
tests/events/test_schemacache.py | janw/uptimer | 1 | 12794619 | from os import path
from unittest.mock import call
from uptimer.events import SCHEMATA_PATH
from uptimer.events.cache import SchemaCache
def test_schemacache_init(mocker):
mocked_open = mocker.patch.object(SchemaCache, "__missing__")
schema_cache = SchemaCache()
assert schema_cache is not None
mocked_open.assert_not_called()
def test_loading_missing_schema(mocker):
mocked_open = mocker.patch.object(SchemaCache, "__missing__")
schema_cache = SchemaCache()
schema_cache["root.json"]
mocked_open.assert_called_once_with("root.json")
def test_loading_dependant_of_root_json(mocker):
mocked_open = mocker.patch("builtins.open", side_effect=open)
calls = [
call(path.join(SCHEMATA_PATH, "probe-event.json"), "r"),
call(path.join(SCHEMATA_PATH, "root.json"), "r"),
]
# Defaults to resolve $refs in the schema, should open two files.
schema_cache = SchemaCache()
schema_cache["probe-event.json"]
mocked_open.assert_called()
assert mocked_open.call_count == 2
mocked_open.assert_has_calls(calls)
mocked_open.reset_mock()
# Non-resolving cache should only open the asked-for file
schema_cache_non_resolving = SchemaCache(resolve_refs=False)
schema_cache_non_resolving["probe-event.json"]
mocked_open.assert_called_once()
mocked_open.assert_has_calls([calls[0]])
def test_return_cached_result(mocker):
mocked_open = mocker.patch("builtins.open", side_effect=open)
schema_cache = SchemaCache()
schema_cache["probe-event.json"]
mocked_open.assert_called()
assert mocked_open.call_count == 2
# Request the same schema again; call_count stays the same.
schema_cache["probe-event.json"]
assert mocked_open.call_count == 2
# Resolving should have cached root.json as well; call_count stays the same
schema_cache["root.json"]
assert mocked_open.call_count == 2
| 2.4375 | 2 |
python/crawlab/config.py | twinsant/crawlab-sdk | 34 | 12794620 | <reponame>twinsant/crawlab-sdk
import os
from typing import Optional
def get_task_id() -> Optional[str]:
try:
return os.getenv('CRAWLAB_TASK_ID')
except Exception:
return None
| 1.890625 | 2 |
fa.py | BlackDragonF/FurAffinityScraper | 9 | 12794621 | <reponame>BlackDragonF/FurAffinityScraper
from fa_scraper import *
import argparse
import sys
import os
import signal
import pickle
import json
import logging
import logging.config
def signal_handler(signum, frame):
# exit signal received, use pickle to dump scraper
logger.info('exit signal received, saving scrapying progress...')
logger.info('current scraper with %u urls scrapied, and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue)))
with open('scraper.cache', 'wb') as temp:
pickle.dump(scraper, temp)
logger.info('successfully saved scrapying progress to scraper.cache.')
exit(0)
def parse_arguments():
"""
Parse arguments from commandline.
Args:
None
Returns:
arguments - arguments parsed from command line
"""
argparser = argparse.ArgumentParser(
usage = '%s [OPTIONS]' % sys.argv[0],
description = 'A scraper of furaffinity.net written with python.'
)
# scrapy-mode - can be choosen from 'default', 'update'
# default is 'default', set scrapy mode
argparser.add_argument(
'-m', '--scrapy-mode',
nargs = 1,
default = ['default'],
choices = ['default', 'update'],
help = 'sets scrapying mode, default: default'
)
# expire-time - int, set expire time
# only works when scrapy-mode is 'update'
argparser.add_argument(
'--expire-time',
nargs = 1,
type = int,
default = [15],
help = 'sets expire time(days) for scrapied images, default: 15'
)
# scrapy-interval - int ,set scraper's sleep interval between two requests
argparser.add_argument(
'-i', '--scrapy-interval',
nargs = 1,
type = int,
default = [60],
help = 'sets sleep interval(seconds) between two network requests, default: 60'
)
    # cookies - filename, use the provided cookies (JSON) to scrape as a logged-in user
argparser.add_argument(
'-c', '--cookies',
nargs = 1,
        help = 'specify the user cookies (JSON format file) to be used; needed if you want to scrape as a logged-in user'
)
# base-url - sub-url scraper to replace with default '/', must be a valid sub-url defined in constant.py
argparser.add_argument(
'--begin-url',
nargs = 1,
help = 'begin sub-URL to replace default "/", "/user/blackdragonf" for example'
)
# skip-check - when specified, skip integrity check step
argparser.add_argument(
'--skip-check',
action='store_true',
help = 'skip integrity check(ONLY works in default mode) between database and images'
)
# log-level - cen be choosen from 'debug', 'info', 'warning', 'error', 'fatal'
# default is info, set the console log level
argparser.add_argument(
'--log-level',
nargs = 1,
default = ['info'],
choices = ['debug', 'info', 'warning', 'error', 'fatal'],
help = 'sets verbosity level for console log messages, default: info'
)
arguments = argparser.parse_args()
return arguments
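# (Added illustration.) A typical invocation of this scraper, using only the
# flags defined above, might look like:
#
#   python fa.py -m default -i 30 -c cookies.json --log-level debug
#
# The --cookies option expects a JSON file mapping cookie names to values for
# a logged-in furaffinity.net session; the exact cookie names depend on the
# site and on what fa_scraper.util.get_cookies() accepts, so treat any
# concrete names as placeholders.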
def config_logger(console_log_level):
"""
Configure logger, should be called at the very first of program.
Args:
console_log_level - console log level, while log file level is fixed to debug
"""
config = {
'version': 1,
'formatters': {
'standard': {
'format': '%(asctime)s - [%(levelname)s] %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'standard'
},
'file': {
'class': 'logging.FileHandler',
'filename': 'fa_scraper.log',
'level': 'DEBUG',
'formatter': 'standard'
}
},
'loggers': {
'default': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': True
}
}
}
config['handlers']['console']['level'] = console_log_level
logging.config.dictConfig(config)
logger = logging.getLogger('default')
logger.info('set console log level to %s' % console_log_level)
logger.debug('logger configured.')
return logger
def check_and_fix_artworks(db, scraper):
"""
Integrity check step.
Traverse through database and see if for each artwork,
there exists a corresponding image in images sub-directory.
If there are artworks missing, remove them from database, and add there urls
to scraper's scrapying queue.
ONLY works in default mode.
Args:
db - database instance
scraper - scraper instance
"""
# get all artwork IDs from artwork, and initialize a set
artwork_ids = set(db.get_artwork_ids())
# traverse through 'images' sub-directory
os.chdir('images')
logger.debug('changed working directory to images.')
artworks = os.listdir('.')
for artwork in artworks:
if os.path.isfile(artwork):
artwork_id = int(os.path.splitext(os.path.basename(artwork))[0])
# if exists image named 'artwork ID', remove it from set
if artwork_id in artwork_ids:
artwork_ids.remove(artwork_id)
# remove remaining artwork records from database
db.delete_artworks(artwork_ids)
# convert artwork IDs to urls and add to scrapying queue
unscrapied_urls = list(map(util.generate_url_from_id, list(artwork_ids)))
scraper.add_unscrapied_urls(unscrapied_urls)
os.chdir('..')
logger.debug('changed working directory to origin.')
logger.info('%u wrong records removed from database.' % len(artwork_ids))
if __name__ == '__main__':
# parse arguments from command line
arguments = parse_arguments()
# configure logger
log_level = arguments.log_level[0].upper()
logger = config_logger(log_level)
# create images sub-directory if not exists
if not util.create_images_directory():
exit(-1)
# set signal handler
signal.signal(signal.SIGINT, signal_handler)
# initialize database and scraper
db = database.Database('fa_scraper.db')
if util.if_cache_exists():
# trying to load scraper from scraper.cache
with open('scraper.cache', 'rb') as temp:
scraper = pickle.load(temp)
logger.info('continued with last scrapying progress, with %u scrapied urls and %u scrapying urls.' % (len(scraper.scrapied_set), len(scraper.scrapying_queue)))
        # os.remove('scraper.cache') commented for potential error
# fix Scraper lazy load *manually* because pickle will NOT save class variable
scrapy.Scraper.SCRAPIED_BASE = True
# reset scrapy_interval
scraper.scrapy_interval = arguments.scrapy_interval[0]
else:
cookies = {}
if arguments.cookies:
# load provided cookies from file
cookies = util.get_cookies(arguments.cookies[0])
begin_url = None
if arguments.begin_url:
# alternative begin-url specified
begin_url = arguments.begin_url[0]
scraper = scrapy.Scraper(arguments.scrapy_interval[0], cookies, begin_url)
logger.info('initialization completed.')
scrapy_mode = arguments.scrapy_mode[0]
logger.info('scrapy mode set to %s' % scrapy_mode)
# try to perform integrity check
if not arguments.skip_check:
if scrapy_mode == 'default':
check_and_fix_artworks(db, scraper)
logger.info('integrity check completed.')
else:
logger.info('will not perform integrity check in update mode.')
else:
logger.info('skipped integrity check.')
# main body
if scrapy_mode == 'default':
while True:
# scrapy loop
# try to get artwork from scraper
artwork = scraper.scrapy_pending_url()
if artwork:
# extend added time
artwork['Added'] = util.get_current_time()
information = json.dumps(artwork)
logger.info('scrapied artwork information: %s' % information)
# insert into database
db.insert_or_replace_artwork(artwork)
logger.info('completed to scrapy artwork with ID: %u.' % artwork.get('ID'))
else:
logger.info('didn\'t scrapy artwork in current round.')
elif scrapy_mode == 'update':
# get expired artwork IDs from database
expired_artwork_ids = db.get_expired_artwork_ids(arguments.expire_time[0])
logger.info('retrieved all expired artwork IDs.')
for artwork_id in expired_artwork_ids:
# try to artwork attributes
artwork = scraper.scrapy_expired_url(util.generate_url_from_id(artwork_id))
if artwork:
# update added time and set ID
artwork['ID'] = artwork_id
artwork['Added'] = util.get_current_time()
information = json.dumps(artwork)
logger.info('updated artwork information: %s' % information)
# replace record in database
db.insert_or_replace_artwork(artwork)
logger.info('completed to re-scrapy expired artwork(with ID: %u)\'s info .' % artwork.get('ID'))
    db.close_db()
logger.info('exiting scraper...')
exit(0)
| 2.53125 | 3 |
visualizer.py | margrietpalm/VisGrid3D-python | 0 | 12794622 | <reponame>margrietpalm/VisGrid3D-python
#!/usr/bin/env python
"""Visualizes data on a cubic lattice
Built specifically to visualize the VTK files created by Morpheus
"""
import os
import glob
import sys
import argparse
import numpy as np
import vtk
from vtk.util import numpy_support as VN
from matplotlib import colors
found_im2movie = True
try:
from im2movie import makeMovie
except ImportError:
found_im2movie = False
__author__ = "<NAME>"
__copyright__ = "Copyright 2016"
__credits__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
# the vtkTimerCallback takes care of updating the visualization
class vtkTimerCallback():
def __init__(self, update_func, tmax=1, save=False):
self.timer_count = 0
self.update = update_func
self.tmax = tmax
self.update_actors = None
self.save = save
def execute(self, obj, event):
iren = obj
win = iren.GetRenderWindow()
ren = win.GetRenderers().GetFirstRenderer()
# remove all actors that will be updated
for actor in self.update_actors:
ren.RemoveActor(actor)
# set t to correct value
t = self.timer_count
if self.timer_count >= self.tmax:
t = self.timer_count % self.tmax
self.save = False
# get new actors
actors = self.update(t, self.save)
self.update_actors = actors
self.timer_count += 1
class Visualizer3D():
""" Create visualizer object
:param simdir: path to folder containing vtk files
:param steps: steps to visualize
:param winsize: window size
:param bg: background color
:param bbox_color: bounding box wire frame color
:param cam_props: dictionary with camera settings
:param onthefly: read data on the fly instead of all at once
"""
def __init__(self, simdir, steps=None, winsize=(800, 800), bg=(0, 0, 0), bbox_color=(1, 1, 1),
cam_props=None, onthefly=False, storeafterread=True, bnd_colors=None):
self.bbox_color = bbox_color
self.cam_props = cam_props
self.storeafterread = storeafterread
self.bnd_colors = bnd_colors
# read data
get_num = lambda fn: int(fn.split('_')[-1].replace('.vtk', ''))
if steps is not None:
self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir)) if get_num(f) in steps}
else:
self.files = {get_num(f) : f for f in glob.glob('{}/plot_*.vtk'.format(simdir))}
if not onthefly:
self.data = {n : self._load_data(f) for n,f in self.files.iteritems()}
else:
self.data = {self.files.keys()[0] : self._load_data(self.files[self.files.keys()[0]])}
# setup renderer
self._set_renderer(winsize, bg)
def _get_step(self,step):
""" Retrieve vtk data for a specific step """
if step in self.data:
return self.data[step]
else:
if self.storeafterread:
self.data[step] = self._load_data(self.files[step])
return self.data[step]
else:
return self._load_data(self.files[step])
def _set_renderer(self, winsize, bg):
""" Set up vtk renderer """
self.renderer = vtk.vtkRenderer()
self.renderer.SetBackground(bg[0], bg[1], bg[2])
self.renderWindow = vtk.vtkRenderWindow()
self.renderWindow.AddRenderer(self.renderer);
self.renderWindowInteractor = vtk.vtkRenderWindowInteractor()
self.renderWindowInteractor.SetRenderWindow(self.renderWindow)
self.renderWindow.SetSize(winsize[0], winsize[1])
def get_actors(self, step, tau_list, tau_colors=None, tau_alpha=None, bbox=True, bnd=None):
"""
Create actors for a list of cell types and add them to the renderer
:param step: step to visualize
:param tau_list: list of cell types
:param tau_colors: list with color per cell type
:param tau_alpha: list with opacity per cell type
:param bbox: show bounding box
:returns: list of actors with first the actors for tau_list followed by the bounding box (if applicable)
"""
# set default colors and opacity when they are not specified
if tau_colors is None:
tau_colors = [(0.5, 0.5, 0.5) for tau in tau_list]
if tau_alpha is None:
tau_alpha = [1 for tau in tau_list]
# get actors
stepdata = self._get_step(step)
if stepdata is None:
return []
else:
actors = [self._get_actor_for_tau(stepdata, tau, tau_colors[i], tau_alpha[i]) for i, tau in enumerate(tau_list)]
# get bounding box wire frame
if bbox:
actors.append(self._get_box_actor())
if bnd is not None:
for tp,color in bnd.iteritems():
actors.append(self._get_bnd_actor(tp,color))
# add actors to the renderer
for actor in actors:
self.renderer.AddActor(actor)
return actors
def _modify_cam(self):
""" Modify the camera settings for the renderer.
Available options:
- position
- focal point
- pitch
If position and focal point are not given, they will be taken
from the camera in the renderer.
:param renderer: vtk renderer
:param cam_props: dictionary with options (see above) as keys and settings as values
"""
old_cam = self.renderer.GetActiveCamera();
cam = vtk.vtkCamera()
if 'position' in self.cam_props:
cam.SetPosition(self.cam_props['position'])
else:
cam.SetPosition(old_cam.GetPosition())
if 'focal point' in self.cam_props:
cam.SetFocalPoint(self.cam_props['focal point'])
else:
cam.SetFocalPoint(old_cam.GetFocalPoint())
if 'pitch' in self.cam_props:
cam.Pitch(self.cam_props['pitch'])
self.renderer.SetActiveCamera(cam)
def _get_bnd_actor(self,tp,color):
print 'add boundary for {} with color {}'.format(tp,color)
(w, h, d) = self.data[self.data.keys()[0]].GetDimensions()
points = vtk.vtkPoints()
f = 0 if '-' in tp else 1
if 'x' in tp:
points.InsertNextPoint(f*w,0,0)
points.InsertNextPoint(f*w,h,0)
points.InsertNextPoint(f*w,h,d)
points.InsertNextPoint(f*w,0,d)
elif 'y' in tp:
points.InsertNextPoint(0,f*h,0)
points.InsertNextPoint(w,f*h,0)
points.InsertNextPoint(w,f*h,d)
points.InsertNextPoint(0,f*h,d)
elif 'z' in tp:
points.InsertNextPoint(0,0,f*d)
points.InsertNextPoint(w,0,f*d)
points.InsertNextPoint(w,h,f*d)
points.InsertNextPoint(0,h,f*d)
polygon = vtk.vtkPolygon()
polygon.GetPointIds().SetNumberOfIds(4) # make a quad
polygon.GetPointIds().SetId(0, 0)
polygon.GetPointIds().SetId(1, 1)
polygon.GetPointIds().SetId(2, 2)
polygon.GetPointIds().SetId(3, 3)
# Add the polygon to a list of polygons
polygons = vtk.vtkCellArray()
polygons.InsertNextCell(polygon)
# Create a PolyData
polygonPolyData = vtk.vtkPolyData()
polygonPolyData.SetPoints(points)
polygonPolyData.SetPolys(polygons)
# Create a mapper and actor
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(polygonPolyData)
else:
mapper.SetInputData(polygonPolyData)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color[0], color[1], color[2])
return actor
def _get_box_actor(self):
""" Create and return actor for wire frame box of the simulation domain """
(w, h, d) = self.data[self.data.keys()[0]].GetDimensions()
imageData = vtk.vtkImageData()
imageData.SetDimensions(2, 2, 2)
imageData.SetSpacing(w, h, d)
imageData.SetOrigin(0, 0, 0)
mapper = vtk.vtkDataSetMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(imageData)
else:
mapper.SetInputData(imageData)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(self.bbox_color[0], self.bbox_color[1], self.bbox_color[2])
actor.GetProperty().SetRepresentationToWireframe()
return actor
def _get_actor_for_tau(self, stepdata, show_tau, color=(0.5, 0.5, 0.5), opacity=1):
""" Create actor for a cell type """
if isinstance(color, basestring):
            # convert a matplotlib color name to an rgb tuple
if color in colors.cnames:
color = get_color(color)
else:
color = (0.5, 0.5, 0.5)
dim = stepdata.GetDimensions()
sigma = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.id'))
sigma = sigma.reshape(dim, order='F')
tau = VN.vtk_to_numpy(stepdata.GetPointData().GetArray('cell.type'))
tau = tau.reshape(dim, order='F')
show_idx = np.unique(sigma[tau == show_tau])
points = vtk.vtkPoints()
for s in show_idx:
if s not in sigma:
continue
pix = np.column_stack(np.where(sigma == s))
for p in pix:
points.InsertNextPoint(p[0] - .5, p[1] - .5, p[2] - .5)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
sources = vtk.vtkCubeSource()
sources.Update()
glyph = vtk.vtkGlyph3D()
if vtk.VTK_MAJOR_VERSION <= 5:
glyph.SetInput(polydata)
else:
glyph.SetInputData(polydata)
glyph.SetSourceConnection(sources.GetOutputPort())
glyph.ScalingOff()
glyph.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(glyph.GetOutputPort())
actor = vtk.vtkActor()
actor.GetProperty().SetOpacity(opacity)
actor.GetProperty().SetColor(color[0], color[1], color[2])
actor.SetMapper(mapper)
return actor
def _load_data(self, fn):
""" Load vtk files """
reader = vtk.vtkStructuredPointsReader()
reader.SetFileName(fn)
reader.ReadAllScalarsOn()
reader.Update()
data = reader.GetOutput()
if data.GetPointData().HasArray('cell.id') != 1:
print "'cell.id' array missing from {} -> skip file".format(fn)
return None
if data.GetPointData().HasArray('cell.type') != 1:
print "'cell.id' array missing from {} -> skip file".format(fn)
return None
return reader.GetOutput()
def visualize(self, step, tau_list, show=False, save=False, impath=None, imprefix=None, bbox=True,
tau_alpha=None, tau_colors=None, bnd=None):
"""
Visualize a given step.
:param step: step to visualize
:param tau_list: list of cell types
:param show: initialize and start the render window after adding the actors to the renderer, should not be used for animations
:param save: save view to png
:param impath: path to store image
:param imprefix: image prefix
:param bbox: show bounding box
:param tau_alpha: list with opacity per cell type
:param tau_colors: list with color per cell type
"""
self.renderWindow.SetWindowName('step ' + str(int(step)))
actors = self.get_actors(step, tau_list, tau_colors, tau_alpha, bbox=bbox,bnd=bnd)
self.renderWindow.Render()
if self.cam_props is not None:
self._modify_cam()
if show:
self.renderWindowInteractor.Initialize()
self.renderWindowInteractor.Start()
if save:
w2i = vtk.vtkWindowToImageFilter()
w2i.SetInput(self.renderWindow)
w2i.Update()
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(w2i.GetOutputPort())
if imprefix is not None and imprefix.endswith('_'):
imprefix = imprefix + '_'
if imprefix is None:
imprefix = ''
if impath is None:
impath = '.'
writer.SetFileName('{}/{}{:03d}.png'.format(impath, imprefix, step))
print 'save image {}/{}{:03d}.png'.format(impath, imprefix, step)
writer.Write()
return actors
def animate(self, tau, tau_colors=None, tau_alpha=None, steps=None, save=False, impath=None, imprefix=None,
fps=5, static_tau=None):
"""
Animate simulation results
:param tau: list of cell types
:param tau_colors: list with color per cell type
:param tau_alpha: list with opacity per cell type
:param steps: steps (all steps are shown when not specified)
:param save: save view to png
:param impath: path to store image
:param imprefix: image prefix
:param fps: frames per second
:param static_tau: static cell types that should not be updated during the animation
"""
        if (tau_colors is None) or (len(tau_colors) != len(tau)):
tau_colors = [(.5, .5, .5) for t in tau]
        if (tau_alpha is None) or (len(tau_alpha) != len(tau)):
tau_alpha = [1 for t in tau]
if steps is None:
steps = self.files.keys()
steps.sort()
self.renderWindowInteractor.Initialize()
actors = self.visualize(steps[0], tau, show=False, save=False, bbox=True, tau_alpha=tau_alpha,
tau_colors=tau_colors,bnd=self.bnd_colors)
if static_tau is None:
static_tau = []
update_tau = [t for t in tau if t not in static_tau]
update_colors = [tau_colors[i] for i, t in enumerate(tau) if t in update_tau]
update_alpha = [tau_alpha[i] for i, t in enumerate(tau) if t in update_tau]
update_func = lambda t, s: self.visualize(steps[t], update_tau, show=False, save=s, bbox=False,
tau_alpha=update_alpha, tau_colors=update_colors,
imprefix=imprefix, impath=impath)
cb = vtkTimerCallback(update_func, len(steps), save)
if len(actors) > 0:
cb.update_actors = [actors[tau.index(t)] for t in tau if t not in static_tau]
else:
cb.update_actors = []
self.renderWindowInteractor.AddObserver('TimerEvent', cb.execute)
timerId = self.renderWindowInteractor.CreateRepeatingTimer(int(1000 / float(fps)))
cb.timerId = timerId
# start the interaction and timer
self.renderWindowInteractor.Start()
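# --- Illustrative usage sketch (added; not part of the original script) ---
# A minimal example of how Visualizer3D might be driven directly from Python,
# mirroring what main() below assembles from command-line arguments. The
# folder name, cell types and colors are placeholder assumptions.
def _example_visualize_simdir(simdir='./example_sim'):
    cam = {'position': (-200, 200, 200), 'focal point': (100, 100, 50)}
    viz = Visualizer3D(simdir, winsize=(800, 800), bg=(0, 0, 0),
                       bbox_color=(1, 1, 1), cam_props=cam)
    # animate cell types 1 and 2: semi-transparent red and opaque grey
    viz.animate([1, 2],
                tau_colors=[(1.0, 0.0, 0.0), (0.5, 0.5, 0.5)],
                tau_alpha=[0.5, 1.0],
                fps=5)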
def get_color(name):
""" Get color for matplotlib color name """
cc = colors.ColorConverter()
if name in colors.cnames:
return cc.to_rgb(name)
else:
return cc.to_rgb("grey")
def parse_args():
parser = argparse.ArgumentParser()
# parser.description("Animate 3D Morpheus simulations")
parser.add_argument("-i", "--simdir", type=str, default="./", help="Simulation folder")
parser.add_argument("-w", "--winsize", type=int, nargs=2, help="window size", default=(800, 800))
parser.add_argument("-t", "--celltypes", type=int, nargs="*", help="cell types to animate", required=True)
parser.add_argument("-c", "--colors", type=str, nargs="*", help="colors or the cell types")
parser.add_argument("-a", "--alpha", type=float, nargs="*", help="opacity of the cell types")
parser.add_argument("--static", type=int, nargs="*",
help="static cell types (will NOT be updated during animation)")
parser.add_argument("--bboxcolor", type=float, nargs=3, default=(1, 1, 1), help="bounding box color")
parser.add_argument("--bgcolor", type=float, nargs=3, default=(0, 0, 0), help="background color")
parser.add_argument("--camposition", type=float, nargs=3, default=(-200, 200, 200), help="camera position")
parser.add_argument("--camfocus", type=float, nargs=3, default=(100, 100, 50), help="camera focal point")
# parser.add_argument("--campitch", type=float, default=, help="camera pitch")
parser.add_argument("--steps", type=int, nargs="*", help="steps to animate, all steps will be shown if this "
"is not specified")
parser.add_argument("-f", "--fps", type=float, default=5, help="frames per second")
parser.add_argument("-o", "--outdir", type=str, help="output directory")
parser.add_argument("-p", "--imprefix", type=str, help="image prefix")
parser.add_argument("-s", "--saveim", action="store_true", help="save images")
parser.add_argument("-m", "--movie", action="store_true", help="make movie after closing the visualization window")
parser.add_argument("--moviedir", type=str, help="movie directory")
parser.add_argument("--moviename", type=str, help="movie name")
parser.add_argument("--readall", action="store_true", help="read all data at once before the visualization starts")
parser.add_argument("--savemem", action="store_true", help="reread vtk file every time it is used instead of "
"keeping it in memory")
parser.add_argument("--win", action="store_true", help="make movie windows compatible")
parser.add_argument("--mp4", action="store_true", help="make mp4 movie")
parser.add_argument("--color_xmin",type=float, nargs=3)
parser.add_argument("--color_ymin",type=float, nargs=3)
parser.add_argument("--color_zmin",type=float, nargs=3)
parser.add_argument("--color_xmax",type=float, nargs=3)
parser.add_argument("--color_ymax",type=float, nargs=3)
parser.add_argument("--color_zmax",type=float, nargs=3)
return parser.parse_args()
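# (Added illustration.) A typical command line for this script, using only the
# options defined above, might look like (paths and names are placeholders):
#
#   python visualizer.py -i ./sim_output -t 1 2 -c red grey -a 0.5 1.0 \
#       --steps 0 10 20 --saveim -o ./frames -m --moviename demo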
def main():
args = parse_args()
# check if there is something to animate
if not os.path.isdir(args.simdir):
sys.exit("Could not find {}".format(args.simdir))
elif len(glob.glob("{}/*.vtk".format(args.simdir))) == 0:
sys.exit("No vtk files found in {}".format(args.simdir))
# set colors and opacity
if not args.colors:
print "Cell color not specified - default to grey"
args.colors = [get_color("grey") for t in args.celltypes]
elif len(args.colors) == 1:
args.colors = [get_color(args.colors[0]) for t in args.celltypes]
elif len(args.colors) < len(args.celltypes):
print "Number of colors does not match number of cell types - default to grey"
args.colors = [get_color("grey") for t in args.celltypes]
else:
args.colors = [get_color(c) for c in args.colors]
if not args.alpha:
print "Alpha values not specified - default to opaque objects"
args.alpha = [1 for t in args.celltypes]
elif len(args.alpha) == 1:
        args.alpha = [args.alpha[0] for t in args.celltypes]
elif len(args.alpha) < len(args.celltypes):
print "Number of alpha values does not match number of cell types - default to opaque objects"
args.alpha = [1 for t in args.celltypes]
bnd = {}
if args.color_xmax is not None:
bnd['x'] = args.color_xmax
if args.color_ymax is not None:
bnd['y'] = args.color_ymax
if args.color_zmax is not None:
bnd['z'] = args.color_zmax
if args.color_xmin is not None:
bnd['-x'] = args.color_xmin
if args.color_ymin is not None:
bnd['-y'] = args.color_ymin
if args.color_zmin is not None:
bnd['-z'] = args.color_zmin
if len(bnd) == 0:
bnd = {}
# set saving options
if args.imprefix or args.outdir or args.movie:
args.saveim = True
if args.saveim:
if not args.outdir:
args.outdir = args.simdir
if not os.path.isdir(args.outdir):
print "Create output directory {}".format(args.outdir)
os.makedirs(args.outdir)
if not args.imprefix:
args.imprefix = "frame"
# set camera
cam_props = {'position': args.camposition, 'focal point': args.camfocus}
# create visualizer
v = Visualizer3D(args.simdir, winsize=args.winsize, bg=args.bgcolor, bbox_color=args.bboxcolor,
cam_props=cam_props, onthefly=(not args.readall), storeafterread=(not args.savemem),
bnd_colors=bnd)
# start animation
v.animate(args.celltypes, tau_colors=args.colors, tau_alpha=args.alpha, steps=args.steps,
save=args.saveim, impath=args.outdir, imprefix=args.imprefix, fps=args.fps, static_tau=args.static)
# create and store movie
if args.movie and found_im2movie:
if args.moviedir is None:
args.moviedir = args.outdir
if args.moviename is None:
args.moviename = args.imprefix
makeMovie(args.imprefix, 'png', args.moviename, args.outdir, args.moviedir, args.fps,
win=args.win, tomp4=args.mp4)
elif not found_im2movie:
print "WARNING: Movie generation is turned of because im2movie was not found"
if __name__ == "__main__":
main()
| 2.640625 | 3 |
python/asteria/source.py | IceCubeOpenSource/ASTERIA | 2 | 12794623 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""CCSN neutrino sources.
This module encapsulates the basic parameters of neutrino fluxes from
supernovae as modeled in the CCSN literature. For each species of neutrino one
requires an estimate of the luminosity vs. time as well as the energy spectrum
of the neutrinos at any given time.
"""
from __future__ import print_function, division
from snewpy.neutrino import Flavor
from .stellardist import FixedDistance, StellarDensity
from .config import parse_quantity
from astropy import units as u
from astropy.table import Table
from abc import ABC, abstractmethod
import numpy as np
from scipy.special import loggamma, gdtr
from scipy.interpolate import PchipInterpolator
class Source:
def __init__(self, name,
spectral_model, progenitor_mass, progenitor_distance,
time={}, luminosity={}, mean_energy={}, pinch={}):
self.name = name
self.model = spectral_model
self.progenitor_mass = progenitor_mass
self.progenitor_distance = progenitor_distance
self.time = time
self.luminosity = luminosity
self.mean_energy = mean_energy
self.pinch = pinch
# Energy PDF function is assumed to be like a gamma function,
# parameterized by mean energy and pinch parameter alpha. True for
# nearly all CCSN models.
self.energy_pdf = lambda a, Ea, E: \
np.exp((1 + a) * np.log(1 + a) - loggamma(1 + a) + a * np.log(E) - \
(1 + a) * np.log(Ea) - (1 + a) * (E / Ea))
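        # Written out, this is the pinched spectrum
        # f(E) = (1 + a)**(1 + a) / (Ea * Gamma(1 + a)) * (E / Ea)**a * exp(-(1 + a) * E / Ea)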
self.v_energy_pdf = np.vectorize(self.energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)' )
# Energy CDF, useful for random energy sampling.
self.energy_cdf = lambda a, Ea, E: \
gdtr(1., a + 1., (a + 1.) * (E / Ea))
def parts_by_index(self, x, n):
"""Returns a list of size-n numpy arrays containing indices for the
elements of x, and one size-m array ( with m<n ) if there are remaining
elements of x.
Returns
-------
i_part : list
List of index partitions (partitions are numpy array).
"""
nParts = x.size//n
i_part = [ np.arange( i*n, (i+1)*n ) for i in range(nParts) ]
# Generate final partition of size <n if x.size is not multiple of n
if len(i_part)*n != x.size:
i_part += [ np.arange( len(i_part)*n, x.size ) ]
# Ensure that last partition always has 2 or more elements
if len(i_part[-1]) < 2:
i_part[-2] = np.append(i_part[-2], i_part[-1])
i_part = i_part[0:-1]
return i_part
def get_time(self):
"""Return source time as numpy array.
Returns
-------
time : float
Source time profile (units of s).
"""
return self.time
def get_luminosity(self, t, flavor=Flavor.NU_E_BAR):
"""Return source luminosity at time t for a given flavor.
Parameters
----------
t : float
Time relative to core bounce.
flavor : :class:`asteria.neutrino.Flavor`
Neutrino flavor.
Returns
-------
luminosity : float
Source luminosity (units of power).
"""
return np.nan_to_num(self.luminosity[flavor](t)) * (u.erg / u.s)
def get_mean_energy(self, t, flavor=Flavor.NU_E_BAR):
"""Return source mean energy at time t for a given flavor.
Parameters
----------
t : float
Time relative to core bounce.
flavor : :class:`asteria.neutrino.Flavor`
Neutrino flavor.
Returns
-------
mean_energy : float
Source mean energy (units of energy).
"""
return np.nan_to_num(self.mean_energy[flavor](t)) * u.MeV
def get_pinch_parameter(self, t, flavor=Flavor.NU_E_BAR):
"""Return source pinching paramter alpha at time t for a given flavor.
Parameters
----------
t : float
Time relative to core bounce.
flavor : :class:`asteria.neutrino.Flavor`
Neutrino flavor.
Returns
-------
pinch : float
Source pinching parameter (unitless).
"""
return np.nan_to_num(self.pinch[flavor](t))
def get_flux(self, time, flavor=Flavor.NU_E_BAR):
"""Return source flux at time t for a given flavor.
Parameters
----------
        time : float
Time relative to core bounce (units seconds).
flavor : :class:`asteria.neutrino.Flavor`
Neutrino flavor.
Returns
-------
flux : float
Source number flux (unit-less, count of neutrinos).
"""
t = time.to(u.s).value
luminosity = self.get_luminosity(t, flavor).to(u.MeV/u.s).value
mean_energy = self.get_mean_energy(t, flavor).value
if isinstance(t, (list, tuple, np.ndarray)):
flux = np.divide(luminosity, mean_energy, where=(mean_energy > 0),
out=np.zeros(len(luminosity)))
else:
if mean_energy > 0.:
flux = luminosity / mean_energy
else:
flux = 0
return flux / u.s
# Where the mean energy is not zero, return rate in units neutrinos
# per second, elsewhere, returns zero.
# flux = np.ediff1d(t, to_end=(t[-1] - t[-2])) * rate
#
# return flux
def energy_spectrum(self, time, E, flavor=Flavor.NU_E_BAR):
"""Compute the PDF of the neutrino energy distribution at time t.
Parameters
----------
        time : float
Time relative to core bounce.
flavor : :class:`asteria.neutrino.Flavor`
Neutrino flavor.
E : `numpy.ndarray`
Sorted grid of neutrino energies to compute the energy PDF.
Returns
-------
spectrum : `numpy.ndarray`
Table of PDF values computed as a function of energy.
"""
# Given t, get current average energy and pinch parameter.
# Use simple 1D linear interpolation
t = time.to(u.s).value
Enu = E.to(u.MeV).value
if Enu[0] == 0.:
Enu[0] = 1e-10 # u.MeV
a = self.get_pinch_parameter(t, flavor)
Ea = self.get_mean_energy(t, flavor).to(u.MeV).value
if isinstance(t, (list, tuple, np.ndarray)):
# It is non-physical to have a<0 but some model files/interpolations still have this
a[a<0] = 0
cut = (a >= 0) & (Ea > 0)
E_pdf = np.zeros( (Enu.size, t.size), dtype = float )
E_pdf[:, cut] = self.v_energy_pdf( a[cut].reshape(1,-1), Ea[cut].reshape(1,-1), \
E=Enu.reshape(-1,1))
cut = (a < 0) & (Ea > 0)
E_pdf[:, cut] = self.v_energy_pdf(np.zeros_like(a[cut]).reshape(1, -1), Ea[cut].reshape(1, -1), \
E=Enu.reshape(-1, 1))
return E_pdf
else:
if Ea <= 0.:
return np.zeros_like(E)
elif a <= 0.:
return self.energy_pdf(0, Ea, E.value).real
else:
return self.energy_pdf(a, Ea, E.value).real
def sample_energies(self, t, E, n=1, flavor=Flavor.NU_E_BAR):
"""Generate a random sample of neutrino energies at some time t for a
particular neutrino flavor. The energies are generated via inverse
transform sampling of the CDF of the neutrino energy distribution.
Parameters
----------
t : float
Time relative to core bounce.
E : `numpy.ndarray`
Sorted grid of neutrino energies to compute the energy PDF.
n : int
Number of energy samples to produce.
flavor : :class:`asteria.neutrino.Flavor`
Neutrino flavor.
Returns
-------
energies : `numpy.ndarray`
Table of energies sampled from the energy spectrum.
"""
        a = self.get_pinch_parameter(t, flavor)
        Ea = self.get_mean_energy(t, flavor).to('MeV').value
        cdf = self.energy_cdf(a, Ea, E.to('MeV').value)
energies = np.zeros(n, dtype=float)
# Generate a random number between 0 and 1 and compare to the CDF
# of the neutrino energy distribution at time t
        u = np.random.uniform(size=n)
j = np.searchsorted(cdf, u)
# Linearly interpolate in the CDF to produce a random energy
energies[j <= 0] = E[0].to('MeV').value
energies[j >= len(E)-1] = E[-1].to('MeV').value
cut = (0 < j) & (j < len(E)-1)
j = j[cut]
en = E[j] + (E[j+1] - E[j]) / (cdf[j+1] - cdf[j]) * (u[cut] - cdf[j])
energies[cut] = en
return energies
def photonic_energy_per_vol(self, time, E, flavor, photon_spectrum, mixing=None, n=1000):
"""Compute the energy deposited in a cubic meter of ice by photons
from SN neutrino interactions.
Parameters
----------
time : float (units s)
Time relative to core bounce.
E : `numpy.ndarray`
Sorted grid of neutrino energies
flavor : :class:`asteria.neutrino.Flavor`
Neutrino flavor.
photon_spectrum : `numpy.ndarray` (Units vary, m**2)
Grid of the product of lepton cross section with lepton mean energy
and lepton path length per MeV, sorted according to parameter E
n : int
Maximum number of time steps to compute at once. A temporary numpy array
of size n x time.size is created and can be very memory inefficient.
Returns
-------
E_per_V
Energy per m**3 of ice deposited by neutrinos of requested flavor
"""
H2O_in_ice = 3.053e28 # 1 / u.m**3
t = time.to(u.s).value
Enu = E.to(u.MeV).value
if Enu[0] == 0:
            Enu[0] = 1e-10  # MeV
phot = photon_spectrum.to(u.m**2).value.reshape((-1,1)) # m**2
dist = self.progenitor_distance.to(u.m).value # m**2
flux = self.get_flux( time, flavor ) # s**-1
if mixing is None:
def nu_spectrum(t, E, flavor):
return self.energy_spectrum(t, E, flavor) * self.get_flux(t, flavor)
else:
nu_spectrum = mixing(self)
print('Beginning {0} simulation... {1}'.format(flavor.name, ' '*(10-len(flavor.name))), end='')
# The following two lines exploit the fact that astropy quantities will
# always return a number when numpy size is called on them, even if it is 1.
E_per_V = np.zeros( time.size )
if time.size < 2:
raise RuntimeError("Time array size <2, unable to compute energy per volume.")
for i_part in self.parts_by_index(time, n): # Limits memory usage
E_per_V[i_part] += np.trapz( nu_spectrum(time[i_part], E, flavor).value * phot, Enu, axis=0)
E_per_V *= H2O_in_ice / ( 4 * np.pi * dist**2) * np.ediff1d(t, to_end=(t[-1] - t[-2]))
if not flavor.is_electron:
E_per_V *= 2
print('Completed')
return E_per_V * u.MeV / u.m**3
def initialize(config):
"""Initialize a Source model from configuration parameters.
Parameters
----------
config : :class:`asteria.config.Configuration`
Configuration parameters used to create a Source.
Returns
-------
Source
An initialized source model.
"""
# Dictionary of L, <E>, and alpha versus time, keyed by neutrino flavor.
luminosity, mean_energy, pinch = {}, {}, {}
if config.source.table.format.lower() == 'fits':
# Open FITS file, which contains a luminosity table and a pinching
# parameter (alpha) and mean energy table.
fitsfile = '/'.join([config.abs_base_path, config.source.table.path])
sn_data_table = Table.read(fitsfile)
time = sn_data_table['TIME'].to('s')
# Loop over all flavors in the table:
for flavor in Flavor:
fl = flavor.name.upper()
if any( fl in col for col in sn_data_table.keys() ):
L = sn_data_table['L_{:s}'.format(fl)].to('erg/s')
E = sn_data_table['E_{:s}'.format(fl)].to('MeV')
alpha = sn_data_table['ALPHA_{:s}'.format(fl)]
elif fl == 'NU_X_BAR':
L = sn_data_table['L_NU_X'].to('erg/s')
E = sn_data_table['E_NU_X'].to('MeV')
alpha = sn_data_table['ALPHA_NU_X']
else:
raise KeyError("""'{0}'""".format(fl))
luminosity[flavor] = PchipInterpolator(time, L, extrapolate=False)
mean_energy[flavor] = PchipInterpolator(time, E, extrapolate=False)
pinch[flavor] = PchipInterpolator(time, alpha, extrapolate=False )
elif config.source.table.format.lower() == 'ascii':
# ASCII will be supported! Promise, promise.
raise ValueError('Unsupported format: "ASCII"')
else:
raise ValueError('Unknown format {}'.format(config.source.table.format))
# Set up the distance model.
distance_model = None
dmtype = config.source.progenitor.distance.model
if dmtype == 'FixedDistance':
# FixedDistance model.
r = parse_quantity(config.source.progenitor.distance.distance)
dr = parse_quantity(config.source.progenitor.distance.uncertainty)
distance_model = FixedDistance(r, dr)
elif dmtype == 'StellarDensity':
# StellarDensity model, with options to add LMC and SMC.
fitsfile = '/'.join([config.abs_base_path,
config.source.progenitor.distance.path])
lmc = parse_quantity(config.source.progenitor.distance.add_LMC)
smc = parse_quantity(config.source.progenitor.distance.add_SMC)
distance_model = StellarDensity(fitsfile, lmc, smc)
else:
raise ValueError('Unrecognized distance_model: {}'.format(dmtype))
return Source(config.source.name,
config.source.model,
parse_quantity(config.source.progenitor.mass),
distance_model.distance(),
time,
luminosity,
mean_energy,
pinch)
| 2.171875 | 2 |
envpy/error.py | Combofoods/pyenv | 1 | 12794624 | <gh_stars>1-10
class BaseError(Exception): ...
class UnknownError(BaseError): ...
class InvalidIDError(BaseError): ...
class NotFoundIDError(BaseError): ...
class NotFoundEnviromentVariableError(BaseError):
def __init__(self, enviromentVariable):
self.enviromentVariable : str = enviromentVariable
        self.message = f'The environment variable {enviromentVariable} was not found.'
super().__init__(self.message)
def __str__(self):
return f'{self.enviromentVariable} -> {self.message}' | 2.796875 | 3 |
problems/sqrtx/solution-2.py | MleMoe/LeetCode-1 | 2 | 12794625 | <filename>problems/sqrtx/solution-2.py
class Solution:
def mySqrt(self, x: int) -> int:
l = 0
        # x = 1 is a special case (hence the + 1 below)
r = x // 2 + 1
while l < r:
mid = l + (r - l + 1) // 2
square = mid * mid
if square > x:
r = mid - 1
else:
l = mid
return l | 3.71875 | 4 |
mtp_api/apps/security/migrations/0011_delete_securitydataupdate.py | ministryofjustice/mtp-api | 5 | 12794626 | <reponame>ministryofjustice/mtp-api
# Generated by Django 1.10.5 on 2017-03-01 15:31
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('security', '0010_prisoner_profile_uniqueness'),
]
operations = [
migrations.DeleteModel(name='SecurityDataUpdate'),
]
| 1.515625 | 2 |
PythonScripts/pickle_processing.py | marinmarcillat/PELGAS-TS-Analysis | 0 | 12794627 | from sklearn.neighbors import NearestNeighbors
import Sv
import logging
import pandas as pd
import numpy as np
import functools
import os
import math
logger = logging.getLogger('marin')
logger.setLevel(logging.DEBUG)
def point_processing(tracks_data):
"""
input: tracking data matrix
    output: column of distances to nearest neighbors in meters
"""
tracks = tracks_data.loc[:, ['x_gps', 'y_gps', 'z_gps']] # get position of each tracks
tracks['long_m'] = tracks.y_gps * (
40075000 * np.cos(tracks.x_gps) / 360) # Get the equivalent of the longitude in meters
tracks['lat_m'] = tracks.x_gps * 60 * 1852 # Get the equivalent of the latitude in meters
array = np.vstack(
[tracks.lat_m, tracks.long_m, tracks.z_gps]) # preparing the array for nearest neighbors algorithm
array = np.transpose(array)
nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(array) # nearest neighbors algorithm
distances, indices = nbrs.kneighbors(array)
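    # column 0 of `distances` is each point's distance to itself (zero), so
    # column 1 holds the distance to the nearest other point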
return distances[:, 1]
def conjunction(*conditions):
"""Multiple conditions filter for panda"""
return functools.reduce(np.logical_and, conditions)
def calc_distance_lat(lat1, lat2):
"""Returns a distance between 2 latitudes"""
dlat = lat2 - lat1
dist = dlat * 60 * 1852
return dist
def calc_distance_long(lat, lon1, lon2):
"""Returns a distance between 2 longitudes for a given latitude"""
dlon = lon2 - lon1
dist = dlon * (40075000 * math.cos(lat) / 360)
return dist
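# For example, calc_distance_long(0, 0, 1) is 40075000 / 360, i.e. one degree
# of longitude at the equator corresponds to roughly 111.3 km.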
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def pickle_processing(path_pickle, path_output, transducer, freq_TS, TS_parameters, hac_info, orient):
"""
Process the pickle file from pymovies tracking and returns several key parameters for each track.
input:
- path_pickle: path to a pickle file, output of movies TS analysis
- path_output: path to store output csv
- transducer; name of the used transducer
- freq_TS: reference frequence for TS extraction
- TS_parameters: parameter for the TS detection and tracks selection
- hac_info: complementary info on the different runs, same for all tracks of each run
- orient: orientation ('H' or 'V')
outputs: multiple csv
- tracks: matrix of tracks with:
- track, target: relative and absolute index for each tracks
- TSrange: mean distance in m to transducer
- TSalong, TSarthwart: mean angle in the transducer beam
- x, y, z, x_gps, y_gps, z_gps: relative and absolute position
- TScomp_mean, TScomp: mean TS of all frequencies or for the closest frequency from reference frequency
- nb_target: number of targets per tracks
- timeInt and Time: mean time in ns since 1970 and in string formats
- k_dist: distance in m to the nearest neighbour
- State, Abrv, tailleMoyenne: variables from the hac info file
- b20: b20 value
- Nv: Nv value
- dist_x, dist_y, dist_z, dist_range, dist_tot: mean displacement in m following different axis
- tilt_angle, cap_rel, cap_abs: tilt or heading angle (absolute and relative) in degrees (according to orientation)
- vit_x, vit_y, vit_z, vit_range: speed following different axis
- sd_x, sd_y, sd_z, sd_range, sd_ta: standard deviation of previous displacement and angle
- sd_tot: sum of standard deviation
- targets: matrix of all targets
- freq: mean TScomp for each frequency
"""
if path_pickle[-7:] != ".pickle": # Check the pickle file
logger.error("Not a pickle file !")
return
name_transect = os.path.basename(path_pickle)[:-18]
logger.info("reading...")
if os.path.getsize(path_pickle) > 0:
result = pd.read_pickle(path_pickle) # read the pickle file
else:
logger.error("File empty !") # Si le fichier Pickle est vide
logger.info("done !")
for i in range(len(result[10])): # get index for the sounder and transducer according to given transducer
for j in range(len(result[10][i])):
if result[10][i][j] == transducer:
indexSounder = i
indexTransducer = j
logger.info("creating tables...") # Extract the pickle data in several panda tables.
nb_target = len(result[0][indexSounder][indexTransducer]) # number of targets for the given sounder and transducer
if nb_target > 0: # check if any targets
nb_freq = int(len(result[9][indexSounder][indexTransducer]) / nb_target)
index_targets = []
for i in range(nb_target):
index_targets += [i for j in range(nb_freq)]
targets = pd.DataFrame( # individual target data
{
"track": np.array(result[8][indexSounder][indexTransducer]),
"target": range(nb_target),
"timeTarget": np.array(result[0][indexSounder][indexTransducer]),
"TSrange": np.array(result[1][indexSounder][indexTransducer]),
"TSalong": np.array(result[4][indexSounder][indexTransducer]),
"TSathwart": np.array(result[5][indexSounder][indexTransducer]),
},
index=range(nb_target)
)
freq = pd.DataFrame( # TS and frequency data
{
"target": index_targets,
"TScomp": np.array(result[2][indexSounder][indexTransducer]),
"TSucomp": np.array(result[3][indexSounder][indexTransducer]),
"TSfreq": np.array(result[9][indexSounder][indexTransducer]),
},
index=range(nb_freq * nb_target)
)
# get the position of each targets (relative and absolute)
position = pd.DataFrame(result[6][indexSounder][indexTransducer],
index=range(0, len(result[0][indexSounder][indexTransducer])), columns=['x', 'y', 'z'])
positionGPS = pd.DataFrame(result[7][indexSounder][indexTransducer],
index=range(0, len(result[0][indexSounder][indexTransducer])),
columns=['x_gps', 'y_gps', 'z_gps'])
TS_means = freq.groupby(by="target").mean() # get the TScomp_mean: mean TScomp for all frequencies
TS_means = TS_means.rename(columns={'TScomp': 'TScomp_mean'})
freq_TS = min(list(freq['TSfreq']), key=lambda x: abs(x - freq_TS)) # closest frequency from the reference
# frequency freq_TS
TS_freq = freq[freq.TSfreq == freq_TS] # get the TScomp for the given reference frequency
TS_freq.index = range(len(TS_freq))
logger.info("done !")
targets = pd.concat([targets, position, positionGPS, TS_means['TScomp_mean'], TS_freq['TScomp']],
axis=1) # merge of all the data
tracks = targets.groupby(by="track").target.agg('count') # get number of target per tracks
tracks_len = pd.DataFrame(
{'track': tracks.index,
'nb_target': tracks.values},
index=range(len(tracks.index))
)
targets = pd.merge(targets, tracks_len, how='inner', on='track') # add the track length to the target data
targets_selected = targets.loc[targets['nb_target'] >= TS_parameters['MinEchoNumber']] # Select by track length
targets_data = targets_selected.sort_values('track')
targets_data['timeInt'] = targets_data['timeTarget'].apply(lambda x: x.value) # convert time to int (ns, 1970)
logger.info("targets ready !")
##### Tracks grouping and analysis
logger.info('Gathering tracks data...')
tracks_data = targets_data.groupby('track').mean() # group targets by tracks, keep each parameters as mean
tracks_data['Time'] = pd.to_datetime(tracks_data['timeInt']) # panda's datetime
tracks_data['k_dist'] = point_processing(tracks_data) # Distance to closest neighbor
for index, row in hac_info.iterrows(): # add the hac_info columns (same for each run)
if row.Name == name_transect:
for header in hac_info.columns[1:]:
tracks_data[header] = row[header]
tracks_data['b20'] = tracks_data['TScomp'] - (
20 * np.log10(tracks_data['tailleMoyenne'])) # get the b20 from TScomp and taille moyenne
# get the Nv value for each track
path_Nv = path_output + '/' + name_transect + "_Nv.csv"
if os.path.exists(path_Nv):
Nv = pd.read_csv(path_Nv)
tracks_data['Nv'] = Sv.get_nv(tracks_data, Nv)
else:
tracks_data['Nv'] = -999 # No Nv data provided
# tracks movement analysis
tracks_id = list(targets_data.groupby('track').groups)
scores = []
for i in tracks_id: # for each track
track_i = targets_data.loc[
targets_data['track'] == i, ['timeTarget', 'x', 'y', 'z', 'TSrange', 'x_gps', 'y_gps']]
track_i = track_i.sort_values('timeTarget') # Sort by time
deltas = [[], [], [], [], [], [], [], [], []]
for j in range(1, len(track_i)):
deltas[0].append(track_i.x.iloc[j] - track_i.x.iloc[j - 1]) # delta in x axis
deltas[1].append(track_i.y.iloc[j] - track_i.y.iloc[j - 1]) # delta in y axis
deltas[2].append(track_i.z.iloc[j] - track_i.z.iloc[j - 1]) # delta in z axis
deltas[3].append(track_i.TSrange.iloc[j] - track_i.TSrange.iloc[j - 1]) # delta in range
deltas[4].append(calc_distance_lat(track_i.x_gps.iloc[j],
track_i.x_gps.iloc[j - 1])) # distance between the 2 latitudes
deltas[5].append(calc_distance_long(track_i.x_gps.iloc[j], track_i.y_gps.iloc[j],
track_i.y_gps.iloc[j - 1])) # distance between the 2 longitudes
if orient == 'H': #Horizontal echo sounder
if track_i.x.iloc[
j] > 0: # check if x is coherent (beam is oriented on starboard), corrects direction
# accordingly
cap_rel = abs(math.degrees(
math.atan2(deltas[1][j - 1], - deltas[0][j - 1]))) # heading relative to the boat
else:
cap_rel = abs(math.degrees(math.atan2(deltas[1][j - 1], deltas[0][j - 1])))
cap_abs = math.degrees(
math.atan2(deltas[5][j - 1], deltas[4][j - 1])) # absolute (geographical) heading
if cap_abs < 0:
cap_abs = 360 + cap_abs # correct to have 0-360° headings
tilt_angle = (math.degrees(
math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2),
deltas[2][j - 1])) - 90) # tilt angle of the track
deltas[6].append(tilt_angle)
deltas[7].append(cap_rel)
deltas[8].append(cap_abs)
else: #vertical echo sounder
tilt_angle = (math.degrees(
math.atan2(math.sqrt(deltas[0][j - 1] ** 2 + deltas[1][j - 1] ** 2),
deltas[2][j - 1])) - 90) # tilt angle of the track
deltas[6].append(tilt_angle)
deltas[7].append(999) # relative and absolute heading is irrelevant on vertical echo sounder
deltas[8].append(999)
delta_t = track_i.timeTarget.iloc[len(track_i) - 1] - track_i.timeTarget.iloc[0]
delta_t = delta_t.total_seconds() # time length of the track (s)
dist_x = np.sum(deltas[4]) # dist is the length of the track on several dimensions
dist_y = np.sum(deltas[5])
dist_z = np.sum(deltas[2])
dist_range = np.sum(deltas[3])
dist_tot = dist_x + dist_y + dist_z
tilt_angle = np.mean(deltas[6]) # mean tilt angle of the track
cap_rel = np.mean(deltas[7]) # mean relative heading of the track
cap_abs = np.mean(deltas[8]) # mean absolute heading of the track
vit_x = dist_x / delta_t # speed
vit_y = dist_y / delta_t
vit_z = dist_z / delta_t
vit_range = dist_range / delta_t
sd_x = np.std(deltas[4]) # standard deviation
sd_y = np.std(deltas[5])
sd_z = np.std(deltas[2])
sd_range = np.std(deltas[3])
sd_ta = np.std(deltas[6])
sd_cr = np.std(deltas[7])
sd_ca = np.std(deltas[8])
sd_tot = sd_x + sd_y + sd_z
scores.append(
[i, dist_x / len(track_i), dist_y / len(track_i), dist_z / len(track_i), dist_range, dist_tot,
tilt_angle, cap_rel, cap_abs, vit_x, vit_y, vit_z, vit_range, sd_x, sd_y, sd_z, sd_range, sd_tot,
sd_ta, sd_cr, sd_ca]
)
dist_scores = pd.DataFrame(scores, index=range(len(scores)), # storing values as a panda data frame
columns=['track', 'dist_x', 'dist_y', 'dist_z', 'dist_range', 'dist_tot',
'tilt_angle', 'cap_rel', 'cap_abs', 'vit_x', 'vit_y', 'vit_z', 'vit_range',
'sd_x',
'sd_y', 'sd_z', 'sd_range', 'sd_tot', 'sd_ta', 'sd_cr', 'sd_ca'])
tracks_data = pd.merge(tracks_data, dist_scores, how='inner', on='track') # merge with the main data frame
logger.info("Done !")
logger.debug('Tracks summary :')
logger.debug(str(tracks_data.describe()))
# Storing 2 different data frames as csv:
# - targets, with individual targets of each points
# - tracks, with the run track data
filename_1 = path_output + "/" + name_transect + "_tracks.csv"
filename_2 = path_output + "/" + name_transect + "_targets.csv"
tracks_data.to_csv(filename_1, index=False)
targets_data.to_csv(filename_2, index=False)
logger.info("files saved !")
freq_data = freq.groupby('TSfreq').mean()
freq_data['freq'] = freq_data.index
filename_3 = path_output + "/" + name_transect + "_freq.csv"
freq_data.to_csv(filename_3, index=False)
else:
logger.error("No targets !!!")
| 3.296875 | 3 |
final_model/space_recognition_original.py | AEyeAlliance/aeye-alliance | 6 | 12794628 | <gh_stars>1-10
import torch
import torch.nn as nn
# import torch.onnx
# import onnx
# import onnx_caffe2.backend
# from onnx import checker, helper
import torch.optim as optim
import numpy as np
import cv2
from PIL import Image
import torch.utils.model_zoo as model_zoo
import torch.onnx
def export_model():
model = CNN()
model.load_state_dict(torch.load("model.pth"))
# Input to the model
x = torch.randn(5, 3, 28, 28)
# Export the model
torch_out = torch.onnx._export(model, # model being run
x, # model input (or a tuple for multiple inputs)
"model.onnx-2",
# where to save the model (can be a file or file-like object)
export_params=True) # store the trained parameter weights inside the model file
def inspect_model():
# Input image into the ONNX model
onnx_model = onnx.load("model.onnx")
model = onnx_caffe2.backend.prepare(onnx_model)
image = Image.open("z.jpg")
# # image = image.convert('RGB')
image = np.array(image)
image = cv2.resize(image, (28, 28))
image = image.astype(np.float32) / 255.0
image = torch.from_numpy(image[None, :, :, :])
image = image.permute(0, 3, 1, 2)
W = {model.graph.input[0].name: image.data.numpy()}
model_out = model.run(W)[0]
print(model_out)
#
# # onnx_model(image)
#
# print(onnx_model)
# onnx.checker.check_model(onnx_model)
# # print(onnx.helper.printable_graph(onnx_model.graph))
def make_prediction(img_path):
model = CNN()
model.load_state_dict(torch.load("final_model/model.pth"))
image = Image.open(img_path)
image = image.convert('RGB')
width, height = image.size
num = round(width/height/0.78)
w = width/num
letters = []
for i in range(0, num):
cropped = image.crop((i * w, 0, (i + 1) * w, height))
# cropped.show()
cropped = np.array(cropped)
cropped = cv2.resize(cropped, (28, 28))
cropped = cropped.astype(np.float32) / 255.0
cropped = torch.from_numpy(cropped[None, :, :, :])
cropped = cropped.permute(0, 3, 1, 2)
predicted_tensor = model(cropped)
_, predicted_letter = torch.max(predicted_tensor, 1)
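        # class indices 0-25 are the letters a-z; 26-36 map to space,
        # punctuation and the '#'/'~' indicator symbols handled below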
if int(predicted_letter) == 26:
letters.append(chr(32))
elif int(predicted_letter) == 27:
letters.append(chr(35))
elif int(predicted_letter) == 28:
letters.append(chr(46))
elif int(predicted_letter) == 29:
letters.append(chr(44))
elif int(predicted_letter) == 30:
letters.append(chr(58))
elif int(predicted_letter) == 31:
letters.append(chr(92))
elif int(predicted_letter) == 32:
letters.append(chr(45))
elif int(predicted_letter) == 33:
letters.append(chr(59))
elif int(predicted_letter) == 34:
letters.append(chr(63))
elif int(predicted_letter) == 35:
letters.append(chr(33))
elif int(predicted_letter) == 36:
letters.append(chr(126))
else:
letters.append(chr(97 + predicted_letter))
output = ""
number = False
capL = False
capW = False
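    # Decode the Braille indicator symbols: '#' switches to number mode, '~'
    # marks a capital letter (two in a row capitalize the whole word) and a
    # space resets both modes.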
for j in letters:
if j == '#':
number = True
elif ord(j) == 126:
if capL:
capW = True
capL = True
elif j == ' ':
number = False
capL = False
capW = False
output = output + j
elif not number:
if capW and ord(j) in range(97, 123):
output = output + chr(ord(j) - 32)
elif capL and ord(j) in range(97, 123):
output = output + chr(ord(j) - 32)
capL = False
else:
output = output + j
else:
if ord(j) in range(97, 106):
output = output + chr(ord(j)-48)
elif ord(j) == 106:
output = output + chr(48)
else:
output = output + j
return output
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.block1 = nn.Sequential(
# 3x28x28
nn.Conv2d(in_channels=3,
out_channels=16,
kernel_size=5,
stride=1,
padding=2),
# 16x28x28
nn.MaxPool2d(kernel_size=2),
# 16x14x14
nn.LeakyReLU()
)
# 16x14x14
self.block2 = nn.Sequential(
nn.Conv2d(in_channels=16,
out_channels=32,
kernel_size=5,
stride=1,
padding=2),
# 32x14x14
nn.MaxPool2d(kernel_size=2),
# 32x7x7
nn.LeakyReLU()
)
# linearly
self.block3 = nn.Sequential(
nn.Linear(32 * 7 * 7, 100),
nn.LeakyReLU(),
nn.Linear(100, 37)
)
        # 1x37
def forward(self, x):
out = self.block1(x)
out = self.block2(out)
# flatten the dataset
# ipdb; ipdb.set_trace()
out = out.view(-1, 32 * 7 * 7)
out = self.block3(out)
return out
# print(make_prediction("test/Prairie.jpg"))
# print(make_prediction("test/He_was_happy..png"))
# print(make_prediction("test/the_little.png"))
# print(make_prediction("test/with_his_family.png"))
# print(make_prediction("test/with_his_mouth..png"))
# print(make_prediction("test/would_run_and_get_it.png")) | 2.59375 | 3 |
Ex010.py | paulosv2/CursoEmVideo | 0 | 12794629 | real = float(input('Insíra quantos reais voce quer converter para dólar: '))
dolar = real / 3.27
print(f'With R${real} you can buy US${dolar}!')
| 3.625 | 4 |
data_structure/list.py | mstao/pycore | 0 | 12794630 | <filename>data_structure/list.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# initialize a list
classmates = ['Michael', 'Bob', 'Tracy']
print(classmates)
# get the first element
print(classmates[0])
# get the last element
print(classmates[-1])
# append an element to the end
classmates.append("Mary")
print(classmates)
# insert an element at the specified position
classmates.insert(1, "Walker")
print(classmates)
# remove the last element
classmates.pop()
print(classmates)
# remove the element at the specified position
classmates.pop(3)
print(classmates)
# replace the element at the specified position
classmates[0] = "OK"
print(classmates)
# get the length
print(len(classmates))
| 4.03125 | 4 |
leagueinputs.py | pritish-devurkar/Hackathon2021_FFL | 0 | 12794631 | <reponame>pritish-devurkar/Hackathon2021_FFL
from wtforms import Form, IntegerField, StringField, validators, SelectField, FieldList, FormField
class InputForm(Form):
league_id = IntegerField(
label='League ID',
validators=[validators.InputRequired()]
)
year = IntegerField(
label='Year', default=2021,
validators=[validators.InputRequired()]
)
espn_s2 = StringField(
label='ESPN_S2',
validators=[validators.InputRequired()]
)
swid = StringField(
label='swid',
validators=[validators.InputRequired()]
)
class TradeForms(Form):
name = SelectField("Placeholder", choices=[])
class SelectFormList(Form):
name_entries = FieldList(FormField(TradeForms)) | 2.765625 | 3 |
audiorename/utils.py | Josef-Friedrich/mutagen-renamer | 0 | 12794632 | <filename>audiorename/utils.py
import re
def indent(text: str) -> str:
return ' ' + re.sub(r'\n', '\n ', text)
def read_file(path: str) -> str:
return open(path, 'r').read()
| 2.984375 | 3 |
grsc_app/apps.py | NicholasFry/geothermalrisingsc | 0 | 12794633 | <gh_stars>0
from django.apps import AppConfig
class GrscAppConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'grsc_app'
| 1.28125 | 1 |
geomstats/geometry/stratified/point_set.py | shubhamtalbar96/geomstats | 0 | 12794634 | <reponame>shubhamtalbar96/geomstats
"""Class for Stratified Spaces.
Lead authors: <NAME> & <NAME>
"""
import functools
import itertools
from abc import ABC, abstractmethod
def broadcast_lists(list_a, list_b):
"""Broadcast two lists.
Similar behavior as ``gs.broadcast_arrays``, but for lists.
"""
n_a = len(list_a)
n_b = len(list_b)
if n_a == n_b:
return list_a, list_b
if n_a == 1:
return itertools.zip_longest(list_a, list_b, fillvalue=list_a[0])
if n_b == 1:
return itertools.zip_longest(list_a, list_b, fillvalue=list_b[0])
raise Exception(f"Cannot broadcast lens {n_a} and {n_b}")
def _manipulate_input(arg):
if not (type(arg) in [list, tuple]):
return [arg]
return arg
def _vectorize_point(*args_positions, manipulate_input=_manipulate_input):
"""Check point type and transform in iterable if not the case.
Parameters
----------
args_positions : tuple
Position and corresponding argument name. A tuple for each position.
Notes
-----
Explicitly defining args_positions and args names ensures it works for all
combinations of input calling.
"""
def _dec(func):
@functools.wraps(func)
def _wrapped(*args, **kwargs):
args = list(args)
for pos, name in args_positions:
if name in kwargs:
kwargs[name] = manipulate_input(kwargs[name])
else:
args[pos] = manipulate_input(args[pos])
return func(*args, **kwargs)
return _wrapped
return _dec
class Point(ABC):
r"""Class for points of a set."""
@abstractmethod
def __repr__(self):
"""Produce a string with a verbal description of the point."""
@abstractmethod
def __hash__(self):
"""Define a hash for the point."""
@abstractmethod
def to_array(self):
"""Turn the point into a numpy array.
Returns
-------
array_point : array-like, shape=[...]
An array representation of the Point type.
"""
class PointSet(ABC):
r"""Class for a set of points of type Point.
Parameters
----------
param: int
Parameter defining the pointset.
default_point_type : str, {\'vector\', \'matrix\', \'Point\'}
Point type.
Optional, default: \'Point\'.
default_coords_type : str, {\'intrinsic\', \'extrinsic\', etc}
Coordinate type.
Optional, default: \'intrinsic\'.
"""
@abstractmethod
def belongs(self, point, atol):
r"""Evaluate if a point belongs to the set.
Parameters
----------
point : Point-like, shape=[...]
Point to evaluate.
atol : float
Absolute tolerance.
Optional, default: backend atol.
Returns
-------
belongs : array-like, shape=[...]
Boolean evaluating if point belongs to the set.
"""
@abstractmethod
def random_point(self, n_samples=1):
r"""Sample random points on the PointSet.
Parameters
----------
n_samples : int
Number of samples.
Optional, default: 1.
Returns
-------
samples : List of Point
Points sampled on the PointSet.
"""
@abstractmethod
def set_to_array(self, points):
"""Convert a set of points into an array.
Parameters
----------
points : list of Point, shape=[...]
Number of samples of point type to turn
into an array.
Returns
-------
points_array : array-like, shape=[...]
Points sampled on the PointSet.
"""
class PointSetMetric(ABC):
r"""Class for the lenght spaces.
Parameters
----------
    space : PointSet
        Underlying PointSet.
default_point_type : str, {\'vector\', \'matrix\', \'Point\' }
Point type.
Optional, default: \'Point\'.
default_coords_type : str, {\'intrinsic\', \'extrinsic\', etc}
Coordinate type.
Optional, default: \'intrinsic\'.
"""
def __init__(self, space: PointSet, **kwargs):
super(PointSetMetric, self).__init__(**kwargs)
self.space = space
@abstractmethod
def dist(self, point_a, point_b, **kwargs):
"""Distance between two points in the PointSet.
Parameters
----------
point_a: Point or List of Point, shape=[...]
Point in the PointSet.
point_b: Point or List of Point, shape=[...]
Point in the PointSet.
Returns
-------
distance : array-like, shape=[...]
Distance.
"""
@abstractmethod
def geodesic(self, initial_point, end_point, **kwargs):
"""Compute the geodesic in the PointSet.
Parameters
----------
initial_point: Point or List of Points, shape=[...]
Point in the PointSet.
end_point: Point or List of Points, shape=[...]
Point in the PointSet.
Returns
-------
path : callable
Time parameterized geodesic curve.
"""
| 2.9375 | 3 |
mail/test.py | Codefans-fan/p2pSpider | 0 | 12794635 | <reponame>Codefans-fan/p2pSpider
# -*- coding: utf-8 -*-
'''
Created on Mar 18, 2016
@author: fky
'''
import smtplib
if not 'SMTP_SSL' in smtplib.__all__:
print('''error: Server does not support SMTP-over-SSL. You could use STARTTLS instead. If SSL is needed, an upgrade to Python 2.6 on the server-side should do the trick.''')
print(smtplib.__all__)
| 1.96875 | 2 |
deepface/recognizers/recognizer_vgg.py | wanjinchang/deepface-1 | 1 | 12794636 | <gh_stars>1-10
import abc
import os
import sys
import cv2
import numpy as np
import tensorflow as tf
from scipy.io import loadmat
import pickle
from deepface.confs.conf import DeepFaceConfs
from .recognizer_base import FaceRecognizer
from deepface.utils.common import grouper, faces_to_rois, feat_distance_cosine
class FaceRecognizerVGG(FaceRecognizer):
NAME = 'recognizer_vgg'
def __init__(self, custom_db=None):
self.batch_size = 4
dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface')
filename = 'weight.mat'
filepath = os.path.join(dir_path, filename)
if not os.path.exists(filepath):
raise FileNotFoundError('Weight file not found, path=%s' % filepath)
data = loadmat(filepath)
# read meta info
meta = data['meta']
classes = meta['classes']
normalization = meta['normalization']
self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3)
self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2])
self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image')
self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]]
input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image')
# read layer info
layers = data['layers']
current = input_norm
network = {}
for layer in layers[0]:
name = layer[0]['name'][0][0]
layer_type = layer[0]['type'][0][0]
if layer_type == 'conv':
if name[:2] == 'fc':
padding = 'VALID'
else:
padding = 'SAME'
stride = layer[0]['stride'][0][0]
kernel, bias = layer[0]['weights'][0][0]
# kernel = np.transpose(kernel, (1, 0, 2, 3))
bias = np.squeeze(bias).reshape(-1)
conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding)
current = tf.nn.bias_add(conv, bias)
elif layer_type == 'relu':
current = tf.nn.relu(current)
elif layer_type == 'pool':
stride = layer[0]['stride'][0][0]
pool = layer[0]['pool'][0][0]
current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1),
padding='SAME')
elif layer_type == 'softmax':
current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)]))
network[name] = current
self.network = network
self.graph = tf.get_default_graph()
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
self.persistent_sess = tf.Session(graph=self.graph, config=config)
self.db = None
if custom_db:
db_path = custom_db
else:
db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '')
db_path = os.path.join(dir_path, db_path)
with open(db_path, 'rb') as f:
self.db = pickle.load(f)
# warm-up
self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={
self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8)
})
def name(self):
return FaceRecognizerVGG.NAME
def get_new_rois(self, rois):
new_rois = []
for roi in rois:
if roi.shape[0] != self.input_hw[0] or roi.shape[1] != self.input_hw[1]:
new_roi = cv2.resize(roi, self.input_hw, interpolation=cv2.INTER_AREA)
# new_roi = cv2.cvtColor(new_roi, cv2.COLOR_BGR2RGB)
new_rois.append(new_roi)
else:
# roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
new_rois.append(roi)
return new_rois
def extract_features(self, rois=None, npimg=None, faces=None):
probs = []
feats = []
if not rois and faces:
rois = faces_to_rois(npimg=npimg,
faces=faces)
new_rois = []
if len(rois) > 0:
new_rois = self.get_new_rois(rois=rois)
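        # grouper pads the final chunk with blank images so that every batch
        # has exactly batch_size entries; the padded rows are discarded again
        # below when probs/feats are sliced back to len(rois)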
for roi_chunk in grouper(new_rois, self.batch_size,
fillvalue=np.zeros((self.input_hw[0], self.input_hw[1], 3), dtype=np.uint8)):
prob, feat = self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={
self.input_node: roi_chunk
})
feat = [np.squeeze(x) for x in feat]
probs.append(prob)
feats.append(feat)
probs = np.vstack(probs)[:len(rois)]
feats = np.vstack(feats)[:len(rois)]
return probs, feats
def detect(self, npimg, rois=None, faces=None):
probs, feats = self.extract_features(npimg=npimg,
rois=rois,
faces=faces)
if self.db is None:
names = [[(self.class_names[idx], prop[idx]) for idx in
prop.argsort()[-DeepFaceConfs.get()['recognizer']['topk']:][::-1]] for prop in probs]
else:
# TODO
names = []
for feat in feats:
scores = []
for db_name, db_feature in self.db.items():
similarity = feat_distance_cosine(feat, db_feature)
scores.append((db_name, similarity))
scores.sort(key=lambda x: x[1], reverse=True)
names.append(scores)
return {
'output': probs,
'feature': feats,
'name': names
}
def get_threshold(self):
return DeepFaceConfs.get()['recognizer']['vgg']['score_th']
| 1.9375 | 2 |
junos_get_bgp_neighbors.py | ksator/junos-automation-with-NAPALM | 15 | 12794637 | <filename>junos_get_bgp_neighbors.py
from json import dumps
from napalm_base import get_network_driver
junos_driver = get_network_driver('junos')
junos_device = {'username': 'pytraining', 'password': '<PASSWORD>', 'hostname': '172.30.179.95'}
with junos_driver(**junos_device) as junos:
print('-'*60)
print junos.get_bgp_neighbors()
print('-'*60)
print dumps(junos.get_bgp_neighbors(), indent=4)
print('-'*60)
print junos.get_bgp_neighbors()['global']['peers']['192.168.0.4']['uptime']
'''
# python junos_get_bgp_neighbors.py
------------------------------------------------------------
{u'global': {u'router_id': u'172.16.31.10', u'peers': {u'192.168.0.0': {u'is_enabled': True, u'uptime': 4525781, u'remote_as': 104, u'address_family': {u'ipv4': {u'sent_prefixes': 5, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.17.32', u'local_as': 109, u'is_up': True, u'description': u''}, '192.168.0.4': {u'is_enabled': True, u'uptime': 4525784, u'remote_as': 110, u'address_family': {u'ipv4': {u'sent_prefixes': 6, u'accepted_prefixes': 5, u'received_prefixes': 5}, u'ipv6': {u'sent_prefixes': -1, u'accepted_prefixes': -1, u'received_prefixes': -1}}, u'remote_id': u'172.16.58.3', u'local_as': 109, u'is_up': True, u'description': u''}}}}
------------------------------------------------------------
{
"global": {
"router_id": "172.16.31.10",
"peers": {
"192.168.0.0": {
"is_enabled": true,
"uptime": 4525781,
"remote_as": 104,
"address_family": {
"ipv4": {
"sent_prefixes": 5,
"accepted_prefixes": 5,
"received_prefixes": 5
},
"ipv6": {
"sent_prefixes": -1,
"accepted_prefixes": -1,
"received_prefixes": -1
}
},
"remote_id": "172.16.17.32",
"local_as": 109,
"is_up": true,
"description": ""
},
"192.168.0.4": {
"is_enabled": true,
"uptime": 4525784,
"remote_as": 110,
"address_family": {
"ipv4": {
"sent_prefixes": 6,
"accepted_prefixes": 5,
"received_prefixes": 5
},
"ipv6": {
"sent_prefixes": -1,
"accepted_prefixes": -1,
"received_prefixes": -1
}
},
"remote_id": "172.16.58.3",
"local_as": 109,
"is_up": true,
"description": ""
}
}
}
}
------------------------------------------------------------
4525930
'''
| 2.375 | 2 |
log-analysis/log2tbl.py | danpoe/gpu-tools | 0 | 12794638 | #!/usr/bin/env python3
import argparse
import os
import sys
import collections
import textwrap
from functools import partial
import machinery as ma
from machinery import ErrMsg, chk, bail
from machinery import LogEntry as L
from generic import lty, interleave, itemify, dupchk, listify, w_str
# ------------------------------------------------------------------------------
# Html file (including navigation and sections)
class HtmlFile:
"""Html file representing litmus test results"""
sp = ' '
# HTML prefix before tables
prefix = textwrap.dedent("""\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>GPU Litmus Test Results</title>
<link rel="stylesheet" href="common.css" type="text/css" media="screen"/>
</head>
<body>
<div class="outer">
<div class="inner">
<h1>GPU Litmus Test Results</h1>
<br>
<center>
To view the logfile for a test and chip, click on the corresponding number.
The logfile contains the litmus test code, and the incantations used for the
test run.
</center>
<br><br>
""")
# HTML suffix after tables
suffix = textwrap.dedent("""
</div>
</div>
</body>
</html>
""")
def __init__(self):
self.items = []
self.nav = '<h4>Contents</h4>\n'
self.secn = 0
self.last_level = -1
def add_nav_item(self, link, level):
sp = self.sp
li = sp * (level + 1)
ul = sp * (self.last_level + 1)
if level == self.last_level:
self.nav += li + '<li><a href="#id' + str(self.secn) + '">' + link +\
'</a></li>\n'
elif level == self.last_level + 1:
self.nav += ul + '<ul>\n'
self.nav += li + '<li><a href="#id' + str(self.secn) + '">' + link +\
'</a></li>\n'
elif level < self.last_level:
self.close_nav(level)
self.nav += li + '<li><a href="#id' + str(self.secn) + '">' + link +\
'</a></li>\n'
else:
assert(False)
self.last_level = level
def close_nav(self, level):
sp = self.sp
while self.last_level > level:
self.nav += sp * self.last_level + '</ul>\n'
self.last_level -= 1
def new_section(self, heading, level):
assert(0 <= level <= 2)
l = str(level+2)
s = '<h' + l + '><a id="id' + str(self.secn) + '">' + heading + '</a></h'\
+ l + '>\n'
self.items.append(s)
self.add_nav_item(heading, level)
self.secn += 1
def add_html(self, html):
self.items.append(html)
def finish(self, nav=True):
self.close_nav(-1)
l = [self.prefix]
if nav:
l += [self.nav]
l += self.items + [self.suffix]
self.s = ''.join(l)
def write(self, fn):
assert(self.s)
f = open(fn, 'w')
f.write(self.s)
f.close()
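# Typical usage (sketch; the file name is illustrative):
#   h = HtmlFile()
#   h.new_section('Observation', 0)
#   h.add_html(produce_table(ks, logs))
#   h.finish()
#   h.write('index.html')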
# ------------------------------------------------------------------------------
### Used by all HTML file producers
# ks: list of test names to include in the table
# logs: list of log objects (only logs which have the key are included in the
# table)
def produce_table(ks, logs, diro='entries'):
logs = [ l for l in logs if l.any_key(ks) ]
s = '<table>\n'
# Process header
s += '<tr>\n'
s += ' <th>Scope tree</th>\n'
s += ' <th>Memory map</th>\n'
s += ' <th>Name</th>\n'
for log in logs:
# Remove directory prefix and suffix
name = os.path.basename(log.fn)
idx = name.find('.')
if idx != -1:
name = name[:idx]
s += ' <th>' + name + '</th>\n'
s += '</tr>\n'
# Process rows
for k in ks:
# Start new row
s += '<tr>\n'
le = ma.get_entry(k, logs)
s += le.pp_prefix(2)
for log in logs:
e = log.get(k)
if e:
s += e.pp_cell_link_dir(2, diro)
# Produce file containing raw litmus log
e.store_log_dir(diro)
else:
s += '<td><a href="">---</a></td>\n'
s += '</tr>\n'
s += '</table>\n'
return s
# Filtering according to scopes and memory regions; no filtering according to
# names
def get_section_filters():
def c(f, g):
return lambda e: f(e) and g(e)
# List of functions that each take a log entry
d = [
# Simple scopes, global memory
c(L.is_warp, L.is_global),
c(L.is_cta, L.is_global),
c(L.is_ker, L.is_global),
# Simple scopes, shared memory
c(L.is_warp, L.is_shared),
# Simple scopes, mixed memory
c(L.is_warp, L.is_mixed_mem),
# Mixed scopes, global memory
c(L.is_mixed_scope, L.is_global),
# Mixed scopes, shared memory
c(L.is_mixed_scope, L.is_shared),
# Mixed scopes, mixed memory
c(L.is_mixed_scope, L.is_mixed_mem)
]
return d
def get_section_names():
# Parallel the above functions
names = [
'Different warps, same CTA; global memory',
'Different CTAs, same kernel; global memory',
'Different kernels, same device; global memory',
'Different warps, same CTA; shared memory',
'Different warps, same CTA; mixed memory',
'Mixed scopes, global memory',
'Mixed scopes, shared memory',
'Mixed scopes, mixed memory'
]
return names
# Get key patterns per axiom
def get_axiom_patterns():
l = [
('SC per location', ['CO', 'Co']),
('No Thin Air', ['(LB$)|(LB\+)|(LB\-)']),
('Observation', ['(MP$)|(MP\+)|(MP\-)', 'WRC', 'ISA2']),
('Propagation Light', ['2\+2W', 'W\+RW\+2W', '(S$)|(S\+)|(S\-)']),
('Propagation Heavy', [ 'SB', '(R$)|(R\+)|(R\-)', 'RWC', 'IRIW' ])
]
return l
# ------------------------------------------------------------------------------
############
# Toplevel #
############
# f: function to be called; args: arguments to the function
def mux(f, args):
inp = args.input
l = list(listify(inp))
if hasattr(args, 'out'):
l.append(args.out)
chk(not dupchk(l), 'duplicate files given')
# Read ordinary logs (if we do not want to read an incantation log)
if f != incantations and f != incantations_flat and f != incantations_html_flat:
c = type(inp) is list
if not c:
inp = [inp]
inp = ma.get_logs(inp, lh=ma.Log)
if not c:
inp = inp[0]
args.input = inp
f(args)
###############
# Subcommands #
###############
### Produce table with sections according to axioms
def classified(args):
pos = args.pos
logs = args.input
assert(lty(logs, ma.Log))
assert(hasattr(args, 'diro'))
l = get_axiom_patterns()
h = HtmlFile()
all_matching = []
for name, val in l:
ks = ma.get_matching_keys(val, logs)
if pos:
ks = ma.get_pos_keys(logs, ks)
all_matching += ks
if ks:
h.new_section(name, 0)
s = produce_table(ks, logs, diro=args.diro)
h.add_html(s)
all_matching = set(all_matching)
if pos:
ks = ma.get_pos_keys(logs)
else:
ks = ma.get_keys(logs)
ks = set(ks) - all_matching
ks = list(ks)
if ks:
h.new_section('Other', 0)
ks.sort()
    s = produce_table(ks, logs, diro=args.diro)
h.add_html(s)
h.finish()
h.write(args.out)
### Two level classification
def two_level(args):
pos = args.pos
logs = args.input
assert(lty(logs, ma.Log))
assert(hasattr(args, 'diro'))
l = get_axiom_patterns()
h = HtmlFile()
all_matching = []
for name, val in l:
ks_s = ma.get_matching_keys(val, logs)
if pos:
ks_s = ma.get_pos_keys(logs, ks_s)
all_matching += ks_s
if ks_s:
h.new_section(name, 0)
# Now divide by other sections
filters = get_section_filters()
names = get_section_names()
for f, name in zip(filters, names):
ks = ma.get_filtered_keys(f, logs, ks_s)
if pos:
ks = ma.get_pos_keys(logs, ks)
if ks:
h.new_section(name, 1)
s = produce_table(ks, logs, diro=args.diro)
h.add_html(s)
# Rest
all_matching = set(all_matching)
if pos:
ks_s = ma.get_pos_keys(logs)
else:
ks_s = ma.get_keys(logs)
ks_s = set(ks_s) - all_matching
ks_s = list(ks_s)
if ks_s:
h.new_section('Other', 0)
ks_s.sort()
filters = get_section_filters()
names = get_section_names()
for f, name in zip(filters, names):
ks = ma.get_filtered_keys(f, logs, ks_s)
if pos:
ks = ma.get_pos_keys(logs, ks)
if ks:
h.new_section(name, 1)
s = produce_table(ks, logs, diro=args.diro)
h.add_html(s)
h.finish()
h.write(args.out)
### Produce table with sections according to scopes and memory regions
def sections(args):
pos = args.pos
logs = args.input
assert(lty(logs, ma.Log))
assert(hasattr(args, 'diro'))
s = ''
h = HtmlFile()
filters = get_section_filters()
names = get_section_names()
for f, name in zip(filters, names):
ks = ma.get_filtered_keys(f, logs)
if pos:
ks = ma.get_pos_keys(logs, ks)
if ks:
h.new_section(name, 0)
s = produce_table(ks, logs, diro=args.diro)
h.add_html(s)
h.finish()
h.write(args.out)
### Produce flat table with all tests
def flat(args):
pos = args.pos
logs = args.input
assert(lty(logs, ma.Log))
assert(hasattr(args, 'diro'))
# Get all the keys
if pos:
ks = ma.get_pos_keys(logs)
else:
ks = ma.get_keys(logs)
s = produce_table(ks, logs, diro=args.diro)
h = HtmlFile()
h.add_html(s)
h.finish(nav=False)
h.write(args.out)
# ------------------------------------------------------------------------------
### Fill up table line by line
# l: list of items
# sep: separator
# end: end of line
# n: number of elements on line
def fill_up(l, sep, end, nl):
n = len(l)
s = ""
while l:
chunk = l[:nl]
line = sep.join(chunk)
s += line + ((nl - len(chunk)) * sep) + end
l = l[nl:]
return s
def latex_tbl(f, logs, n):
ks = ma.get_filtered_keys(f, logs)
sep = ' & '
s = ''
def mapper(k):
e = ma.get_entry(k, logs)
return e.short_name.lower() + sep + str(e.pos)
l = list(map(mapper, ks))
header = sep.join(["Test" + sep + "Freq."] * n) + "\\\\\n"
header += '\midrule\n'
s = header + fill_up(l, sep, '\\\\\n', n)
s += '\\bottomrule\n'
return s
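# Each body line emitted by latex_tbl thus holds n (test, frequency) pairs,
# e.g. (hypothetical values): 'mp & 120 & sb & 0 & rwc & 3 & iriw & 1\\'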
def latex_tbl2(f, logs, n):
ks = ma.get_filtered_keys(f, logs)
sep = ' & '
s = '\midrule\n'
def mapper(k):
e = ma.get_entry(k, logs)
return e.short_name.lower(), str(e.pos)
l = list(map(mapper, ks))
l1, l2 = zip(*l)
l = interleave(l1, l2, n)
s = fill_up(l, sep, '\\\\\n', n)
s += '\\bottomrule\n'
return s
### Produce latex tables
def latex(args):
pos = args.pos
logs = args.input
assert(type(logs) == ma.Log)
n = 4
l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\+2W[^+]',
'W\+RW\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]']
# Produce d-warp:s-cta table, global memory
f = lambda e: L.is_global(e) and \
((L.is_warp(e) and L.does_match(e, l)) or
(L.does_match(e, ['CoWW', 'COWW'])))
s = latex_tbl(f, logs, n)
s += '\n'
# Produce d-warp:s-cta table, shared memory
f = lambda e: L.is_shared(e) and \
((L.is_warp(e) and L.does_match(e, l)) or
(L.does_match(e, ['CoWW', 'COWW'])))
s += latex_tbl(f, logs, n)
s += '\n'
# Produce d-cta:s-ker table, global memory
f = lambda e: L.is_global(e) and \
((L.is_cta(e) and L.does_match(e, l)))
s += latex_tbl(f, logs, n)
w_str(args.out, s)
def latex2(args):
pos = args.pos
logs = args.input
assert(type(logs) == ma.Log)
sep = ' & '
l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\+2W[^+]',
'W\+RW\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]']
lc = ['CoWW', 'COWW']
ks = ma.get_matching_keys(l, logs)
# Names + s1 + global memory
f = lambda e: L.is_global(e) and (L.is_warp(e) or L.does_match(e, lc))
ks1 = ma.get_filtered_keys(f, logs, ks)
ks1.sort()
n = len(ks1)
l = list()
for i, k in enumerate(ks1):
e = ma.get_entry(k, logs)
l.append(e.short_name.lower() + sep + str(e.pos) + sep)
# s1 + shared memory
f = lambda e: L.is_shared(e) and (L.is_warp(e) or L.does_match(e, lc))
ks2 = ma.get_filtered_keys(f, logs, ks)
ks2.sort()
assert(len(ks2) == n)
for i, k in enumerate(ks2):
e = ma.get_entry(k, logs)
l[i] += str(e.pos) + sep
# s2 + global memory
f = lambda e: L.is_global(e) and (L.is_cta(e) or L.does_match(e, lc))
ks3 = ma.get_filtered_keys(f, logs, ks)
ks3.sort()
assert(len(ks3) == n)
for i, k in enumerate(ks3):
e = ma.get_entry(k, logs)
l[i] += str(e.pos) + '\\\\'
s = '\n'.join(l)
w_str(args.out, s)
### Produce latex tables
def latex3(args):
pos = args.pos
logs = args.input
assert(type(logs) == ma.Log)
n = 8
l = ['CO', 'Co', 'LB[^+]', 'MP[^+]', 'WRC[^+]', 'ISA2[^+]', '2\+2W[^+]',
'W\+RW\+2W[^+]', 'S[^+]+$', 'SB[^+]', 'R[^+]+$', 'RWC[^+]', 'IRIW[^+]']
# Produce d-warp:s-cta table, global memory
f = lambda e: L.is_global(e) and \
((L.is_warp(e) and L.does_match(e, l)) or
(L.does_match(e, ['CoWW', 'COWW'])))
s = latex_tbl2(f, logs, n)
s += '\n'
# Produce d-warp:s-cta table, shared memory
f = lambda e: L.is_shared(e) and \
((L.is_warp(e) and L.does_match(e, l)) or
(L.does_match(e, ['CoWW', 'COWW'])))
s += latex_tbl2(f, logs, n)
s += '\n'
# Produce d-cta:s-ker table, global memory
f = lambda e: L.is_global(e) and \
((L.is_cta(e) and L.does_match(e, l)))
s += latex_tbl2(f, logs, n)
w_str(args.out, s)
# ------------------------------------------------------------------------------
### Produce incantations tables
# All tests that are not explicitely listed under 'line filters' in this file
# are ignored; non-existing tests and non-existing entries (e.g. for a certain
# combination of incantations) are also ignored
def incantations(args):
log = args.input
assert(type(log) == str)
# Get chip name
chip = os.path.basename(log)
assert(type(chip) == str)
chip_old = chip
while True:
chip = os.path.splitext(chip)[0]
if chip == chip_old:
break
chip_old = chip
assert(type(chip) == str)
# Get incantation log
log = ma.get_logs(log, lh=ma.LogInc)
assert(lty(log, ma.LogInc))
assert(len(log) == 1)
log = log[0]
out_base = args.out
assert(out_base)
les = log.get_all()
assert(lty(les, L))
# Table header
prefix = textwrap.dedent(r"""
\definecolor{Gray}{gray}{0.85}
\newcolumntype{g}{>{\columncolor{Gray}}r}
\newcolumntype{h}{>{\columncolor{Gray}}c}
\begin{tabular}{l g g g g r r r r g g g g r r r r}
\toprule
\multicolumn{17}{l}{Chip: <chip>}\\
\multicolumn{17}{l}{GPU Configuration: <config>}\\
\hline
& \multicolumn{4}{h}{Critical Incantations:} & \multicolumn{4}{c}{Critical Incantations:} & \multicolumn{4}{h}{Critical Incantations:} & \multicolumn{4}{c}{Critical Incantations:}\\
& \multicolumn{4}{h}{none} & \multicolumn{4}{c}{GBC} & \multicolumn{4}{h}{MS} & \multicolumn{4}{c}{GBC+MS}\\
& \multicolumn{4}{h}{Extra Incantations:} & \multicolumn{4}{c}{Extra Incantations:} & \multicolumn{4}{h}{Extra Incantations:} & \multicolumn{4}{c}{Extra Incantations:}\\
& none & R & S & R+S & none & R & S & R+S & none & R & S & R+S & none & R & S & R+S\\
\hline
""")
# Scope and mem filters, including table description and filename suffix
sfs = [
(lambda e: L.is_warp(e) and L.is_global(e),
'All threads in different warps, global memory',
's1-global'),
(lambda e: L.is_warp(e) and L.is_shared(e),
'All threads in different warps, shared memory',
's1-shared'),
(lambda e: L.is_cta(e) and L.is_global(e),
'All threads in different CTAs, global memory',
's2-global')
]
# Column filters
fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)]
fs2 = [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)]
fs3 = [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)]
fs4 = [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)]
nc = 16
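  # The 16 column indices below are decoded bitwise: bit 3 selects the
  # mem_stress filter (fs1), bit 2 general_bc (fs2), bit 1 barrier (fs3) and
  # bit 0 rand_threads (fs4), giving one column per incantation combination.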
# Line filters
lfs = [
('uniproc', ['corr', 'corw', 'cowr', 'coww']),
('observation', ['mp', 'isa2', 'wrc']),
('prop light', ['2+2w', 'w+rw+2w', 's']),
('prop heavy', ['sb', 'rwc', 'iriw', 'r']),
('thin air', ['lb'])
]
lfs = collections.OrderedDict(lfs)
for sf, cfg, suf in sfs:
s = prefix
s = s.replace('<config>', cfg, 1)
s = s.replace('<chip>', chip, 1)
l1 = list(filter(sf, les))
assert(lty(l1, L))
for sec, tests in lfs.items():
tests.sort()
# Section header
s += r'{\bf ' + sec + '}' + (' &' * nc) + r'\\' + '\n'
for t in tests:
# Get all tests that match a simple test name (like rwc)
l2 = list(filter(partial(L.simple_match, s=t), l1))
assert(lty(l2, L))
if (len(l2) == 0):
continue
s += t
for i in range(0, nc):
i1 = (i & 0b1000) >> 3
i2 = (i & 0b0100) >> 2
i3 = (i & 0b0010) >> 1
i4 = (i & 0b0001)
f1 = fs1[i1]
f2 = fs2[i2]
f3 = fs3[i3]
f4 = fs4[i4]
f = lambda e: f1(e) and f2(e) and f3(e) and f4(e)
entry = '-'
item = list(filter(f, l2))
if item:
item = itemify(item)
assert(type(item) == L)
entry = item.pos
# ppi_incantations: mem_stress, general_bc, barrier, rand_threads
s += ' & ' + str(entry)
s += '\\\\\n'
s += '\\hline\n'
s += '\\end{tabular}\n'
# Write table to file
f_out = out_base + '-' + suf + '.tex'
w_str(f_out, s)
# ------------------------------------------------------------------------------
### Produce flat incantation tables
def incantations_flat(args):
log = args.input
assert(type(log) == str)
chip = os.path.basename(log)
assert(type(chip) == str)
chip_old = chip
while True:
chip = os.path.splitext(chip)[0]
if chip == chip_old:
break
chip_old = chip
assert(type(chip) == str)
log = ma.get_logs(log, lh=ma.LogInc)
assert(lty(log, ma.LogInc))
assert(len(log) == 1)
log = log[0]
# Prefix of output filename, default is the command name
out_base = args.out
assert(out_base)
les = log.get_all()
assert(lty(les, L))
short_names = log.get_names()
assert(lty(short_names, str))
short_names.sort()
# Table header
prefix = textwrap.dedent(r"""
\definecolor{Gray}{gray}{0.85}
\newcolumntype{g}{>{\columncolor{Gray}}r}
\newcolumntype{h}{>{\columncolor{Gray}}c}
\begin{tabular}{l g g g g r r r r g g g g r r r r}
\toprule
\multicolumn{17}{l}{Chip: <chip>}\\
\multicolumn{17}{l}{GPU Configuration: <config>}\\
\hline
& \multicolumn{4}{h}{Critical Incantations:} & \multicolumn{4}{c}{Critical Incantations:} & \multicolumn{4}{h}{Critical Incantations:} & \multicolumn{4}{c}{Critical Incantations:}\\
& \multicolumn{4}{h}{none} & \multicolumn{4}{c}{GBC} & \multicolumn{4}{h}{MS} & \multicolumn{4}{c}{GBC+MS}\\
& \multicolumn{4}{h}{Extra Incantations:} & \multicolumn{4}{c}{Extra Incantations:} & \multicolumn{4}{h}{Extra Incantations:} & \multicolumn{4}{c}{Extra Incantations:}\\
& none & R & S & R+S & none & R & S & R+S & none & R & S & R+S & none & R & S & R+S\\
\hline
""")
# Scope and mem filters, including table description and filename suffix
sfs = [
(lambda e: L.is_warp(e) and L.is_global(e),
'All threads in different warps, global memory',
's1-global'),
(lambda e: L.is_warp(e) and L.is_shared(e),
'All threads in different warps, shared memory',
's1-shared'),
(lambda e: L.is_cta(e) and L.is_global(e),
'All threads in different CTAs, global memory',
's2-global')
]
# Column filter building blocks (need to be combined to yield a single column
# filter)
fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)]
fs2 = [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)]
fs3 = [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)]
fs4 = [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)]
nc = 16
# Scope and mem filters, table description, filename suffix
for sf, cfg, suf in sfs:
s = prefix
s = s.replace('<config>', cfg, 1)
s = s.replace('<chip>', chip, 1)
l1 = list(filter(sf, les))
assert(lty(l1, L))
for t in short_names:
l2 = list(filter(partial(L.simple_match, s=t), l1))
assert(lty(l2, L))
if (len(l2) == 0):
continue
# Name of test
s += t
for i in range(0, nc):
i1 = (i & 0b1000) >> 3
i2 = (i & 0b0100) >> 2
i3 = (i & 0b0010) >> 1
i4 = (i & 0b0001)
f1 = fs1[i1]
f2 = fs2[i2]
f3 = fs3[i3]
f4 = fs4[i4]
f = lambda e: f1(e) and f2(e) and f3(e) and f4(e)
entry = '-'
item = list(filter(f, l2))
if item:
item = itemify(item)
assert(type(item) == L)
entry = item.pos
# ppi_incantations: mem_stress, general_bc, barrier, rand_threads
s += ' & ' + str(entry)
s += '\\\\\n'
s += '\\end{tabular}\n'
# Write table to file
f_out = out_base + '-' + suf + '.tex'
w_str(f_out, s)
# ------------------------------------------------------------------------------
### Produce flat incantation tables
def incantations_html_flat(args):
log = args.input
assert(type(log) == str)
assert(hasattr(args, 'diro'))
chip = os.path.basename(log)
assert(type(chip) == str)
chip_old = chip
while True:
chip = os.path.splitext(chip)[0]
if chip == chip_old:
break
chip_old = chip
assert(type(chip) == str)
log = ma.get_logs(log, lh=ma.LogInc)
assert(lty(log, ma.LogInc))
assert(len(log) == 1)
log = log[0]
# Prefix of output filename, default is the command name
out_base = args.out
assert(out_base)
les = log.get_all()
assert(lty(les, L))
short_names = log.get_names()
assert(lty(short_names, str))
short_names.sort()
# Table header
# ' ': non-breaking space
# '✓': checkmark
prefix = textwrap.dedent(r"""
<!DOCTYPE html>
<html style="background:white;">
<head>
<meta charset="UTF-8">
<title>Evaluating incantations</title>
<link rel="stylesheet" href="common.css" type="text/css" media="screen"/>
<style>
ul {
padding-top: 10px;
}
li {
padding-top: 5px;
}
th, td {
text-align: right;
padding: 5px;
padding-right: 15px;
padding-left: 15px;
}
td:nth-child(1) {
text-align: left;
}
tr:nth-child(1), tr:nth-child(5) {
border-bottom: 2px solid black;
}
table {
border-top: none;
}
</style>
</head>
<body>
<div class="outer" style="width: 100%;">
<div class="inner">
<h1>Evaluating incantations</h1>
<br>
<center>
To view the logfile for a test, click on the corresponding number. The logfile
also contains the litmus test code. When a dash appears instead of a result,
it is either because optcheck failed or because there were insufficient
resources on the chip to run the test.
</center>
<br>
<center>
<table style="border:none">
<tr style="border:none">
<td style="text-align:left">Chip:</td>
<td style="text-align:left"> <chip> </td>
</tr>
<tr style="border:none">
<td style="text-align:left">Config:</td>
<td style="text-align:left"> <config> </td>
</tr>
</table>
</center>
<br>
<table>
<tr>
<td> </td>
<td>1</td>
<td>2</td>
<td>3</td>
<td>4</td>
<td>5</td>
<td>6</td>
<td>7</td>
<td>8</td>
<td>9</td>
<td>10</td>
<td>11</td>
<td>12</td>
<td>13</td>
<td>14</td>
<td>15</td>
<td>16</td>
</tr>
<tr>
<td>memory stress</td>
<td> </td><td> </td><td> </td><td> </td>
<td> </td><td> </td><td> </td><td> </td>
<td>✓</td><td>✓</td><td>✓</td><td>✓</td>
<td>✓</td><td>✓</td><td>✓</td><td>✓</td>
</tr>
<tr>
<td>general bank conflicts</td>
<td> </td><td> </td><td> </td><td> </td>
<td>✓</td><td>✓</td><td>✓</td><td>✓</td>
<td> </td><td> </td><td> </td><td> </td>
<td>✓</td><td>✓</td><td>✓</td><td>✓</td>
</tr>
<tr>
<td>thread synchronisation</td>
<td> </td><td> </td><td>✓</td><td>✓</td>
<td> </td><td> </td><td>✓</td><td>✓</td>
<td> </td><td> </td><td>✓</td><td>✓</td>
<td> </td><td> </td><td>✓</td><td>✓</td>
</tr>
<tr>
<td>thread randomisation</td>
<td> </td><td>✓</td><td> </td><td>✓</td>
<td> </td><td>✓</td><td> </td><td>✓</td>
<td> </td><td>✓</td><td> </td><td>✓</td>
<td> </td><td>✓</td><td> </td><td>✓</td>
</tr>
""")
# Scope and mem filters, including table description and filename suffix
sfs = [
(lambda e: L.is_warp(e) and L.is_global(e),
'All threads in different warps, global memory',
's1-global'),
(lambda e: L.is_warp(e) and L.is_shared(e),
'All threads in different warps, shared memory',
's1-shared'),
(lambda e: L.is_cta(e) and L.is_global(e),
'All threads in different CTAs, global memory',
's2-global')
]
# Column filter building blocks (need to be combined to yield a single column
# filter)
fs1 = [lambda e: not L.is_mem_stress(e), lambda e: L.is_mem_stress(e)]
fs2 = [lambda e: not L.is_general_bc(e), lambda e: L.is_general_bc(e)]
fs3 = [lambda e: not L.is_barrier(e), lambda e: L.is_barrier(e)]
fs4 = [lambda e: not L.is_rand_threads(e), lambda e: L.is_rand_threads(e)]
nc = 16
# Scope and mem filters, table description, filename suffix
for sf, cfg, suf in sfs:
s = prefix
s = s.replace('<config>', cfg, 1)
s = s.replace('<chip>', chip, 1)
l1 = list(filter(sf, les))
assert(lty(l1, L))
for t in short_names:
l2 = list(filter(partial(L.simple_match, s=t), l1))
assert(lty(l2, L))
if (len(l2) == 0):
continue
# Name of test
s += '<tr>\n'
s += '<td>' + t + '</td>'
for i in range(0, nc):
i1 = (i & 0b1000) >> 3
i2 = (i & 0b0100) >> 2
i3 = (i & 0b0010) >> 1
i4 = (i & 0b0001)
f1 = fs1[i1]
f2 = fs2[i2]
f3 = fs3[i3]
f4 = fs4[i4]
f = lambda e: f1(e) and f2(e) and f3(e) and f4(e)
entry = '-'
item = list(filter(f, l2))
if item:
item = itemify(item)
assert(type(item) == L)
entry = item.pos
s += item.pp_cell_link_dir(2, args.diro)
# Produce file containing raw litmus log
item.store_log_dir(args.diro)
else:
# ppi_incantations: mem_stress, general_bc, barrier, rand_threads
s += '<td>' + str(entry) + '</td>'
s += '</tr>\n'
s += """
</table>
</div>
</div>
</body>
</html>
"""
# Write table to file
f_out = out_base + '-' + suf + '.html'
w_str(f_out, s)
# ------------------------------------------------------------------------------
#######################
# Command line parser #
#######################
# Open files and parse or unpickle
class InputAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
def get_cmdline_parser(cmds):
# Parent of all
p = argparse.ArgumentParser()
# Dummy parent for common options
parent = argparse.ArgumentParser(add_help=False)
parent.add_argument('-p', '--pos', action='store_true')
# Subparsers
sp = p.add_subparsers(help='use <subcommand> -h for further help', title=
'subcommands')
# Flat
p1 = sp.add_parser(cmds[0], parents=[parent])
p1.add_argument('input', nargs='+', action=InputAction)
f = cmds[0] + '.html'
p1.add_argument('-o', '--out', action='store', default=f)
p1.add_argument('-d', '--diro', action='store', default='entries')
p1.set_defaults(func=partial(mux, flat))
# Classified
p2 = sp.add_parser(cmds[1], parents=[parent])
p2.add_argument('input', nargs='+', action=InputAction)
f = cmds[1] + '.html'
p2.add_argument('-o', '--out', action='store', default=f)
p2.add_argument('-d', '--diro', action='store', default='entries')
p2.set_defaults(func=partial(mux, classified))
# Sections
p3 = sp.add_parser(cmds[2], parents=[parent])
p3.add_argument('input', nargs='+', action=InputAction)
f = cmds[2] + '.html'
p3.add_argument('-o', '--out', action='store', default=f)
p3.add_argument('-d', '--diro', action='store', default='entries')
p3.set_defaults(func=partial(mux, sections))
# Two-level
p4 = sp.add_parser(cmds[3], parents=[parent])
p4.add_argument('input', nargs='+', action=InputAction)
f = cmds[3] + '.html'
p4.add_argument('-o', '--out', action='store', default=f)
p4.add_argument('-d', '--diro', action='store', default='entries')
p4.set_defaults(func=partial(mux, two_level))
# Latex
p5 = sp.add_parser(cmds[4], parents=[parent])
p5.add_argument('input', action=InputAction)
f = cmds[4] + '.tex'
p5.add_argument('-o', '--out', action='store', default=f)
p5.set_defaults(func=partial(mux, latex))
# Latex 2
p6 = sp.add_parser(cmds[5], parents=[parent])
p6.add_argument('input', action=InputAction)
f = cmds[5] + '.tex'
p6.add_argument('-o', '--out', action='store', default=f)
p6.set_defaults(func=partial(mux, latex2))
# Latex 3
p7 = sp.add_parser(cmds[6], parents=[parent])
p7.add_argument('input', action=InputAction)
f = cmds[6] + '.tex'
p7.add_argument('-o', '--out', action='store', default=f)
p7.set_defaults(func=partial(mux, latex3))
# Incantations
p8 = sp.add_parser(cmds[7], description='Produce tables comparing the\
effectiveness of the incantations')
p8.add_argument('input', action=InputAction, help='log (text or pickle)')
f = cmds[7]
p8.add_argument('-o', '--out', action='store', default=f,
help='output file basename (instead of default name)')
p8.set_defaults(func=partial(mux, incantations))
# Incantations flat
p9 = sp.add_parser(cmds[8], description='Produce flat tables comparing the\
effectiveness of the incantations')
p9.add_argument('input', action=InputAction, help='log (text or pickle)')
f = cmds[8]
p9.add_argument('-o', '--out', action='store', default=f,
help='output file basename (instead of default name)')
p9.set_defaults(func=partial(mux, incantations_flat))
# Incantations html
p10 = sp.add_parser(cmds[9], description='Produce flat html tables comparing\
the effectiveness of the incantations')
p10.add_argument('input', action=InputAction, help='log (text or pickle)')
f = cmds[9]
p10.add_argument('-o', '--out', action='store', default=f,
help='output file basename (instead of default name)')
p10.add_argument('-d', '--diro', action='store', default='entries-inc')
p10.set_defaults(func=partial(mux, incantations_html_flat))
return p
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.argv += ['-h']
cmd = sys.argv[1]
ma.setup_err_handling('log2tbl.py')
cmds = ['flat', 'classified', 'sections', 'two-level', 'latex', 'latex2',
'latex3', 'incantations', 'incantations-flat', 'incantations-html']
p = get_cmdline_parser(cmds)
if cmd not in cmds:
p.print_help()
sys.exit(2)
print('cmd: ' + cmd)
pr = p.parse_args()
pr.func(pr)
| 2.5 | 2 |
CodeWars/arr_diff.py | Jaidev810/Competitive-Questions | 1 | 12794639 | <gh_stars>1-10
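# CodeWars "array difference" kata: return the elements of arr1 that do not
# appear anywhere in arr2.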
def arr_diff(arr1, arr2):
if len(arr2) == 0:
return arr1
result = list()
for i in range(0, len(arr1)):
flag = False
for j in range(0, len(arr2)):
if arr1[i] == arr2[j]:
flag = True
if not flag:
result.append(arr1[i])
return result
arr1 = [1, 2, 2, 2, 3 ]
arr2 = [2]
print(arr_diff(arr1, arr2)) | 3.5625 | 4 |
Taller_estruturas_de_control_secuenciales/Python_Daniel/Ejercicio_6.py | Danielnaki/Algoritmos_y_programacion | 0 | 12794640 | """
Inputs
Numero_hombres-->int-->n_hombres
Numero_mujeres-->int-->n_mujeres
Outputs
porcentaje_hombres-->float-->p_hombres
porcentaje_mujeres-->float-->p_mujeres
"""
n_hombres = int(input("Enter the number of men: "))
n_mujeres = int(input("Enter the number of women: "))
total = n_hombres + n_mujeres
p_h = float(n_hombres / total)
p_m = float(n_mujeres / total)
p_hombres = float(p_h * 100)
p_mujeres = float(p_m * 100)
print("The percentage of men is: " + str(p_hombres) + " %")
print("The percentage of women is: " + str(p_mujeres) + " %")
flask_api/app/resources/__init__.py | brennanhfredericks/network-monitor-server | 0 | 12794641 | <reponame>brennanhfredericks/network-monitor-server<gh_stars>0
from .packet_endpoints import (
Packet_EP,
Packet_Table_EP,
Packet_Table_Counts_EP,
Packet_Table_Views_EP,
)
| 1.070313 | 1 |
baselines/a2c/a2c.py | wgrathwohl/BackpropThroughTheVoidRL | 44 | 12794642 | <gh_stars>10-100
import os.path as osp
import gym
import time
import joblib
import logging
import numpy as np
import tensorflow as tf
from baselines import logger
from baselines.common import set_global_seeds, explained_variance
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.atari_wrappers import wrap_deepmind
from baselines.a2c.utils import discount_with_dones, jacobian
from baselines.a2c.utils import Scheduler, make_path, find_trainable_variables
from baselines.a2c.policies import CnnPolicy
from baselines.a2c.utils import cat_entropy, mse
import random
def gs(x):
return x.get_shape().as_list()
class Model(object):
def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack, num_procs,
ent_coef=0.01, vf_coef=0.5, max_grad_norm=0.5, lr=7e-4,
alpha=0.99, epsilon=1e-5, total_timesteps=int(80e6), lrschedule='linear', logdir=None):
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=num_procs,
inter_op_parallelism_threads=num_procs)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
nact = ac_space.n
nbatch = nenvs*nsteps
ADV = tf.placeholder(tf.float32, [None])
R = tf.placeholder(tf.float32, [None])
LR = tf.placeholder(tf.float32, [])
step_model = policy(sess, ob_space, ac_space, nenvs, 1, nstack, reuse=False)
train_model = policy(sess, ob_space, ac_space, nenvs, nsteps, nstack, reuse=True)
neglogpac = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=train_model.pi, labels=train_model.a0)
entropy = tf.reduce_sum(cat_entropy(train_model.pi))
params = find_trainable_variables("model")
tf.summary.histogram("vf", train_model.vf)
tf.summary.histogram("R", R)
if train_model.relaxed:
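            # Relaxed branch (control-variate gradient estimator): the policy gradient
            # below uses the learned baseline vf_t as a control variate, and the
            # value/control-variate parameters are trained to minimise the squared norm
            # of the resulting policy gradients (cv_grad_splits), i.e. their variance.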
pg_loss = tf.constant(0.0)
oh_A = tf.one_hot(train_model.a0, ac_space.n)
params = find_trainable_variables("model")
policy_params = [v for v in params if "pi" in v.name]
vf_params = [v for v in params if "vf" in v.name]
entropy_grads = tf.gradients(entropy, policy_params)
ddiff_loss = tf.reduce_sum(train_model.vf - train_model.vf_t)
ddiff_grads = tf.gradients(ddiff_loss, policy_params)
sm = tf.nn.softmax(train_model.pi)
dlogp_dpi = oh_A * (1. - sm) + (1. - oh_A) * (-sm)
pi_grads = -((tf.expand_dims(R, 1) - train_model.vf_t) * dlogp_dpi)
pg_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads)
pg_grads = [pg - dg for pg, dg in zip(pg_grads, ddiff_grads)]
pi_param_grads = tf.gradients(train_model.pi, policy_params, grad_ys=pi_grads)
cv_grads = tf.concat([tf.reshape(p, [-1]) for p in pg_grads], 0)
cv_grad_splits = tf.reduce_sum(tf.square(cv_grads))
vf_loss = cv_grad_splits * vf_coef
cv_grads = tf.gradients(vf_loss, vf_params)
policy_grads = []
for e_grad, p_grad, param in zip(entropy_grads, pg_grads, policy_params):
grad = -e_grad * ent_coef + p_grad
policy_grads.append(grad)
grad_dict = {}
for g, v in list(zip(policy_grads, policy_params))+list(zip(cv_grads, vf_params)):
grad_dict[v] = g
grads = [grad_dict[v] for v in params]
print(grads)
else:
pg_loss = tf.reduce_sum((tf.stop_gradient(R) - tf.stop_gradient(train_model.vf)) * neglogpac)
policy_params = [v for v in params if "pi" in v.name]
pg_grads = tf.gradients(pg_loss, policy_params)
vf_loss = tf.reduce_sum(mse(tf.squeeze(train_model.vf), R))
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
grads = tf.gradients(loss, params)
grads = list(zip(grads, params))
ema = tf.train.ExponentialMovingAverage(.99)
all_policy_grads = tf.concat([tf.reshape(g, [-1]) for g in pg_grads], 0)
all_policy_grads_sq = tf.square(all_policy_grads)
apply_mean_op = ema.apply([all_policy_grads, all_policy_grads_sq])
em_mean = ema.average(all_policy_grads)
em_mean_sq = ema.average(all_policy_grads_sq)
em_var = em_mean_sq - tf.square(em_mean)
em_log_var = tf.log(em_var + 1e-20)
mlgv = tf.reduce_mean(em_log_var)
for g, v in grads:
print(v.name, g)
tf.summary.histogram(v.name, v)
tf.summary.histogram(v.name+"_grad", g)
self.sum_op = tf.summary.merge_all()
self.writer = tf.summary.FileWriter(logdir)
trainer = tf.train.AdamOptimizer(learning_rate=LR, beta2=.99999)
with tf.control_dependencies([apply_mean_op]):
_train = trainer.apply_gradients(grads)
lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
self._step = 0
def train(obs, states, rewards, masks, u1, u2, values, summary=False):
advs = rewards - values
for step in range(len(obs)):
cur_lr = lr.value()
td_map = {
train_model.X:obs, train_model.U1:u1, train_model.U2:u2,
ADV:advs, R:rewards, LR:cur_lr
}
if states != []:
td_map[train_model.S] = states
td_map[train_model.M] = masks
if summary:
sum_str, policy_loss, value_loss, policy_entropy, lv, _ = sess.run(
[self.sum_op, pg_loss, vf_loss, entropy, mlgv, _train],
td_map
)
self.writer.add_summary(sum_str, self._step)
else:
policy_loss, value_loss, policy_entropy, lv, _ = sess.run(
[pg_loss, vf_loss, entropy, mlgv, _train],
td_map
)
self._step += 1
return policy_loss, value_loss, policy_entropy, lv
def save(save_path):
ps = sess.run(params)
make_path(save_path)
joblib.dump(ps, save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
ps = sess.run(restores)
self.train = train
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.value = step_model.value
self.initial_state = step_model.initial_state
self.save = save
self.load = load
tf.global_variables_initializer().run(session=sess)
class Runner(object):
def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99):
self.env = env
self.model = model
self.n_in, = env.observation_space.shape
nenv = env.num_envs
self.nenv = nenv
self.batch_ob_shape = (nenv*nsteps, self.n_in*nstack)
self.obs = np.zeros((nenv, self.n_in*nstack))
obs = env.reset()
self.update_obs(obs)
self.gamma = gamma
self.nsteps = nsteps
self.states = model.initial_state
self.dones = [False for _ in range(nenv)]
def update_obs(self, obs):
# Do frame-stacking here instead of the FrameStack wrapper to reduce
# IPC overhead
self.obs = np.roll(self.obs, shift=-self.n_in, axis=1)
self.obs[:, -self.n_in:] = obs[:, :self.n_in]
def run(self):
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [],[],[],[],[]
mb_states = self.states
for n in range(self.nsteps):
actions, values, states = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(self.dones)
obs, rewards, dones, _ = self.env.step(actions)
self.states = states
self.dones = dones
for n, done in enumerate(dones):
if done:
self.obs[n] = self.obs[n]*0
self.update_obs(obs)
mb_rewards.append(rewards)
mb_dones.append(self.dones)
#batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs).swapaxes(1, 0).reshape(self.batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
last_values = self.model.value(self.obs, self.states, self.dones).tolist()
#discount/bootstrap off value fn
for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards+[value], dones+[0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
mb_rewards[n] = rewards
mb_rewards = mb_rewards.flatten()
mb_actions = mb_actions.flatten()
mb_values = mb_values.flatten()
mb_masks = mb_masks.flatten()
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values
class RolloutRunner(Runner):
def __init__(self, env, model, nsteps=5, nstack=4, gamma=0.99):
super().__init__(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma)
self._num_rollouts = 0
self._num_steps = 0
self.rewards = []
def run(self):
# reset env
self.obs = np.zeros(self.obs.shape)
obs = self.env.reset()
self.update_obs(obs)
# run env until all threads finish
episode_over = [-1 for i in range(self.nenv)]
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_u1, mb_u2 = [], [], [], [], [], [], []
mb_states = self.states
step = 0
while not all([e >= 0 for e in episode_over]):
actions, u1, u2, values, states = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(self.dones)
mb_u1.append(u1)
mb_u2.append(u2)
obs, rewards, dones, _ = self.env.step(actions)
self.states = states
self.dones = dones
for n, done in enumerate(dones):
if done:
self.obs[n] = self.obs[n] * 0
if episode_over[n] == -1:
episode_over[n] = step
self.update_obs(obs)
mb_rewards.append(rewards)
step += 1
mb_dones.append(self.dones)
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs).swapaxes(1, 0)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
mb_u1 = np.asarray(mb_u1, dtype=np.float32).swapaxes(1, 0)
mb_u2 = np.asarray(mb_u2, dtype=np.float32).swapaxes(1, 0)
# discount/bootstrap off value fn
_obs, _rewards, _actions, _values, _masks, _u1, _u2 = [], [], [], [], [], [], []
for n, (obs, rewards, actions, values, dones, masks, u1, u2) in enumerate(zip(mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_masks, mb_u1, mb_u2)):
# pull out data
rewards = rewards.tolist()
self.rewards.append(sum(rewards))
actions = actions.tolist()
values = values.tolist()
dones = dones.tolist()
masks = masks.tolist()
u1, u2 = u1.tolist(), u2.tolist()
# get length of this episode
episode_length = episode_over[n]+1
# crop out only played experience
obs = obs[:episode_length]
rewards = rewards[:episode_length]
actions = actions[:episode_length]
values = values[:episode_length]
dones = dones[:episode_length]
u1 = u1[:episode_length]
u2 = u2[:episode_length]
assert dones[-1] == True
masks = masks[:episode_length]
# discount the rewards
rewards = discount_with_dones(rewards, dones, self.gamma)
_obs.extend(obs)
_rewards.extend(rewards)
_actions.extend(actions)
_values.extend(values)
_masks.extend(masks)
_u1.extend(u1)
_u2.extend(u2)
self.rewards = self.rewards[-100:]
# make numpy
mb_obs = np.asarray(_obs)
mb_rewards = np.asarray(_rewards)
mb_actions = np.asarray(_actions)
mb_values = np.asarray(_values)
mb_masks = np.asarray(_masks)
mb_u1 = np.asarray(_u1)
mb_u2 = np.asarray(_u2)
self._num_rollouts += 1
self._num_steps += len(rewards) * 4 # FRAME STACK
ave_r = np.mean(self.rewards)
#print("Episode {}, Ave R {}".format(self._num_rollouts, ave_r))
logger.record_tabular("ave_r", ave_r)
logger.record_tabular("last_r", self.rewards[-1])
logger.record_tabular("num_rollouts", self._num_rollouts)
logger.record_tabular("l", len(rewards) * 4)
#logger.dump_tabular()
END = False
#print(self._num_steps, len(rewards))
#if self._num_steps > 5000000:
if np.mean(self.rewards) >= 195.:#195.:
#if self._num_rollouts > 1000:
logger.record_tabular("finished_in", self._num_rollouts)
logger.record_tabular("total_steps", self._num_steps)
logger.dump_tabular()
END = True
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, mb_u1, mb_u2, END
def learn(policy, env, seed, nsteps=5, nstack=1, total_timesteps=int(80e6),
ent_coef=0.01, max_grad_norm=0.5,
lrschedule='linear', epsilon=1e-5, alpha=0.99, gamma=0.99,
log_interval=100, logdir=None, bootstrap=False, args=None):
tf.reset_default_graph()
set_global_seeds(seed)
lr = args.lr
vf_coef = args.vf_coef
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
num_procs = len(env.remotes) # HACK
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack, num_procs=num_procs, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, lr=lr, alpha=alpha, epsilon=epsilon, total_timesteps=total_timesteps, lrschedule=lrschedule, logdir=logdir)
runner = RolloutRunner(env, model, nsteps=nsteps, nstack=nstack, gamma=gamma)
nbatch = nenvs*nsteps
tstart = time.time()
for update in range(1, total_timesteps//nbatch+1):
if True: #update % log_interval == 0 or update == 1:
obs, states, rewards, masks, actions, values, u1, u2, END = runner.run()
if END:
break
policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values, summary=False)
nseconds = time.time() - tstart
fps = int((update * nbatch) / nseconds)
ev = explained_variance(values, rewards)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(ev))
logger.record_tabular("log_variance", lv)
logger.dump_tabular()
else:
obs, states, rewards, masks, actions, values, u1, u2, END = runner.run()
if END:
break
policy_loss, value_loss, policy_entropy, lv = model.train(obs, states, rewards, masks, u1, u2, values)
nseconds = time.time() - tstart
fps = int((update * nbatch) / nseconds)
env.close()
if __name__ == '__main__':
main()
| 1.710938 | 2 |
reversestring/test_.py | technolingo/AlgoStructuresPy | 0 | 12794643 | <reponame>technolingo/AlgoStructuresPy<filename>reversestring/test_.py
from .index import reverse_string
def test_reverse_string():
assert reverse_string('heLLo woRld') == 'dlRow oLLeh'
| 2.375 | 2 |
setup.py | msimms/ZwoReader | 0 | 12794644 | <reponame>msimms/ZwoReader
from setuptools import setup, find_packages
__version__ = '1.0.0'
setup(
name='ZwoReader',
version=__version__,
description='.',
url='https://github.com/msimms/ZwoReader',
packages=[],
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
install_requires=[],
python_requires='>=2.6'
)
| 1.179688 | 1 |
arxivdigest/core/scraper/scrape_metadata.py | iai-group/arXivDigest | 13 | 12794645 | # -*- coding: utf-8 -*-
"""This module contains the the methods related to scraping articles from arXiv.
To only scrape the metadata from the articles in the rss-stream use the
harvestMetaDataRss method.
It's also possible to scrape articles between any two dates,
to accomplish this use the get_records_by_date method."""
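# Illustrative usage (dates are YYYY-MM-DD strings, as expected by the OAI API):
#   articles = get_records_by_date('2020-01-01', '2020-01-02')
#   yesterdays_articles = harvest_metadata_rss()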
import datetime
import requests
__author__ = '<NAME> and <NAME>'
__copyright__ = 'Copyright 2020, The arXivDigest project'
import urllib
import xml.etree.ElementTree as ET
from time import sleep
from urllib.request import urlopen
import feedparser
OAI = '{http://www.openarchives.org/OAI/2.0/}'
ARXIV = '{http://arxiv.org/OAI/arXiv/}'
def prepare_record(record):
"""Formats the data to a dictionary structure that is easy to work with."""
if record.find(OAI + 'header').get('status', None) == 'deleted':
return {}
info = record.find(OAI + 'metadata').find(ARXIV + 'arXiv')
result = {'title': info.find(ARXIV + 'title').text.replace('\n', ' '),
'description': info.find(ARXIV + 'abstract').text.replace('\n', ' '),
'id': info.find(ARXIV + 'id').text,
'categories': info.find(ARXIV + 'categories').text.split(),
}
doi = info.find(ARXIV + 'doi')
comments = info.find(ARXIV + 'comments')
licenses = info.find(ARXIV + 'license')
journal = info.find(ARXIV + 'journal-ref')
# check that element is not None before trying to access the text
result['doi'] = doi.text if doi is not None else None
result['comments'] = comments.text if comments is not None else None
result['license'] = licenses.text if licenses is not None else None
result['journal'] = journal.text if journal is not None else None
authors = []
for author in info.find(ARXIV + 'authors'):
a = {}
firstname = author.find(ARXIV + 'forenames')
a['firstname'] = '' if firstname is None else firstname.text
a['lastname'] = author.find(ARXIV + 'keyname').text
a['affiliations'] = []
for affiliation in author.findall(ARXIV + 'affiliation'):
a['affiliations'].append(affiliation.text)
authors.append(a)
result['authors'] = authors
datestamp = record.find(OAI + 'header').find(OAI + 'datestamp')
result['datestamp'] = datestamp.text
return result
def get_records_by_date(start_date, end_date=None):
"""Scrapes the OAI-api for articles submitted from the n previous days."""
base_url = 'http://export.arxiv.org/oai2'
params = {'verb': 'ListRecords',
'metadataPrefix': 'arXiv',
'from': start_date}
if end_date:
params['until'] = end_date
result = {}
while True:
r = requests.get(base_url, params=params)
print('Fetching', r.url)
if r.status_code == 503:
time_out = int(r.headers.get('retry-after', 5))
msg = '503: Have to wait before further requests. Retrying in {} seconds.'
print(msg.format(time_out))
sleep(time_out)
continue
# generate elementtree from responsedata
root = ET.fromstring(r.text)
# parse the response and add it to result
for record in root.find(OAI + 'ListRecords').findall(OAI + 'record'):
element = prepare_record(record)
if element:
result[element['id']] = element
# If the xmlfile contains more than 1000 articles arXiv will add a
# resumptiontoken to the response, if we already have all the articles
# there will be no resumptiontoken and we can safely break
token = root.find(OAI + 'ListRecords').find(OAI + 'resumptionToken')
if token is None or token.text is None:
break
# update url to use resumptiontoken in the next request
params = {'verb': 'ListRecords', 'resumptionToken': token.text}
return result
def get_record(id):
"""Gets metadata for a single record."""
url = 'http://export.arxiv.org/oai2?verb=GetRecord&identifier=oai:arXiv.org:%s&metadataPrefix=arXiv' % id
print('Fetching', url)
response = urlopen(url)
root = ET.fromstring(response.read())
record = root.find(OAI + 'GetRecord').find(OAI + 'record')
return prepare_record(record)
def get_categories():
"""Returns a dict of all the main categories available with info."""
url = 'http://export.arxiv.org/oai2?verb=ListSets'
print('fetching', url)
while True:
try:
response = urlopen(url)
except urllib.error.HTTPError as e:
if e.code == 503:
timeOut = int(e.headers.get('retry-after', 30))
print(
'503: Have to wait before further requests. Retrying in %d seconds.' % timeOut)
sleep(timeOut)
continue
else:
raise
break
root = ET.fromstring(response.read())
categories = root.find(OAI + 'ListSets').findall(OAI + 'set')
result = {}
for category in categories:
categoryID = category.find(OAI + 'setSpec').text
categoryName = category.find(OAI + 'setName').text
categoryInfo = {'name': categoryName}
categoryID = categoryID.split(':')
if len(categoryID) > 1:
categoryInfo['masterCategory'] = categoryID[0].capitalize()
result[categoryID[-1]] = categoryInfo
return result
def get_id_from_rss():
"""Returns a set of all the article-ids found in the rss stream, which will
be approximately the same as the articles uploaded the previous day."""
rssUrl = 'http://export.arxiv.org/rss/'
result = set()
for category in get_categories():
print('Fetching IDs from the %s rss-feed' % category)
feed = feedparser.parse(rssUrl + category)
for entry in feed['entries']:
id = entry['link'].split('abs/')[1]
result.add(id)
return result
def harvest_metadata_rss():
"""This function will return the metadata from all the articles present
in any of the arXiv rss-streams."""
rss_ids = get_id_from_rss()
yesterday = datetime.datetime.utcnow().date() - datetime.timedelta(days=1)
articles = get_records_by_date(yesterday)
result = {}
for item in rss_ids:
if item not in articles: # download missing articles, if any
element = get_record(item)
result[element['id']] = element
else:
result[item] = articles[item]
return result
| 2.96875 | 3 |
enumeracion.py | DannielF/Python | 1 | 12794646 | objetive = int(input('Choose a whole number: '))
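# Exhaustive enumeration (brute force): count upwards until answer**2 reaches the
# target, then report whether the target has an exact integer square root.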
answer = 0
while answer**2 < objetive:
answer += 1
if answer**2 == objetive:
print(f'Square root of {objetive} is {answer}')
else:
    print(f'{objetive} does not have an exact square root')
cases/Diginetica/diginetica_baseline.py | sparsh-ai/reco-session | 0 | 12794647 | # This is sample baseline for CIKM Personalization Cup 2016
# by <NAME> & <NAME>
import numpy as np
import pandas as pd
import datetime
start_time = datetime.datetime.now()
print("Running baseline. Now it's", start_time.isoformat())
# Loading queries (assuming data placed in <dataset-train/>
queries = pd.read_csv('dataset-train/train-queries.csv', sep=';')[['queryId', 'items', 'is.test']]
print('Total queries', len(queries))
# Leaving only test queries (the ones which items we have to sort)
queries = queries[queries['is.test'] == True][['queryId', 'items']]
print('Test queries', len(queries))
queries.reset_index(inplace=True)
queries.drop(['index'], axis=1, inplace=True)
# Loading item views; taking itemId column
item_views = pd.read_csv('dataset-train/train-item-views.csv', sep=';')[['itemId']]
print('Item views', len(item_views))
# Loading clicks; taking itemId column
clicks = pd.read_csv('dataset-train/train-clicks.csv', sep=';')[['itemId']]
print('Clicks', len(clicks))
# Loading purchases; taking itemId column
purchases = pd.read_csv('dataset-train/train-purchases.csv', sep=';')[['itemId']]
print('Purchases', len(purchases))
# Calculating popularity as [Amount of views] * 1 + Amount of clicks * 2 + [Amount of purchases] * 3
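# Illustrative example: an item with 2 views, 1 click and 1 purchase scores
# 2*1 + 1*2 + 1*3 = 7.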
print('Scoring popularity for each item ...')
prod_pop = {}
for cost, container in enumerate([item_views, clicks, purchases], start=1):  # weights: views=1, clicks=2, purchases=3
for prod in container.values:
product = str(prod[0])
if product not in prod_pop:
prod_pop[product] = cost
else:
prod_pop[product] += cost
print('Popularity scored for', len(prod_pop), 'products')
# For each query:
# parse items (comma-separated values in last column)
# sort them by score;
# write them to the submission file.
# This is longest part; it usually takes around 5 minutes.
print('Sorting items per query by popularity...')
answers = []
step = int(len(queries) / 20)
with open('submission.txt', 'w+') as submission:
for i, q in enumerate(queries.values):
# Fancy progressbar
if i % step == 0:
print(5 * i / step, '%...')
# Splitting last column which contains comma-separated items
items = q[-1].split(',')
# Getting scores for each item. Also, inverting scores here, so we can use argsort
items_scores = list(map(lambda x: -prod_pop.get(x, 0), items))
# Sorting items using items_scores order permutation
sorted_items = np.array(items)[np.array(items_scores).argsort()]
# Squashing items together
s = ','.join(sorted_items)
# and writing them to submission
submission.write(str(q[0]) + " " + s + "\n")
end_time = datetime.datetime.now()
print("Done. Now it's ", end_time.isoformat())
print("Calculated baseline in ", (end_time - start_time).seconds, " seconds")
| 2.484375 | 2 |
dgmvae/losses/discrete_kl.py | rnagumo/dgm_vae | 5 | 12794648 |
"""Discrete KL divergence
KL loss for Categorical and RelaxedCategorical
ref) KL divergence in PyTorch
https://pytorch.org/docs/stable/_modules/torch/distributions/kl.html#kl_divergence
"""
from typing import Optional, List, Dict, Tuple
import sympy
import torch
from torch._six import inf
from pixyz.distributions import Distribution
from pixyz.losses.losses import Loss
from pixyz.utils import get_dict_values
def _kl_categorical_categorical(p: torch.distributions.Distribution,
q: torch.distributions.Distribution
) -> torch.Tensor:
"""KL divergence between categorical and categorical, KL(p||q).
Args:
p (torch.distributions.Distribution): PyTorch Distribution class.
q (torch.distributions.Distribution): PyTorch Distribution class.
Returns:
t (torch.Tensor): Calculated KL divergence.
"""
t = p.probs * (p.logits - q.logits)
t[(q.probs == 0).expand_as(t)] = inf
t[(p.probs == 0).expand_as(t)] = 0
return t.sum(-1)
class CategoricalKullbackLeibler(Loss):
"""Kullback Leibler divergence for categorical distributions.
Args:
p (pixyz.distributions.distributions.Distribution): Distribution class.
q (pixyz.distributions.distributions.Distribution): Distribution class.
input_var (list, optional): Input variable name.
dim (int, optional): Aggregate dimension.
"""
def __init__(self,
p: Distribution,
q: Distribution,
input_var: Optional[List[str]] = None,
dim: Optional[int] = None):
self.dim = dim
super().__init__(p, q, input_var)
@property
def _symbol(self):
return sympy.Symbol("D_{{KL}} \\left[{}||{} \\right]".format(
self.p.prob_text, self.q.prob_text))
def _get_eval(self,
x_dict: Dict[str, torch.Tensor],
**kwargs) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
if (not hasattr(self.p, 'distribution_torch_class')) \
or (not hasattr(self.q, 'distribution_torch_class')):
raise ValueError("Divergence between these two distributions "
"cannot be evaluated, got %s and %s."
% (self.p.distribution_name,
self.q.distribution_name))
input_dict = get_dict_values(x_dict, self.p.input_var, True)
self.p.set_dist(input_dict)
input_dict = get_dict_values(x_dict, self.q.input_var, True)
self.q.set_dist(input_dict)
divergence = _kl_categorical_categorical(self.p.dist, self.q.dist)
if self.dim is not None:
divergence = torch.sum(divergence, dim=self.dim)
return divergence, x_dict
dim_list = list(torch.arange(divergence.dim()))
divergence = torch.sum(divergence, dim=dim_list[1:])
return divergence, x_dict
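# Illustrative sketch (hypothetical pixyz Categorical distributions p and q defined
# elsewhere; evaluation goes through the Loss machinery defined above):
#
#   kl = CategoricalKullbackLeibler(p, q, dim=1)
#   divergence, _ = kl._get_eval(x_dict)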
| 3.15625 | 3 |
pe8.py | ChrisCalderon/project-euler | 1 | 12794649 | import sys
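# Project Euler 8: find the greatest product of n adjacent digits in the digit
# string read from pe8.txt (windows containing a '0' are skipped early, since
# their product is zero).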
def yield_next_product(ints, n):
# ints is a string
    while len(ints) >= n:  # '>=' so the final window of exactly n digits is also checked
next_sequence = ints[:n]
zero = next_sequence.find('0')
if zero > -1:
ints = ints[zero + 1:]
else:
yield reduce(lambda a, b: a*int(b), next_sequence, 1)
ints = ints[1:]
def main():
n = int(sys.argv[1])
ints = ''.join(open('pe8.txt').read().split('\n'))
print max(yield_next_product(ints, n))
if __name__ == '__main__':
main()
| 3.734375 | 4 |
services/gevent-server.py | kevinmcguinness/axes-research | 1 | 12794650 | <reponame>kevinmcguinness/axes-research
#!/usr/bin/env python
"""
Django gevent server for axes research
"""
import os, sys
def setup_paths():
import site
old_sys_path = list(sys.path)
base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
py_version = '%d.%d' % (sys.version_info[0], sys.version_info[1])
# Setup virtual environment
venv_path = os.path.join(base_dir,
'venv/lib/python%s/site-packages' % py_version)
print >> sys.stderr, 'virtual env path:', venv_path
site.addsitedir(venv_path)
# Setup python path
sys.path.append(base_dir)
# Reorder sys path
new_sys_path = [p for p in sys.path if p not in old_sys_path]
for item in new_sys_path:
sys.path.remove(item)
sys.path[:0] = new_sys_path
def setup_gevent():
from gevent import monkey
monkey.patch_all()
def setup_app(debug):
if debug:
os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'axesresearch.settings.production'
def start_server(port, host=''):
from gevent.wsgi import WSGIServer
from django.core.handlers.wsgi import WSGIHandler
WSGIServer((host, port), WSGIHandler()).serve_forever()
def parse_args():
import argparse
p = argparse.ArgumentParser(description=__doc__.strip())
p.add_argument('-p', '--port', type=int, default=8088, help='TCP port')
p.add_argument('-d', '--debug', action='store_true')
return p.parse_args()
def main():
setup_paths()
setup_gevent()
args = parse_args()
setup_app(args.debug)
servertype = 'debug' if args.debug else 'production'
print 'Starting', servertype, 'server on port', args.port
start_server(args.port)
if __name__ == '__main__':
main()
| 2.03125 | 2 |