max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
setup.py | pnxenopoulos/soccer-data-gen | 0 | 2400 | from setuptools import setup, find_packages
setup(
name="soccergen",
version="0.1",
packages=find_packages(),
# the gfootball environment is required to generate the soccer data, so ensure
# that it gets installed or upgraded on the target machine
install_requires=["gfootball>=2.8",],
# metadata to display on PyPI
author="<NAME>",
author_email="<EMAIL>",
description="Soccer trajectory and event data generation",
keywords="soccer data-generation football",
url="https://github.com/pnxenopoulos/soccer-data-gen", # project home page, if any
project_urls={
"Issues": "https://github.com/pnxenopoulos/soccer-data-gen/issues",
"Documentation": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
"Github": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
},
classifiers=["License :: OSI Approved :: MIT License"],
)
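# Usage sketch (illustrative): with this setup.py at the project root, the package
# can typically be installed for development with
#   pip install -e .
# and a source distribution built with
#   python setup.py sdist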
| 1.390625 | 1 |
metaspace/engine/sm/engine/tests/test_fdr.py | METASPACE2020/METASPACE | 0 | 2401 | from itertools import product
from unittest.mock import patch
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import format_modifiers
FDR_CONFIG = {'decoy_sample_size': 2}
@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])
def test_fdr_decoy_adduct_selection_saves_corr():
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H', '+K', '[M]+'],
analysis_version=1,
)
exp_target_decoy_df = pd.DataFrame(
[
('H2O', '+H', '+He'),
('H2O', '+H', '+Li'),
('H2O', '+K', '+He'),
('H2O', '+K', '+Li'),
('H2O', '', '+He'),
('H2O', '', '+Li'),
],
columns=['formula', 'tm', 'dm'],
)
fdr.decoy_adducts_selection(target_formulas=['H2O'])
assert_frame_equal(
fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
)
@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])
def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs):
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=analysis_version,
)
fdr.fdr_levels = [0.2, 0.8]
fdr.td_df = pd.DataFrame(
[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
['H2O', '+Cu', 0.5],
['H2O', '+Co', 0.5],
['C2H2', '+Ag', 0.75],
['C2H2', '+Ar', 0.0],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
],
columns=['formula', 'modifier', 'msm'],
).assign(fdr=expected_fdrs)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_estimate_fdr_digitize_works():
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=1,
)
fdr.fdr_levels = [0.4, 0.8]
fdr.td_df = pd.DataFrame(
[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['C1', '+H', 1.0],
['C2', '+H', 0.75],
['C3', '+H', 0.5],
['C4', '+H', 0.25],
['C1', '+Cu', 0.75],
['C2', '+Ag', 0.3],
['C3', '+Cl', 0.25],
['C4', '+Co', 0.1],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['C1', '+H', 1.0, 0.4],
['C2', '+H', 0.75, 0.4],
['C3', '+H', 0.5, 0.4],
['C4', '+H', 0.25, 0.8],
],
columns=['formula', 'modifier', 'msm', 'fdr'],
)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_ions():
formulas = ['H2O', 'C5H2OH']
target_adducts = ['+H', '+Na']
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
assert (
len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)
< len(ions)
<= len(formulas) * len(target_adducts) * decoy_sample_size
+ len(formulas) * len(target_adducts)
)
target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_chem_mods_and_neutral_losses():
formulas = ['H2O', 'C5H2OH']
chem_mods = ['-H+C']
neutral_losses = ['-O', '-C']
target_adducts = ['+H', '+Na', '[M]+']
target_modifiers = [
format_modifiers(cm, nl, ta)
for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)
]
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=chem_mods,
neutral_losses=neutral_losses,
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
min_count = len(formulas) * len(target_modifiers)
max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)
assert min_count < len(ions) <= max_count
target_ions = list(product(formulas, target_modifiers))
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_run_fdr_ranking():
target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])
n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])
expected_fdr = n_decoys / n_targets
expected_fdr_ros = (n_decoys + 1) / (n_targets + 1)
expected_fdr_mono = pd.Series(
[0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
)
fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)
fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False)
fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True)
assert np.isclose(fdr, expected_fdr).all()
assert np.isclose(fdr_ros, expected_fdr_ros).all()
assert np.isclose(fdr_mono, expected_fdr_mono).all()
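# Worked check of the expected values above (explanatory comment): the raw FDR at
# each target score is (#decoys >= score) / (#targets >= score); e.g. at score 0.8
# there are 3 targets (1.0, 0.9, 0.8) and 1 decoy (0.8) at or above it, giving 1/3.
# The rule-of-succession variant adds 1 to both counts (2/4 at that score), and the
# monotonic variant replaces each value with the minimum raw FDR at that score or
# any lower score, which is why the entries for 0.8, 0.7 and 0.6 all collapse to 1/5.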
| 1.9375 | 2 |
tests/__init__.py | acarl005/plotille | 2 | 2402 | from logging import getLogger
getLogger('flake8').propagate = False
| 1.234375 | 1 |
umigame/nlp/labelling.py | penguinwang96825/Umigame | 0 | 2403 | <reponame>penguinwang96825/Umigame
import math
import numpy as np
import pandas as pd
def fixed_time_horizon(df, column='close', lookback=20):
"""
Fixed-time Horizon
As it relates to finance, virtually all ML papers label observations using the fixed-time horizon method.
Fixed-time horizon is presented as one of the main procedures to label data when it comes to processing
financial time series for machine learning.
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
lookback: int
The number of days to look ahead when computing the forward return.
References
----------
1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html
2. https://arxiv.org/pdf/1603.08604.pdf
3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/
4. De Prado, Advances in financial machine learning, 2018
5. Dixon et al., Classification-based financial markets prediction using deep neural networks, 2017
"""
price = df[column]
label = (price.shift(-lookback) / price > 1).astype(int)
return label
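# Illustrative example:
# >>> prices = pd.DataFrame({'close': [100.0, 101.0, 99.0, 102.0, 103.0, 101.5]})
# >>> fixed_time_horizon(prices, column='close', lookback=2).tolist()
# [0, 1, 1, 0, 0, 0]
# Each row is labelled 1 when the close two rows later is strictly higher, and 0
# otherwise (rows whose horizon falls past the end of the data default to 0).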
def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True):
"""
Triple Barrier
The idea is to consider the full dynamics of a trading strategy and not a simple performance proxy.
The rationale for this extension is that often money managers implement P&L triggers that cash in
when gains are sufficient or opt out to stop their losses. Upon inception of the strategy,
three barriers are fixed (De Prado, 2018).
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
ub: float
It stands for upper bound, e.g. 0.07 is a 7% profit taking.
lb: float
It stands for lower bound, e.g. 0.03 is a 3% stop loss.
lookback: int
Maximum holding time, in number of rows (e.g. days for daily data).
References
----------
1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/
2. http://www.mlfactor.com/Data.html#the-triple-barrier-method
3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/
4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e
5. De Prado, Advances in financial machine learning, 2018
"""
ub = 1 + ub
lb = 1 - lb
def end_price(s):
return np.append(s[(s / s[0] > ub) | (s / s[0] < lb)], s[-1])[0]/s[0]
r = np.array(range(lookback))
def end_time(s):
return np.append(r[(s / s[0] > ub) | (s / s[0] < lb)], lookback-1)[0]
price = df[column]
p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1)
t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1)
t = pd.Series(
[t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT')
for i, k in enumerate(t)], index=t.index
).dropna()
label = pd.Series(0, p.index)
label.loc[p > ub] = 1
label.loc[p < lb] = -1
if binary_classification:
label = np.where(label == 1, 1, 0)
return pd.Series(label, index=price.index)
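# Usage sketch (illustrative; `ohlc_df` is a placeholder for any price DataFrame):
# >>> labels = triple_barrier(ohlc_df, column='close', ub=0.07, lb=0.03, lookback=20)
# With binary_classification=True (the default) a row is labelled 1 only when the
# +7% profit-taking barrier is reached before the -3% stop-loss within the 20-bar
# window, and 0 otherwise; with binary_classification=False the stop-loss case is
# labelled -1 and the no-barrier case 0.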
def get_continuous_trading_signals(df, column='close', lookahead=5):
"""
Continuous Trading Signal
Generates a continuous label in [0, 1] following the hybrid stock trading framework of Dash and Dash (2016), which integrates technical analysis with machine learning techniques.
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
lookahead: int
The number of days to look ahead.
References
----------
1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf
2. Dash and Dash, A hybrid stock trading framework integrating technical analysis with machine learning techniques, 2016
"""
price = df[column]  # df is a pd.DataFrame, so index the column directly
OTr = []
trends = []
for idx in range(len(price)-lookahead+1):
arr_window = price[idx:(idx+lookahead)]
if price[idx+lookahead-1] > price[idx]:
coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
y_t = coef * 0.5 + 0.5
elif price[idx+lookahead-1] <= price[idx]:
coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
y_t = coef * 0.5
OTr.append(y_t)
OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr))))
trends = (OTr >= np.mean(OTr)).astype(int)
return pd.Series(OTr, index=price.index), pd.Series(trends, index=price.index)
| 3.71875 | 4 |
mayan/apps/converter/api.py | Dave360-crypto/mayan-edms | 3 | 2404 | from __future__ import absolute_import
import hashlib
import logging
import os
from django.utils.encoding import smart_str
from common.conf.settings import TEMPORARY_DIRECTORY
from common.utils import fs_cleanup
from .exceptions import OfficeConversionError, UnknownFileFormat
from .literals import (DEFAULT_PAGE_NUMBER,
DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT)
from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE,
TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR,
FILE_FORMATS)
from .runtime import backend, office_converter
HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()
logger = logging.getLogger(__name__)
def cache_cleanup(input_filepath, *args, **kwargs):
try:
os.remove(create_image_cache_filename(input_filepath, *args, **kwargs))
except OSError:
pass
def create_image_cache_filename(input_filepath, *args, **kwargs):
if input_filepath:
hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)]))
return os.path.join(TEMPORARY_DIRECTORY, hash_value)
else:
return None
def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs):
size = kwargs.get('size')
file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT)
zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL)
rotation = kwargs.get('rotation', DEFAULT_ROTATION)
page = kwargs.get('page', DEFAULT_PAGE_NUMBER)
transformations = kwargs.get('transformations', [])
if transformations is None:
transformations = []
if output_filepath is None:
output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs)
if os.path.exists(output_filepath):
return output_filepath
if office_converter:
try:
office_converter.convert(input_filepath, mimetype=mimetype)
if office_converter.exists:
input_filepath = office_converter.output_filepath
mimetype = 'application/pdf'
else:
# Recycle the already detected mimetype
mimetype = office_converter.mimetype
except OfficeConversionError:
raise UnknownFileFormat('office converter exception')
if size:
transformations.append(
{
'transformation': TRANSFORMATION_RESIZE,
'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR)))
}
)
if zoom != 100:
transformations.append(
{
'transformation': TRANSFORMATION_ZOOM,
'arguments': {'percent': zoom}
}
)
if rotation != 0 and rotation != 360:
transformations.append(
{
'transformation': TRANSFORMATION_ROTATE,
'arguments': {'degrees': rotation}
}
)
try:
backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype)
finally:
if cleanup_files:
fs_cleanup(input_filepath)
return output_filepath
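# Illustrative call (the path is hypothetical): generate a cached preview of page 2
# of a document, zoomed and rotated.
# preview_path = convert('/tmp/incoming/report.pdf', zoom=150, rotation=90, page=2)
# The output is written to a cache file derived from the input path and arguments,
# so a repeated call with the same arguments returns the existing cached image.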
def get_page_count(input_filepath):
logger.debug('office_converter: %s' % office_converter)
if office_converter:
try:
office_converter.convert(input_filepath)
logger.debug('office_converter.exists: %s' % office_converter.exists)
if office_converter.exists:
input_filepath = office_converter.output_filepath
except OfficeConversionError:
raise UnknownFileFormat('office converter exception')
return backend.get_page_count(input_filepath)
def get_available_transformations_choices():
result = []
for transformation in backend.get_available_transformations():
result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label']))
return result
def get_format_list():
return [(format, FILE_FORMATS.get(format, u'')) for format in backend.get_format_list()]
| 1.914063 | 2 |
LogisticRegression/learn.py | ValYouW/DeepLearningCourse | 0 | 2405 | <reponame>ValYouW/DeepLearningCourse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import utils
def plot_data(x_mat, y, db_x, db_y):
plt.figure()
plt.title('Data')
admitted = (y == 1).flatten()
rejected = (y == 0).flatten()
# plot decision boundary
plt.plot(db_x, db_y)
# plot admitted
plt.scatter(x_mat[admitted, 0], x_mat[admitted, 1], color='blue', marker='+')
# plot rejected
plt.scatter(x_mat[rejected, 0], x_mat[rejected, 1], edgecolors='red', facecolors='none', marker='o')
plt.xlabel('exam 1 score')
plt.ylabel('exam 2 score')
plt.legend(['boundary', 'admitted', 'rejected'])
def main():
print('Loading dataset...')
# data is: exam 1 score, exam 2 score, bool whether admitted
frame = pd.read_csv('ex2data1.csv', header=None)
data = frame.values
x_mat = data[:, 0:2] # exam scores
y = data[:, 2:3] # admitted or not
# normalize input (input has large values which causes sigmoid to always be 1 or 0)
x_mean = np.mean(x_mat, axis=0)
x_std = np.std(x_mat, axis=0)
x_norm = (x_mat - x_mean) / x_std
# add intercept
x_norm = np.insert(x_norm, 0, 1, axis=1)
# Learn model
print('starting to learn...')
(loss, reg_loss, theta) = utils.learn(x_norm, y, 5000, 0.1)
print('Final loss %s' % loss[-1])
print('Final theta \n%s' % theta)
# predict for student
joe = np.array([[45, 85]])
joe_norm = (joe - x_mean) / x_std
joe_norm = np.insert(joe_norm, 0, 1, axis=1)
p = utils.sigmoid(joe_norm.dot(theta))
print('Student with grades %s and %s has admission probability: %s' % (45, 85, p[0, 0]))
# Predict on train set
prediction = (utils.sigmoid(x_norm.dot(theta)) >= 0.5)
actual = (y == 1)
predict_success = np.sum(prediction == actual)
print('Model evaluation on training set has success of %s/%s' % (predict_success, y.shape[0]))
# calc decision boundary
# The decision boundary is the threshold line that separates true/false predictions,
# this means that on this line the prediction is exactly 0.5, meaning:
# p = sigmoid(x_mat.dot(theta)) = 0.5 ====> x_mat.dot(theta) = 0
# so our line equation is: theta0 + theta1*x1 + theta2*x2 = 0
# x2 = -theta0 / theta2 - (theta1/theta2)*x1
theta = theta.flatten()
# calc 2 points on the line
plot_x = np.array([np.min(x_norm[:, 1]), np.max(x_norm[:, 1])])
plot_y = -1 * (theta[0] / theta[2]) - (theta[1] / theta[2]) * plot_x
# denormalize the points
plot_x = plot_x * x_std[0] + x_mean[0]
plot_y = plot_y * x_std[1] + x_mean[1]
plot_data(x_mat, y, plot_x, plot_y)
utils.plot_loss(loss)
plt.show()
if __name__ == '__main__':
main()
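# Usage note (illustrative): run from the directory containing ex2data1.csv, e.g.
#   python learn.py
# The CSV is expected to hold three unlabeled columns, as read above with
# header=None: exam 1 score, exam 2 score, and a 0/1 admission flag.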
| 3.71875 | 4 |
ignite/handlers/time_profilers.py | iamhardikat11/ignite | 4,119 | 2406 | import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
class BasicTimeProfiler:
"""
BasicTimeProfiler can be used to profile the handlers,
events, data loading and data processing times.
Examples:
.. code-block:: python
from ignite.handlers import BasicTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = BasicTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
events_to_ignore = [
Events.EXCEPTION_RAISED,
Events.TERMINATE,
Events.TERMINATE_SINGLE_EPOCH,
Events.DATALOADER_STOP_ITERATION,
]
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = torch.zeros(1)
self.processing_times = torch.zeros(1)
self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor]
self._events = [
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.COMPLETED,
]
self._fmethods = [
self._as_first_epoch_started,
self._as_first_epoch_completed,
self._as_first_iter_started,
self._as_first_iter_completed,
self._as_first_get_batch_started,
self._as_first_get_batch_completed,
self._as_first_completed,
]
self._lmethods = [
self._as_last_epoch_started,
self._as_last_epoch_completed,
self._as_last_iter_started,
self._as_last_iter_completed,
self._as_last_get_batch_started,
self._as_last_get_batch_completed,
self._as_last_completed,
]
def _reset(self, num_epochs: int, total_num_iters: int) -> None:
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
}
def _as_first_started(self, engine: Engine) -> None:
if hasattr(engine.state.dataloader, "__len__"):
num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type]
else:
if engine.state.epoch_length is None:
raise ValueError(
"As epoch_length is not set, we can not use BasicTimeProfiler in this case."
"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
)
num_iters_per_epoch = engine.state.epoch_length
self.max_epochs = cast(int, engine.state.max_epochs)
self.total_num_iters = self.max_epochs * num_iters_per_epoch
self._reset(self.max_epochs, self.total_num_iters)
self.event_handlers_names = {
e: [
h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]
if "BasicTimeProfiler." not in repr(h) # avoid adding internal handlers into output
]
for e in Events
if e not in self.events_to_ignore
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))
for e, m in zip(self._events, self._fmethods):
engine._event_handlers[e].insert(0, (m, (engine,), {}))
for e, m in zip(self._events, self._lmethods):
engine._event_handlers[e].append((m, (engine,), {}))
# Let's go
self._event_handlers_timer.reset()
def _as_last_started(self, engine: Engine) -> None:
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
def _as_first_get_batch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
self._dataflow_timer.reset()
def _as_last_get_batch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t
def _as_first_get_batch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_get_batch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t
d = self._dataflow_timer.value()
self.dataflow_times[i] = d
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine: Engine) -> None:
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
def _as_first_epoch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_completed(self, engine: Engine) -> None:
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
engine.remove_event_handler(self._as_last_started, Events.STARTED)
for e, m in zip(self._events, self._fmethods):
engine.remove_event_handler(m, e)
for e, m in zip(self._events, self._lmethods):
engine.remove_event_handler(m, e)
def attach(self, engine: Engine) -> None:
"""Attach BasicTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
@staticmethod
def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
# compute on non-zero data:
data = data[data > 0]
out = [
("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
] # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
if len(data) > 1:
out += [
("min/index", (torch.min(data).item(), torch.argmin(data).item())),
("max/index", (torch.max(data).item(), torch.argmax(data).item())),
("mean", torch.mean(data).item()),
("std", torch.std(data).item()),
]
return OrderedDict(out)
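# Example of the returned structure (explanatory comment): for
# torch.tensor([0.1, 0.2, 0.3]) this yields, approximately,
# OrderedDict([('total', 0.6), ('min/index', (0.1, 0)), ('max/index', (0.3, 2)),
# ('mean', 0.2), ('std', 0.1)]).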
def get_results(self) -> Dict[str, Dict[str, Any]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
) # type: Union[int, torch.Tensor]
event_handlers_stats = dict(
[
(str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events
if e not in self.events_to_ignore
]
+ [("total_time", total_eh_time)] # type: ignore[list-item]
)
return OrderedDict(
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
),
]
)
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
epoch iteration processing_stats dataflow_stats Event_STARTED ...
1.0 1.0 0.00003 0.252387 0.125676
1.0 2.0 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
iters_per_epoch = self.total_num_iters // self.max_epochs
epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
processing_stats = self.processing_times
dataflow_stats = self.dataflow_times
event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)
event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]
results_dump = torch.stack(
[
epochs,
iterations,
processing_stats,
dataflow_stats,
event_started,
event_completed,
event_epoch_started,
event_epoch_completed,
event_iter_started,
event_iter_completed,
event_batch_started,
event_batch_completed,
],
dim=1,
).numpy()
results_df = pd.DataFrame(
data=results_dump,
columns=[
"epoch",
"iteration",
"processing_stats",
"dataflow_stats",
"Event_STARTED",
"Event_COMPLETED",
"Event_EPOCH_STARTED",
"Event_EPOCH_COMPLETED",
"Event_ITERATION_STARTED",
"Event_ITERATION_COMPLETED",
"Event_GET_BATCH_STARTED",
"Event_GET_BATCH_COMPLETED",
],
)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
Dataflow:
6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
Event handlers:
2.82721
- Events.STARTED: []
0.00000
- Events.EPOCH_STARTED: []
0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
- Events.ITERATION_STARTED: ['PiecewiseLinear']
0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
- Events.ITERATION_COMPLETED: ['TerminateOnNan']
0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
- Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
- Events.COMPLETED: []
not yet triggered
"""
def to_str(v: Union[str, tuple]) -> str:
if isinstance(v, str):
return v
elif isinstance(v, tuple):
return f"{v[0]:.5f}/{v[1]}"
return f"{v:.5f}"
def odict_to_str(d: Mapping) -> str:
out = " | ".join([to_str(v) for v in d.values()])
return out
others = {
k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
}
others.update(results["event_handlers_names"])
output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
processing_stats=odict_to_str(results["processing_stats"]),
dataflow_stats=odict_to_str(results["dataflow_stats"]),
**others,
)
print(output_message)
return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
profiled by this profiler
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
EVENT_FILTER_THESHOLD_TIME = 0.0001
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = [] # type: List[float]
self.processing_times = [] # type: List[float]
self.event_handlers_times = {} # type: Dict[EventEnum, Dict[str, List[float]]]
@staticmethod
def _get_callable_name(handler: Callable) -> str:
# get name of the callable handler
return getattr(handler, "__qualname__", handler.__class__.__name__)
def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
@functools.wraps(handler)
def _timeit_handler(*args: Any, **kwargs: Any) -> None:
self._event_handlers_timer.reset()
handler(*args, **kwargs)
t = self._event_handlers_timer.value()
hname = self._get_callable_name(handler)
# filter profiled time if the handler was attached to event with event filter
if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
self.event_handlers_times[event][hname].append(t)
# required to revert back to original handler after profiling
setattr(_timeit_handler, "_profiler_original", handler)
return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.append(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.append(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Callable) -> bool:
# checks whether the handler is internal
return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
# wraps original handlers for profiling
self.event_handlers_names = {
e: [
self._get_callable_name(h)
for (h, _, _) in engine._event_handlers[e]
if not self._is_internal_handler(h)
]
for e in engine._allowed_events
}
self._reset(self.event_handlers_names)
for e in engine._allowed_events:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if not self._is_internal_handler(func):
engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
# processing timer
engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
# dataflow timer
engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
# revert back the wrapped handlers with original handlers at the end
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
"""Attach HandlersTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
def get_results(self) -> List[List[Union[str, float]]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[
sum(self.event_handlers_times[e][h])
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
)
total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
data = torch.as_tensor(times, dtype=torch.float32)
# compute on non-zero data:
data = data[data > 0]
total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered" # type: Union[str, float]
min_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
max_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
mean = "None" # type: Union[str, float]
std = "None" # type: Union[str, float]
if len(data) > 0:
min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
mean = round(torch.mean(data).item(), 5)
if len(data) > 1:
std = round(torch.std(data).item(), 5)
return [total, min_index, max_index, mean, std]
event_handler_stats = [
[
h,
getattr(e, "name", str(e)),
*compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
]
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
return event_handler_stats
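# Shape of the returned list (explanatory comment): one row per (handler, event)
# pair of the form [handler_name, event_name, total, (min, index), (max, index),
# mean, std], followed by three summary rows: "Total" (sum of all handler times),
# "Processing" and "Dataflow".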
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
# processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
1 0.00003 0.252387 0.125676
2 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
cols = [processing_stats, dataflow_stats]
headers = ["processing_stats", "dataflow_stats"]
for e in self.event_handlers_times:
for h in self.event_handlers_times[e]:
headers.append(f"{h} ({getattr(e, 'name', str(e))})")
cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
# Determine maximum length
max_len = max([x.numel() for x in cols])
count_col = torch.arange(max_len, dtype=torch.float32) + 1
cols.insert(0, count_col)
headers.insert(0, "#")
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
results_dump = torch.stack(cols, dim=1).numpy()
results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------- ----------------------- -------------- ...
Handler Event Name Total(s)
----------------------------------------- ----------------------- --------------
run.<locals>.log_training_results EPOCH_COMPLETED 19.43245
run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271
run.<locals>.log_time EPOCH_COMPLETED 0.00049
run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106
run.<locals>.log_training_loss ITERATION_COMPLETED 0.059
run.<locals>.log_time COMPLETED not triggered
----------------------------------------- ----------------------- --------------
Total 22.04571
----------------------------------------- ----------------------- --------------
Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
mean: 0.00602s, std: 0.00034s]
Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
mean: 0.00866s, std: 0.00113s]
"""
# adopted implementation of torch.autograd.profiler.build_table
handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]
event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type]
DEFAULT_COLUMN_WIDTH = 14
headers = [
"Handler",
"Event Name",
"Total(s)",
"Min(s)/IDX",
"Max(s)/IDX",
"Mean(s)",
"Std(s)",
]
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
def add_column(padding: int, text_dir: str = ">") -> None:
row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
add_column(handler_column_width, text_dir="<")
add_column(event_column_width, text_dir="<")
for _ in headers[2:]:
add_column(DEFAULT_COLUMN_WIDTH)
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
result = []
def append(s: str) -> None:
result.append(s)
result.append("\n")
result.append("\n")
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
for row in results[:-3]:
# format min/idx and max/idx
row[3] = "{}/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}/{}".format(*row[4]) # type: ignore[misc]
append(row_format.format(*row))
append(header_sep)
# print total handlers time row
append(row_format.format(*results[-3]))
append(header_sep)
summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
for row in results[-2:]:
row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc]
del row[1]
append(summary_format.format(*row))
print("".join(result))
| 2.515625 | 3 |
bellmanford.py | asmodehn/aiokraken | 0 | 2407 | <gh_stars>0
"""
Bellman Ford Arbitrage implementation over websocket API.
"""
from __future__ import annotations
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
from math import log
import pandas as pd
import numpy as np
import asyncio
import typing
from aiokraken.model.assetpair import AssetPair
from aiokraken.rest import AssetPairs, Assets
from aiokraken.model.asset import Asset
from aiokraken.rest.client import RestClient
from aiokraken.websockets.publicapi import ticker
import networkx as nx
client = RestClient()
async def ticker_updates(pairs: typing.Union[AssetPairs, typing.Iterable[AssetPair]], pmatrix):
# For required pairs, get ticket updates
if isinstance(pairs, AssetPairs): # TODO : we need to unify iterable of pairs somehow...
properpairs = pairs
pairs = [p for p in pairs.values()]
else:
properpairs = AssetPairs({p.wsname: p for p in pairs})
tkrs = await client.ticker(pairs=[p for p in pairs])
# TODO : build price matrix
for p, tk in tkrs.items():
# retrieve the actual pair
pair = properpairs[p]
fee = pair.fees[0].get('fee')
# TODO : pick the right fee depending on total traded volume !
await pmatrix(base=pair.base, quote=pair.quote, ask_price=tk.ask.price, bid_price=tk.bid.price, fee_pct=fee)
# TODO : 2 levels :
# - slow updates with wide list of pairs and potential interest (no fees - small data for quick compute)
# - websockets with potential arbitrage (including fees - detailed data & precise compute)
async for upd in ticker(pairs=pairs, restclient=client):
print(f"wss ==> tick: {upd}")
# update pricematrix
base = upd.pairname.base
quote = upd.pairname.quote
fee = properpairs[upd.pairname].fees[0].get('fee')
await pmatrix(base=base, quote=quote, ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee)
class PriceMatrix:
# Note This matrix is square
# since we want to do arbitrage and find cycles...
df: pd.DataFrame
# we also need to be careful that only one writer can modify data at a time...
wlock: asyncio.Lock
assets: typing.Optional[Assets]
def __init__(self, assets: typing.Union[Assets, typing.Iterable[Asset]]):
self.wlock = asyncio.Lock()
if isinstance(assets, Assets):
assets = [a for a in assets.values()]
self.df = pd.DataFrame(data={c.restname: {c.restname: None for c in assets} for c in assets}, columns=[c.restname for c in assets], dtype='float64')
self.assets = None
async def __call__(self, base: Asset, ask_price: Decimal, quote: Asset, bid_price: Decimal, fee_pct: Decimal):
if self.assets is None: # retrieve assets for filtering calls params, only once.
self.assets = await client.retrieve_assets()
async with self.wlock: # careful with concurrent control.
if not isinstance(base, Asset):
base = self.assets[base].restname
if not isinstance(quote, Asset):
quote = self.assets[quote].restname
# These are done with decimal, but stored as numpy floats for faster compute
self.df[quote][base] = bid_price * ((100 - fee_pct) /100) # bid price to get: quote_curr -- (buy_price - fee) --> base_curr
self.df[base][quote] = ((100 - fee_pct)/100) / ask_price # ask price to get: base_curr -- (sell_price - fee) --> quote_curr
def __getitem__(self, item):
if item not in self.df.columns:
raise KeyError(f"{item} not found")
if item not in self.df:
return pd.Series(dtype=pd.dtype('decimal'))
return self.df[item]
def __len__(self):
return len(self.df.columns)
def __str__(self):
return self.df.to_string()
def neglog(self):
if not self.assets:
return False
newpm = PriceMatrix(assets=[self.assets[c] for c in self.df.columns])
# copy all values and take -log()
for c in self.df.columns:
# TODO : fix this : is it on row, or columns ? which is best ??
newpm.df[c] = np.negative(np.log(self.df[c]))
return newpm
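# Why the negative log (explanatory comment): a cycle of conversions is profitable
# when the product of its rates exceeds 1, and taking -log turns that product into
# a sum, so prod(r_i) > 1 <=> sum(-log(r_i)) < 0. An arbitrage opportunity
# therefore appears as a negative-weight cycle in the -log price graph, which
# Bellman-Ford can detect.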
def to_graph(self):
G = nx.from_pandas_adjacency(self.df, create_using=nx.DiGraph)
# from bokeh.io import output_file, show
# from bokeh.plotting import figure, from_networkx
#
# plot = figure(title="Networkx Integration Demonstration", x_range=(-1.1, 1.1), y_range=(-1.1, 1.1),
# tools="", toolbar_location=None)
#
# graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0))
# plot.renderers.append(graph)
#
# output_file("networkx_graph.html")
# show(plot)
return G
def test_pricematrix_mapping():
# testing with string for simplicity for now
pm = PriceMatrix(["EUR", "BTC"])
pm["EUR"]["BTC"] = Decimal(1.234)
pm["BTC"]["EUR"] = Decimal(4.321)
assert pm["EUR"]["BTC"] == Decimal(1.234)
assert pm["BTC"]["EUR"] == Decimal(4.321)
async def arbiter(user_assets):
assets = await client.retrieve_assets()
proper_userassets = Assets(assets_as_dict={assets[a].restname: assets[a] for a in user_assets})
assetpairs = await client.retrieve_assetpairs()
proper_userpairs = AssetPairs(assetpairs_as_dict={p.wsname:p for p in assetpairs.values()
if p.wsname is not None and (
p.base in proper_userassets or p.quote in proper_userassets
)})
# retrieving widely related assets
related_assets = set(assets[p.base] for p in proper_userpairs.values()) | set(assets[p.quote] for p in proper_userpairs.values())
proper_related_assets = Assets({a.restname: a for a in related_assets})
pmtx = PriceMatrix(assets=proper_related_assets)
# running ticker updates in background
bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs, pmatrix=pmtx))
try:
# observe pricematrix changes
while True:
# TODO : efficient TUI lib !
# print(pmtx)
# pricegraph = pmtx.to_graph() # display...
neglog = pmtx.neglog()
if neglog:
negcycle = bellmanford(neglog)
if len(negcycle):
amnt = 1 # arbitrary starting amount
pred = negcycle[-1]
dscr = f"{amnt} {pred}"
for cn in reversed(negcycle[:-1]):
amnt = amnt * pmtx[pred][cn]
pred = cn
dscr = dscr + f" -> {amnt} {pred}"
print(f"ARBITRAGE POSSIBLE: {dscr}")
# TODO : from these we can extract market making opportunities ??
# Another way :
# negloggraph = neglog.to_graph()
#
# negcycle = list()
#
# if nx.negative_edge_cycle(negloggraph):
# # find it !
# print("NEGATIVE CYCLE FOUND !")
#
# # Now find it
# print(f"computing cycles... {datetime.now()}")
#
# for cycle in nx.simple_cycles(negloggraph):
# # for cycle in nx.cycle_basis(negloggraph): # NOT implemented !
# # find negative weight sum (cycle need to be more than one node)
# if sum(negloggraph[n][m].get('weight') for n, m in zip(cycle, cycle[1:])) < 0:
# print(f"Found one: {cycle}")
# negcycle.append(cycle)
# print(negcycle)
# print(f"computing cycles DONE ! {datetime.now()}")
await asyncio.sleep(5)
finally:
# in every case cancel the background task now
bgtsk.cancel()
# TODO: react !
def bellmanford(pmatrix_neglog: PriceMatrix, source='ZEUR'):
n = len(pmatrix_neglog)
min_dist = {source: 0}
min_pred = {}
# Relax edges |V - 1| times
for i in range(n - 1): # iterations
for v in pmatrix_neglog.df.columns: # vertex source
if v in min_dist.keys(): # otherwise distance infinite until we know it...
for w in pmatrix_neglog.df.columns: # vertex target
if w not in min_dist.keys() or min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
min_dist[w] = min_dist[v] + pmatrix_neglog[v][w]
min_pred[w] = v
# If we can still relax edges, then we have a negative cycle
for v in pmatrix_neglog.df.columns:
if v in min_dist.keys(): # otherwise node is not yet relevant here
for w in pmatrix_neglog.df.columns:
if min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
# print(f"{min_dist[w]} > {min_dist[v]} + {pmatrix_neglog[v][w]}")
path = (w, min_pred[w])
while len(set(path)) == len(path): # while no duplicates, cycle is not complete...
path = (*path, min_pred[path[-1]])
# First cycle retrieved is *likely* (?) to be the minimal one -> the only one we are interested in
return path[path.index(path[-1]):]
return ()
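# Note on the implementation above (explanatory comment): the relaxation runs n - 1
# passes over every ordered pair of currencies, i.e. O(n^3) on this dense matrix;
# the final pass detects any still-improvable edge, and the predecessor chain is
# then walked backwards until a node repeats, yielding the arbitrage cycle that is
# returned (or an empty tuple when no negative cycle exists).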
if __name__ == '__main__':
asyncio.run(arbiter(user_assets=["XTZ", "ETH", "XBT", "EUR"]), debug=True)
| 2.3125 | 2 |
custom_components/snowtire/__init__.py | borys-kupar/smart-home | 128 | 2408 | #
# Copyright (c) 2020, Andrey "Limych" Khrolenok <<EMAIL>>
# Creative Commons BY-NC-SA 4.0 International Public License
# (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/)
#
"""
The Snowtire binary sensor.
For more details about this platform, please refer to the documentation at
https://github.com/Limych/ha-snowtire/
"""
| 0.949219 | 1 |
tests/test_bayes_classifier.py | manishgit138/pomegranate | 3,019 | 2409 | from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy as np
nan = numpy.nan
def setup_multivariate_gaussian():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
mu, cov = [2, 2, 2], numpy.eye(3)
d2 = MultivariateGaussianDistribution(mu, cov)
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[-1.2, -1.8, -1.5],
[-1.8, 0.3, 0.5],
[ 0.7, -1.3, -0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[-1.2, -1.8, -1.5],
[ nan, 0.3, 0.5],
[ nan, -1.3, nan]])
def setup_multivariate_mixed():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
d21 = ExponentialDistribution(5)
d22 = LogNormalDistribution(0.2, 0.8)
d23 = PoissonDistribution(3)
d2 = IndependentComponentsDistribution([d21, d22, d23])
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[ 1.2, 1.8, 1.5],
[ 1.8, 0.3, 0.5],
[ 0.7, 1.3, 0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[ 1.2, 1.8, 1.5],
[ nan, 0.3, 0.5],
[ nan, 1.3, nan]])
def setup_hmm():
global model
global hmm1
global hmm2
global hmm3
rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )
unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )
hmm1 = HiddenMarkovModel()
hmm1.start = rigged
hmm1.add_transition(rigged, rigged, 1)
hmm1.bake()
hmm2 = HiddenMarkovModel()
hmm2.start = unrigged
hmm2.add_transition(unrigged, unrigged, 1)
hmm2.bake()
hmm3 = HiddenMarkovModel()
hmm3.add_transition(hmm3.start, unrigged, 0.5)
hmm3.add_transition(hmm3.start, rigged, 0.5)
hmm3.add_transition(rigged, rigged, 0.5)
hmm3.add_transition(rigged, unrigged, 0.5)
hmm3.add_transition(unrigged, rigged, 0.5)
hmm3.add_transition(unrigged, unrigged, 0.5)
hmm3.bake()
model = BayesClassifier([hmm1, hmm2, hmm3])
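# The three components model a coin that is always rigged (hmm1), a coin that is
# always fair (hmm2), and a coin that switches between rigged and fair states with
# equal probability (hmm3).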
def setup_multivariate():
pass
def teardown():
pass
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.99533332e-02, -3.23995333e+00],
[ -1.17110067e+00, -3.71100666e-01],
[ -4.01814993e+00, -1.81499279e-02],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.80005545e+00, -5.54500620e-05],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.78390074e+00, -1.83900741e-01],
[ -3.05902274e-07, -1.50000003e+01],
[ -8.68361522e-02, -2.48683615e+00],
[ -1.00016521e-02, -4.61000165e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.57980882e-01, -1.20093223e+00],
[ -1.20735130e+00, -3.55230506e-01],
[ -2.43174286e-01, -1.53310132e+00],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.31781101e+00, -8.98143220e-05],
[ -6.29755079e-04, -7.37049444e+00],
[ -1.31307006e+00, -3.13332194e-01],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.29725479e-01, -1.58353505e+00],
[ -1.17299253e+00, -3.70251760e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 9.60834277e-01, 3.91657228e-02],
[ 3.10025519e-01, 6.89974481e-01],
[ 1.79862100e-02, 9.82013790e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 5.54485247e-05, 9.99944551e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 1.67981615e-01, 8.32018385e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.16827304e-01, 8.31726965e-02],
[ 9.90048198e-01, 9.95180187e-03]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 6.99086440e-01, 3.00913560e-01],
[ 2.98988163e-01, 7.01011837e-01],
[ 7.84134838e-01, 2.15865162e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 8.98102888e-05, 9.99910190e-01],
[ 9.99370443e-01, 6.29556825e-04],
[ 2.68992964e-01, 7.31007036e-01],
[ 7.69692511e-01, 2.30307489e-01],
[ 7.94751748e-01, 2.05248252e-01],
[ 3.09439547e-01, 6.90560453e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
y_hat = model.predict(X)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
y_hat = model.predict(X)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.28333333, 0.21666666]
cov1_t = [[1.3088888, 0.9272222, 0.6227777],
[0.9272222, 2.2513888, 1.3402777],
[0.6227777, 1.3402777, 0.9547222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687499, 0.23687499, 0.4793750],
[0.23687499, 0.40187499, 0.5318749],
[0.47937500, 0.53187499, 0.7868750]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [1.033333, 1.3166667, 0.75]
cov1_t = [[0.242222, 0.0594444, 0.178333],
[0.059444, 0.5980555, 0.414166],
[0.178333, 0.4141666, 0.439166]]
d21 = model.distributions[1].distributions[0]
d22 = model.distributions[1].distributions[1]
d23 = model.distributions[1].distributions[2]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(d21.parameters, [0.34188034])
assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
assert_array_almost_equal(d23.parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.2833333, 0.21666666]
cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
[0.927222222, 2.251388888, 1.340277777],
[0.622777777, 1.340277777, 0.9547222222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687500, 0.23687499, 0.47937500],
[0.23687499, 0.40187499, 0.53187499],
[0.47937500, 0.53187499, 0.78687500]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 )
assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 )
assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 )
assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )
assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )
assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )
assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)
assert_equal(model.d, 1)
@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(logs[0][0], -0.89097292388986515)
assert_almost_equal(logs[0][1], -1.3609765531356006)
assert_almost_equal(logs[0][2], -1.0986122886681096)
assert_almost_equal(logs[1][0], -0.93570553121744293)
assert_almost_equal(logs[1][1], -1.429425687080494)
assert_almost_equal(logs[1][2], -0.9990078376167526)
assert_almost_equal(logs[2][0], -3.9007882563128864)
assert_almost_equal(logs[2][1], -0.23562532881626597)
assert_almost_equal(logs[2][2], -1.6623251045711958)
assert_almost_equal(logs[3][0], -3.1703366478831185)
assert_almost_equal(logs[3][1], -0.49261403211260379)
assert_almost_equal(logs[3][2], -1.058478108940049)
assert_almost_equal(logs[4][0], -1.3058441172130273)
assert_almost_equal(logs[4][1], -1.4007102236822906)
assert_almost_equal(logs[4][2], -0.7284958836972919)
@with_setup(setup_hmm, teardown)
def test_hmm_proba():
probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(probs[0][0], 0.41025641025641024)
assert_almost_equal(probs[0][1], 0.25641025641025639)
assert_almost_equal(probs[0][2], 0.33333333333333331)
assert_almost_equal(probs[1][0], 0.39230898163446098)
assert_almost_equal(probs[1][1], 0.23944639992337707)
assert_almost_equal(probs[1][2], 0.36824461844216183)
assert_almost_equal(probs[2][0], 0.020225961918306088)
assert_almost_equal(probs[2][1], 0.79007663743383105)
assert_almost_equal(probs[2][2], 0.18969740064786292)
assert_almost_equal(probs[3][0], 0.041989459861032523)
assert_almost_equal(probs[3][1], 0.61102706038265642)
assert_almost_equal(probs[3][2], 0.346983479756311)
assert_almost_equal(probs[4][0], 0.27094373022369794)
assert_almost_equal(probs[4][1], 0.24642188711704707)
assert_almost_equal(probs[4][2], 0.48263438265925512)
@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_equal(predicts[0], 0)
assert_equal(predicts[1], 0)
assert_equal(predicts[2], 1)
assert_equal(predicts[3], 1)
assert_equal(predicts[4], 2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
logp1 = model.log_probability(X)
logp2 = model.log_probability(X2)
logp3 = model.log_probability(X3)
assert_array_almost_equal(logp1, logp2)
assert_array_almost_equal(logp1, logp3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict(X)
y_hat2 = model.predict(X2)
y_hat3 = model.predict(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_proba(X)
y_hat2 = model.predict_proba(X2)
y_hat3 = model.predict_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_log_proba(X)
y_hat2 = model.predict_log_proba(X2)
y_hat3 = model.predict_log_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
def test_io_fit():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
mu1 = numpy.array([0, 0, 0, 0, 0])
mu2 = numpy.array([1, 1, 1, 1, 1])
cov = numpy.eye(5)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc1 = BayesClassifier([d1, d2])
bc1.fit(X, y, weights)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc2 = BayesClassifier([d1, d2])
bc2.fit(data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, X=data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2) | 1.960938 | 2 |
ks_engine/variable_scoring.py | FilippoRanza/ks.py | 2 | 2410 | #! /usr/bin/python
from .solution import Solution
try:
import gurobipy
except ImportError:
print("Gurobi not found: error ignored to allow tests")
def variable_score_factory(sol: Solution, base_kernel: dict, config: dict):
if config.get("VARIABLE_RANKING"):
output = VariableRanking(sol, base_kernel)
else:
output = ReducedCostScoring(sol, base_kernel)
return output
class AbstactVariableScoring:
def __init__(self, solution: Solution, base_kernel: dict):
self.score = {k: 0 if base_kernel[k] else v for k, v in solution.vars.items()}
def get_value(self, var_name):
return self.score[var_name]
def success_update_score(self, curr_kernel, curr_bucket):
raise NotImplementedError
def failure_update_score(self, curr_kernel, curr_bucket):
raise NotImplementedError
class ReducedCostScoring(AbstactVariableScoring):
def success_update_score(self, curr_kernel, curr_bucket):
pass
def failure_update_score(self, curr_kernel, curr_bucket):
pass
class VariableRanking(AbstactVariableScoring):
def cb_update_score(self, name, value):
if value == 0:
self.score[name] += 0.1
else:
self.score[name] -= 0.1
def success_update_score(self, curr_kernel, curr_bucket):
for var in curr_bucket:
if curr_kernel[var]:
self.score[var] -= 15
else:
self.score[var] += 15
def failure_update_score(self, curr_kernel, curr_bucket):
for var in curr_bucket:
if curr_kernel[var]:
self.score[var] += 1
else:
self.score[var] -= 1
def callback_factory(scoring: AbstactVariableScoring):
if isinstance(scoring, VariableRanking):
output = __build_callback__(scoring)
else:
output = None
return output
def __build_callback__(scoring):
def callback(model, where):
if where == gurobipy.GRB.Callback.MIPSOL:
for var in model.getVars():
value = model.cbGetSolution(var)
scoring.cb_update_score(var.varName, value)
return callback
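# Illustrative usage (a sketch, not part of the original module; the `sol`,
# `kernel`, `bucket` and `model` objects are assumed to come from the
# surrounding ks.py solver code):
#
#   scoring = variable_score_factory(sol, kernel, {"VARIABLE_RANKING": True})
#   callback = callback_factory(scoring)  # a Gurobi callback, or None for reduced-cost scoring
#   scores = {name: scoring.get_value(name) for name in kernel}
#   scoring.success_update_score(kernel, bucket)  # after a bucket improves the incumbent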
| 2.375 | 2 |
src/fetchcode/vcs/pip/_internal/utils/entrypoints.py | quepop/fetchcode | 7 | 2411 | import sys
from fetchcode.vcs.pip._internal.cli.main import main
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, List
def _wrapper(args=None):
# type: (Optional[List[str]]) -> int
"""Central wrapper for all old entrypoints.
Historically pip has had several entrypoints defined. Because of issues
arising from PATH, sys.path, multiple Pythons, their interactions, and most
of them having a pip installed, users suffer every time an entrypoint gets
moved.
To alleviate this pain, and provide a mechanism for warning users and
directing them to an appropriate place for help, we now define all of
our old entrypoints as wrappers for the current one.
"""
sys.stderr.write(
"WARNING: pip is being invoked by an old script wrapper. This will "
"fail in a future version of pip.\n"
"Please see https://github.com/pypa/pip/issues/5599 for advice on "
"fixing the underlying issue.\n"
"To avoid this problem you can invoke Python with '-m pip' instead of "
"running pip directly.\n"
)
return main(args)
| 1.984375 | 2 |
Support/Make_Documentation.py | bvbohnen/x4-projects | 24 | 2412 | '''
Support for generating documentation readmes for the extensions.
Extracts from decorated lua block comments and xml comments.
'''
from pathlib import Path
from lxml import etree
import sys
from itertools import chain
project_dir = Path(__file__).resolve().parents[1]
# Set up an import from the customizer for some text processing.
x4_customizer_dir = str(project_dir.parent / 'X4_Customizer')
if x4_customizer_dir not in sys.path:
sys.path.append(x4_customizer_dir)
from Framework.Make_Documentation import Merge_Lines
#from Framework.Make_Documentation import Get_BB_Text
# Grab the project specifications.
from Release_Specs import release_specs
def Make():
for spec in release_specs:
# Update all of the content.xml files.
spec.Update_Content_Version()
# Make each of the doc files (if any).
# (Note: this function not included in the class methods to avoid
# import issues with the text helper functions below.)
for rel_path, file_list in spec.doc_specs.items():
# Set up the full path.
doc_path = spec.root_path / rel_path
# Get lines for all files.
doc_lines = []
for file_path in file_list:
if file_path.suffix == '.xml':
doc_lines += Get_XML_Cue_Text(file_path)
elif file_path.suffix == '.lua':
doc_lines += Get_Lua_Text(file_path)
with open(doc_path, 'w') as file:
file.write('\n'.join(doc_lines))
return
def Sections_To_Lines(doc_text_sections):
'''
    Converts a list of (section label, text) pairs to a list of text lines,
with labelling and formatting applied.
Expects the input to start with a 'title', then 'overview', then
a series of names of cues or functions.
'''
# Transfer to annotated/indented lines.
functions_started = False
title = ''
ret_text_lines = []
for key, text in doc_text_sections:
# Extract the title and continue; this isn't printed directly.
if key == 'title':
title = text.strip()
continue
# Header gets an 'overview' label.
if key == 'overview':
ret_text_lines += ['', '### {} Overview'.format(title), '']
indent = ''
# Lua functions are in one lump, like overview.
elif key == 'functions':
ret_text_lines += ['', '### {} Functions'.format(title), '']
indent = ''
# Sections may be multiple.
elif key == 'section':
ret_text_lines += ['','']
indent = ''
# Otherwise these are md cues.
else:
indent = ' '
# Stick a label line when starting the function section.
if not functions_started:
functions_started = True
ret_text_lines += ['', '### {} Cues'.format(title), '']
# Bullet the function name.
ret_text_lines.append('* **{}**'.format(key))
# Process the text a bit.
text = Merge_Lines(text)
# Add indents to functions, and break into convenient lines.
text_lines = [indent + line for line in text.splitlines()]
# Record for output.
ret_text_lines += text_lines
return ret_text_lines
def Get_XML_Cue_Text(xml_path):
'''
Returns a list of lines holding the documentation extracted
from a decorated MD xml file.
'''
# List of tuples of (label, text) hold the extracted text lines.
doc_text_sections = []
# Read the xml and pick out the cues.
tree = etree.parse(str(xml_path))
root = tree.xpath('/*')[0]
cues = tree.xpath('/*/cues')[0]
# Stride through comments/cues in the list.
# Looking for decorated comments.
for node in chain(root.iterchildren(), cues.iterchildren()):
# Skip non-comments.
# Kinda awkward how lxml checks this (isinstance doesn't work).
if node.tag is not etree.Comment:
continue
# Handle title declarations.
if '@doc-title' in node.text:
label = 'title'
text = node.text.replace('@doc-title','')
elif '@doc-overview' in node.text:
label = 'overview'
text = node.text.replace('@doc-overview','')
elif '@doc-section' in node.text:
label = 'section'
text = node.text.replace('@doc-section','')
elif '@doc-cue' in node.text:
label = node.getnext().get('name')
text = node.text.replace('@doc-cue','')
else:
# Unwanted comment; skip.
continue
# Record it.
doc_text_sections.append((label, text))
# Process into lines and return.
return Sections_To_Lines(doc_text_sections)
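# Example of the decorated comment layout this parser expects (an assumption for
# illustration; the tag names mirror the branches above). A comment tagged
# @doc-cue must directly precede the <cue> it documents, because the cue's
# 'name' attribute is read from node.getnext():
#
#   <!-- @doc-cue Fired on reload; registers the extension's options menu. -->
#   <cue name="Register_Options_Menu" ...> ... </cue>
#
# which yields the section ('Register_Options_Menu', '...') passed to Sections_To_Lines.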
def Get_Lua_Text(lua_path):
'''
Extract documentation text from a decorated lua file.
'''
text = lua_path.read_text()
ret_text_lines = []
# Extract non-indented comments.
# TODO: maybe regex this.
comment_blocks = []
lua_lines = text.splitlines()
i = 0
while i < len(lua_lines):
this_line = lua_lines[i]
if this_line.startswith('--[['):
# Scan until the closing ]].
these_lines = []
# Record the first line.
these_lines.append(this_line.replace('--[[',''))
i += 1
# Only search to the end of the doc.
while i < len(lua_lines):
next_line = lua_lines[i]
if next_line.startswith(']]'):
# Found the last line; skip it.
break
these_lines.append(next_line)
i += 1
comment_blocks.append('\n'.join(these_lines))
# Check single-line comments after block comments, to avoid
# -- confusion.
elif this_line.startswith('--'):
comment_blocks.append(this_line.replace('--',''))
# Always one increment per loop.
i += 1
# Title to put on label lines.
# Starts blank, filled by decorator.
title = ''
# List of tuples of (label, text) hold the extracted text lines.
doc_text_sections = []
# Go through the comments looking for decorators.
for comment in comment_blocks:
# Handle title declarations.
if '@doc-title' in comment:
label = 'title'
text = comment.replace('@doc-title','')
# Text blocks are either overview or cue.
elif '@doc-overview' in comment:
label = 'overview'
text = comment.replace('@doc-overview','')
# For now, all functions are lumped together in one comment.
elif '@doc-functions' in comment:
label = 'functions'
text = comment.replace('@doc-functions','')
else:
# Unwanted comment; skip.
continue
# Record it.
doc_text_sections.append((label, text))
# Process into lines and return.
return Sections_To_Lines(doc_text_sections)
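# Example lua input (illustrative): a non-indented block comment carrying a tag
# is captured up to the closing ']]' line, e.g.
#
#   --[[ @doc-functions
#   * Register_Callback(name, fn) - hypothetical entry describing an API.
#   ]]
#
# which becomes the 'functions' section of the generated readme.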
#-Removed; generally avoiding putting main docs on the forum.
#def Make_BB_Code(doc_dir, header_lines = []):
# '''
# Turn the ext_dir's readme into a bbcode txt file.
# Output is placed in the release folder.
# '''
# release_dir = project_dir / 'Release'
# if not release_dir.exists():
# release_dir.mkdir()
#
# # Grab the readme contents.
# doc_lines = (doc_dir / 'Readme.md').read_text().splitlines()
# # Generate a bbcode version, prefixing with custom header.
# bb_lines = header_lines + Get_BB_Text(doc_lines)
# (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\n'.join(bb_lines))
# return
if __name__ == '__main__':
Make()
| 2.484375 | 2 |
Chapter 2 - Variables & Data Types/05_pr_set_add_two_no.py | alex-dsouza777/Python-Basics | 0 | 2413 | #Addition of two numbers
a = 30
b = 17
print("Sum of a and b is",a + b) | 3.65625 | 4 |
curso 1/04 - caixa de texto/a4.py | andersonssh/aprendendo-pyqt5 | 0 | 2414 | import sys
from PyQt5.QtWidgets import (QApplication,
QMainWindow,
QPushButton,
QToolTip,
QLabel,
QLineEdit)
from PyQt5 import QtGui
class Janela(QMainWindow):
def __init__(self):
super().__init__()
self.topo = 50
self.esquerda = 50
self.largura = 800
self.altura = 600
self.titulo = 'Primeira janela'
self.gera_labels()
self.gera_botoes()
self.gera_imagens()
self.gera_caixas_de_texto()
def carregar_janela(self):
self.setGeometry(self.esquerda, self.topo, self.largura, self.altura)
self.setWindowTitle(self.titulo)
self.show()
def gera_botoes(self):
        # buttons
botao1 = QPushButton('Botao 1', self)
botao1.move(100, 100)
botao1.resize(100, 50)
botao1.setStyleSheet(
'QPushButton{background-color: white; color: black;} QPushButton:hover{ background: orange; font-weight: 600;}')
botao1.clicked.connect(self.b1)
botao2 = QPushButton('Botao 2', self)
botao2.move(300, 100)
botao2.resize(100, 50)
botao2.setStyleSheet(
'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background: orange; font-weight: 600}')
botao2.clicked.connect(self.b2)
botao3 = QPushButton('Texto', self)
botao3.move(500, 100)
botao3.resize(100, 50)
botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background: orange; font-weight: 600}')
botao3.clicked.connect(self.b3)
def gera_labels(self):
self.l1 = QLabel(self)
self.l1.setText('Clique em um botao')
self.l1.move(50, 50)
self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}')
self.l1.resize(250, 50)
self.l2 = QLabel(self)
self.l2.setText('Digitou: ')
self.l2.move(300, 30)
self.l2.resize(260, 50)
self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}')
def gera_imagens(self):
self.carro = QLabel(self)
self.carro.move(25, 200)
self.carro.resize(450, 337)
self.carro.setPixmap(QtGui.QPixmap('carro.jpg'))
def gera_caixas_de_texto(self):
self.caixa_texto = QLineEdit(self)
self.caixa_texto.move(25, 10)
self.caixa_texto.resize(150, 50)
def b1(self):
        # option 1
self.carro.setPixmap(QtGui.QPixmap('carro.jpg'))
def b2(self, l):
        # option 2
self.carro.setPixmap(QtGui.QPixmap('carro2.jpg'))
def b3(self):
conteudo = self.caixa_texto.text()
self.l2.setText('Digitou: {}'.format(conteudo))
if __name__ == '__main__':
app = QApplication(sys.argv)
janela = Janela()
janela.carregar_janela()
sys.exit(app.exec_()) | 3.1875 | 3 |
pdf2write.py | codeunik/stylus_labs_write_pdf_importer | 0 | 2415 | import base64
import os
import sys
import PyPDF2
svg = '''<svg id="write-document" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<rect id="write-doc-background" width="100%" height="100%" fill="#808080"/>
<defs id="write-defs">
<script type="text/writeconfig">
<int name="docFormatVersion" value="2" />
<int name="pageColor" value="-1" />
<int name="pageNum" value="0" />
<int name="ruleColor" value="0" />
<float name="marginLeft" value="0" />
<float name="xOffset" value="-380.701752" />
<float name="xRuling" value="0" />
<float name="yOffset" value="1536.84216" />
<float name="yRuling" value="0" />
</script>
</defs>
'''
pdf_path = sys.argv[1]
pdf = PyPDF2.PdfFileReader(pdf_path, "rb")
img_width = 720
n_pages = pdf.getNumPages() + 1
page = pdf.getPage(0)
width = page.mediaBox.getWidth()
height = page.mediaBox.getHeight()
aspect_ratio = height/width
img_height = int(aspect_ratio * img_width)
os.system('mkdir -p /tmp/pdf2write')
new_page_height = 0
for page in range(n_pages):
print(f"Processing {page}/{n_pages}", end='\r')
os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile')
with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f:
base64_data = base64.b64encode(f.read()).decode('utf-8')
tmp_svg = f'''<svg class="write-page" color-interpolation="linearRGB" x="10" y="{new_page_height+10}" width="{img_width}px" height="{img_height}px" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g class="write-content write-v3" width="{img_width}" height="{img_height}" xruling="0" yruling="0" marginLeft="0" papercolor="#FFFFFF" rulecolor="#00000000">
<g class="ruleline write-std-ruling write-scale-down" fill="none" stroke="none" stroke-width="1" shape-rendering="crispEdges" vector-effect="non-scaling-stroke">
<rect class="pagerect" fill="#FFFFFF" stroke="none" x="0" y="0" width="{img_width}" height="{img_height}" />
</g>
<image x="0" y="0" width="{img_width}" height="{img_height}" xlink:href="data:image/png;base64,{base64_data}"/>
</g>
</svg>'''
new_page_height += (img_height+10)
svg += tmp_svg
svg += '''</svg>'''
os.system('rm -rf /tmp/pdf2write')
with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg', 'w') as f:
f.write(svg)
os.system(f'gzip -S z {os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg')
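# Usage sketch (assumed from the sys.argv[1] handling above):
#   python pdf2write.py notes.pdf
# renders each page via pdftoppm, embeds the PNGs in a Stylus Labs Write style
# SVG next to the input file, and gzips it with the 'z' suffix (notes.svgz).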
| 2.703125 | 3 |
py_headless_daw/project/having_parameters.py | hq9000/py-headless-daw | 22 | 2416 | from typing import Dict, List, cast
from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType
class HavingParameters:
def __init__(self):
self._parameters: Dict[str, Parameter] = {}
super().__init__()
def has_parameter(self, name: str) -> bool:
return name in self._parameters
def add_parameter(self,
name: str,
value: ParameterValueType,
param_type: str,
value_range: ParameterRangeType):
if name in self._parameters:
raise Exception('parameter named ' + name + ' already added to this object')
parameter = Parameter(name, value, param_type, value_range)
self._parameters[name] = parameter
def add_parameter_object(self, parameter: Parameter) -> None:
self._parameters[parameter.name] = parameter
def get_parameter(self, name: str) -> Parameter:
for parameter in self.parameters:
if parameter.name == name:
return parameter
list_of_names: List[str] = [p.name for p in self.parameters]
# noinspection PyTypeChecker
available_names: List[str] = cast(List[str], list_of_names)
raise Exception('parameter named ' + name + ' not found. Available: ' + ', '.join(available_names))
def get_parameter_value(self, name: str) -> ParameterValueType:
param = self.get_parameter(name)
return param.value
def get_float_parameter_value(self, name: str) -> float:
param = self.get_parameter(name)
if param.type != Parameter.TYPE_FLOAT:
raise ValueError(f"parameter {name} was expected to be float (error: f009d0ef)")
value = self.get_parameter_value(name)
cast_value = cast(float, value)
return cast_value
def get_enum_parameter_value(self, name: str) -> str:
param = self.get_parameter(name)
if param.type != Parameter.TYPE_ENUM:
raise ValueError(f"parameter {name} was expected to be enum (error: 80a1d180)")
value = self.get_parameter_value(name)
cast_value = cast(str, value)
return cast_value
def set_parameter_value(self, name: str, value: ParameterValueType):
param = self.get_parameter(name)
param.value = value
@property
def parameters(self) -> List[Parameter]:
return list(self._parameters.values())
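# Minimal usage sketch (illustrative; the tuple used for value_range is an
# assumption -- any object matching ParameterRangeType works):
if __name__ == "__main__":
    host = HavingParameters()
    host.add_parameter('cutoff', 0.5, Parameter.TYPE_FLOAT, (0.0, 1.0))
    print(host.get_float_parameter_value('cutoff'))  # 0.5
    host.set_parameter_value('cutoff', 0.75)
    print([p.name for p in host.parameters])  # ['cutoff']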
| 2.703125 | 3 |
wasatch/ROI.py | adiravishankara/Wasatch.PY | 9 | 2417 | ##
# This class encapsulates a Region Of Interest, which may be either horizontal
# (pixels) or vertical (rows/lines).
class ROI:
def __init__(self, start, end):
self.start = start
self.end = end
self.len = end - start + 1
def valid(self):
return self.start >= 0 and self.start < self.end
def crop(self, spectrum):
return spectrum[self.start:self.end+1]
def contains(self, value):
return self.start <= value <= self.end
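# Minimal usage sketch (illustrative only; spectra are normally arrays produced
# elsewhere in Wasatch.PY):
if __name__ == "__main__":
    roi = ROI(10, 13)
    spectrum = list(range(20))
    print(roi.valid())          # True
    print(roi.crop(spectrum))   # [10, 11, 12, 13]
    print(roi.contains(15))     # False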
| 3.109375 | 3 |
examples/python/oled_ssd1327.py | whpenner/upm | 1 | 2418 | #!/usr/bin/python
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Load i2clcd display module
import time, signal, sys
import pyupm_i2clcd as upmLCD
myLCD = upmLCD.SSD1327(0, 0x3C);
logoArr = [0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0,
0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E,
0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0,
0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E,
0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1,
0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,
0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83,
0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,
0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F,
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06,
0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3,
0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E,
0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF,
0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66,
0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3,
0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86,
0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF,
0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83,
0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66,
0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7,
0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE,
0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6,
0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00,
0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3,
0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00,
0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8,
0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E,
0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00,
0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB,
0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3,
0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00,
0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00,
0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40,
0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70,
0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C,
0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48,
0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40,
0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C,
0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48,
0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40,
0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
SeeedLogo = upmLCD.uint8Array(len(logoArr))
for x in range(len(logoArr)):
SeeedLogo.__setitem__(x, logoArr[x])
# If you don't set the display to be white, the seeed logo will appear jagged
myLCD.setGrayLevel(12)
myLCD.draw(SeeedLogo, 96 * 96 / 8);
for i in range(12):
myLCD.setCursor(i, 0)
myLCD.setGrayLevel(i)
myLCD.write('Hello World')
print "Exiting"
| 1.53125 | 2 |
digital_image_processing/algorithms/edge_detection_algorithms/threshold/adaptive_thresholding_methods/__init__.py | juansdev/digital_image_processing | 1 | 2419 | from .bernsen import bernsen_thresholding_method
from .bradley_roth import bradley_thresholding_method
from .contrast import contrast_thresholding_method
from .feng import feng_thresholding_method
from .gaussian import threshold_value_gaussian
from .johannsen import johannsen_thresholding_method
from .kapur import kapur_thresholding_method
from .mean import threshold_value_mean
from .minimum_error import minimum_err_thresholding_method
from .niblack import niblack_thresholding_method
from .nick import nick_thresholding_method
from .otsu import otsu_thresholding_method
from .p_tile import p_tile_thresholding_method
from .pun import pun_thresholding_method
from .rosin import rosin_thresholding_method
from .sauvola import sauvola_thresholding_method
from .singh import singh_thresholding_method
from .two_peaks import two_peaks_thresholding_method
from .wolf import wolf_thresholding_method
| 1.21875 | 1 |
data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py | harshp8l/deep-learning-lang-detection | 84 | 2420 | import os
from kombu import Queue, Exchange
## Broker settings.
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672')
#BROKER_URL = "amqp://guest:guest@localhost:5672/"
#BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379')
#BROKER_HOST = "localhost"
#BROKER_PORT = 27017
#BROKER_TRANSPORT = 'mongodb'
#BROKER_VHOST = 'celery'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
Queue('default', exchange=Exchange('default'), routing_key='default'),
# Queue('aws_uploads', routing_key='video.uploads'),
)
CELERY_DEFAULT_EXCHANGE = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_IMPORTS = ('celeryservice.tasks',)
#CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp')
## Using the database to store task state and results.
#CELERY_RESULT_BACKEND = "mongodb"
#CELERY_MONGODB_BACKEND_SETTINGS = {
# "host": "localhost",
# "port": 27017,
# "database": "celery",
# "taskmeta_collection": "celery_taskmeta",
#}
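## Hedged example of how a worker might load this module (the app name is an
## assumption; config_from_object is standard Celery API):
# from celery import Celery
# app = Celery('celeryservice')
# app.config_from_object('celeryconfig')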
| 1.617188 | 2 |
timesheet.py | dgollub/timesheet-google-thingy | 0 | 2421 | # -*- coding: utf-8 -*-
#
#
from __future__ import print_function
import csv
import os
import re
import sys
import arrow
from gsheets import Sheets
CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
DEBUG = os.environ.get('DEBUG', "0") == "1"
AS_CSV = os.environ.get('CSV', "0") == "1"
COL_DATE = 0
COL_WEEKDAY = 1
COL_TIME_START = 2
COL_TIME_END = 3
COL_LUNCH = 4
COL_TIME = 5 # includes lunch
COL_TIME_FIXED = 6 # does not include lunch
COL_MOVE = 7
COL_WORK_FROM_HOME = 8
COL_NOTES = 9
COL_TASKS_START = 10
SPECIAL_VALUES = ["sick", "ab", "off", "wfh", "hol"]
SATURDAY = 5
SUNDAY = 6
def calc(hour, half_it=False, split_char = ":"):
parts = str(hour).split(split_char)
try:
local_hours = int(parts[0])
local_minutes = int(parts[1])
if half_it:
local_hours = local_hours / 2
local_minutes = local_minutes / 2
return local_hours, local_minutes
except:
if len(parts) == 1:
try:
return int(parts[0]), 0
except:
return 0, 0
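# Worked examples for calc(), added for clarity (results follow directly from
# the branches above):
#   calc("7:30")                -> (7, 30)
#   calc("8")                   -> (8, 0)   # single-part fallback
#   calc("1.5", split_char=".") -> (1, 5)   # task durations such as "[1.5h]"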
def get_client_secret_filenames():
filename = os.path.join(CURRENT_PATH, "client-secrets.json")
cachefile = os.path.join(CURRENT_PATH, "client-secrets-cache.json")
if not os.path.exists(filename):
filename = os.path.expanduser(os.path.join("~", "client-secrets.json"))
cachefile = os.path.expanduser(os.path.join("~", "client-secrets-cache.json"))
if not os.path.exists(filename):
raise Exception("Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart")
return filename, cachefile
def load_first_sheet_rows(api, timesheet_url, date=arrow.now().format('YYYYMMDD')):
print("Opening timesheet for %s ..." % (date))
sheets = api.get(timesheet_url)
sheet = sheets.sheets[0]
print(u"Timesheet [%s] sheet [%s] opened. Accessing cell data ..." % (sheets.title or "???", sheet.title or "???"))
rows = sheet.values()
return rows
def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name):
now = arrow.now()
today = now.format('YYYYMMDD')
try:
other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD')
except arrow.parser.ParserError:
other_date = today
use_date = other_date
rows = load_first_sheet_rows(api, timesheet_url, use_date)
timesheet = get_timesheet_for_date(rows, use_date, user_full_name)
if timesheet:
print("\n\n")
print("Timesheet for %s" % (use_date))
print(timesheet)
print("\n")
else:
print("No entry found for %s" % use_date)
def get_timesheet_for_date(rows, date, user_full_name):
# find the row with the first column that has today's date in it
result_rows = [row for row in rows if row and str(row[COL_DATE]) == date]
if result_rows is None or not result_rows:
return None
if len(result_rows) != 1:
print("More than one entry (%d) found for date %s! Please fix your sheet!" % (len(result_rows), date))
return None
found_row = result_rows[0]
found_index = rows.index(found_row)
start_val = found_row[COL_TIME_START]
end_val = found_row[COL_TIME_END]
duration_val = found_row[COL_TIME_FIXED]
max_cols = len(found_row)
if not start_val:
if start_val in SPECIAL_VALUES:
print("You forgot to add your start time.")
return None
if not end_val:
if end_val in SPECIAL_VALUES:
print("You forgot to add your end time.")
return None
#if max_cols >= COL_NOTES:
# print("No notes/tasks entered yet.")
# return None
def parse_hours(val):
try:
return arrow.get(val, "HH:mm")
except arrow.parser.ParserError:
return arrow.get(val, "H:mm")
start = parse_hours(start_val).format("HH:mm")
end = parse_hours(end_val).format("HH:mm")
duration = str(duration_val)
notes_str = found_row[COL_NOTES]
notes = notes_str.split('\n')
# check the previous Friday entry (if today is not Friday), to see what work from home
    # days were selected
weekday = (found_row[COL_WEEKDAY] or "").lower()
check_start_index = found_index if weekday.startswith("fr") else found_index - 7
check_row = found_row
while (check_start_index < found_index):
check_row = rows[check_start_index]
if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or "").lower().startswith("fr"):
break
check_start_index += 1
is_same_day = None
if check_start_index != found_index:
# print("HA! GOT PREVS FRIDAY.")
is_same_day = False
else:
# print("SAME DAY")
is_same_day = True
wfh = u"" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME]
wfh = wfh.replace("Mon", "Monday")
wfh = wfh.replace("Tue", "Tuesday")
wfh = wfh.replace("Wed", "Wednesday")
wfh = wfh.replace("Thu", "Thursday")
wfh = wfh.replace("Fri", "Friday")
wfh = wfh.replace(", ", ",").replace(",", " and ")
wfh_extra = "Next week" if is_same_day else "This week"
wfh_info = """%s %s""" % (wfh_extra, wfh) if wfh != "" else "all days"
# 2021-01-04 just make this the default for now
wfh_info = "at all times, unless mentioned otherwise below"
# regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
# text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
# 3 groups:
# SCAN-4167
# As a developer, I want to update AIScanRobo every week [
# 1h
r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
total_time_minutes_from_tasks = 0
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = found_row[idx].strip()
if task:
t = task.split('\n')[0] if '\n' in task else task
try:
g = r.match(t).groups()
except Exception as ex:
print("ERROR: %s - %s" % (t, str(ex)))
continue
if DEBUG:
print("task: %s" % (t))
print("groups: %s" % len(g))
[task_number, task_details, task_duration] = g
hours, half_hours = calc(task_duration.replace("h", ""), split_char=".")
minutes = (hours * 60) + (6 * half_hours)
total_time_minutes_from_tasks += minutes
other_lines = task.split('\n')[1:]
tasks.append("%s %s\n%s" % (task_number.strip(), task_details[:-2].strip(), '\n'.join(other_lines)))
def format_tasks(tasks):
if not tasks:
return ''
result = 'Tasks:\n'
for task in tasks:
if '\n' in task:
sub_tasks = task.split('\n')
if len(sub_tasks) > 1:
result += '\n* ' + sub_tasks[0] # main task
for sub_task in sub_tasks[1:]: # actual sub tasks
result += '\n\t' + sub_task
result += '\n'
else:
result += '\n* ' + task
else:
result += '\n* ' + task
return result
def format_notes(notes):
if not notes or (len(notes) == 1 and not notes[0]):
return ''
result = 'Additional Notes:\n'
for note in notes:
result += '\n* ' + note
return result
total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2)
total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2)
total_duration = "%s:%s" % (total_hours, total_minutes)
test_duration = duration
if len(test_duration) <= 4:
test_duration = "0%s" % duration
if total_duration != test_duration:
print("")
print("")
print("The task times do not add up! Tasks vs time entered: %s != %s" % (total_duration, test_duration))
print("")
print("")
# Time: %(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s])
msg = """
[Daily Report] %(date)s
WFH: %(wfh_info)s
Hi,
Daily Report for Date: %(date)s
%(tasks)s
%(notes)s
Kind regards,
%(user_full_name)s
""".strip() % {
"date": date,
"user_full_name": user_full_name,
"start": start,
"end": end,
"duration": duration,
"wfh_info": wfh_info,
"tasks": format_tasks(tasks) if tasks else "",
"notes": format_notes(notes) if notes else "",
"total_hours": total_hours,
"total_minutes": total_minutes,
}
print("Total time for all tasks (%s): %s - %s:%s" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes))
return msg
def _load_sheet_data(api, timesheet_url, arg_date=None):
try:
date = arrow.get(arg_date, 'YYYYMM')
except Exception: # pylint: disable=W0703
now = arrow.now()
date = now.format('YYYYMM')
rows = load_first_sheet_rows(api, timesheet_url, date)
date_str = str(date.format('YYYYMM'))
return (rows, date_str)
def export_csv(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
csv_filename = os.path.join(os.getcwd(), "%s.csv" % (arg_date))
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
print("Writing to %s" % (csv_filename))
with open(csv_filename, mode='w') as f:
f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# f.writerow(['<NAME>', 'Accounting', 'November'])
f.writerow(["username", "date", "task", "duration", "work_type", "details"])
def w(task, duration_minutes, details = ""):
work_type = "Meeting" if "meeting" in details.lower() else "Development"
# Needed CSV columns
# username|date|task|duration|work_type|details
f.writerow(["daniel", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'), task, "%dm" % (duration_minutes), work_type, details])
# regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
# text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
# 3 groups:
# SCAN-4167
# As a developer, I want to update AIScanRobo every week [
# 1h
r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
if time_start is None or time_end is None or date is None:
continue
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = row[idx].strip()
if task:
tasks.append(task)
if len(tasks) == 0:
print("%s: no tasks found! %s" % (date, time_start))
continue
print("%s: %d tasks found!" % (date, len(tasks)))
for task in tasks:
t = task.split('\n')[0] if '\n' in task else task
try:
g = r.match(t).groups()
except Exception as ex:
print("ERROR: %s - %s" % (t, str(ex)))
continue
if DEBUG:
print("task: %s" % (t))
print("groups: %s" % len(g))
[task_number, task_details, duration] = g
hours, half_hours = calc(duration.replace("h", ""), split_char=".")
minutes = (hours * 60) + (6 * half_hours)
if DEBUG:
print("time: %s, %s $ %s $ %s" % (hours, half_hours, duration, minutes))
details = "%s %s" % (task_number, task_details[:-1].strip())
w(task_number, minutes, details.strip())
print("")
print("CSV output to: %s" % (csv_filename))
def calc_daily_hours_for_month(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
minutes = 0
days = 0
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
worked_at = row[COL_MOVE] if max_cols >= COL_MOVE else None
notes = row[COL_NOTES] if max_cols >= COL_NOTES else ""
if time_start is None or time_end is None or date is None:
continue
start_hours, start_minutes = calc(time_start)
end_hours, end_minutes = calc(time_end)
if start_hours == 0:
print("%s: Day off because of %s" % (date, "whatever" if time_start == 0 else time_start))
continue
extra_info = ""
the_date = arrow.get(str(date), 'YYYYMMDD')
if the_date.weekday() in [SATURDAY, SUNDAY]:
extra_info += " - Weekend work"
half_day = 'half' in row[COL_WORK_FROM_HOME]
if half_day:
extra_info += " - half day PTO"
if worked_at in ['o', 'O'] or "OFFICE" in notes.upper():
extra_info += " - Commute to office"
minutes_day = abs(end_hours - start_hours) * 60
minutes_day += end_minutes - start_minutes
minutes += minutes_day
hours_day = int(minutes_day / 60)
hours_day_without_lunch = hours_day - 1
minutes_day = minutes_day % 60
total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2)
days += 1
no_lunch = str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2)
print("%s: %s to %s = %s (without lunch: %s)%s" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info))
    hours = str(minutes // 60).zfill(2)  # floor division keeps HH:MM formatting intact on Python 3
minutes = str(minutes % 60).zfill(2)
lunch_hours = str(int(float(hours)) - days).zfill(2)
print("")
print("Total days worked: %s" % str(days))
print("Total hours: %s:%s (with 1 hour lunch: %s:%s)" % (hours, minutes, lunch_hours, minutes))
print("")
def calc_stats(api, timesheet_url, arg_date=None):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
# find the rows for the given month
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
if not AS_CSV:
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
dates, hours = [], []
half_days = {}
first = None
last = None
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = row[idx].strip()
if task:
tasks.append(task)
day_type = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
if day_type is None:
continue
if day_type in SPECIAL_VALUES:
time = day_type
hours.append(time)
dates.append(date)
continue
elif not tasks:
continue
# If it was a half day, meaning I took half a day off, then only count half the time
half_day = 'half' in row[COL_WORK_FROM_HOME]
if half_day:
half_days[date] = time
hours.append(time)
dates.append(date)
if first is None:
first = row
else:
last = row
total_hours, total_minutes, total_time = 0, 0, ""
for index, hour in enumerate(hours):
date = dates[index]
local_hours, local_minutes = calc(hour, date in half_days)
total_hours += local_hours
total_minutes += local_minutes
if total_minutes >= 60:
            total_hours += (total_minutes // 60)
total_minutes = total_minutes % 60
total_time = "%d:%d hours:minutes" % (total_hours, total_minutes)
expected = 0
actual_h, actual_m = 0, 0
if not AS_CSV:
print("*" * 50)
print("")
print("Valid hours entries: %s\t[required vs actual]" % len(hours))
deduct_work_hours = 0
work_hours = 0
work_minutes = 0
days = 0
expected_hours_accumulated_total = 0
for index, worked_date in enumerate(dates):
days += 1
if hours[index] in SPECIAL_VALUES:
if not AS_CSV:
print(" %s: Off, because %s" % (worked_date, hours[index]))
else:
pass
else:
half_day = worked_date in half_days
# each workday has 8 hours of work, but on half days it is only half of 8, aka 4.
work_hours_for_the_day = 8 if not half_day else 4
expected_hours_accumulated_total += 8 - (8 - work_hours_for_the_day)
expected_minutes_accumulated_total = expected_hours_accumulated_total * 60
# hours[index] is the actual time worked, e.g. 6:30 means 6 hours and 30 minutes
local_h, local_m = calc(hours[index])
work_hours += local_h
work_minutes += local_m
actual_h = work_hours
# 330 minutes = 6 hours and 30 minutes
actual_h += int(work_minutes / 60)
actual_m = work_minutes % 60
if AS_CSV:
print("%s;%s;" % (worked_date, hours[index]))
else:
print(" %s: %s\t[%s:00 vs %s:%s] %s" % (worked_date, hours[index], expected_hours_accumulated_total,
str(actual_h).zfill(2), str(actual_m).zfill(2),
"Half day" if half_day else ""))
if not AS_CSV:
print("")
print("First:", "<first> not found" if first is None else first[COL_DATE])
print("Last:", "<last> not found" if last is None else last[COL_DATE])
print("")
print("Total time in %s: %s" % (date, total_time))
print("")
print("*" * 50)
def main():
# print("Checking environment variable TIMESHEET_URL for spreadsheet URL...")
timesheet_url = os.environ.get('TIMESHEET_URL', "").strip()
if not timesheet_url:
raise Exception("Please set the TIMESHEET_URL environment variable accordingly.")
# print("Checking environment variable USER_FULL_NAME for spreadsheet URL...")
user_full_name = os.environ.get('USER_FULL_NAME', "").strip()
if not user_full_name:
print("Warning: USER_FULL_NAME environment variable not set!")
user_full_name = "<NAME>"
print("")
print("Usage: python timesheet.py [command|date] [date]")
print("Example: python timesheet.py stats 202011")
print("Example: python timesheet.py 20201130")
print("")
print("Available commands:")
print("- stats: show summed up hours and minutes for the given/current month")
print(" use \"CSV=1 python timesheet.py stats\" to format the output")
print(" as CSV")
print("- daily: same as stats, except ready to email to HR")
print("- csv: task breakdown for the month and time spend on each task")
print("")
print("""Tip: use "DEBUG=1 timesheet <parameter>" to enable debug output""")
print("")
print("Trying to load client-secrets.json file ...")
secrets_file, cache_file = get_client_secret_filenames()
sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False)
print("Success.")
date = None if len(sys.argv) < 3 else sys.argv[2].strip()
arg = "read today" if len(sys.argv) < 2 else sys.argv[1].strip()
if arg == "stats":
calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
elif arg == "daily":
calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
elif arg == "csv":
export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
else:
date_to_use = "read today" if arg == '' else arg
load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name)
print("Done.")
if __name__ == "__main__":
main()
| 2.578125 | 3 |
league/game.py | Orpheon/All-in | 0 | 2422 | import numpy as np
import pickle
import treys
import constants
FULL_DECK = np.array(treys.Deck.GetFullDeck())
class GameEngine:
def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger):
self.BATCH_SIZE = BATCH_SIZE
self.INITIAL_CAPITAL = INITIAL_CAPITAL
self.SMALL_BLIND = SMALL_BLIND
self.BIG_BLIND = BIG_BLIND
self.logger = logger
self.N_PLAYERS = 6
def generate_cards(self):
cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1))
for i in range(self.BATCH_SIZE):
cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])]
community_cards = cards[:, :5]
hole_cards = np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2))
return community_cards, hole_cards
def run_game(self, players):
if len(players) != self.N_PLAYERS:
raise ValueError('Only {} players allowed'.format(self.N_PLAYERS))
community_cards, hole_cards = self.generate_cards()
folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool)
prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int)
for player in players:
player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS)
# Pre-flop
bets, _ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0])
prev_round_investment += bets
# Flop
bets, _ = self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3])
prev_round_investment += bets
# Turn
bets, _ = self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4])
prev_round_investment += bets
# River
bets, end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards)
prev_round_investment += bets
# Showdown
pool = np.sum(prev_round_investment, axis=1)
total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float)
hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded))
ranks = np.argsort(hand_scores, axis=1)
sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1)
    # Get everyone who has the best hand, among whom the pot will be split
participants = hand_scores == sorted_hands[:, 0][:, None]
# Get the number of times each pot will be split
n_splits_per_game = participants.sum(axis=1)
# Split and distribute the money
gains = pool / n_splits_per_game
total_winnings += participants * gains[:, None]
total_winnings -= prev_round_investment
self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p in players], folded, hole_cards))
self.logger.save_to_file()
for player_idx, player in enumerate(players):
round, current_bets, min_raise, prev_round_investment, folded, last_raiser = end_state
player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment, folded, last_raiser,
hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx])
return total_winnings
def run_round(self, players, prev_round_investment, folded, round, hole_cards, community_cards):
"""
:param players: [Player]
:param prev_round_investment: np.ndarray(batchsize, n_players) = int
:param folded: np.ndarray(batchsize, n_players) = bool
:param round: int ∈ {0..3}
:param hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card
:param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card
:return: current_bets: np.ndarray(batchsize, n_players)=int {0-200}
"""
current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int)
max_bets = np.zeros(self.BATCH_SIZE, dtype=int)
min_raise = np.zeros(self.BATCH_SIZE, dtype=int)
min_raise[:] = self.BIG_BLIND
last_raiser = np.zeros(self.BATCH_SIZE, dtype=int)
player_order = list(enumerate(players))
round_countdown = np.zeros(self.BATCH_SIZE, dtype=int)
round_countdown[:] = self.N_PLAYERS
if round == constants.PRE_FLOP:
current_bets[:, 0] = self.SMALL_BLIND
current_bets[:, 1] = self.BIG_BLIND
max_bets[:] = self.BIG_BLIND
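      # Pre-flop the blinds have already bet, so action starts with the player
      # left of the big blind and the two blind positions act last.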
player_order = player_order[2:] + player_order[:2]
while True:
running_games = np.nonzero(round_countdown > 0)[0]
for player_idx, player in player_order:
actions, amounts = player.act(player_idx, round, round_countdown > 0, current_bets, min_raise,
prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :],
community_cards)
# Disabled when not necessary because it bloats the log size (by ~500 kB or so, which triples the size)
# self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown, folded[:, player_idx]))
# People who have already folded continue to fold
actions[folded[:, player_idx] == 1] = constants.FOLD
# People who have gone all-in continue to be all-in
actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL
###########
# CALLING #
###########
calls = np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0]
if calls.size > 0:
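          # A call matches the table's current maximum bet, capped by the
          # player's remaining capital (i.e. an all-in call).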
investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls])
# Reset the bets and countdown
current_bets[calls, player_idx] = investment
###########
# RAISING #
###########
raises = np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0]
if raises.size > 0:
# print("True raises", raises, amounts[raises])
investment = np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] + min_raise[raises])
min_raise[raises] = investment - max_bets[raises]
max_bets[raises] = investment
# Reset the bets and countdown
current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx])
round_countdown[raises] = self.N_PLAYERS
last_raiser[raises] = player_idx
###########
# FOLDING #
###########
folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0], player_idx] = 1
round_countdown[running_games] -= 1
#TODO: if all folded stops game, improves performance but breaks tests
# test is not broken, is there another reason?
round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0
if np.max(round_countdown[running_games]) <= 0:
return current_bets, (round, current_bets, min_raise, prev_round_investment, folded, last_raiser)
def evaluate_hands(self, community_cards, hole_cards, contenders):
evaluator = treys.Evaluator()
# 7463 = 1 lower than the lowest score a hand can have (scores are descending to 1)
results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int)
for game_idx,community in enumerate(community_cards):
for player_idx,hole in enumerate(hole_cards[game_idx]):
if contenders[game_idx, player_idx]:
results[game_idx, player_idx] = evaluator.evaluate(community.tolist(), hole.tolist())
return results
| 2.390625 | 2 |
cms/admin/views.py | miloprice/django-cms | 0 | 2423 | # -*- coding: utf-8 -*-
from cms.models import Page, Title, CMSPlugin, Placeholder
from cms.utils import get_language_from_request
from django.http import Http404
from django.shortcuts import get_object_or_404
def revert_plugins(request, version_id, obj):
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
cms_plugin_list = []
placeholders = {}
plugin_list = []
titles = []
others = []
page = obj
lang = get_language_from_request(request)
for rev in revs:
obj = rev.object
if obj.__class__ == Placeholder:
placeholders[obj.pk] = obj
if obj.__class__ == CMSPlugin:
cms_plugin_list.append(obj)
elif hasattr(obj, 'cmsplugin_ptr_id'):
plugin_list.append(obj)
elif obj.__class__ == Page:
pass
#page = obj #Page.objects.get(pk=obj.pk)
elif obj.__class__ == Title:
titles.append(obj)
else:
others.append(rev)
if not page.has_change_permission(request):
raise Http404
current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page))
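    # Plugins currently attached to the page; entries matched against the
    # reverted revision are removed from this list below, and whatever is left
    # over gets deleted at the end.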
for pk, placeholder in placeholders.items():
        # admin has already created the placeholders; get them instead
try:
placeholders[pk] = page.placeholders.get(slot=placeholder.slot)
except Placeholder.DoesNotExist:
placeholders[pk].save()
page.placeholders.add(placeholders[pk])
for plugin in cms_plugin_list:
# connect plugins to the correct placeholder
plugin.placeholder = placeholders[plugin.placeholder_id]
plugin.save(no_signals=True)
for plugin in cms_plugin_list:
plugin.save()
for p in plugin_list:
if int(p.cmsplugin_ptr_id) == int(plugin.pk):
plugin.set_base_attr(p)
p.save()
for old in current_plugins:
if old.pk == plugin.pk:
plugin.save()
current_plugins.remove(old)
for title in titles:
title.page = page
try:
title.save()
except:
title.pk = Title.objects.get(page=page, language=title.language).pk
title.save()
for other in others:
other.object.save()
for plugin in current_plugins:
plugin.delete() | 1.953125 | 2 |
delete.py | lvwuyunlifan/crop | 0 | 2424 | import os
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# import seaborn as sns
import pandas as pd
import numpy as np
import random
train_path = './AgriculturalDisease_trainingset/'
valid_path = './AgriculturalDisease_validationset/'
def genImage(gpath, datatype):
if datatype == 'train':
        gen_number = 0  # count of generated images
if not os.path.exists(gpath+'delete'):
os.makedirs(gpath+'delete')
label = pd.read_csv(gpath + 'label.csv')
        label_gen_dict = {'img_path':[], 'label':[]}  # labels for the generated images
for i in range(61):
li = label[label['label'] == i]
imagenum = li['label'].count()
            print('Class %d: %d images in total' % (i, imagenum))
imagelist = np.array(li['img_path']).tolist()
img_path_gen, label_gen = [], []
# for imagefile in imagelist:
for aa in range(len(imagelist)):
if aa <= 40:
print(aa)
path, imagename = os.path.split(imagelist[aa])
im = Image.open(imagelist[aa])
im = im.convert('RGB')
im_detail = im.transpose(Image.ROTATE_180)
                    # im_detail = im.filter(ImageFilter.DETAIL)  # detail enhancement
img_path_gen.append(gpath + 'delete/' +'idetail_'+imagename)
label_gen.extend([int(i)])
im_detail.save(gpath + 'delete/' +'idetail_'+imagename)
gen_number += 1
label_dict = {'img_path':img_path_gen, 'label':label_gen}
label_gen_dict['img_path'].extend(img_path_gen)
label_gen_dict['label'].extend(label_gen)
label_gen_pd = pd.DataFrame(label_dict)
            # label = label.append(label_gen_pd)  # append the generated labels to the original labels
            # label['label'] = label[['label']].astype('int64')  # convert to int64
# print(label)
label_gen_p = pd.DataFrame(label_gen_dict)
label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)
# label_gen_p = pd.DataFrame(label_gen_dict)
# label_gen_p.to_csv(gpath + 'label_gen.csv', index=False)
        print('Generated %d images in total for the training set' % gen_number)
if datatype == 'valid':
gen_number = 0
if not os.path.exists(gpath+'delete'):
os.makedirs(gpath+'delete')
label = pd.read_csv(gpath + 'label.csv')
label_gen_dict = {'img_path':[], 'label':[]}
for i in range(61):
li = label[label['label'] == i]
imagenum = li['label'].count()
            print('Class %d: %d images in total' % (i, imagenum))
imagelist = np.array(li['img_path']).tolist()
img_path_gen, label_gen = [], []
# for imagefile in imagelist:
for aa in range(len(imagelist)):
if aa <= 20:
print(aa)
path, imagename = os.path.split(imagelist[aa])
im = Image.open(imagelist[aa])
im = im.convert('RGB')
im_detail = im.transpose(Image.ROTATE_180)
                    #im_detail = im.filter(ImageFilter.DETAIL)  # detail enhancement
img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename)
label_gen.extend([int(i)])
im_detail.save(gpath + 'delete/' + 'idetail_' + imagename)
gen_number += 1
label_dict = {'img_path': img_path_gen, 'label': label_gen}
label_gen_dict['img_path'].extend(img_path_gen)
label_gen_dict['label'].extend(label_gen)
label_gen_pd = pd.DataFrame(label_dict)
            # label = label.append(label_gen_pd)  # append the generated labels to the original labels
            # label['label'] = label[['label']].astype('int64')  # convert to int64
# print(label)
label_gen_p = pd.DataFrame(label_gen_dict)
label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)
        print('Generated %d images in total for the validation set' % gen_number)
if __name__ == '__main__':
genImage(train_path, 'train')
genImage(valid_path, 'valid')
| 2.796875 | 3 |
数据分析/matplotlib/03.demo.py | likedeke/python-spider-study | 1 | 2425 | # - - - - - - - - - - -
# @author like
# @since 2021-02-23 11:08
# @email <EMAIL>
# Temperature changes between 10:00 and 12:00
from matplotlib import pyplot as plt
from matplotlib import rc
from matplotlib import font_manager
import random
x = range(0, 120)
y = [random.randint(20, 35) for i in range(120)]
plt.figure(figsize=(20, 8), dpi=80)
plt.plot(x, y)
# Chinese font
chFont = font_manager.FontProperties(family="SimHei") # SimHei
# chFont = font_manager.FontProperties(fname="C:/Windows/Fonts/SIMHEI.TTF")
# Tick settings
step = 10
xLabels = ["10点,{}分".format(i) for i in range(60)]
xLabels += ["11点,{}分".format(i) for i in range(60)]
plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont)
# Add axis labels and title
plt.xlabel("时间", fontProperties=chFont)
plt.ylabel("温度 单位(℃)", fontProperties=chFont)
plt.title("10点到12点每分钟的气温变化", fontProperties=chFont)
plt.show()
| 2.921875 | 3 |
testing/vcs/test_vcs_isoline_labels.py | xylar/cdat | 62 | 2426 | import os, sys, cdms2, vcs, vcs.testing.regression as regression
dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
data = dataset("clt")
canvas = regression.init()
isoline = canvas.createisoline()
isoline.label="y"
texts=[]
colors = []
for i in range(10):
text = canvas.createtext()
text.color = 50 + 12 * i
text.height = 12
colors.append(100 + 12 * i)
if i%2 == 0:
texts.append(text.name)
else:
texts.append(text)
isoline.text = texts
# First test using isoline.text[...].color
canvas.plot(data, isoline, bg=1)
baseline = os.path.splitext(sys.argv[1])
baselineImage = "%s%s"%baseline
ret = regression.run_wo_terminate(canvas, "test_vcs_isoline_labels.png", baselineImage)
# Now set isoline.linecolors and test again.
canvas.clear()
isoline.linecolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 2, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels2.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
# Now set isoline.textcolors and test again.
canvas.clear()
isoline.textcolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 3, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels3.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
sys.exit(ret)
| 2.21875 | 2 |
src/Python_version/ICE_py36.py | ds-utilities/ICE | 2 | 2427 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 05:47:03 2018
@author: zg
"""
import numpy as np
#from scipy import io
import scipy.io
#import pickle
from sklearn.model_selection import StratifiedKFold
#import sklearn
from scipy.sparse import spdiags
from scipy.spatial import distance
#import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
#from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn import tree
import copy
import numpy.matlib
from sklearn.exceptions import NotFittedError
#import FuzzyRwrBagging as frb
#from joblib import Parallel, delayed
#import multiprocessing
def RWR(A, nSteps, laziness, p0 = None):
'''
% the random walk algorithm.
% A is the input net matrix, with the diag to be 0.
% nSteps: how many steps to walk
% laziness: the probablity to go back.
% p0: the initial probability. usually it is a zero matrix with the diag to
% be 1.
%
% for example, A could be:
% A = [0,2,2,0,0,0,0;...
% 2,0,1,1,0,0,0;...
% 2,1,0,0,1,0,0;...
% 0,1,0,0,0,1,1;...
% 0,0,1,0,0,0,0;...
% 0,0,0,1,0,0,1;...
% 0,0,0,1,0,1,0]
%
% if nSteps is 1000 and laziness is 0.3, p0 is default, the result is:
% [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;...
% 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...
% 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;...
% 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;...
% 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425]
%
% Each column represents the propability for each node. each element in the
% column means the probability to go to that node.
% This algorithm will converge. For example, for the above matrix, nSteps =
% 100, 1000 or 10000, will give the same result.
'''
n = len(A)
    if p0 is None:
p0 = np.eye(n)
'''
% In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be
% 0.2500 0 0 0 0 0 0
% 0 0.2500 0 0 0 0 0
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0
% 0 0 0 0 1.0000 0 0
% 0 0 0 0 0 0.5000 0
% 0 0 0 0 0 0 0.5000
% W will be:
% 0 0.5000 0.5000 0 0 0 0
% 0.5000 0 0.2500 0.3333 0 0 0
% 0.5000 0.2500 0 0 1.0000 0 0
% 0 0.2500 0 0 0 0.5000 0.5000
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0.5000
% 0 0 0 0.3333 0 0.5000 0
'''
#W = A * spdiags(sum(A)'.^(-1), 0, n, n);
#W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray()
W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \
0, n, n).toarray() )
p = p0
pl2norm = np.inf
unchanged = 0
for i in range(1, nSteps+1):
if i % 100 == 0:
print(' done rwr ' + str(i-1) )
pnew = (1-laziness) * W.dot(p) + laziness * p0
l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) )
p = pnew
if l2norm < np.finfo(float).eps:
break
else:
if l2norm == pl2norm:
unchanged = unchanged +1
if unchanged > 10:
break
else:
unchanged = 0
pl2norm = l2norm
return p
# test RWR()
'''
A = np.array([[0,2,2,0,0,0,0],\
[2,0,1,1,0,0,0],\
[2,1,0,0,1,0,0],\
[0,1,0,0,0,1,1],\
[0,0,1,0,0,0,0],\
[0,0,0,1,0,0,1],\
[0,0,0,1,0,1,0]])
nSteps = 1000
lazi = 0.3
RWR(A, nSteps, lazi, None)
'''
# test
#dst = distance.euclidean(A)
# corrent, the same as in Matlab
def f_sim_2_aRankNet(sim, k=3):
'''
% Convert the similarity matrix to a network graph where each node
% has k edges to other nodes (aRank).
'''
# delete the diagnal values.
# sim = sim-diag(diag(sim) );
np.fill_diagonal(sim, 0)
# [~, I] = sort(sim-diag(diag(sim) ) );
I = np.argsort(sim, kind='mergesort') + 1
# [~, I2] = sort(I);
I2 = (np.argsort(I, kind='mergesort').T + 1).T
# for every column, just keep the top k edges.
#aRankNet = (I2 >length(sim)-k);
aRankNet = I2 > (len(sim) - k)
# make it a diagonal matrix
# aRankNet = max(aRankNet, aRankNet');
aRankNet = np.logical_or(aRankNet, aRankNet.T)
# remove the diagonal 1s.
# aRankNet = aRankNet-diag(diag(aRankNet) );
np.fill_diagonal(aRankNet, False)
return aRankNet
# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
# [0.5566, 0, -0.0842, -0.0170], \
# [0.6448, -0.0842, 0, 0.8405], \
# [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False, True, True, False],
# [ True, False, False, False],
# [ True, False, False, True],
# [False, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, False],
# [ True, False, False, True],
# [ True, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, True],
# [ True, False, False, True],
# [ True, True, True, False]])
def f_find_centers_rwMat(rw_mat, k):
'''
% on the rw_mat matrix, find some nodes as the centroids for soft
    % clustering. If we just randomly pick some nodes as centroids, that is
% not good for fuzzy clusters.
% k is the number of centroids.
'''
ixs = []
# 1. find the most connected center node as the first centroid.
a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
# % most connected node.
ix = np.argmax(a)
ixs.append(ix)
# % 2. iteratively find the rest nodes
for i in range(1, k):
tmp = rw_mat[:, ixs]
b = np.sum(tmp, axis=1)
b[ixs] = np.inf
# % find the farthest node
ix = np.argmin(b)
ixs.append(ix)
return ixs
# test
#tmp = f_find_centers_rwMat(rw_mat, 10)
def getCutoff(rw_mat, avgNeighborsSize):
tmp = rw_mat.flatten('F')
a = np.flip(np.sort(tmp), 0)
len1 = len(rw_mat)
#cutoffs = []
all_neibs = int( avgNeighborsSize * len1 )
print( all_neibs)
ct = a[all_neibs]
return ct
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''
def f_len_of_each_ele(c1):
#% Assume c1 is a 1-dimension cell array, and each element is a 1d double
#% array. This function counts the length of each double array.
lens = np.zeros(len(c1))
for i in range(0, len(c1)):
lens[i] = len(c1[i])
return lens
def f_eu_dist(X):
'''
calculate the euclidean distance between instances
'''
sim = np.zeros(( len(X), len(X) ))
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
np.fill_diagonal(sim, 0)
return sim
#test
#sim = f_eu_dist(X)
def f_eu_dist2(X1, X2):
'''
calculate the euclidean distance between instances from two datasets
'''
sim = np.zeros(( len(X1), len(X2) ))
for i in range(0, len(X1) ):
for j in range(0, len(X2) ):
tmp = distance.euclidean(X1[i], X2[j])
sim[i][j] = tmp
sim = -sim
return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
# X: data
# k: number of clusters
'''
The return variable clus stores the instance indices for each cluster.
    However, with this data structure it is not easy to find, for a given
    instance, which clusters it belongs to; thus we also need to convert clus
    to a true-false matrix.
'''
    if each_clus_sz is None:
        # on average, how many clusters does one instance belong to.
        #overlap_factor = 2;
        # the estimated size of each cluster. default is one third of the
        # number of instances.
        each_clus_sz = len(X) / 3
print('RWR-based fuzzy clustering starts...')
print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )
# sim = squareform(pdist(X));
# sim = -sim;
sim = np.zeros((len(X), len(X) ) )
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
print(' done calculating the Euclidean distance matrix')
# ---------------------------------------------------------------
aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
print(' done calculating the A-rank KNN graph')
# % -------- RWR --------
nSteps = 1000
lazi = 0.3
rw = RWR(ori_graph, nSteps, lazi)
# remove probability of returning start node
np.fill_diagonal(rw, 0)
rw_mat = rw
print(' done RWR')
# ---------------------------------------------------------------
ixs_centers = f_find_centers_rwMat(rw_mat, k)
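    # Threshold the RWR matrix so that, on average, each node keeps roughly
    # each_clus_sz neighbours; the resulting boolean rw_net defines the soft
    # (overlapping) cluster memberships.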
ct = getCutoff(rw_mat, each_clus_sz)
rw_net = rw_mat > ct
# % set the diagnal to 1
np.fill_diagonal(rw_net, True)
clus = []
for i in range(0, k):
tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
clus.append(tmp)
# ---------------------------------------------------------------
# % sort the clusters
lens = f_len_of_each_ele(clus)
ix = np.argsort(lens)[::-1]
clus_ordered = [clus[i] for i in ix]
print(' center inst. index of each cluster: ')
ixs_centers = np.array(ixs_centers)
print(ixs_centers[ix])
print(' size of each cluster: ')
print(lens[ix])
print(' done RWR clustering')
return clus_ordered
#test
#clus = f_fuzzy_rwr_clusters(X, 100)
# pass
def f_clus_to_tfs(clus, n_inst):
#% convert the cluster information from cell array to mat. But for each
#% instance, the rank of clusters information will be lost - you won't know
#% what is the top 1/2/3 cluster it belongs to.
#%
#% clus e.g:
#% 1x5 cell
#% 1x195 double 1x193 double 1x169 double 1x161 double 1x62 double
#%
#% tfs e.g:
#% 295x5 double
#% 1 0 0 0 0
#% 1 1 1 1 0
#% 1 1 1 0 0
#% 1 1 0 0 0
#% 1 1 1 1 0
#% ...
#% 1 1 1 1 1
#% 1 0 0 0 0
#% 1 1 1 0 0
tfs = np.zeros((n_inst, len(clus)), dtype=bool)
for i in range(0, len(clus)):
tfs[clus[i], i] = True
return tfs
# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass
def f_tfs_2_instClus(tfs):
'''
    convert the boolean table representation of the clustering result to, for
    each instance, the list of clusters it belongs to.
'''
inst_clus = []
for i in range(0, len(tfs)):
row = list( np.where(tfs[i, :] ) [0] )
inst_clus.append(row)
return inst_clus
# test
#inst_clus = f_tfs_2_instClus(tfs)
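# illustrative example (values made up, not from the original):
# tfs = np.array([[True, False], [True, True]])
# f_tfs_2_instClus(tfs) -> [[0], [0, 1]]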
#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''
#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
# '''
# corresponds to f_weka_bg_svm_tr_te() in Matlab version
# '''
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
def f_tr(X_tr, y_tr, model):
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
return model_inner
def f_te(X_te, model):
y_pred = model.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
return y_pred
def f_tr_te(X_tr, y_tr, X_te, model):
'''
corresponds to f_weka_bg_svm_tr_te() in Matlab version
'''
#bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
y_pred = model_inner.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
#auc = roc_auc_score(y_te.flatten(), y_pred)
return y_pred
def f_k_fo(X, y, model, k_fold=10):
'''
corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
'''
y = y.flatten()
y_pred = np.zeros(y.size)
skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
skf.get_n_splits(X, y)
for train_index, test_index in skf.split(X, y):
#print("TRAIN: ", train_index, " TEST: ", test_index)
X_tr, X_te = X[train_index], X[test_index]
#y_tr, y_te = y[train_index], y[test_index]
y_tr = y[train_index]
if np.unique(y_tr).size == 1:
y_pred_fo = np.zeros( len(test_index) )
#print len(X_te)
#print len(test_index)
#print y_pred_fo
y_pred_fo.fill(np.unique(y_tr)[0] )
#print y_pred_fo
else:
y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
y_pred[test_index] = y_pred_fo
#auc = roc_auc_score(y.flatten(), y_pred)
return y_pred
# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma get 1.0 CV result.
# breast cancer get 0.599
# all results are correct.
def f_quantileNorm(templete, target):
'''
Templete is the standard, change the target to the values in the templete.
Target may have a very different range than the templete.
templete and target should be 1d n by 1 array.
f_my_quantileNorm()
'''
ix_target = np.argsort(target, kind='mergesort')
ix_templete = np.argsort(templete, kind='mergesort')
target[ix_target] = templete[ix_templete]
new = target
return new
# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)
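# illustrative example (values made up, not from the original):
# templete = np.array([0.1, 0.9, 0.5]); target = np.array([10.0, 2.0, 7.0])
# f_quantileNorm(templete, target) -> array([0.9, 0.1, 0.5])
# i.e. each target value is replaced by the templete value of the same rank.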
#def f_bg_k_fo_3(X, y, k_fold=10):
# '''
# corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
# corresponds to f_k_fo()
# '''
# y_pred = np.zeros((y.size, 1))
#
# skf = StratifiedKFold(n_splits=k_fold)
# skf.get_n_splits(X, y)
#
# for train_index, test_index in skf.split(X, y):
# #print("TRAIN:", train_index, "TEST:", test_index)
# X_tr, X_te = X[train_index], X[test_index]
# y_tr, y_te = y[train_index], y[test_index]
def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
'''
% using each cluster data to predict the whole instances, while self
% prediction using 10-fold CV.
corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
'''
n_clusters = len(clus)
y_pred_multi = np.zeros((y.size, n_clusters) )
models = []
for j in range(0, n_clusters):
# for each cluster
Xj = X[clus[j].flatten(), :]
yj = y[clus[j].flatten() ]
model_a_clust = copy.deepcopy(model)
print(' Cluster '+str(j)+' started...')
#if len(yj) > 10:
if len(yj) > 15 and np.unique(yj).size != 1:
# ------------------ for self ------------------
#if np.unique(yj).size == 1:
# y_pred = np.zeros(yj.size)
# y_pred.fill(np.unique(yj)[0])
#else:
try:
y_pred = f_k_fo(Xj, yj, model, fo_inner)
# quantileNorm
templete = y_pred_whole[clus[j].flatten()]
target = y_pred
y_pred = f_quantileNorm(templete, target)
# copy the normed prediction to the whole data.
y_pred_multi[clus[j].flatten(), j] = y_pred
print(' c-'+str(j)+' done predicting local instances')
# ------------------ for other -----------------
ix_other = set(range(0, y.size)) - set(clus[j].flatten())
ix_other = list(ix_other)
#print ix_other
X_other = X[ix_other , :]
#y_other = y[ix_other ]
# predict
#y_pred = f_tr_te(Xj, yj, X_other, model)
#if np.unique(yj).size != 1:
model_a_clust.fit(Xj, yj)
y_pred = model_a_clust.predict_proba(X_other)
y_pred = y_pred[:, 1].flatten()
# quantileNorm
templete = y_pred_whole[ix_other]
target = y_pred
y_pred = f_quantileNorm(templete, target)
#else:
# y_pred = np.zeros(X_other.size)
# y_pred.fill(np.unique(yj)[0])
# copy to the whole array
y_pred_multi[ix_other, j] = y_pred
print(' c-'+str(j)+' done predicting remote instances')
except ValueError as e:
print(e)
print(' skip this cluster')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
else:
if len(yj) <= 15:
print (' '+str(len(yj))+' insts in cluster, <= 15, skip...')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
if np.unique(yj).size == 1:
print (' warning, #unique class label(s) == 1')
y_pred = np.zeros(y.size)
y_pred.fill(np.unique(yj)[0])
y_pred_multi[:, j] = y_pred
model_a_clust = np.unique(yj)[0]
models.append(model_a_clust)
return [y_pred_multi, models]
# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)
#def f_dec_tab_4_bg_svm(X, y, clus):
# '''
# Calculate the decision table
# % This version changed from the cluster-cluster dec_mat to instance-cluster
# % dec_mat. This solution will avoid the case that if one cluster decision
# % is wrong leading entrie cluster prediction is wrong, which is the reason
# % of instability. However, we cannot use a systematic evaluation criteria
# % such as AUC, I will try using the predicted prob at first.
#
# % This version 3 adds the support for fuzzy clustering - one instance may
# % belongs to more than one cluster.
# % This updated version also outputs the predicted values of y.
# % support more than 3 clusters
# % normalization take place in y_pred_self and y_pred_other, thus do not
# % need normalization when predict y_pred_ICE.
# % ixsp is another cluster form.
#
# corresponds to f_dec_tab_4_bg_svm() in Matlab version
# '''
# #n_clusters = len(clus)
# ## dec_mat stores the prediction error.
# #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# #
# ## k_fold of inner cross-validation
# #fo_inner = 10
# # --------------------------- WHOLE -------------------------
#
# # --------------------------- SELF -------------------------
def f_err_mat(X, y, clus, model):
'''
Calculate the decision table
corresponds to f_dec_tab_4_bg_svm() in Matlab version
'''
n_clusters = len(clus)
# err_mat stores the prediction error.
pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# col 0 to col n_clusters-1 store the predictions by each cluster
# the last col stores the pred by whole data
#models = []
# k_fold of inner cross-validation
fo_inner = 5
# --------------------------- WHOLE -------------------------
# Predict each cluster using the whole data.
model_whole = copy.deepcopy(model)
y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
model_whole.fit(X, y) # fit a model using all data rather than only a fold
pred_prob_mat[:, n_clusters] = y_pred_whole
print (' Done evaluation using whole instances')
print (' Start to evaluate each cluster ')
# --------------------------- SELF -------------------------
# predict the whole instances using each cluster data, while self
# prediction using 10-fold CV.
[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
y_pred_whole, model, fo_inner)
print (' Done evaluation using each cluster')
models.append(model_whole)
pred_prob_mat[:, 0:n_clusters] = y_pred_multi
# make a tmp array a stores y
tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
err_mat = abs(pred_prob_mat - tmp )
print (' Done calculating error table and fitting ICE models')
return [err_mat, models]
"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
# '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""
def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
'''
Convert the err table to decision table.
'''
dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)
# dec_ixs: for each instance, which clusters should be used.
dec_ixs = []
inst_clus = f_tfs_2_instClus(tfs)
for i in range(0, len(err_mat)):
# Matlab code:
#dec_row = dec_mat(cur_nb_ix, :);
#dec_row(:, end ) = dec_row(:, end ) - adv_whole;
#dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
row = np.copy( err_mat[i, :] )
#print row
row[-1] = row[-1] - adv_whole
inst_i_clus = inst_clus[i]
if len(inst_i_clus) > 0:
row[inst_i_clus] = row[inst_i_clus] - adv_self
#print row
ix_good_clus = list( np.where( row < row[-1] ) [0] )
#print ix_good_clus
if len(ix_good_clus) > 0:
dec_mat[i, ix_good_clus] = True
dec_ixs.append(ix_good_clus)
else:
dec_ixs.append([])
return [dec_mat, dec_ixs]
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
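# illustrative example (values made up, not from the original): with the
# default advantages adv_whole=0.4 and adv_self=0.5, an instance that belongs
# to cluster 0 and has error row [0.05, 0.30, 0.25] (clusters 0, 1, whole)
# gives adjusted errors [-0.45, 0.30, -0.15]; only cluster 0 beats the whole
# model, so dec_mat gets True in column 0 and dec_ixs for that instance is [0].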
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
'''
Use the training data to predict the testing data.
Use whole training data to predict
Use each cluster of training data to predict the testing data.
'''
y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
# the first col is the prediction using the whole data
model_whole = models[-1]
y_pred_all[:, 0] = f_te(X_te, model_whole)
#y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
#print 'whole model good '
# start from the second col, the result is by each cluster
for i in range(0, len(clus)):
#Xi = X_tr[clus[i].flatten(), :]
#yi = y_tr[clus[i].flatten() ]
model_i = models[i]
#model_a_clust = copy.deepcopy(model)
try:
y_pred_te = f_te(X_te, model_i)
except :
if model_i == 0:
y_pred_te = np.zeros(len(X_te))
elif model_i == 1:
y_pred_te = np.ones(len(X_te))
else:
y_pred_te = np.zeros(len(X_te))
y_pred_te.fill(np.nan)
#except NotFittedError as e:
# print(repr(e))
# y_pred_te = np.zeros(len(X_te))
# y_pred_te.fill(np.nan)
#print 'model '+str(i)+' good '
#y_pred_te = f_tr_te(Xi, yi, X_te, model)
if doNorm == True:
templete = y_pred_all[:, 0]
target = y_pred_te
y_pred = f_quantileNorm(templete, target)
else:
y_pred = y_pred_te
y_pred_all[:, i+1] = y_pred
return y_pred_all
# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)
def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    Fit the ICE model: run RWR-based fuzzy clustering on the training data,
    train one model per cluster plus one on the whole data, and build the
    per-instance decision table of which clusters to trust.
    '''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
# train models and calculate the error-dicision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
print (' Done calucating decision table')
return [clus, models, dec_ixs]
#def_deal_miss_v_1(d):
#    '''
#    deal with missing values by replacing them with the mean.
#    '''
def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
    This version uses the error matrix (err_mat) to re-cluster the instances.
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
# train models and calculate the error-dicision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# ******************** re-clustering ********************
n_iter = 2
for i in range(0, n_iter):
clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
tfs = f_clus_to_tfs(clus, len(X_tr))
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# *******************************************************
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
print (' Done calucating decision table')
return [clus, models, dec_ixs]
def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
'''
    clus and inst_clus contain the same information: clus holds the instance
    ids for each cluster, while inst_clus stores, for each instance, which
    cluster(s) it belongs to.
    dec_ixs stores the good cluster(s) for each instance, which may include
    even a remote cluster; these are partial models that do not cover the
    whole set of instances.
'''
# the first col is the prediction using the whole data
# start from the second col, the result is by each cluster
y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)
y_pred_ICE = np.zeros( len(X_te) )
neighbour_mat = f_eu_dist2(X_tr, X_te)
# ---------- for each testing instance ----------
#n_partials = np.zeros( len(X_te) )
#n_wholes = np.zeros( len(X_te) )
for j in range(0, len(X_te) ):
# for each testing instance
# find the top 10 neighbors for each test instance
neighbour_col = neighbour_mat[:, j].flatten()
ix = np.argsort(neighbour_col )
ix = ix[::-1]
ix_top_neighbors = ix[0:N]
#print 'testing inst ' + str(j)
#print ' ix of top neighbors:'
#print ix_top_neighbors
# ---------- find all neighbors' picks ----------
clus_ids_to_use = []
nei_labels = []
for cur_nb in range(0, N):
# for each neighbour
# find each neighbour's pick
cur_nb_ix = ix_top_neighbors[cur_nb]
clus_id_to_use = list( dec_ixs[cur_nb_ix] )
clus_ids_to_use = clus_ids_to_use + clus_id_to_use
# also find neighbor's label. maybe will be used later as KNN pred
# instead of using whole to pred.
nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
#print ' clus_ids_to_use:'
#print clus_ids_to_use
# cluster id + 1 to make the ix fit the col id in y_pred_all
a = clus_ids_to_use
a = list( np.array(a) + 1 )
clus_ids_to_use = a
# number of partial models used
n_partial = len(clus_ids_to_use)
# number of whole models used, based on parameters alpha, beta and N.
n_whole = int( round( alpha*n_partial + beta*N ) )
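        # e.g. with the defaults alpha=1, beta=1, N=5 and 7 partial picks from
        # the neighbours, n_whole = round(1*7 + 1*5) = 12 copies of the
        # whole-data column (index 0) are averaged in alongside the partials.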
clus_ids_to_use = clus_ids_to_use + [0] * n_whole
#print ' clus_ids_to_use:'
#print clus_ids_to_use
#print nei_labels
y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])
print ('Done predicting testing instances.')
return y_pred_ICE
# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'
n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []
# f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')
#for j in range(1, 50):
for j in range(1, 49):
try:
X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
#imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
#plt.show()
#sim = np.corrcoef(X)
#np.fill_diagonal(sim, 0)
#n_clus = 100
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
model = svm.SVC(kernel='linear', probability = True)
skf = StratifiedKFold(n_splits=k_fold)
skf.get_n_splits(X, y)
y_preds_ICE = np.zeros( y.size )
y_preds_whole = np.zeros( y.size )
fold_i = 1
for train_index, test_index in skf.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
X_tr, X_te = X[train_index], X[test_index]
y_tr, y_te = y[train_index], y[test_index]
[clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
#[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)
y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
y_preds_ICE[test_index] = y_pred_ICE
y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
y_preds_whole[test_index] = y_pred_whole
print( j)
print( 'fold ' + str(fold_i) + ' finished')
fold_i = fold_i + 1
auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
print (auc_ICE, auc_whole)
aucs_ICE.append(auc_ICE)
aucs_whole.append(auc_whole)
f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n')
except:
continue
| 2.5 | 2 |
xc/common/utils/prjxray_routing_import.py | FireFox317/symbiflow-arch-defs | 1 | 2428 | #!/usr/bin/env python3
""" Imports 7-series routing fabric to the rr graph.
For ROI configurations, this also connects the synthetic IO tiles to the routing
node specified.
Rough structure:
Add rr_nodes for CHANX and CHANY from the database. IPIN and OPIN rr_nodes
should already be present from the input rr_graph.
Create a mapping between database graph_nodes and IPIN, OPIN, CHANX and CHANY
rr_node ids in the rr_graph.
Add rr_edge for each row in the graph_edge table.
Import channel XML node from connection database and serialize output to
rr_graph XML.
"""
import argparse
import os.path
from hilbertcurve.hilbertcurve import HilbertCurve
import math
import prjxray.db
from prjxray.roi import Roi
import prjxray.grid as grid
from lib.rr_graph import graph2
from lib.rr_graph import tracks
from lib.connection_database import get_wire_pkey, get_track_model
import lib.rr_graph_capnp.graph2 as capnp_graph2
from prjxray_constant_site_pins import feature_when_routed
from prjxray_tile_import import remove_vpr_tile_prefix
import simplejson as json
from lib import progressbar_utils
import datetime
import re
import functools
import pickle
import sqlite3
now = datetime.datetime.now
HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+')
CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)')
CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)')
CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*')
BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+')
BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+')
CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+')
HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)')
IOI_OCLK = re.compile('IOI_OCLK_([01])')
# Regex for [LR]IOI_SING tiles
IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_']
IOI_SING_REGEX = re.compile(
r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\.IOI_)({})([01])(.*)'.format(
"|".join(IOI_SITE_PIPS)
)
)
def reduce_connection_box(box):
""" Reduce the number of connection boxes by merging some.
Examples:
>>> reduce_connection_box('IMUX0')
'IMUX'
>>> reduce_connection_box('IMUX1')
'IMUX'
>>> reduce_connection_box('IMUX10')
'IMUX'
>>> reduce_connection_box('BRAM_ADDR')
'IMUX'
>>> reduce_connection_box('A_L10')
'A'
>>> reduce_connection_box('B')
'B'
>>> reduce_connection_box('B_L')
'B'
"""
box = CONNECTION_BOX_FILTER.match(box).group(1)
if 'BRAM_ADDR' in box:
box = 'IMUX'
if box.endswith('_L'):
box = box.replace('_L', '')
return box
REBUF_NODES = {}
REBUF_SOURCES = {}
def get_clk_hrow_and_rebuf_tiles_sorted(cur):
"""
    Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_R and REBUF tiles.
returns them in a list sorted according to their Y coordinates.
"""
cur.execute(
"""
SELECT name
FROM phy_tile
WHERE
name LIKE "CLK_HROW_BOT_R_%"
OR
name LIKE "CLK_HROW_TOP_R_%"
OR
name LIKE "CLK_BUFG_REBUF_%"
ORDER BY grid_y DESC;
"""
)
return [t[0] for t in cur.fetchall()]
def populate_bufg_rebuf_map(conn):
global REBUF_NODES
REBUF_NODES = {}
global REBUF_SOURCES
REBUF_SOURCES = {}
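    # REBUF_NODES maps a rebuf node pkey to the extra fasm features that must be
    # emitted whenever a pip touching that node is used; REBUF_SOURCES maps
    # (tile, wire) pairs back to the owning node pkey so check_feature() can
    # look them up.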
rebuf_wire_regexp = re.compile(
'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)'
)
cur = conn.cursor()
    # Find CLK_HROW_TOP_R, CLK_HROW_BOT_R and REBUF tiles.
rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur)
# Append None on both ends of the list to simplify the code below.
rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None]
def maybe_get_clk_hrow(i):
"""
Returns a name of CLK_HROW tile only if its there on the list.
"""
tile = rebuf_and_hrow_tiles[i]
if tile is not None and tile.startswith("CLK_HROW"):
return tile
return None
    # Assign each REBUF tile its above and below CLK_HROW tile. Note that in
    # VPR coordinate terms "above" and "below" mean the opposite...
rebuf_to_hrow_map = {}
for i, tile_name in enumerate(rebuf_and_hrow_tiles):
if tile_name is not None and tile_name.startswith("CLK_BUFG_REBUF"):
rebuf_to_hrow_map[tile_name] = {
"above": maybe_get_clk_hrow(i - 1),
"below": maybe_get_clk_hrow(i + 1),
}
# Find nodes touching rebuf wires.
cur.execute(
"""
WITH
rebuf_wires(wire_in_tile_pkey) AS (
SELECT pkey
FROM wire_in_tile
WHERE
name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_BOT"
OR
name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_TOP"
),
rebuf_nodes(node_pkey) AS (
SELECT DISTINCT node_pkey
FROM wire
WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
)
SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name
FROM rebuf_nodes
INNER JOIN wire ON wire.node_pkey = rebuf_nodes.node_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
ORDER BY rebuf_nodes.node_pkey;"""
)
for node_pkey, rebuf_tile, rebuf_wire_name in cur:
if node_pkey not in REBUF_NODES:
REBUF_NODES[node_pkey] = []
m = rebuf_wire_regexp.fullmatch(rebuf_wire_name)
if m.group(2) == 'TOP':
REBUF_NODES[node_pkey].append(
'{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1))
)
hrow_tile = rebuf_to_hrow_map[rebuf_tile]["below"]
if hrow_tile is not None:
REBUF_NODES[node_pkey].append(
"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(
hrow_tile, m.group(1)
)
)
elif m.group(2) == 'BOT':
REBUF_NODES[node_pkey].append(
'{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1))
)
hrow_tile = rebuf_to_hrow_map[rebuf_tile]["above"]
if hrow_tile is not None:
REBUF_NODES[node_pkey].append(
"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(
hrow_tile, m.group(1)
)
)
else:
assert False, (rebuf_tile, rebuf_wire_name)
for node_pkey in REBUF_NODES:
cur.execute(
"""
SELECT phy_tile.name, wire_in_tile.name
FROM wire
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
WHERE wire.node_pkey = ?;""", (node_pkey, )
)
for tile, wire_name in cur:
REBUF_SOURCES[(tile, wire_name)] = node_pkey
HCLK_CMT_TILES = {}
def populate_hclk_cmt_tiles(db):
global HCLK_CMT_TILES
HCLK_CMT_TILES = {}
grid = db.grid()
_, x_max, _, _ = grid.dims()
for tile in grid.tiles():
gridinfo = grid.gridinfo_at_tilename(tile)
if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']:
continue
hclk_x, hclk_y = grid.loc_of_tilename(tile)
hclk_cmt_x = hclk_x
hclk_cmt_y = hclk_y
while hclk_cmt_x > 0:
hclk_cmt_x -= 1
gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))
if gridinfo.tile_type == 'HCLK_CMT':
HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc(
(hclk_cmt_x, hclk_cmt_y)
)
break
hclk_cmt_x = hclk_x
while hclk_cmt_x < x_max:
hclk_cmt_x += 1
gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))
if gridinfo.tile_type == 'HCLK_CMT_L':
HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc(
(hclk_cmt_x, hclk_cmt_y)
)
break
def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number):
if (hclk_tile, lr) not in HCLK_CMT_TILES:
return []
hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)]
return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)]
def check_feature(feature):
""" Check if enabling this feature requires other features to be enabled.
Some pips imply other features. Example:
.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10
implies:
.ENABLE_BUFFER.HCLK_CK_BUFHCLK10
"""
# IOI_SING tiles have bits in common with the IOI tiles.
#
# The difference is that the TOP IOI_SING tile shares bits with
# the bottom half of a normal IOI tile, while the BOTTOM IOI_SING
# shares bits with the top half of a normal IOI TILE.
#
# The following, is to change the edge feature to accomodate this
# need, as the IOI_SING tiles have the same wire, and pip names
# despite they are found on the TOP or BOTTOM of an IOI column
m = IOI_SING_REGEX.fullmatch(feature)
if m:
# Each clock region spans a total of 50 IOBs.
# The IOI_SING are found on top or bottom of the whole
# IOI/IOB column. The Y coordinate identified with the
    # second capture group is divided by 50 to get the relative
# position of the IOI_SING within the clock region column
is_bottom_sing = int(m.group(2)) % 50 == 0
# This is the value to attach to the source pip name that
# changes based on which IOI_SING is selected (top or bottom)
#
# Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1
src_value = '1' if is_bottom_sing else '0'
# This is the value to attach to the IOI_SITE_PIPS names
# in the destination wire of the pip
#
# Example: IOI_OLOGIC0 -> IOI_OLOGIC1
dst_value = '0' if is_bottom_sing else '1'
unchanged_feature = "{}{}{}{}".format(
m.group(1), m.group(2), m.group(3), m.group(4)
)
src_wire = m.group(6).replace('_SING', '')
for pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']:
if pip in src_wire:
src_wire = src_wire.replace('_0', '_{}'.format(src_value))
if 'IOI_OCLK' in src_wire:
src_wire = src_wire.replace('_0', '_{}'.format(dst_value))
changed_feature = "{}{}".format(dst_value, src_wire)
feature = "{}{}".format(unchanged_feature, changed_feature)
feature_path = feature.split('.')
# IOB_DIFFO_OUT0->IOB_DIFFO_IN1
#
# When this PIP is active the IOB operates in the differential output mode.
    # There is no feature associated with that PIP in the prjxray db but there
# is a tile-wide feature named "DIFF_OUT".
#
# The "DIFF_OUT" cannot be set in the architecture as it is defined one
# level up in the hierarchy (its tile-wide, not site-wide). So here we
# map the PIP's feature to "DIFF_OUT"
if feature_path[2] == "IOB_DIFFO_OUT0" and \
feature_path[1] == "IOB_DIFFO_IN1":
return '{}.OUT_DIFF'.format(feature_path[0])
# IOB_PADOUT0->IOB_DIFFI_IN1
# IOB_PADOUT1->IOB_DIFFI_IN0
#
# These connections are hard wires that connect IOB33M and IOB33S sites.
# They are used in differential input mode.
#
# Vivado does not report this connection as a PIP but in the prjxray db it
# is a pip. Instead of making it a pseudo-pip we simply reject fasm
# features here.
if feature_path[2] == "IOB_PADOUT0" and feature_path[1] == "IOB_DIFFI_IN1":
return ''
if feature_path[2] == "IOB_PADOUT1" and feature_path[1] == "IOB_DIFFI_IN0":
return ''
# REBUF stuff
rebuf_key = (feature_path[0], feature_path[1])
if rebuf_key in REBUF_SOURCES:
return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]])
m = IOI_OCLK.fullmatch(feature_path[1])
if m:
enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format(
feature_path[0], m.group(1), feature_path[-1]
)
return ' '.join((feature, enable_oclkm_feature))
if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]):
enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_buffer_feature))
# BUFHCE sites are now routed through, without the need of placing them, therefore,
# when the relative pip is traversed, the correct fasm feature needs to be added.
# The relevant features are:
# - IN_USE: to enable the BUFHCE site
# - ZINV_CE: to disable the inverter on CE input which is connected to VCC.
# This sets the CE signal to constant 1
m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1])
if m:
x_loc_str = m.group(1)
if 'L' in x_loc_str:
x_loc = 0
elif 'R' in x_loc_str:
x_loc = 1
else:
assert False, "Impossible to determine X location of BUFHCE"
y_loc = m.group(2)
bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc)
enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format(
feature_path[0], bufhce_loc
)
enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\'b1'.format(
feature_path[0], bufhce_loc
)
return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce))
if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]):
enable_feature = '{}.{}_ACTIVE'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_feature))
if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]):
enable_feature = '{}.{}_ACTIVE'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_feature))
if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]):
features = [feature]
features.append(
'{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1])
)
features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1]))
return ' '.join(features)
m = HCLK_OUT.fullmatch(feature_path[-1])
if m:
return ' '.join(
[feature] + find_hclk_cmt_hclk_feature(
feature_path[0], m.group(1), m.group(2)
)
)
m = CASCOUT_REGEX.fullmatch(feature_path[-2])
if m:
enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format(
feature_path[0], m.group(1)
)
return ' '.join((feature, enable_cascout))
parts = feature.split('.')
wire_feature = feature_when_routed(parts[1])
if wire_feature is not None:
return '{} {}.{}'.format(feature, parts[0], wire_feature)
return feature
# CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1)
PIN_NAME_TO_PARTS = re.compile(r'^([^\.]+)\.([^\]]+)\[0\]$')
def set_connection_box(
graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
):
""" Assign a connection box to an IPIN node. """
node_dict = graph.nodes[node_idx]._asdict()
node_dict['connection_box'] = graph2.ConnectionBox(
x=grid_x,
y=grid_y,
id=box_id,
site_pin_delay=site_pin_delay,
)
graph.nodes[node_idx] = graph2.Node(**node_dict)
def update_connection_box(
conn, graph, graph_node_pkey, node_idx, connection_box_map
):
""" Update connection box of IPIN node if needed. """
cur = conn.cursor()
cur.execute(
"""
SELECT connection_box_wire_pkey
FROM graph_node WHERE pkey = ?""", (graph_node_pkey, )
)
connection_box_wire_pkey = cur.fetchone()[0]
if connection_box_wire_pkey is not None:
cur.execute(
"""
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
SELECT phy_tile_pkey FROM wire WHERE pkey = ?
)""", (connection_box_wire_pkey, )
)
grid_x, grid_y = cur.fetchone()
cur.execute(
"SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?",
(connection_box_wire_pkey, )
)
wire_in_tile_pkey = cur.fetchone()[0]
box_id = connection_box_map[wire_in_tile_pkey]
cur.execute(
"""
SELECT switch.intrinsic_delay
FROM switch
WHERE pkey = (
SELECT site_pin_switch_pkey
FROM wire_in_tile
WHERE pkey = (
SELECT wire_in_tile_pkey
FROM wire
WHERE pkey = (
SELECT site_wire_pkey
FROM node
WHERE pkey = (
SELECT node_pkey
FROM graph_node
WHERE pkey = ?
)
)
)
)""", (graph_node_pkey, )
)
site_pin_delay = cur.fetchone()[0]
set_connection_box(
graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
)
def create_get_tile_and_site_as_tile_pkey(cur):
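    """ Build a map from (grid_x, grid_y) to (tile_pkey, site_as_tile_pkey).
    The tile table is read once up front so later per-node lookups are plain
    dictionary accesses instead of SQL queries.
    """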
tiles = {}
for tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute("""
SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;"""):
tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey)
def get_tile_and_site_as_tile_pkey(x, y):
return tiles[(x, y)]
return get_tile_and_site_as_tile_pkey
def create_get_site_as_tile_wire(cur):
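    """ Return a cached helper mapping (site_as_tile_pkey, pin) to a wire_in_tile pkey. """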
    @functools.lru_cache(maxsize=None)
def get_site_from_site_as_tile(site_as_tile_pkey):
cur.execute(
"""
SELECT site.site_type_pkey, site_as_tile.site_pkey
FROM site_as_tile
INNER JOIN site ON site.pkey = site_as_tile.site_pkey
WHERE site_as_tile.pkey = ?""", (site_as_tile_pkey, )
)
results = cur.fetchall()
assert len(results) == 1, site_as_tile_pkey
return results[0]
    @functools.lru_cache(maxsize=None)
def get_site_as_tile_wire(site_as_tile_pkey, pin):
site_type_pkey, site_pkey = get_site_from_site_as_tile(
site_as_tile_pkey
)
cur.execute(
"""
SELECT
pkey
FROM
wire_in_tile
WHERE
site_pin_pkey = (
SELECT
pkey
FROM
site_pin
WHERE
site_type_pkey = ?
AND name = ?
)
AND
site_pkey = ?
;""", (site_type_pkey, pin, site_pkey)
)
results = cur.fetchall()
assert len(results) == 1
wire_in_tile_pkey = results[0][0]
return wire_in_tile_pkey
return get_site_as_tile_wire
def import_graph_nodes(conn, graph, node_mapping, connection_box_map):
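    """ Match IPIN/OPIN rr nodes with graph_node rows in the connection database.
    The graph_node pkey of each matched pin is recorded in node_mapping and the
    node's connection box is updated from the database.
    """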
cur = conn.cursor()
get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur)
get_site_as_tile_wire = create_get_site_as_tile_wire(cur)
for node_idx, node in enumerate(graph.nodes):
if node.type not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN):
continue
gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)]
pin_name = graph.pin_ptc_to_name_map[
(gridloc.block_type_id, node.loc.ptc)]
# Synthetic blocks are handled below.
if pin_name.startswith('SYN-'):
set_connection_box(
graph,
node_idx,
node.loc.x_low,
node.loc.y_low,
box_id=graph.maybe_add_connection_box('IMUX'),
site_pin_delay=0.,
)
continue
m = PIN_NAME_TO_PARTS.match(pin_name)
assert m is not None, pin_name
tile_type = m.group(1)
tile_type = remove_vpr_tile_prefix(tile_type)
pin = m.group(2)
tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey(
node.loc.x_low, node.loc.y_low
)
if site_as_tile_pkey is not None:
wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin)
else:
cur.execute(
"""
SELECT
pkey
FROM
wire_in_tile
WHERE
name = ?
AND
phy_tile_type_pkey IN (
SELECT tile_type_pkey FROM phy_tile WHERE pkey IN (
SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey = ?
)
);""", (pin, tile_pkey)
)
results = cur.fetchall()
assert len(results) == 1
wire_in_tile_pkey = results[0][0]
tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1])
cur.execute(
"""
SELECT
top_graph_node_pkey, bottom_graph_node_pkey,
left_graph_node_pkey, right_graph_node_pkey FROM wire
WHERE
wire_in_tile_pkey = ? AND tile_pkey = ?;""",
(wire_in_tile_pkey, tile_pkey)
)
result = cur.fetchone()
assert result is not None, (wire_in_tile_pkey, tile_pkey)
(
top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey,
right_graph_node_pkey
) = result
side = node.loc.side
if side == tracks.Direction.LEFT:
assert left_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[left_graph_node_pkey] = node.id
update_connection_box(
conn, graph, left_graph_node_pkey, node_idx, connection_box_map
)
elif side == tracks.Direction.RIGHT:
assert right_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[right_graph_node_pkey] = node.id
update_connection_box(
conn, graph, right_graph_node_pkey, node_idx,
connection_box_map
)
elif side == tracks.Direction.TOP:
assert top_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[top_graph_node_pkey] = node.id
update_connection_box(
conn, graph, top_graph_node_pkey, node_idx, connection_box_map
)
elif side == tracks.Direction.BOTTOM:
assert bottom_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[bottom_graph_node_pkey] = node.id
update_connection_box(
conn, graph, bottom_graph_node_pkey, node_idx,
connection_box_map
)
else:
assert False, side
def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id):
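    """ Add CHANX/CHANY tracks from the connection database to the rr graph.
    Only tracks in alive_tracks are imported; new rr node ids are stored in
    node_mapping keyed by graph_node pkey.
    """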
cur = conn.cursor()
cur2 = conn.cursor()
for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low,
y_high, ptc, capacitance,
resistance) in progressbar_utils.progressbar(cur.execute("""
SELECT
pkey,
track_pkey,
graph_node_type,
x_low,
x_high,
y_low,
y_high,
ptc,
capacitance,
resistance
FROM
graph_node WHERE track_pkey IS NOT NULL;""")):
if track_pkey not in alive_tracks:
continue
cur2.execute(
"""
SELECT name FROM segment WHERE pkey = (
SELECT segment_pkey FROM track WHERE pkey = ?
)""", (track_pkey, )
)
result = cur2.fetchone()
if result is not None:
segment_name = result[0]
segment_id = graph.get_segment_id_from_name(segment_name)
else:
segment_id = default_segment_id
node_type = graph2.NodeType(graph_node_type)
if node_type == graph2.NodeType.CHANX:
direction = 'X'
x_low = max(x_low, 1)
elif node_type == graph2.NodeType.CHANY:
direction = 'Y'
y_low = max(y_low, 1)
else:
assert False, node_type
canonical_loc = None
cur2.execute(
"""
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
SELECT canon_phy_tile_pkey FROM track WHERE pkey = ?
)""", (track_pkey, )
)
result = cur2.fetchone()
if result:
canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1])
track = tracks.Track(
direction=direction,
x_low=x_low,
x_high=x_high,
y_low=y_low,
y_high=y_high,
)
assert graph_node_pkey not in node_mapping
node_mapping[graph_node_pkey] = graph.add_track(
track=track,
segment_id=segment_id,
ptc=ptc,
timing=graph2.NodeTiming(
r=resistance,
c=capacitance,
),
canonical_loc=canonical_loc
)
def create_track_rr_graph(
conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id
):
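    """ Import all alive routing tracks from the connection database into the rr graph. """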
cur = conn.cursor()
cur.execute("""SELECT count(*) FROM track;""")
(num_channels, ) = cur.fetchone()
print('{} Import alive tracks'.format(now()))
alive_tracks = set()
for (track_pkey,
) in cur.execute("SELECT pkey FROM track WHERE alive = 1;"):
alive_tracks.add(track_pkey)
print('{} Importing alive tracks'.format(now()))
import_tracks(conn, alive_tracks, node_mapping, graph, segment_id)
print('original {} final {}'.format(num_channels, len(alive_tracks)))
def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles):
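    """ Connect the pins of synthetic tiles (IO pads, VCC, GND) to their routing tracks with delayless edges. """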
cur = conn.cursor()
delayless_switch = graph.get_switch_id('__vpr_delayless_switch__')
for tile_name, synth_tile in synth_tiles['tiles'].items():
num_inpad = len(
list(
filter(
lambda t: t['port_type'] == 'output', synth_tile['pins']
)
)
)
num_outpad = len(
list(
filter(
lambda t: t['port_type'] == 'input', synth_tile['pins']
)
)
)
for pin in synth_tile['pins']:
if pin['port_type'] in ['input', 'output']:
wire_pkey = get_wire_pkey(conn, tile_name, pin['wire'])
cur.execute(
"""
SELECT
track_pkey
FROM
node
WHERE
pkey = (
SELECT
node_pkey
FROM
wire
WHERE
pkey = ?
);""", (wire_pkey, )
)
(track_pkey, ) = cur.fetchone()
assert track_pkey is not None, (
tile_name, pin['wire'], wire_pkey
)
elif pin['port_type'] == 'VCC':
cur.execute('SELECT vcc_track_pkey FROM constant_sources')
(track_pkey, ) = cur.fetchone()
elif pin['port_type'] == 'GND':
cur.execute('SELECT gnd_track_pkey FROM constant_sources')
(track_pkey, ) = cur.fetchone()
else:
assert False, pin['port_type']
tracks_model, track_nodes = get_track_model(conn, track_pkey)
option = list(
tracks_model.get_tracks_for_wire_at_coord(
tuple(synth_tile['loc'])
).values()
)
assert len(option) > 0, (pin, len(option))
if pin['port_type'] == 'input':
tile_type = synth_tile['tile_name']
wire = 'outpad'
elif pin['port_type'] == 'output':
tile_type = synth_tile['tile_name']
wire = 'inpad'
elif pin['port_type'] == 'VCC':
tile_type = 'SYN-VCC'
wire = 'VCC'
elif pin['port_type'] == 'GND':
tile_type = 'SYN-GND'
wire = 'GND'
else:
assert False, pin
track_node = track_nodes[option[0]]
assert track_node in node_mapping, (track_node, track_pkey)
if wire == 'inpad' and num_inpad > 1:
pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
tile_type, pin['z_loc'], wire
)
elif wire == 'outpad' and num_outpad > 1:
pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
tile_type, (pin['z_loc'] - num_inpad), wire
)
else:
pin_name = graph.create_pin_name_from_tile_type_and_pin(
tile_type, wire
)
pin_node = graph.get_nodes_for_pin(
tuple(synth_tile['loc']), pin_name
)
if pin['port_type'] == 'input':
graph.add_edge(
src_node=node_mapping[track_node],
sink_node=pin_node[0][0],
switch_id=delayless_switch,
name='synth_{}_{}'.format(tile_name, pin['wire']),
)
elif pin['port_type'] in ['VCC', 'GND', 'output']:
graph.add_edge(
src_node=pin_node[0][0],
sink_node=node_mapping[track_node],
switch_id=delayless_switch,
name='synth_{}_{}'.format(tile_name, pin['wire']),
)
else:
assert False, pin
def get_switch_name(conn, graph, switch_name_map, switch_pkey):
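    """ Translate a database switch pkey into an rr graph switch id, caching the result. """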
assert switch_pkey is not None
if switch_pkey not in switch_name_map:
cur = conn.cursor()
cur.execute(
"""SELECT name FROM switch WHERE pkey = ?;""", (switch_pkey, )
)
(switch_name, ) = cur.fetchone()
switch_id = graph.get_switch_id(switch_name)
switch_name_map[switch_pkey] = switch_id
else:
switch_id = switch_name_map[switch_pkey]
return switch_id
def create_get_tile_name(conn):
cur = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_tile_name(tile_pkey):
cur.execute(
"""
SELECT name FROM phy_tile WHERE pkey = ?;
""", (tile_pkey, )
)
return cur.fetchone()[0]
return get_tile_name
def create_get_pip_wire_names(conn):
cur = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_pip_wire_names(pip_pkey):
cur.execute(
"""SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey
FROM pip_in_tile WHERE pkey = ?;""", (pip_pkey, )
)
src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone()
cur.execute(
"""SELECT name FROM wire_in_tile WHERE pkey = ?;""",
(src_wire_in_tile_pkey, )
)
(src_net, ) = cur.fetchone()
cur.execute(
"""SELECT name FROM wire_in_tile WHERE pkey = ?;""",
(dest_wire_in_tile_pkey, )
)
(dest_net, ) = cur.fetchone()
return (src_net, dest_net)
return get_pip_wire_names
def get_number_graph_edges(conn, graph, node_mapping):
num_edges = len(graph.edges)
print('{} Counting edges.'.format(now()))
cur = conn.cursor()
    cur.execute("SELECT count() FROM graph_edge;")
for src_graph_node, dest_graph_node in cur.execute("""
SELECT
src_graph_node_pkey,
dest_graph_node_pkey
FROM
graph_edge;
"""):
if src_graph_node not in node_mapping:
continue
if dest_graph_node not in node_mapping:
continue
num_edges += 1
return num_edges
def import_graph_edges(conn, graph, node_mapping):
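    """ Yield (src_node, sink_node, switch_id, metadata) tuples for all edges.
    Existing rr graph edges are passed through first, then edges from the
    graph_edge table; pip-backed edges carry a fasm_features metadata entry.
    """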
# First yield existing edges
print('{} Importing existing edges.'.format(now()))
for edge in graph.edges:
yield (edge.src_node, edge.sink_node, edge.switch_id, None)
# Then yield edges from database.
cur = conn.cursor()
    cur.execute("SELECT count() FROM graph_edge;")
(num_edges, ) = cur.fetchone()
get_tile_name = create_get_tile_name(conn)
get_pip_wire_names = create_get_pip_wire_names(conn)
switch_name_map = {}
print('{} Importing edges from database.'.format(now()))
with progressbar_utils.ProgressBar(max_value=num_edges) as bar:
for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey,
pip_pkey, backward) in enumerate(cur.execute("""
SELECT
src_graph_node_pkey,
dest_graph_node_pkey,
switch_pkey,
phy_tile_pkey,
pip_in_tile_pkey,
backward
FROM
graph_edge;
""")):
if src_graph_node not in node_mapping:
continue
if dest_graph_node not in node_mapping:
continue
if pip_pkey is not None:
tile_name = get_tile_name(phy_tile_pkey)
src_net, dest_net = get_pip_wire_names(pip_pkey)
if not backward:
pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net)
else:
pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net)
else:
pip_name = None
switch_id = get_switch_name(
conn, graph, switch_name_map, switch_pkey
)
src_node = node_mapping[src_graph_node]
sink_node = node_mapping[dest_graph_node]
if pip_name is not None:
feature = check_feature(pip_name)
if feature:
yield (
src_node, sink_node, switch_id,
(('fasm_features', feature), )
)
else:
yield (src_node, sink_node, switch_id, ())
else:
yield (src_node, sink_node, switch_id, ())
if idx % 1024 == 0:
bar.update(idx)
def create_channels(conn):
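    """ Build the graph2.Channels object from the channel, x_list and y_list tables. """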
cur = conn.cursor()
cur.execute(
"""
SELECT chan_width_max, x_min, x_max, y_min, y_max FROM channel;"""
)
chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone()
cur.execute('SELECT idx, info FROM x_list;')
x_list = []
for idx, info in cur:
x_list.append(graph2.ChannelList(idx, info))
cur.execute('SELECT idx, info FROM y_list;')
y_list = []
for idx, info in cur:
y_list.append(graph2.ChannelList(idx, info))
return graph2.Channels(
chan_width_max=chan_width_max,
x_min=x_min,
y_min=y_min,
x_max=x_max,
y_max=y_max,
x_list=x_list,
y_list=y_list,
)
def create_connection_boxes(conn, graph):
""" Assign connection box ids for all connection box types. """
cur = conn.cursor()
cur.execute(
"""
SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey IN (
SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN (
SELECT connection_box_wire_pkey FROM graph_node
WHERE connection_box_wire_pkey IS NOT NULL
)
);"""
)
connection_box_map = {}
for wire_in_tile_pkey, tile_type_pkey, wire_name in cur:
connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box(
reduce_connection_box(wire_name)
)
return connection_box_map
def yield_nodes(nodes):
with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar:
for idx, node in enumerate(nodes):
yield node
if idx % 1024 == 0:
bar.update(idx)
def phy_grid_dims(conn):
""" Returns physical grid dimensions. """
cur = conn.cursor()
cur.execute("SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;")
x_max = cur.fetchone()[0]
cur.execute("SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;")
y_max = cur.fetchone()[0]
return x_max + 1, y_max + 1
def find_constant_network(graph):
""" Find VCC and GND tiles and create synth_tiles input.
    All arches should have these synthetic tiles; search the input rr graph
for the SYN-GND and SYN-VCC tiles.
"""
block_types = {}
for block_type in graph.block_types:
block_types[block_type.name] = block_type.id
assert 'SYN-GND' in block_types
assert 'SYN-VCC' in block_types
gnd_block_id = block_types['SYN-GND']
vcc_block_id = block_types['SYN-VCC']
gnd_loc = None
vcc_loc = None
for grid_loc in graph.grid:
if gnd_block_id == grid_loc.block_type_id:
assert gnd_loc is None
gnd_loc = (grid_loc.x, grid_loc.y)
if vcc_block_id == grid_loc.block_type_id:
assert vcc_loc is None
vcc_loc = (grid_loc.x, grid_loc.y)
assert gnd_loc is not None
assert vcc_loc is not None
synth_tiles = {
'tiles':
{
"VCC":
{
'loc':
vcc_loc,
'pins':
[
{
'wire': 'VCC',
'pad': 'VCC',
'port_type': 'VCC',
'is_clock': False,
},
],
},
"GND":
{
'loc':
gnd_loc,
'pins':
[
{
'wire': 'GND',
'pad': 'GND',
'port_type': 'GND',
'is_clock': False,
},
],
},
}
}
return synth_tiles
def create_node_remap(nodes, channels_obj):
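    """ Return a callable remapping rr node ids into Hilbert-curve order.
    Nodes that are close together on the grid get nearby ids, presumably to
    improve locality in the serialized graph.
    """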
N = 2
p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max)))
point_map = {}
for node in nodes:
x = node.loc.x_low
y = node.loc.y_low
if (x, y) not in point_map:
point_map[(x, y)] = []
point_map[(x, y)].append(node.id)
hilbert_curve = HilbertCurve(p, N)
idx = 0
id_map = {}
for h in range(hilbert_curve.max_h + 1):
coord = tuple(hilbert_curve.coordinates_from_distance(h))
if coord not in point_map:
continue
for old_id in point_map[coord]:
id_map[old_id] = idx
idx += 1
del point_map[coord]
return lambda x: id_map[x]
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--db_root', required=True, help='Project X-Ray Database'
)
parser.add_argument('--part', required=True, help='FPGA part')
parser.add_argument(
'--read_rr_graph', required=True, help='Input rr_graph file'
)
parser.add_argument(
'--write_rr_graph', required=True, help='Output rr_graph file'
)
parser.add_argument(
'--write_rr_node_map',
required=True,
help='Output map of graph_node_pkey to rr inode file'
)
parser.add_argument(
'--connection_database',
help='Database of fabric connectivity',
required=True
)
parser.add_argument(
'--synth_tiles',
        help='If using an ROI, synthetic tile definition from prjxray-arch-import'
)
parser.add_argument(
'--graph_limit',
help='Limit grid to specified dimensions in x_min,y_min,x_max,y_max',
)
parser.add_argument(
'--vpr_capnp_schema_dir',
        help='Directory containing VPR schema files',
)
print('{} Starting routing import'.format(now()))
args = parser.parse_args()
db = prjxray.db.Database(args.db_root, args.part)
populate_hclk_cmt_tiles(db)
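    # "grid" is passed to add_synthetic_edges() further down but is never defined
    # in this function; assuming here that prjxray's Database exposes the grid
    # via db.grid() (hypothetical fix, the parameter is not otherwise used).
    grid = db.grid()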
synth_tiles = None
if args.synth_tiles:
use_roi = True
with open(args.synth_tiles) as f:
synth_tiles = json.load(f)
roi = Roi(
db=db,
x1=synth_tiles['info']['GRID_X_MIN'],
y1=synth_tiles['info']['GRID_Y_MIN'],
x2=synth_tiles['info']['GRID_X_MAX'],
y2=synth_tiles['info']['GRID_Y_MAX'],
)
print('{} generating routing graph for ROI.'.format(now()))
elif args.graph_limit:
use_roi = True
x_min, y_min, x_max, y_max = map(int, args.graph_limit.split(','))
roi = Roi(
db=db,
x1=x_min,
y1=y_min,
x2=x_max,
y2=y_max,
)
else:
use_roi = False
roi = None
synth_tiles = None
capnp_graph = capnp_graph2.Graph(
rr_graph_schema_fname=os.path.join(
args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp'
),
input_file_name=args.read_rr_graph,
progressbar=progressbar_utils.progressbar,
output_file_name=args.write_rr_graph,
)
graph = capnp_graph.graph
if synth_tiles is None:
synth_tiles = find_constant_network(graph)
with sqlite3.connect("file:{}?mode=ro".format(args.connection_database),
uri=True) as conn:
populate_bufg_rebuf_map(conn)
cur = conn.cursor()
for name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \
switch_type in cur.execute("""
SELECT
name,
internal_capacitance,
drive_resistance,
intrinsic_delay,
penalty_cost,
switch_type
FROM
switch;"""):
            # Add back missing switches, which were unused in the arch xml, and
            # so were not emitted in the rr graph XML.
#
# TODO: This can be removed once
# https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354
# is fixed.
try:
graph.get_switch_id(name)
continue
except KeyError:
capnp_graph.add_switch(
graph2.Switch(
id=None,
name=name,
type=graph2.SwitchType[switch_type.upper()],
timing=graph2.SwitchTiming(
r=drive_resistance,
c_in=0.0,
c_out=0.0,
c_internal=internal_capacitance,
t_del=intrinsic_delay,
p_cost=penalty_cost,
),
sizing=graph2.SwitchSizing(
mux_trans_size=0,
buf_size=0,
),
)
)
# Mapping of graph_node.pkey to rr node id.
node_mapping = {}
print('{} Creating connection box list'.format(now()))
connection_box_map = create_connection_boxes(conn, graph)
# Match site pins rr nodes with graph_node's in the connection_database.
print('{} Importing graph nodes'.format(now()))
import_graph_nodes(conn, graph, node_mapping, connection_box_map)
# Walk all track graph nodes and add them.
print('{} Creating tracks'.format(now()))
segment_id = graph.get_segment_id_from_name('dummy')
create_track_rr_graph(
conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id
)
# Set of (src, sink, switch_id) tuples that pip edges have been sent to
# VPR. VPR cannot handle duplicate paths with the same switch id.
print('{} Adding synthetic edges'.format(now()))
add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles)
print('{} Creating channels.'.format(now()))
channels_obj = create_channels(conn)
node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj)
x_dim, y_dim = phy_grid_dims(conn)
connection_box_obj = graph.create_connection_box_object(
x_dim=x_dim, y_dim=y_dim
)
num_edges = get_number_graph_edges(conn, graph, node_mapping)
print('{} Serializing to disk.'.format(now()))
capnp_graph.serialize_to_capnp(
channels_obj=channels_obj,
connection_box_obj=connection_box_obj,
num_nodes=len(capnp_graph.graph.nodes),
nodes_obj=yield_nodes(capnp_graph.graph.nodes),
num_edges=num_edges,
edges_obj=import_graph_edges(conn, graph, node_mapping),
node_remap=node_remap,
)
for k in node_mapping:
node_mapping[k] = node_remap(node_mapping[k])
print('{} Writing node map.'.format(now()))
with open(args.write_rr_node_map, 'wb') as f:
pickle.dump(node_mapping, f)
print('{} Done writing node map.'.format(now()))
if __name__ == '__main__':
main()
| 1.8125 | 2 |
testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py | andrewbowen19/ClusterEclipsingBinaries | 0 | 2429 | #########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from quest - want to take p and ecc from each population (all, obs, rec) and put them into separate file
# Doing this so we don't have to run analyse each time
# Can write separate script for p-ecc plots
# Quest paths in this version of script
import pandas as pd
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')
doIndividualPlots = True
from matplotlib import pyplot as plt
def file_len(fname):
i = 0
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
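    # Hard-soft boundary period for a binary of masses m1 and m2 in a cluster
    # with velocity dispersion sigma, with m3 presumably the typical mass of a
    # perturbing star (used below to truncate the binary fraction at the
    # hard-soft boundary).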
Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)
return Phs.decompose().to(units.day)
#similar to field, but limiting by the hard-soft boundary
def fitRagfb():
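    # Power-law fit to the binary fraction as a function of primary mass, based
    # on the rough field estimates listed below (this is the "Raghavan binary
    # fraction fit" referred to where the function is called).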
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
def RagNormal(x, cdf = False):
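    # Gaussian in log10(P/day), i.e. a log-normal period distribution;
    # returns the PDF at x, or the CDF if cdf=True.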
mean = 5.03
std = 2.28
if (cdf):
return scipy.stats.norm.cdf(x,mean,std)
return scipy.stats.norm.pdf(x,mean,std)
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']):
c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster)
c2 = '#A62B1F' #Dai Red
c3 = '#BF8A26' #Dali Beige
fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2
histAll = np.insert(histAll,0,0)
histObs = np.insert(histObs,0,0)
for f in filters:
histRec[f] = np.insert(histRec[f],0,0)
#PDF
ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
for f in filters:
lw = 1
if (f == 'all'):
lw = 0.5
ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
ax1.set_ylabel('PDF')
ax1.set_yscale('log')
ax1.set_title('Globular Clusters - Baseline', fontsize = 16)
ax1.set_xlabel(xtitle)
#CDF
#cdfAll = []
#cdfObs = []
#cdfRec = dict()
#for f in filters:
# cdfRec[f] = []
# for i in range(len(histAll)):
# cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
# for i in range(len(histObs)):
# cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
# for f in filters:
# for i in range(len(histRec[f])):
# cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
#ax2.step(bin_edges, cdfAll, color=c1)
#ax2.step(bin_edges, cdfObs, color=c2)
#for f in filters:
# lw = 1
# if (f == 'all'):
# lw = 0.5
# ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
#ax2.set_ylabel('CDF')
#ax2.set_xlabel(xtitle)
fig.subplots_adjust(hspace=0)
fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight')
#write to a text file
with open('./eblsst_files/' + fname+'.csv','w') as fl:
outline = 'binEdges,histAll,histObs'
for f in filters:
outline += ','+f+'histRec'
outline += '\n'
fl.write(outline)
for i in range(len(bin_edges)):
outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
for f in filters:
outline += ','+str(histRec[f][i])
outline += '\n'
fl.write(outline)
if __name__ == "__main__":
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']
#get the Raghavan binary fraction fit
fbFit= fitRagfb()
print(fbFit)
#to normalize
intAll, err = quad(RagNormal, -20, 20)
intCut, err = quad(RagNormal, -20, np.log10(365*10.))
intNorm = intCut/intAll
#cutoff in percent error for "recovered"
Pcut = 0.1
#assumed mean stellar mass
mMean = 0.5
#minimum number of lines to consider in file
Nlim = 3
if (doIndividualPlots):
fmass, axmass = plt.subplots()
fqrat, axqrat = plt.subplots()
fecc, axecc = plt.subplots()
flper, axlper = plt.subplots()
fdist, axdist = plt.subplots()
fmag, axmag = plt.subplots()
frad, axrad = plt.subplots()
#bins for all the histograms
Nbins = 25
mbins = np.arange(0,10, 0.1, dtype='float')
qbins = np.arange(0,1, 0.1, dtype='float')
ebins = np.arange(0, 1.05, 0.05, dtype='float')
lpbins = np.arange(-2, 10, 0.5, dtype='float')
dbins = np.arange(0, 40, 1, dtype='float')
magbins = np.arange(11, 25, 1, dtype='float')
rbins = np.arange(0, 100, 0.2, dtype='float')
#blanks for the histograms
#All
m1hAll = np.zeros_like(mbins)[1:]
qhAll = np.zeros_like(qbins)[1:]
ehAll = np.zeros_like(ebins)[1:]
lphAll = np.zeros_like(lpbins)[1:]
dhAll = np.zeros_like(dbins)[1:]
maghAll = np.zeros_like(magbins)[1:]
rhAll = np.zeros_like(rbins)[1:]
#Observable
m1hObs = np.zeros_like(mbins)[1:]
qhObs = np.zeros_like(qbins)[1:]
ehObs = np.zeros_like(ebins)[1:]
lphObs = np.zeros_like(lpbins)[1:]
dhObs = np.zeros_like(dbins)[1:]
maghObs = np.zeros_like(magbins)[1:]
rhObs = np.zeros_like(rbins)[1:]
#Recovered
m1hRec = dict()
qhRec = dict()
ehRec = dict()
lphRec = dict()
dhRec = dict()
maghRec = dict()
rhRec = dict()
for f in filters:
m1hRec[f] = np.zeros_like(mbins)[1:]
qhRec[f] = np.zeros_like(qbins)[1:]
ehRec[f] = np.zeros_like(ebins)[1:]
lphRec[f] = np.zeros_like(lpbins)[1:]
dhRec[f] = np.zeros_like(dbins)[1:]
maghRec[f] = np.zeros_like(magbins)[1:]
rhRec[f] = np.zeros_like(rbins)[1:]
RA = []
Dec = []
recFrac = []
recN = []
rawN = []
obsN = []
fileN = []
fileObsN = []
fileRecN = []
allNPrsa = []
obsNPrsa = []
recNPrsa = []
# Lists for period and eccentricity for Andrew's circularization plots
eccAll = []
eccObs = []
eccRec = []
pAll = []
pObs = []
pRec = []
# Using prsa dataframes for these lists because of period cutoff at 1000 days
# Dataframes to write to files later; 3 files for each sub-population - append everything to these
peccAll = pd.DataFrame(columns = ['e', 'p'])
peccObs = pd.DataFrame(columns = ['e', 'p'])
peccRec = pd.DataFrame(columns = ['e', 'p'])
#Read in all the data and make the histograms
d = "./input_files/"
files = os.listdir(d)
IDs = []
for i, f in enumerate(files):
print(round(i/len(files),4), f)
fl = file_len(d+f)
if (fl >= 4):
#read in the header
header = pd.read_csv(d+f, nrows=1)
######################
#NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
#####################
Nmult = header['clusterMass'][0]/mMean
#Nmult = 1.
RA.append(header['OpSimRA'])
Dec.append(header['OpSimDec'])
#read in rest of the file
data = pd.read_csv(d+f, header = 2).fillna(-999)
rF = 0.
rN = 0.
Nrec = 0.
Nobs = 0.
raN = 0.
obN = 0.
fiN = 0.
fioN = 0.
firN = 0.
NallPrsa = 0.
NobsPrsa = 0.
NrecPrsa = 0.
Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
# Appending for Andrew
eccAll.append(prsa['e'].values)
pAll.append(prsa['p'].values)
NallPrsa = len(prsa.index)
if (Nall >= Nlim):
#create histograms
#All
m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
ehAll0, eb = np.histogram(data["e"], bins=ebins)
lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
dhAll0, db = np.histogram(data["d"], bins=dbins)
maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
if (doIndividualPlots):
axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)
#account for the binary fraction, as a function of mass
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
#account for the hard-soft boundary
Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
fb *= RagNormal(np.log10(Phs), cdf = True)
print("fb, Phs = ", fb, Phs)
Nmult *= fb
m1hAll += m1hAll0/Nall*Nmult
qhAll += qhAll0/Nall*Nmult
ehAll += ehAll0/Nall*Nmult
lphAll += lphAll0/Nall*Nmult
dhAll += dhAll0/Nall*Nmult
maghAll += maghAll0/Nall*Nmult
rhAll += rhAll0/Nall*Nmult
#Obs
obs = data.loc[data['LSM_PERIOD'] != -999]
Nobs = len(obs.index)
prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)]
NobsPrsa = len(prsaObs.index)
# Appending for Andrew's files
eccObs.append(prsaObs['e'].values)
pObs.append(prsaObs['p'].values)
if (Nobs >= Nlim):
m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
ehObs0, eb = np.histogram(obs["e"], bins=ebins)
lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
dhObs0, db = np.histogram(obs["d"], bins=dbins)
maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
m1hObs += m1hObs0/Nall*Nmult
qhObs += qhObs0/Nall*Nmult
ehObs += ehObs0/Nall*Nmult
lphObs += lphObs0/Nall*Nmult
dhObs += dhObs0/Nall*Nmult
maghObs += maghObs0/Nall*Nmult
rhObs += rhObs0/Nall*Nmult
#Rec
recCombined = pd.DataFrame()
prsaRecCombined = pd.DataFrame()
for filt in filters:
key = filt+'LSS_PERIOD'
if (filt == 'all'):
key = 'LSM_PERIOD'
fullP = abs(data[key] - data['p'])/data['p']
halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
Nrec = len(rec.index)
#I'd like to account for all filters here to have more accurate numbers
recCombined = recCombined.append(rec)
prsaRecCombined = prsaRecCombined.append(prsaRec)
# Going to use prsaRecCombined for ecc-p plots to account for all filters
eccRec.append(prsaRec['e'].values)
pRec.append(prsaRec['p'].values)
if (filt == 'all'):
recCombined.drop_duplicates(inplace=True)
prsaRecCombined.drop_duplicates(inplace=True)
if (Nrec >= Nlim):
m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
ehRec0, eb = np.histogram(rec["e"], bins=ebins)
lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
dhRec0, db = np.histogram(rec["d"], bins=dbins)
maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
m1hRec[filt] += m1hRec0/Nall*Nmult
qhRec[filt] += qhRec0/Nall*Nmult
ehRec[filt] += ehRec0/Nall*Nmult
lphRec[filt] += lphRec0/Nall*Nmult
dhRec[filt] += dhRec0/Nall*Nmult
maghRec[filt] += maghRec0/Nall*Nmult
rhRec[filt] += rhRec0/Nall*Nmult
#for the mollweide
if (filt == 'all'):
Nrec = len(recCombined.index)
rF = Nrec/Nall
rN = Nrec/Nall*Nmult
raN = Nmult
obN = Nobs/Nall*Nmult
fiN = Nall
fioN = Nobs
firN = Nrec
NrecPrsa = len(prsaRecCombined.index)
NrecPrsa = NrecPrsa/Nall*Nmult
NobsPrsa = NobsPrsa/Nall*Nmult
NallPrsa = NallPrsa/Nall*Nmult
recFrac.append(rF)
recN.append(rN)
rawN.append(raN)
obsN.append(obN)
fileN.append(fiN)
fileObsN.append(fioN)
fileRecN.append(firN)
allNPrsa.append(NallPrsa)
obsNPrsa.append(NobsPrsa)
recNPrsa.append(NrecPrsa)
#print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN))
# Concatenating p and ecc lists
eccAll = np.concatenate(eccAll)
eccObs = np.concatenate(eccObs)
eccRec = np.concatenate(eccRec)
pAll = np.concatenate(pAll)
pObs = np.concatenate(pObs)
pRec = np.concatenate(pRec)
# print('Ecc lists:', eccAll, eccObs, eccRec)
# print('P lists:', pAll, pObs, pRec)
# Appending lists with all the p/ecc values to our dataframes
# All dataframe
peccAll['e'] = eccAll
peccAll['p'] = pAll
# Observable dataframe
peccObs['e'] = eccObs
peccObs['p'] = pObs
# Recovered dataframe
peccRec['e'] = eccRec
peccRec['p'] = pRec
# print('Final Dataframes:', peccAll, peccObs, peccRec)
# print(peccRec.columns)
# 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)
peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p'])
peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p'])
peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p'])
#plot and save the histograms
saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')
#make the mollweide
coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs')
lGal = coords.galactic.l.wrap_at(180.*units.degree).degree
bGal = coords.galactic.b.wrap_at(180.*units.degree).degree
RAwrap = coords.ra.wrap_at(180.*units.degree).degree
Decwrap = coords.dec.wrap_at(180.*units.degree).degree
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'% recovered')
f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight')
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'log10(N) recovered')
f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight')
if (doIndividualPlots):
fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight')
fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches = 'tight')
fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight')
flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight')
fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches = 'tight')
fmag.savefig('./plots/' + 'magPDFall.pdf',format='pdf', bbox_inches = 'tight')
frad.savefig('./plots/' + 'radPDFall.pdf',format='pdf', bbox_inches = 'tight')
print("###################")
print("number of binaries in input files (raw, log):",np.sum(fileN), np.log10(np.sum(fileN)))
print("number of binaries in tested with gatspy (raw, log):",np.sum(fileObsN), np.log10(np.sum(fileObsN)))
print("number of binaries in recovered with gatspy (raw, log):",np.sum(fileRecN), np.log10(np.sum(fileRecN)))
print("recovered/observable*100 with gatspy:",np.sum(fileRecN)/np.sum(fileObsN)*100.)
print("###################")
print("total in sample (raw, log):",np.sum(rawN), np.log10(np.sum(rawN)))
print("total observable (raw, log):",np.sum(obsN), np.log10(np.sum(obsN)))
print("total recovered (raw, log):",np.sum(recN), np.log10(np.sum(recN)))
print("recovered/observable*100:",np.sum(recN)/np.sum(obsN)*100.)
print("###################")
print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(allNPrsa), np.log10(np.sum(allNPrsa)))
print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa)))
print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(recNPrsa), np.log10(np.sum(recNPrsa)))
print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
| 2.046875 | 2 |
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py | ckamtsikis/cmssw | 13 | 2430 | <filename>CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
import FWCore.ParameterSet.Config as cms
import os
process = cms.Process("summary")
process.MessageLogger = cms.Service( "MessageLogger",
debugModules = cms.untracked.vstring( "*" ),
cout = cms.untracked.PSet( threshold = cms.untracked.string( "DEBUG" ) ),
destinations = cms.untracked.vstring( "cout" )
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.CondDB.CondDB_cfi")
process.load("CondTools.BeamSpot.BeamSpotRcdPrinter_cfi")
### 2018 Prompt
process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
process.BeamSpotRcdPrinter.startIOV = 1350646955507767
process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.BeamSpotRcdPrinter.output = "summary2018_Prompt.txt"
### 2017 ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1275820035276801
#process.BeamSpotRcdPrinter.endIOV = 1316235677532161
### 2018 ABC ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1354018504835073
#process.BeamSpotRcdPrinter.endIOV = 1374668707594734
### 2018D Prompt
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
#process.BeamSpotRcdPrinter.startIOV = 1377280047710242
#process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.p = cms.Path(process.BeamSpotRcdPrinter)
| 1.453125 | 1 |
django/authentication/api/urls.py | NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project | 2 | 2431 | from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView
)
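# Stock SimpleJWT token endpoints; with the default settings these behave as:
#   POST obtain/  {"username", "password"} -> {"refresh", "access"}
#   POST refresh/ {"refresh"}              -> {"access"}
#   POST verify/  {"token"}                -> 200 with an empty body if the token is valid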
urlpatterns = [
path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('verify/', TokenVerifyView.as_view(), name='token_verify'),
]
| 1.804688 | 2 |
yue/core/explorer/ftpsource.py | nsetzer/YueMusicPlayer | 0 | 2432 | <filename>yue/core/explorer/ftpsource.py
from ftplib import FTP,error_perm, all_errors
import posixpath
from io import BytesIO,SEEK_SET
from .source import DataSource
import sys
import re
reftp = re.compile(r'(ssh|ftp)\:\/\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\/(.*)')
def parseFTPurl( url ):
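    # Split an ftp/ssh url into its parts. Illustrative example:
    #   parseFTPurl("ftp://user:pw@host:2121/music")
    #   -> {'mode': 'ftp', 'username': 'user', 'password': 'pw',
    #       'hostname': 'host', 'port': 2121, 'path': 'music'}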
m = reftp.match( url )
if m:
g = m.groups()
result = {
"mode" : g[0],
"username" : g[2] or "",
"password" : g[3] or "",
"hostname" : g[4] or "",
"port" : int(g[5][1:]) if g[5] else 0,
"path" : g[6] or "/",
}
if result['port'] == 0:
            if result['mode'] == 'ssh':
result['port'] = 22
else:
result['port'] = 21 # ftp port default
return result
raise ValueError("invalid: %s"%url)
def utf8_fix(s):
return ''.join([ a if ord(a)<128 else "%02X"%ord(a) for a in s])
class FTPWriter(object):
"""docstring for FTPWriter"""
def __init__(self, ftp, path):
super(FTPWriter, self).__init__()
self.ftp = ftp
self.path = path
self.file = BytesIO()
def write(self,data):
return self.file.write(data)
def seek(self,pos,whence=SEEK_SET):
return self.file.seek(pos,whence)
def tell(self):
return self.file.tell()
def close(self):
self.file.seek(0)
text = "STOR " + utf8_fix(self.path)
self.ftp.storbinary(text, self.file)
def __enter__(self):
return self
def __exit__(self,typ,val,tb):
if typ is None:
self.close()
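# Illustrative usage: data written to an FTPWriter is buffered in memory and only
# uploaded to the server (via STOR) when close() is called / the with-block exits:
#   with FTPWriter(ftp, "/remote/file.bin") as f:
#       f.write(data)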
class FTPReader(object):
"""docstring for FTPWriter"""
def __init__(self, ftp, path):
super(FTPReader, self).__init__()
self.ftp = ftp
self.path = path
self.file = BytesIO()
# open the file
text = "RETR " + utf8_fix(self.path)
self.ftp.retrbinary(text, self.file.write)
self.file.seek(0)
def read(self,n=None):
return self.file.read(n)
def seek(self,pos,whence=SEEK_SET):
return self.file.seek(pos,whence)
def tell(self):
return self.file.tell()
def close(self):
self.file.close()
def __enter__(self):
return self
def __exit__(self,typ,val,tb):
if typ is None:
self.close()
class FTPSource(DataSource):
"""
there is some sort of problem with utf-8/latin-1 and ftplib
    storbinary must accept a STRING, since it builds a cmd and adds
    the CRLF to the input argument using the plus operator.
    the command fails when given unicode text (ord > 127) and also
    fails when given a byte string.
"""
# TODO: turn this into a directory generator
# which first loads the directory, then loops over
# loaded items.
# TODO: on windows we need a way to view available
# drive letters
def __init__(self, host, port, username="", password=""):
super(FTPSource, self).__init__()
self.ftp = FTP()
self.ftp.connect(host,port)
self.ftp.login(username,password)
self.hostname = "%s:%d"%(host,port)
def root(self):
return "/"
def close(self):
try:
self.ftp.quit()
except all_errors as e:
sys.stderr.write("Error Closing FTP connection\n")
sys.stderr.write("%s\n"%e)
super().close()
def fix(self, path):
return utf8_fix(path)
def join(self,*args):
return posixpath.join(*args)
def breakpath(self,path):
return [ x for x in path.replace("/","\\").split("\\") if x ]
def relpath(self,path,base):
return posixpath.relpath(path,base)
def normpath(self,path,root=None):
if root and not path.startswith("/"):
path = posixpath.join(root,path)
return posixpath.normpath( path )
def listdir(self,path):
return self.ftp.nlst(path)
def parent(self,path):
# TODO: if path is C:\\ return empty string ?
# empty string returns drives
p,_ = posixpath.split(path)
return p
def move(self,oldpath,newpath):
self.ftp.rename(oldpath,newpath)
def delete(self,path):
# todo support removing directory rmdir()
path = utf8_fix(path)
if self.exists( path ):
if self.isdir(path):
try:
self.ftp.rmd(path)
except Exception as e:
print("ftp delete error: %s"%e)
else:
try:
self.ftp.delete(path)
except Exception as e:
print("ftp delete error: %s"%e)
def open(self,path,mode):
if mode=="wb":
return FTPWriter(self.ftp,path)
elif mode=="rb":
return FTPReader(self.ftp,path)
raise NotImplementedError(mode)
def exists(self,path):
path = utf8_fix(path)
p,n=posixpath.split(path)
lst = set(self.listdir(p))
return n in lst
def isdir(self,path):
path = utf8_fix(path)
try:
return self.ftp.size(path) is None
except error_perm:
# TODO: to think about more later,
# under my use-case, I'm only asking if a path is a directory
# if I Already think it exists. Under the current FTP impl
# ftp.size() fails for various reasons unless the file exists
# and is an accessable file. I can infer that a failure to
# determine the size means that the path is a directory,
# but this does not hold true under other use cases.
# I can't cache listdir calls, but if I could, then I could
# use that to determine if the file exists
return True#self.exists( path )
def mkdir(self,path):
# this is a really ugly quick and dirty solution
path = utf8_fix(path)
if not self.exists(path):
p = self.parent( path )
try:
if not self.exists(p):
self.ftp.mkd( p )
self.ftp.mkd(path)
except Exception as e:
print("ftp mkd error: %s"%e)
def split(self,path):
return posixpath.split(path)
def splitext(self,path):
return posixpath.splitext(path)
def stat(self,path):
try:
size = self.ftp.size(path)
except error_perm:
size = None
result = {
"isDir" : size is None,
"isLink": False,
"mtime" : 0,
"ctime" : 0,
"size" : size or 0,
"name" : self.split(path)[1],
"mode" : 0
}
return result
def stat_fast(self,path):
# not fast for thus file system :(
try:
size = self.ftp.size(path)
except error_perm:
size = None
result = {
"name" : self.split(path)[1],
"size" : size or 0,
"isDir" : size is None,
"isLink" : False,
}
return result
def chmod(self,path,mode):
print("chmod not implemented")
def getExportPath(self,path):
return self.hostname+path
| 2.578125 | 3 |
tests/engine/knowledge_base.py | roshanmaskey/plaso | 1,253 | 2433 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the knowledge base."""
import unittest
from plaso.containers import artifacts
from plaso.engine import knowledge_base
from tests import test_lib as shared_test_lib
class KnowledgeBaseTest(shared_test_lib.BaseTestCase):
"""Tests for the knowledge base."""
# pylint: disable=protected-access
_MACOS_PATHS = [
'/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions',
('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/'
'apdfllckaahabafndbhieahigkjlhalf'),
'/private/var/log/system.log',
'/Users/frank/Library/Application Data/Google/Chrome/Default',
'/Users/hans/Library/Application Data/Google/Chrome/Default',
('/Users/frank/Library/Application Data/Google/Chrome/Default/'
'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'),
'/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions']
_MACOS_USERS = [
{'name': 'root', 'path': '/var/root', 'sid': '0'},
{'name': 'frank', 'path': '/Users/frank', 'sid': '4052'},
{'name': 'hans', 'path': '/Users/hans', 'sid': '4352'},
{'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}]
_WINDOWS_PATHS = [
'C:\\Users\\Dude\\SomeFolder\\Chrome\\Default\\Extensions',
('C:\\Users\\Dude\\SomeNoneStandardFolder\\Chrome\\Default\\Extensions\\'
'hmjkmjkepdijhoojdojkdfohbdgmmhki'),
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'blpcfgokakmgnkcojhhkbfbldkacnbeo'),
'C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions',
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'icppfcnhkcmnfdhfhphakoifcfokfdhg'),
'C:\\Windows\\System32',
'C:\\Stuff/with path separator\\Folder']
_WINDOWS_USERS = [
{'name': 'dude', 'path': 'C:\\Users\\dude', 'sid': 'S-1'},
{'name': 'frank', 'path': 'C:\\Users\\frank', 'sid': 'S-2'}]
def _SetUserAccounts(self, knowledge_base_object, users):
"""Sets the user accounts in the knowledge base.
Args:
knowledge_base_object (KnowledgeBase): knowledge base.
users (list[dict[str,str])): users.
"""
for user in users:
identifier = user.get('sid', user.get('uid', None))
if not identifier:
continue
user_account = artifacts.UserAccountArtifact(
identifier=identifier, user_directory=user.get('path', None),
username=user.get('name', None))
knowledge_base_object.AddUserAccount(user_account)
def testCodepageProperty(self):
"""Tests the codepage property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.codepage, 'cp1252')
def testHostnameProperty(self):
"""Tests the hostname property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.hostname, '')
def testOperatingSystemProperty(self):
"""Tests the operating_system property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertIsNone(operating_system)
knowledge_base_object.SetValue('operating_system', 'Windows')
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertEqual(operating_system, 'Windows')
def testTimezoneProperty(self):
"""Tests the timezone property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.timezone.zone, 'UTC')
def testUserAccountsProperty(self):
"""Tests the user accounts property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(len(knowledge_base_object.user_accounts), 0)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertEqual(len(knowledge_base_object.user_accounts), 1)
def testYearProperty(self):
"""Tests the year property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.year, 0)
def testAddUserAccount(self):
"""Tests the AddUserAccount function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
with self.assertRaises(KeyError):
knowledge_base_object.AddUserAccount(user_account)
def testAddEnvironmentVariable(self):
"""Tests the AddEnvironmentVariable function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
with self.assertRaises(KeyError):
knowledge_base_object.AddEnvironmentVariable(environment_variable)
def testGetEnvironmentVariable(self):
"""Tests the GetEnvironmentVariable functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'SystemRoot')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'sYsTeMrOoT')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'Bogus')
self.assertIsNone(test_environment_variable)
def testGetEnvironmentVariables(self):
"""Tests the GetEnvironmentVariables function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='WinDir', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variables = knowledge_base_object.GetEnvironmentVariables()
self.assertEqual(len(environment_variables), 2)
def testGetHostname(self):
"""Tests the GetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, '')
# TODO: add tests for GetMountPoint.
def testGetSourceConfigurationArtifacts(self):
"""Tests the GetSourceConfigurationArtifacts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
source_configurations = (
knowledge_base_object.GetSourceConfigurationArtifacts())
self.assertEqual(len(source_configurations), 1)
self.assertIsNotNone(source_configurations[0])
system_configuration = source_configurations[0].system_configuration
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
def testGetSystemConfigurationArtifact(self):
"""Tests the _GetSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
system_configuration = (
knowledge_base_object._GetSystemConfigurationArtifact())
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
# TODO: add tests for GetTextPrepend.
def testGetUsernameByIdentifier(self):
"""Tests the GetUsernameByIdentifier function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
    username = knowledge_base_object.GetUsernameByIdentifier('1000')
    self.assertEqual(username, 'testuser')
    username = knowledge_base_object.GetUsernameByIdentifier(1000)
    self.assertEqual(username, '')
    username = knowledge_base_object.GetUsernameByIdentifier('1001')
    self.assertEqual(username, '')
def testGetUsernameForPath(self):
"""Tests the GetUsernameForPath function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[4])
self.assertEqual(username, 'hans')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertIsNone(username)
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[2])
self.assertEqual(username, 'frank')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[2])
self.assertIsNone(username)
def testGetSetValue(self):
"""Tests the Get and SetValue functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
expected_value = 'test value'
knowledge_base_object.SetValue('Test', expected_value)
value = knowledge_base_object.GetValue('Test')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('tEsT')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('Bogus')
self.assertIsNone(value)
def testHasUserAccounts(self):
"""Tests the HasUserAccounts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertFalse(knowledge_base_object.HasUserAccounts())
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertTrue(knowledge_base_object.HasUserAccounts())
def testReadSystemConfigurationArtifact(self):
"""Tests the ReadSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.hostname = artifacts.HostnameArtifact(
name='myhost.mydomain')
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
system_configuration.user_accounts.append(user_account)
knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration)
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, 'myhost.mydomain')
def testSetActiveSession(self):
"""Tests the SetActiveSession function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a')
self.assertEqual(
knowledge_base_object._active_session,
'ddda05bedf324cbd99fa8c24b8a0037a')
knowledge_base_object.SetActiveSession(
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
self.assertEqual(
knowledge_base_object._active_session,
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
def testSetCodepage(self):
"""Tests the SetCodepage function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetCodepage('cp1252')
with self.assertRaises(ValueError):
knowledge_base_object.SetCodepage('bogus')
def testSetHostname(self):
"""Tests the SetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
# TODO: add tests for SetMountPoint.
# TODO: add tests for SetTextPrepend.
def testSetTimeZone(self):
"""Tests the SetTimeZone function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
time_zone_artifact = artifacts.TimeZoneArtifact(
localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112',
name='Eastern Standard Time')
knowledge_base_object.AddAvailableTimeZone(time_zone_artifact)
# Set an IANA time zone name.
knowledge_base_object.SetTimeZone('Europe/Zurich')
self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich')
# Set a Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern Standard Time')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a localized Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a MUI form Windows time zone name.
knowledge_base_object.SetTimeZone('@tzres.dll,-112')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
with self.assertRaises(ValueError):
knowledge_base_object.SetTimeZone('Bogus')
if __name__ == '__main__':
unittest.main()
| 1.921875 | 2 |
Problems/Dynamic Programming/140. Word Break II.py | BYJRK/LeetCode-Solutions | 0 | 2434 | <filename>Problems/Dynamic Programming/140. Word Break II.py
# https://leetcode.com/problems/word-break-ii/
from typing import List
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
        # Quick check: if s contains a letter that none of the words contain, return immediately
set1 = set(s)
set2 = set(''.join(wordDict))
if not set1.issubset(set2):
return []
        # dp[i] records the ways in which the substring s[:i] can be segmented
        # [[]] marks the start of the string
        # [None] means the position has not been reached yet, or cannot be segmented
        # e.g. [['a', 'b'], ['ab']] means two ways of building this substring have been found so far
dp = [None] * (len(s) + 1)
dp[0] = [[]]
for i in range(len(s) + 1):
            # Skip if the current substring cannot be segmented
if dp[i] is None:
continue
tmp = s[i:]
for w in wordDict:
idx = len(w) + i
if idx > len(s):
continue
if tmp.startswith(w):
if dp[idx] is None:
dp[idx] = []
                    # Copy every existing segmentation to the new position, appending the current word to each
for dic in dp[i]:
dp[idx].append(dic + [w])
if dp[-1] is None:
return []
return [' '.join(res) for res in dp[-1]]
def wordBreak_dfs(self, s: str, wordDict: List[str]) -> List[str]:
def dfs(s: str, memo={}):
if s in memo:
return memo[s]
if len(s) == 0:
return [[]]
res = []
for w in wordDict:
if s.startswith(w):
tmp = s[len(w):]
combos = dfs(tmp, memo)
for combo in combos:
res.append([w] + combo)
memo[s] = res
return res
return dfs(s)
s = Solution()
print(s.wordBreak_dfs('catsanddog', ["cat", "cats", "and", "sand", "dog"]))
print(s.wordBreak_dfs('pineapplepenapple', [
"apple", "pen", "applepen", "pine", "pineapple"]))
# text = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
# words = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa",
# "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"]
# print(s.wordBreak(text, words))
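# Hypothetical extra check (not part of the original file): the DP variant should
# produce the same segmentations as the DFS variant demonstrated above.
print(s.wordBreak('catsanddog', ["cat", "cats", "and", "sand", "dog"]))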
| 3.640625 | 4 |
neutron/tests/unit/db/test_migration.py | banhr/neutron | 1 | 2435 | <filename>neutron/tests/unit/db/test_migration.py
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import re
import sys
import textwrap
from alembic.autogenerate import api as alembic_ag_api
from alembic import config as alembic_config
from alembic.operations import ops as alembic_ops
from alembic import script as alembic_script
import fixtures
import mock
from neutron_lib.utils import helpers
from oslo_utils import fileutils
import pkg_resources
import sqlalchemy as sa
from testtools import matchers
from neutron.conf.db import migration_cli
from neutron.db import migration
from neutron.db.migration import autogen
from neutron.db.migration import cli
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit import testlib_api
class FakeConfig(object):
service = ''
class FakeRevision(object):
path = 'fakepath'
def __init__(self, labels=None, down_revision=None, is_branch_point=False):
if not labels:
labels = set()
self.branch_labels = labels
self.down_revision = down_revision
self.is_branch_point = is_branch_point
self.revision = helpers.get_random_string(10)
self.module = mock.MagicMock()
class MigrationEntrypointsMemento(fixtures.Fixture):
'''Create a copy of the migration entrypoints map so it can be restored
during test cleanup.
'''
def _setUp(self):
self.ep_backup = {}
for proj, ep in migration_cli.migration_entrypoints.items():
self.ep_backup[proj] = copy.copy(ep)
self.addCleanup(self.restore)
def restore(self):
migration_cli.migration_entrypoints = self.ep_backup
class TestDbMigration(base.BaseTestCase):
def setUp(self):
super(TestDbMigration, self).setUp()
mock.patch('alembic.op.get_bind').start()
self.mock_alembic_is_offline = mock.patch(
'alembic.context.is_offline_mode', return_value=False).start()
self.mock_alembic_is_offline.return_value = False
self.mock_sa_inspector = mock.patch(
'sqlalchemy.engine.reflection.Inspector').start()
def _prepare_mocked_sqlalchemy_inspector(self):
mock_inspector = mock.MagicMock()
mock_inspector.get_table_names.return_value = ['foo', 'bar']
mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
{'name': 'bar_column'}]
self.mock_sa_inspector.from_engine.return_value = mock_inspector
def test_schema_has_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_table('foo'))
def test_schema_has_table_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
def test_schema_has_column_missing_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column('meh', 'meh'))
def test_schema_has_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
def test_schema_has_column_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_column,
'foo', 'foo_col')
def test_schema_has_column_missing_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column(
'foo', column_name='meh'))
class TestCli(base.BaseTestCase):
def setUp(self):
super(TestCli, self).setUp()
self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
self.do_alembic_cmd = self.do_alembic_cmd_p.start()
self.mock_alembic_err = mock.patch('alembic.util.err').start()
self.mock_alembic_warn = mock.patch('alembic.util.warn').start()
self.mock_alembic_err.side_effect = SystemExit
def mocked_root_dir(cfg):
return os.path.join('/fake/dir', cli._get_project_base(cfg))
mock_root = mock.patch.object(cli, '_get_package_root_dir').start()
mock_root.side_effect = mocked_root_dir
# Avoid creating fake directories
mock.patch('oslo_utils.fileutils.ensure_tree').start()
# Set up some configs and entrypoints for tests to chew on
self.configs = []
self.projects = ('neutron', 'networking-foo', 'neutron-fwaas')
ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini')
self.useFixture(MigrationEntrypointsMemento())
migration_cli.migration_entrypoints = {}
for project in self.projects:
config = alembic_config.Config(ini)
config.set_main_option('neutron_project', project)
module_name = project.replace('-', '_') + '.db.migration'
attrs = ('alembic_migrations',)
script_location = ':'.join([module_name, attrs[0]])
config.set_main_option('script_location', script_location)
self.configs.append(config)
entrypoint = pkg_resources.EntryPoint(project,
module_name,
attrs=attrs)
migration_cli.migration_entrypoints[project] = entrypoint
def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]):
with mock.patch.object(sys, 'argv', argv),\
mock.patch.object(cli, 'run_sanity_checks'),\
mock.patch.object(cli, 'validate_revisions'):
cli.main()
def _append_version_path(args):
args = copy.copy(args)
if 'autogenerate' in args and not args['autogenerate']:
args['version_path'] = mock.ANY
return args
self.do_alembic_cmd.assert_has_calls(
[mock.call(mock.ANY, func_name, **_append_version_path(kwargs))
for kwargs in exp_kwargs]
)
def test_stamp(self):
self._main_test_helper(
['prog', 'stamp', 'foo'],
'stamp',
[{'revision': 'foo', 'sql': False}]
)
self._main_test_helper(
['prog', 'stamp', 'foo', '--sql'],
'stamp',
[{'revision': 'foo', 'sql': True}]
)
def _validate_cmd(self, cmd):
self._main_test_helper(
['prog', cmd],
cmd,
[{'verbose': False}])
self._main_test_helper(
['prog', cmd, '--verbose'],
cmd,
[{'verbose': True}])
def test_branches(self):
self._validate_cmd('branches')
def test_current(self):
self._validate_cmd('current')
def test_history(self):
self._validate_cmd('history')
def test_heads(self):
self._validate_cmd('heads')
def test_check_migration(self):
with mock.patch.object(cli, 'validate_head_files') as validate:
self._main_test_helper(['prog', 'check_migration'], 'branches')
self.assertEqual(len(self.projects), validate.call_count)
def _test_database_sync_revision(self, separate_branches=True):
with mock.patch.object(cli, 'update_head_files') as update:
if separate_branches:
mock.patch('os.path.exists').start()
expected_kwargs = [{
'message': 'message', 'sql': False, 'autogenerate': True,
}]
self._main_test_helper(
['prog', 'revision', '--autogenerate', '-m', 'message'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
expected_kwargs = [{
'message': 'message',
'sql': True,
'autogenerate': False,
'head': cli._get_branch_head(branch)
} for branch in cli.MIGRATION_BRANCHES]
for kwarg in expected_kwargs:
kwarg['autogenerate'] = False
kwarg['sql'] = True
self._main_test_helper(
['prog', 'revision', '--sql', '-m', 'message'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
expected_kwargs = [{
'message': 'message',
'sql': False,
'autogenerate': False,
'head': 'expand@head'
}]
self._main_test_helper(
['prog', 'revision', '-m', 'message', '--expand'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
for kwarg in expected_kwargs:
kwarg['head'] = 'contract@head'
self._main_test_helper(
['prog', 'revision', '-m', 'message', '--contract'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
def test_database_sync_revision(self):
self._test_database_sync_revision()
def test_database_sync_revision_no_branches(self):
# Test that old branchless approach is still supported
self._test_database_sync_revision(separate_branches=False)
def test_upgrade_revision(self):
self._main_test_helper(
['prog', 'upgrade', '--sql', 'head'],
'upgrade',
[{'desc': None, 'revision': 'heads', 'sql': True}]
)
def test_upgrade_delta(self):
self._main_test_helper(
['prog', 'upgrade', '--delta', '3'],
'upgrade',
[{'desc': None, 'revision': '+3', 'sql': False}]
)
def test_upgrade_revision_delta(self):
self._main_test_helper(
['prog', 'upgrade', 'kilo', '--delta', '3'],
'upgrade',
[{'desc': None, 'revision': 'kilo+3', 'sql': False}]
)
def test_upgrade_expand(self):
self._main_test_helper(
['prog', 'upgrade', '--expand'],
'upgrade',
[{'desc': cli.EXPAND_BRANCH,
'revision': 'expand@head',
'sql': False}]
)
def test_upgrade_expand_contract_are_mutually_exclusive(self):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--expand --contract'], 'upgrade')
def _test_upgrade_conflicts_with_revision(self, mode):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--%s revision1' % mode], 'upgrade')
def _test_upgrade_conflicts_with_delta(self, mode):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--%s +3' % mode], 'upgrade')
def _test_revision_autogenerate_conflicts_with_branch(self, branch):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'revision', '--autogenerate', '--%s' % branch],
'revision')
def test_revision_autogenerate_conflicts_with_expand(self):
self._test_revision_autogenerate_conflicts_with_branch(
cli.EXPAND_BRANCH)
def test_revision_autogenerate_conflicts_with_contract(self):
self._test_revision_autogenerate_conflicts_with_branch(
cli.CONTRACT_BRANCH)
def test_upgrade_expand_conflicts_with_revision(self):
self._test_upgrade_conflicts_with_revision('expand')
def test_upgrade_contract_conflicts_with_revision(self):
self._test_upgrade_conflicts_with_revision('contract')
def test_upgrade_expand_conflicts_with_delta(self):
self._test_upgrade_conflicts_with_delta('expand')
def test_upgrade_contract_conflicts_with_delta(self):
self._test_upgrade_conflicts_with_delta('contract')
def test_upgrade_contract(self):
self._main_test_helper(
['prog', 'upgrade', '--contract'],
'upgrade',
[{'desc': cli.CONTRACT_BRANCH,
'revision': 'contract@head',
'sql': False}]
)
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_upgrade_milestone_expand_before_contract(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
e_revs[3].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs + e_revs
self._main_test_helper(
['prog', '--subproject', 'neutron', 'upgrade', 'liberty'],
'upgrade',
[{'desc': cli.EXPAND_BRANCH,
'revision': e_revs[3].revision,
'sql': False},
{'desc': cli.CONTRACT_BRANCH,
'revision': c_revs[1].revision,
'sql': False}]
)
def assert_command_fails(self, command):
# Avoid cluttering stdout with argparse error messages
mock.patch('argparse.ArgumentParser._print_message').start()
with mock.patch.object(sys, 'argv', command), mock.patch.object(
cli, 'run_sanity_checks'):
self.assertRaises(SystemExit, cli.main)
def test_downgrade_fails(self):
self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno'])
def test_upgrade_negative_relative_revision_fails(self):
self.assert_command_fails(['prog', 'upgrade', '-2'])
def test_upgrade_negative_delta_fails(self):
self.assert_command_fails(['prog', 'upgrade', '--delta', '-2'])
def test_upgrade_rejects_delta_with_relative_revision(self):
self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3'])
def _test_validate_head_files_helper(self, heads, contract_head='',
expand_head=''):
fake_config = self.configs[0]
head_files_not_exist = (contract_head == expand_head == '')
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\
mock.patch('os.path.exists') as os_mock:
if head_files_not_exist:
os_mock.return_value = False
else:
os_mock.return_value = True
fc.return_value.get_heads.return_value = heads
revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
fc.return_value.get_revision.side_effect = revs.__getitem__
mock_open_con = self.useFixture(
tools.OpenFixture(cli._get_contract_head_file_path(
fake_config), contract_head + '\n')).mock_open
mock_open_ex = self.useFixture(
tools.OpenFixture(cli._get_expand_head_file_path(
fake_config), expand_head + '\n')).mock_open
if contract_head in heads and expand_head in heads:
cli.validate_head_files(fake_config)
elif head_files_not_exist:
cli.validate_head_files(fake_config)
self.assertTrue(self.mock_alembic_warn.called)
else:
self.assertRaises(
SystemExit,
cli.validate_head_files,
fake_config
)
self.assertTrue(self.mock_alembic_err.called)
if contract_head in heads and expand_head in heads:
mock_open_ex.assert_called_with(
cli._get_expand_head_file_path(fake_config))
mock_open_con.assert_called_with(
cli._get_contract_head_file_path(fake_config))
if not head_files_not_exist:
fc.assert_called_once_with(fake_config)
def test_validate_head_files_success(self):
self._test_validate_head_files_helper(['a', 'b'], contract_head='a',
expand_head='b')
def test_validate_head_files_missing_file(self):
self._test_validate_head_files_helper(['a', 'b'])
def test_validate_head_files_wrong_contents(self):
self._test_validate_head_files_helper(['a', 'b'], contract_head='c',
expand_head='d')
@mock.patch.object(fileutils, 'delete_if_exists')
def test_update_head_files_success(self, *mocks):
heads = ['a', 'b']
mock_open_con = self.useFixture(
tools.OpenFixture(cli._get_contract_head_file_path(
self.configs[0]))).mock_open
mock_open_ex = self.useFixture(
tools.OpenFixture(cli._get_expand_head_file_path(
self.configs[0]))).mock_open
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = heads
revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
fc.return_value.get_revision.side_effect = revs.__getitem__
cli.update_head_files(self.configs[0])
mock_open_con.return_value.write.assert_called_with(
heads[0] + '\n')
mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n')
old_head_file = cli._get_head_file_path(
self.configs[0])
old_heads_file = cli._get_heads_file_path(
self.configs[0])
delete_if_exists = mocks[0]
self.assertIn(mock.call(old_head_file),
delete_if_exists.call_args_list)
self.assertIn(mock.call(old_heads_file),
delete_if_exists.call_args_list)
def test_get_project_base(self):
config = alembic_config.Config()
config.set_main_option('script_location', 'a.b.c:d')
proj_base = cli._get_project_base(config)
self.assertEqual('a', proj_base)
def test_get_root_versions_dir(self):
config = alembic_config.Config()
config.set_main_option('script_location', 'a.b.c:d')
versions_dir = cli._get_root_versions_dir(config)
self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir)
def test_get_subproject_script_location(self):
foo_ep = cli._get_subproject_script_location('networking-foo')
expected = 'networking_foo.db.migration:alembic_migrations'
self.assertEqual(expected, foo_ep)
def test_get_subproject_script_location_not_installed(self):
self.assertRaises(
SystemExit, cli._get_subproject_script_location, 'not-installed')
def test_get_subproject_base_not_installed(self):
self.assertRaises(
SystemExit, cli._get_subproject_base, 'not-installed')
def test__compare_labels_ok(self):
labels = {'label1', 'label2'}
fake_revision = FakeRevision(labels)
cli._compare_labels(fake_revision, {'label1', 'label2'})
def test__compare_labels_fail_unexpected_labels(self):
labels = {'label1', 'label2', 'label3'}
fake_revision = FakeRevision(labels)
self.assertRaises(
SystemExit,
cli._compare_labels, fake_revision, {'label1', 'label2'})
@mock.patch.object(cli, '_compare_labels')
def test__validate_single_revision_labels_branchless_fail_different_labels(
self, compare_mock):
fake_down_revision = FakeRevision()
fake_revision = FakeRevision(down_revision=fake_down_revision)
script_dir = mock.Mock()
script_dir.get_revision.return_value = fake_down_revision
cli._validate_single_revision_labels(script_dir, fake_revision,
label=None)
expected_labels = set()
compare_mock.assert_has_calls(
[mock.call(revision, expected_labels)
for revision in (fake_revision, fake_down_revision)]
)
@mock.patch.object(cli, '_compare_labels')
def test__validate_single_revision_labels_branches_fail_different_labels(
self, compare_mock):
fake_down_revision = FakeRevision()
fake_revision = FakeRevision(down_revision=fake_down_revision)
script_dir = mock.Mock()
script_dir.get_revision.return_value = fake_down_revision
cli._validate_single_revision_labels(
script_dir, fake_revision, label='fakebranch')
expected_labels = {'fakebranch'}
compare_mock.assert_has_calls(
[mock.call(revision, expected_labels)
for revision in (fake_revision, fake_down_revision)]
)
@mock.patch.object(cli, '_validate_single_revision_labels')
def test__validate_revision_validates_branches(self, validate_mock):
script_dir = mock.Mock()
fake_revision = FakeRevision()
branch = cli.MIGRATION_BRANCHES[0]
fake_revision.path = os.path.join('/fake/path', branch)
cli._validate_revision(script_dir, fake_revision)
validate_mock.assert_called_with(
script_dir, fake_revision, label=branch)
@mock.patch.object(cli, '_validate_single_revision_labels')
def test__validate_revision_validates_branchless_migrations(
self, validate_mock):
script_dir = mock.Mock()
fake_revision = FakeRevision()
cli._validate_revision(script_dir, fake_revision)
validate_mock.assert_called_with(script_dir, fake_revision)
@mock.patch.object(cli, '_validate_revision')
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_validate_revisions_walks_thru_all_revisions(
self, walk_mock, validate_mock):
revisions = [FakeRevision() for i in range(10)]
walk_mock.return_value = revisions
cli.validate_revisions(self.configs[0])
validate_mock.assert_has_calls(
[mock.call(mock.ANY, revision) for revision in revisions]
)
@mock.patch.object(cli, '_validate_revision')
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_validate_revisions_fails_on_multiple_branch_points(
self, walk_mock, validate_mock):
revisions = [FakeRevision(is_branch_point=True) for i in range(2)]
walk_mock.return_value = revisions
self.assertRaises(
SystemExit, cli.validate_revisions, self.configs[0])
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__get_branch_points(self, walk_mock):
revisions = [FakeRevision(is_branch_point=tools.get_random_boolean)
for i in range(50)]
walk_mock.return_value = revisions
script_dir = alembic_script.ScriptDirectory.from_config(
self.configs[0])
self.assertEqual(set(rev for rev in revisions if rev.is_branch_point),
set(cli._get_branch_points(script_dir)))
@mock.patch.object(cli, '_get_version_branch_path')
def test_autogen_process_directives(self, get_version_branch_path):
get_version_branch_path.side_effect = lambda cfg, release, branch: (
"/foo/expand" if branch == 'expand' else "/foo/contract")
migration_script = alembic_ops.MigrationScript(
'eced083f5df',
# these directives will be split into separate
# expand/contract scripts
alembic_ops.UpgradeOps(
ops=[
alembic_ops.CreateTableOp(
'organization',
[
sa.Column('id', sa.Integer(), primary_key=True),
sa.Column('name', sa.String(50), nullable=False)
]
),
alembic_ops.ModifyTableOps(
'user',
ops=[
alembic_ops.AddColumnOp(
'user',
sa.Column('organization_id', sa.Integer())
),
alembic_ops.CreateForeignKeyOp(
'org_fk', 'user', 'organization',
['organization_id'], ['id']
),
alembic_ops.DropConstraintOp(
'user', 'uq_user_org'
),
alembic_ops.DropColumnOp(
'user', 'organization_name'
)
]
)
]
),
# these will be discarded
alembic_ops.DowngradeOps(
ops=[
alembic_ops.AddColumnOp(
'user', sa.Column(
'organization_name', sa.String(50), nullable=True)
),
alembic_ops.CreateUniqueConstraintOp(
'uq_user_org', 'user',
['user_name', 'organization_name']
),
alembic_ops.ModifyTableOps(
'user',
ops=[
alembic_ops.DropConstraintOp('org_fk', 'user'),
alembic_ops.DropColumnOp('user', 'organization_id')
]
),
alembic_ops.DropTableOp('organization')
]
),
message='create the organization table and '
'replace user.organization_name'
)
directives = [migration_script]
autogen.process_revision_directives(
mock.Mock(), mock.Mock(), directives
)
expand = directives[0]
contract = directives[1]
self.assertEqual("/foo/expand", expand.version_path)
self.assertEqual("/foo/contract", contract.version_path)
self.assertTrue(expand.downgrade_ops.is_empty())
self.assertTrue(contract.downgrade_ops.is_empty())
def _get_regex(s):
s = textwrap.dedent(s)
s = re.escape(s)
# alembic 0.8.9 added additional leading '# ' before comments
return s.replace('\\#\\#\\#\\ ', '(# )?### ')
expected_regex = ("""\
### commands auto generated by Alembic - please adjust! ###
op.create_table('organization',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.add_column('user', """
"""sa.Column('organization_id', sa.Integer(), nullable=True))
op.create_foreign_key('org_fk', 'user', """
"""'organization', ['organization_id'], ['id'])
### end Alembic commands ###""")
self.assertThat(
alembic_ag_api.render_python_code(expand.upgrade_ops),
matchers.MatchesRegex(_get_regex(expected_regex)))
expected_regex = ("""\
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('user', 'uq_user_org', type_=None)
op.drop_column('user', 'organization_name')
### end Alembic commands ###""")
self.assertThat(
alembic_ag_api.render_python_code(contract.upgrade_ops),
matchers.MatchesRegex(_get_regex(expected_regex)))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_one_branch(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs
m = cli._find_milestone_revisions(self.configs[0], 'liberty',
cli.CONTRACT_BRANCH)
self.assertEqual(1, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'liberty',
cli.EXPAND_BRANCH)
self.assertEqual(0, len(m))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_two_branches(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
e_revs[3].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs + e_revs
m = cli._find_milestone_revisions(self.configs[0], 'liberty')
self.assertEqual(2, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
self.assertEqual(0, len(m))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_branchless(self, walk_mock):
revisions = [FakeRevision() for r in range(5)]
revisions[2].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = revisions
m = cli._find_milestone_revisions(self.configs[0], 'liberty')
self.assertEqual(1, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
self.assertEqual(0, len(m))
class TestSafetyChecks(base.BaseTestCase):
def test_validate_revisions(self, *mocks):
cli.validate_revisions(cli.get_neutron_config())
| 1.789063 | 2 |
withdrawal/floor_ceiling.py | hoostus/prime-harvesting | 23 | 2436 | <filename>withdrawal/floor_ceiling.py<gh_stars>10-100
from decimal import Decimal
from .abc import WithdrawalStrategy
# Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money
class FloorCeiling(WithdrawalStrategy):
def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25):
super().__init__(portfolio, harvest_strategy)
self.floor = Decimal(floor)
self.ceiling = Decimal(ceiling)
self.rate = Decimal(rate)
def start(self):
amount = self.rate * self.portfolio.value
self.initial_amount = amount
return amount
def next(self):
amount = self.rate * self.portfolio.value
initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation
floor = initial_amount_inflation_adjusted * self.floor
ceiling = initial_amount_inflation_adjusted * self.ceiling
amount = max(amount, floor)
amount = min(amount, ceiling)
return amount
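# Minimal usage sketch (an assumption, not part of the original module): the
# portfolio and harvest_strategy objects come from elsewhere in this package and
# their constructors are not shown here, so they appear only as placeholders.
#
#     strategy = FloorCeiling(portfolio, harvest_strategy, rate=.05)
#     withdrawal = strategy.start()      # first-year withdrawal, 5% of portfolio
#     for _ in range(30):                # later years are clamped to 90%-125% of
#         withdrawal = strategy.next()   # the inflation-adjusted initial amount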
| 2.9375 | 3 |
20190426/6_BME280_WiFi/bme280.py | rcolistete/MicroPython_MiniCurso_ProjOrientado | 0 | 2437 | <reponame>rcolistete/MicroPython_MiniCurso_ProjOrientado
"""
MicroPython driver for Bosh BME280 temperature, pressure and humidity I2C sensor:
https://www.bosch-sensortec.com/bst/products/all_products/bme280
Authors: <NAME>, <NAME>
Version: 3.1.2 @ 2018/04
License: MIT License (https://opensource.org/licenses/MIT)
"""
import time
from ustruct import unpack, unpack_from
from array import array
# BME280 default address
BME280_I2CADDR = 0x76
# BME280_I2CADDR = 0x77
OSAMPLE_0 = 0
OSAMPLE_1 = 1
OSAMPLE_2 = 2
OSAMPLE_4 = 3
OSAMPLE_8 = 4
OSAMPLE_16 = 5
BME280_REGISTER_STATUS = 0xF3
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_CONTROL = 0xF4
BME280_REGISTER_CONTROL_IIR = 0xF5
FILTER_OFF = 0
FILTER_2 = 1
FILTER_4 = 2
FILTER_8 = 3
FILTER_16 = 4
CELSIUS = 'C'
FAHRENHEIT = 'F'
KELVIN = 'K'
class BME280(object):
def __init__(self,
temperature_mode=OSAMPLE_2,
pressure_mode=OSAMPLE_16,
humidity_mode=OSAMPLE_1,
temperature_scale=CELSIUS,
iir=FILTER_16,
address=BME280_I2CADDR,
i2c=None):
osamples = [
OSAMPLE_0,
OSAMPLE_1,
OSAMPLE_2,
OSAMPLE_4,
OSAMPLE_8,
OSAMPLE_16]
        msg_error = 'Unexpected {0} operating mode value {1}.'
if temperature_mode not in osamples:
raise ValueError(msg_error.format("temperature", temperature_mode))
self.temperature_mode = temperature_mode
if pressure_mode not in osamples:
raise ValueError(msg_error.format("pressure", pressure_mode))
self.pressure_mode = pressure_mode
if humidity_mode not in osamples:
raise ValueError(msg_error.format("humidity", humidity_mode))
self.humidity_mode = humidity_mode
msg_error = 'Unexpected low pass IIR filter setting value {0}.'
if iir not in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]:
raise ValueError(msg_error.format(iir))
self.iir = iir
msg_error = 'Unexpected temperature scale value {0}.'
if temperature_scale not in [CELSIUS, FAHRENHEIT, KELVIN]:
raise ValueError(msg_error.format(temperature_scale))
self.temperature_scale = temperature_scale
del msg_error
self.address = address
if i2c is None:
raise ValueError('An I2C object is required.')
self.i2c = i2c
dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)
self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \
self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \
self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \
_, self.dig_H1 = unpack("<HhhHhhhhhhhhBB", dig_88_a1)
self.dig_H2, self.dig_H3 = unpack("<hB", dig_e1_e7)
e4_sign = unpack_from("<b", dig_e1_e7, 3)[0]
self.dig_H4 = (e4_sign << 4) | (dig_e1_e7[4] & 0xF)
e6_sign = unpack_from("<b", dig_e1_e7, 5)[0]
self.dig_H5 = (e6_sign << 4) | (dig_e1_e7[4] >> 4)
self.dig_H6 = unpack_from("<b", dig_e1_e7, 6)[0]
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL,
bytearray([0x24]))
time.sleep(0.002)
self.t_fine = 0
self._l1_barray = bytearray(1)
self._l8_barray = bytearray(8)
self._l3_resultarray = array("i", [0, 0, 0])
self._l1_barray[0] = self.iir << 2
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL_IIR,
self._l1_barray)
time.sleep(0.002)
self._l1_barray[0] = self.humidity_mode
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL_HUM,
self._l1_barray)
def read_raw_data(self, result):
self._l1_barray[0] = (
self.pressure_mode << 5 |
self.temperature_mode << 2 | 1)
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL,
self._l1_barray)
osamples_1_16 = [
OSAMPLE_1,
OSAMPLE_2,
OSAMPLE_4,
OSAMPLE_8,
OSAMPLE_16]
sleep_time = 1250
if self.temperature_mode in osamples_1_16:
sleep_time += 2300*(1 << self.temperature_mode)
if self.pressure_mode in osamples_1_16:
sleep_time += 575 + (2300*(1 << self.pressure_mode))
if self.humidity_mode in osamples_1_16:
sleep_time += 575 + (2300*(1 << self.humidity_mode))
time.sleep_us(sleep_time)
while (unpack('<H',
self.i2c.readfrom_mem(
self.address,
BME280_REGISTER_STATUS, 2))[0] & 0x08):
time.sleep(0.001)
self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray)
readout = self._l8_barray
raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4
raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4
raw_hum = (readout[6] << 8) | readout[7]
result[0] = raw_temp
result[1] = raw_press
result[2] = raw_hum
def read_compensated_data(self, result=None):
""" Get raw data and compensa the same """
self.read_raw_data(self._l3_resultarray)
raw_temp, raw_press, raw_hum = self._l3_resultarray
var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)
var2 = (raw_temp >> 4) - self.dig_T1
var2 = var2 * ((raw_temp >> 4) - self.dig_T1)
var2 = ((var2 >> 12) * self.dig_T3) >> 14
self.t_fine = var1 + var2
temp = (self.t_fine * 5 + 128) >> 8
var1 = self.t_fine - 128000
var2 = var1 * var1 * self.dig_P6
var2 = var2 + ((var1 * self.dig_P5) << 17)
var2 = var2 + (self.dig_P4 << 35)
var1 = (((var1 * var1 * self.dig_P3) >> 8) +
((var1 * self.dig_P2) << 12))
var1 = (((1 << 47) + var1) * self.dig_P1) >> 33
if var1 == 0:
pressure = 0
else:
p = 1048576 - raw_press
p = (((p << 31) - var2) * 3125) // var1
var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25
var2 = (self.dig_P8 * p) >> 19
pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4)
h = self.t_fine - 76800
h = (((((raw_hum << 14) - (self.dig_H4 << 20) -
(self.dig_H5 * h)) + 16384)
>> 15) * (((((((h * self.dig_H6) >> 10) *
(((h * self.dig_H3) >> 11) + 32768)) >> 10) +
2097152) * self.dig_H2 + 8192) >> 14))
h = h - (((((h >> 15) * (h >> 15)) >> 7) * self.dig_H1) >> 4)
h = 0 if h < 0 else h
h = 419430400 if h > 419430400 else h
humidity = h >> 12
if result:
result[0] = temp
result[1] = pressure
result[2] = humidity
return result
return array("i", (temp, pressure, humidity))
@property
def values(self):
temp, pres, humi = self.read_compensated_data()
temp = temp/100
if self.temperature_scale == 'F':
temp = 32 + (temp*1.8)
elif self.temperature_scale == 'K':
temp = temp + 273.15
pres = pres/256
humi = humi/1024
return (temp, pres, humi)
@property
def formated_values(self):
t, p, h = self.values
temp = "{} "+self.temperature_scale
return (temp.format(t), "{} Pa".format(p), "{} %".format(h))
@property
def temperature(self):
t, _, _ = self.values
return t
@property
def pressure(self):
_, p, _ = self.values
return p
@property
def pressure_precision(self):
_, p, _ = self.read_compensated_data()
pi = float(p // 256)
pd = (p % 256)/256
return (pi, pd)
@property
def humidity(self):
_, _, h = self.values
return h
def altitude(self, pressure_sea_level=1013.25):
        pi, pd = self.pressure_precision
return 44330*(1-((float(pi+pd)/100)/pressure_sea_level)**(1/5.255))
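# Minimal usage sketch (assumed wiring, not part of the original driver): the I2C
# pins below are placeholders for an ESP32-style board and must be adapted.
#
#     from machine import I2C, Pin
#     i2c = I2C(scl=Pin(22), sda=Pin(21))
#     bme = BME280(i2c=i2c)
#     print(bme.formated_values)   # e.g. ('25.1 C', '101325.0 Pa', '45.2 %')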
| 2.65625 | 3 |
airflow/contrib/secrets/hashicorp_vault.py | colpal/airfloss | 0 | 2438 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Objects relating to sourcing connections & variables from Hashicorp Vault
"""
from typing import Optional
import hvac
from cached_property import cached_property
from hvac.exceptions import InvalidPath, VaultError
from airflow.exceptions import AirflowException
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
class VaultBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connections and Variables from Hashicorp Vault
Configurable via ``airflow.cfg`` as follows:
.. code-block:: ini
[secrets]
backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend
backend_kwargs = {
"connections_path": "connections",
"url": "http://127.0.0.1:8200",
"mount_point": "airflow"
}
For example, if your keys are under ``connections`` path in ``airflow`` mount_point, this
would be accessible if you provide ``{"connections_path": "connections"}`` and request
conn_id ``smtp_default``.
:param connections_path: Specifies the path of the secret to read to get Connections.
(default: 'connections')
:type connections_path: str
:param variables_path: Specifies the path of the secret to read to get Variables.
(default: 'variables')
:type variables_path: str
:param config_path: Specifies the path of the secret to read Airflow Configurations
(default: 'configs').
:type config_path: str
:param url: Base URL for the Vault instance being addressed.
:type url: str
:param auth_type: Authentication Type for Vault (one of 'token', 'ldap', 'userpass', 'approle',
'github', 'gcp', 'kubernetes'). Default is ``token``.
:type auth_type: str
:param mount_point: The "path" the secret engine was mounted on. (Default: ``secret``)
:type mount_point: str
:param token: Authentication token to include in requests sent to Vault.
(for ``token`` and ``github`` auth_type)
:type token: str
:param kv_engine_version: Select the version of the engine to run (``1`` or ``2``, default: ``2``)
:type kv_engine_version: int
:param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_type)
:type username: str
:param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_type)
:type password: str
:param role_id: Role ID for Authentication (for ``approle`` auth_type)
:type role_id: str
:param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type)
:type kubernetes_role: str
    :param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, default:
``/var/run/secrets/kubernetes.io/serviceaccount/token``)
:type kubernetes_jwt_path: str
:param secret_id: Secret ID for Authentication (for ``approle`` auth_type)
:type secret_id: str
:param gcp_key_path: Path to GCP Credential JSON file (for ``gcp`` auth_type)
:type gcp_key_path: str
:param gcp_scopes: Comma-separated string containing GCP scopes (for ``gcp`` auth_type)
:type gcp_scopes: str
"""
def __init__( # pylint: disable=too-many-arguments
self,
connections_path='connections', # type: str
variables_path='variables', # type: str
config_path='config', # type: str
url=None, # type: Optional[str]
auth_type='token', # type: str
mount_point='secret', # type: str
kv_engine_version=2, # type: int
token=None, # type: Optional[str]
username=None, # type: Optional[str]
        password=None,  # type: Optional[str]
role_id=None, # type: Optional[str]
kubernetes_role=None, # type: Optional[str]
kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str
secret_id=None, # type: Optional[str]
gcp_key_path=None, # type: Optional[str]
gcp_scopes=None, # type: Optional[str]
**kwargs
):
super(VaultBackend, self).__init__()
self.connections_path = connections_path.rstrip('/')
        if variables_path is not None:
self.variables_path = variables_path.rstrip('/')
else:
self.variables_path = variables_path
self.config_path = config_path.rstrip('/')
self.url = url
self.auth_type = auth_type
self.kwargs = kwargs
self.token = token
self.username = username
self.password = password
self.role_id = role_id
self.kubernetes_role = kubernetes_role
self.kubernetes_jwt_path = kubernetes_jwt_path
self.secret_id = secret_id
self.mount_point = mount_point
self.kv_engine_version = kv_engine_version
self.gcp_key_path = gcp_key_path
self.gcp_scopes = gcp_scopes
@cached_property
def client(self):
# type: () -> hvac.Client
"""
Return an authenticated Hashicorp Vault client
"""
_client = hvac.Client(url=self.url, **self.kwargs)
if self.auth_type == "token":
if not self.token:
raise VaultError("token cannot be None for auth_type='token'")
_client.token = self.token
elif self.auth_type == "ldap":
_client.auth.ldap.login(
username=self.username, password=self.password)
elif self.auth_type == "userpass":
_client.auth_userpass(username=self.username, password=self.password)
elif self.auth_type == "approle":
_client.auth_approle(role_id=self.role_id, secret_id=self.secret_id)
elif self.auth_type == "kubernetes":
if not self.kubernetes_role:
raise VaultError("kubernetes_role cannot be None for auth_type='kubernetes'")
with open(self.kubernetes_jwt_path) as f:
jwt = f.read()
_client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt)
elif self.auth_type == "github":
_client.auth.github.login(token=self.token)
elif self.auth_type == "gcp":
from airflow.contrib.utils.gcp_credentials_provider import (
get_credentials_and_project_id,
_get_scopes
)
scopes = _get_scopes(self.gcp_scopes)
credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes)
_client.auth.gcp.configure(credentials=credentials)
else:
raise AirflowException("Authentication type '{}' not supported".format(self.auth_type))
if _client.is_authenticated():
return _client
else:
raise VaultError("Vault Authentication Error!")
def get_conn_uri(self, conn_id):
# type: (str) -> Optional[str]
"""
Get secret value from Vault. Store the secret in the form of URI
:param conn_id: connection id
:type conn_id: str
"""
response = self._get_secret(self.connections_path, conn_id)
return response.get("conn_uri") if response else None
def get_variable(self, key):
# type: (str) -> Optional[str]
"""
Get Airflow Variable
:param key: Variable Key
:return: Variable Value
"""
        if self.variables_path is None:
return None
else:
response = self._get_secret(self.variables_path, key)
return response.get("value") if response else None
def _get_secret(self, path_prefix, secret_id):
# type: (str, str) -> Optional[dict]
"""
Get secret value from Vault.
:param path_prefix: Prefix for the Path to get Secret
:type path_prefix: str
:param secret_id: Secret Key
:type secret_id: str
"""
secret_path = self.build_path(path_prefix, secret_id)
try:
if self.kv_engine_version == 1:
response = self.client.secrets.kv.v1.read_secret(
path=secret_path, mount_point=self.mount_point
)
else:
response = self.client.secrets.kv.v2.read_secret_version(
path=secret_path, mount_point=self.mount_point)
except InvalidPath:
self.log.info("Secret %s not found in Path: %s", secret_id, secret_path)
return None
return_data = response["data"] if self.kv_engine_version == 1 else response["data"]["data"]
return return_data
def get_config(self, key):
# type: (str) -> Optional[str]
"""
Get Airflow Configuration
:param key: Configuration Option Key
:type key: str
:rtype: str
:return: Configuration Option Value retrieved from the vault
"""
response = self._get_secret(self.config_path, key)
return response.get("value") if response else None
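# Minimal usage sketch (illustrative only; the URL, mount point and token are
# placeholder values): constructing the backend directly mirrors what Airflow does
# when it reads the [secrets] backend/backend_kwargs options from airflow.cfg.
#
#     backend = VaultBackend(
#         url='http://127.0.0.1:8200',
#         auth_type='token',
#         token='<vault-token>',
#         mount_point='airflow',
#     )
#     conn_uri = backend.get_conn_uri('smtp_default')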
| 1.976563 | 2 |
Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py | AdamCoscia/eve-trajectory-mining | 0 | 2439 | <filename>Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py
# -*- coding: utf-8 -*-
"""Computes distance between killmails by text similarity.
Edit Distance Metrics
- Levenshtein Distance
- Damerau-Levenshtein Distance
- Jaro Distance
- Jaro-Winkler Distance
- Match Rating Approach Comparison
- Hamming Distance
Vector Distance Metrics
- Jaccard Similarity
- Cosine Distance
Written By: <NAME>
Updated On: 11/09/2019
"""
# Start timing
import time
start = time.time()
total = 0
def lap(msg):
"""Records time elapsed."""
global start, total
elapsed = (time.time() - start) - total
total = time.time() - start
if elapsed > 3600:
print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}')
elif elapsed > 60:
if total > 3600:
print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}')
else:
print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}')
else:
if total > 3600:
print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}')
elif total > 60:
print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}')
else:
print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}')
lap("Importing modules...")
from ast import literal_eval
from functools import reduce
import os
import sys
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def get_long_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of long text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
cosine distance as a value between 0-1, with 1 being identical.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
def get_short_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of short text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
cosine distance as a value between 0-1, with 1 being identical and 0
being complete different.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
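# Quick sanity check (toy data, not part of the original script): each item is a
# (long_text, short_text) pair, mirroring the structure the functions above expect.
#
#     a = [('Large Shield Extender II', 'shield'), ('Warp Scrambler II', 'scram')]
#     b = [('Large Shield Extender II', 'shield'), ('Stasis Webifier II', 'web')]
#     print(get_long_text_cosine_distance(a, b))    # closer to 1 means more similar
#     print(get_short_text_cosine_distance(a, b))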
# Load CSV from local file
lap("Loading CSV data from local file...")
df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8')
df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill'])
df = df.dropna()
# Convert items column to correct data type
lap("Converting 'item' column value types...")
df['items'] = df['items'].apply(literal_eval)
# Group DataFrame by character_id and compute distance series for each group
lap("Computing cosine distances and change in kd by grouping character_id's...")
groupby = df.groupby('character_id') # group dataframe by character_id
num_groups = len(groupby) # get number of groups
count = 0 # current group number out of number of groups
groups = [] # list to append modified group dataframes to
for name, gp in groupby:
# Order the observations and prepare the dataframe
gp = (gp.sort_values(by=['killmail_id'])
.reset_index()
.drop('index', axis=1))
# Generate change in kills over change in deaths and change in kd ratio
kills1 = gp['k_count']
kills2 = gp['k_count'].shift()
deaths1 = gp['d_count']
deaths2 = gp['d_count'].shift()
idx = len(gp.columns)
gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1))
gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift())
# Generate pairs of observations sequentially to compare
pairs = []
items1 = gp['items']
items2 = gp['items'].shift()
for i in range(1, len(gp)): # Start from 1 to avoid adding nan pair
los1 = items1.iloc[i]
los2 = items2.iloc[i]
pairs.append((los2, los1))
# Generate distance series using pairs list and different metrics
# start distance series with nan due to starting range at 1
cos_dist_lt = [np.nan] # cosine distance b/w long text BoW
cos_dist_st = [np.nan] # cosine distance b/w short text BoW
for pair in pairs:
cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1]))
cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1]))
idx = len(gp.columns)
gp.insert(idx, 'cos_dist_lt', cos_dist_lt)
gp.insert(idx, 'cos_dist_st', cos_dist_st)
groups.append(gp)
# Record progress
count += 1
print(f"Progress {count/num_groups:2.1%}", end="\r")
lap("Concatenating resulting groups and writing to file...")
df_res = pd.concat(groups)
df_res.to_csv(f'data/useable_victims_distancesAndKD.csv')
lap("Exit")
| 2.328125 | 2 |
src/chess/utils.py | Dalkio/custom-alphazero | 0 | 2440 | import numpy as np
from itertools import product
from typing import List
from src.config import ConfigChess
from src.chess.board import Board
from src.chess.move import Move
def get_all_possible_moves() -> List[Move]:
all_possible_moves = set()
array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype("int8")
for i, j, piece in product(
range(ConfigChess.board_size), range(ConfigChess.board_size), ["Q", "N"]
):
array[i][j] = Board.piece_symbol_to_int(piece)
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[i][j] = 0
# underpromotion moves
array[1, :] = Board.piece_symbol_to_int("P")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[0, :] = Board.piece_symbol_to_int("p")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
    # no need to add castling moves: they have already been added with queen moves under UCI notation
return sorted(list(all_possible_moves))
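# Minimal usage sketch (illustrative; it assumes the src.config and src.chess
# packages from this repository are importable): the fixed move list is typically
# computed once and reused, e.g. to size a policy output.
#
#     all_moves = get_all_possible_moves()
#     print(len(all_moves))   # size of the fixed action space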
| 2.828125 | 3 |
multirotor.py | christymarc/mfac | 0 | 2441 | from random import gauss
class MultiRotor:
"""Simple vertical dynamics for a multirotor vehicle."""
GRAVITY = -9.81
def __init__(
self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1
):
"""
Args:
altitude (float): initial altitude of the vehicle
velocity (float): initial velocity of the vehicle
mass (float): mass of the vehicle
emc (float): electromechanical constant for the vehicle
dt (float): simulation time step
noise (float): standard deviation of normally distributed simulation noise
"""
self.y0 = altitude
self.y1 = velocity
self.mass = mass
self.emc = emc
self.dt = dt
self.noise = noise
def step(self, effort):
"""Advance the multirotor simulation and apply motor forces.
Args:
            effort (float): related to the upward thrust of the vehicle;
                it must be >= 0
Return:
The current state (altitude, velocity) of the vehicle.
"""
effort = max(0, effort)
scaled_effort = self.emc / self.mass * effort
net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort
        # Don't let the vehicle fall through the ground
if self.y0 <= 0 and net_acceleration < 0:
y0dot = 0
y1dot = 0
else:
y0dot = self.y1
y1dot = net_acceleration
self.y0 += y0dot * self.dt
self.y1 += y1dot * self.dt
self.y0 += gauss(0, self.noise)
return self.y0, self.y1
def get_altitude(self):
"""Return the current altitude."""
return self.y0
def get_delta_time(self):
"""Return the simulation time step."""
return self.dt
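# Hedged demo (not part of the original module): a crude proportional controller
# that tries to hover near a target altitude, shown only to illustrate how step()
# is driven; the baseline effort and gain are arbitrary assumptions.
if __name__ == "__main__":
    vehicle = MultiRotor(altitude=10, noise=0.0)
    target_altitude = 12.0
    altitude = vehicle.get_altitude()
    velocity = 0.0
    for _ in range(200):
        # baseline thrust roughly cancels gravity; a small P-term closes the gap
        effort = 1.5 + 0.5 * (target_altitude - altitude)
        altitude, velocity = vehicle.step(effort)
    print(round(altitude, 2), round(velocity, 2))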
| 3.59375 | 4 |
stpmex/client.py | cuenca-mx/stpmex-python | 37 | 2442 | <reponame>cuenca-mx/stpmex-python
import re
from typing import Any, ClassVar, Dict, List, NoReturn, Union
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from requests import Response, Session
from .exc import (
AccountDoesNotExist,
BankCodeClabeMismatch,
ClaveRastreoAlreadyInUse,
DuplicatedAccount,
InvalidAccountType,
InvalidAmount,
InvalidField,
InvalidInstitution,
InvalidPassphrase,
InvalidRfcOrCurp,
InvalidTrackingKey,
MandatoryField,
NoOrdenesEncontradas,
NoServiceResponse,
PldRejected,
SameAccount,
SignatureValidationError,
StpmexException,
)
from .resources import CuentaFisica, Orden, Resource, Saldo
from .version import __version__ as client_version
DEMO_HOST = 'https://demo.stpmex.com:7024'
PROD_HOST = 'https://prod.stpmex.com'
class Client:
base_url: str
soap_url: str
session: Session
# resources
cuentas: ClassVar = CuentaFisica
ordenes: ClassVar = Orden
saldos: ClassVar = Saldo
def __init__(
self,
empresa: str,
priv_key: str,
priv_key_passphrase: str,
demo: bool = False,
base_url: str = None,
soap_url: str = None,
timeout: tuple = None,
):
self.timeout = timeout
self.session = Session()
self.session.headers['User-Agent'] = f'stpmex-python/{client_version}'
if demo:
host_url = DEMO_HOST
self.session.verify = False
else:
host_url = PROD_HOST
self.session.verify = True
self.base_url = base_url or f'{host_url}/speiws/rest'
self.soap_url = (
soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices'
)
try:
self.pkey = serialization.load_pem_private_key(
priv_key.encode('utf-8'),
priv_key_passphrase.encode('ascii'),
default_backend(),
)
except (ValueError, TypeError, UnsupportedAlgorithm):
raise InvalidPassphrase
Resource.empresa = empresa
Resource._client = self
def post(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('post', endpoint, data)
def put(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('put', endpoint, data)
def delete(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('delete', endpoint, data)
def request(
self, method: str, endpoint: str, data: Dict[str, Any], **kwargs: Any
) -> Union[Dict[str, Any], List[Any]]:
url = self.base_url + endpoint
response = self.session.request(
method,
url,
json=data,
timeout=self.timeout,
**kwargs,
)
self._check_response(response)
resultado = response.json()
if 'resultado' in resultado: # Some responses are enveloped
resultado = resultado['resultado']
return resultado
@staticmethod
def _check_response(response: Response) -> None:
if not response.ok:
response.raise_for_status()
resp = response.json()
if isinstance(resp, dict):
try:
_raise_description_error_exc(resp)
except KeyError:
...
try:
assert resp['descripcion']
_raise_description_exc(resp)
except (AssertionError, KeyError):
...
response.raise_for_status()
def _raise_description_error_exc(resp: Dict) -> NoReturn:
id = resp['resultado']['id']
error = resp['resultado']['descripcionError']
if id == 0 and error == 'No se recibió respuesta del servicio':
raise NoServiceResponse(**resp['resultado'])
elif id == 0 and error == 'Error validando la firma':
raise SignatureValidationError(**resp['resultado'])
elif id == 0 and re.match(r'El campo .+ es obligatorio', error):
raise MandatoryField(**resp['resultado'])
elif id == -1 and re.match(
r'La clave de rastreo .+ ya fue utilizada', error
):
raise ClaveRastreoAlreadyInUse(**resp['resultado'])
elif id == -7 and re.match(r'La cuenta .+ no existe', error):
raise AccountDoesNotExist(**resp['resultado'])
elif id == -9 and re.match(r'La Institucion \d+ no es valida', error):
raise InvalidInstitution(**resp['resultado'])
elif id == -11 and re.match(r'El tipo de cuenta \d+ es invalido', error):
raise InvalidAccountType(**resp['resultado'])
elif id == -20 and re.match(r'El monto {.+} no es válido', error):
raise InvalidAmount(**resp['resultado'])
elif id == -22 and 'no coincide para la institucion operante' in error:
raise BankCodeClabeMismatch(**resp['resultado'])
elif id == -24 and re.match(r'Cuenta {\d+} - {MISMA_CUENTA}', error):
raise SameAccount(**resp['resultado'])
elif id == -34 and 'Clave rastreo invalida' in error:
raise InvalidTrackingKey(**resp['resultado'])
elif id == -100 and error.startswith('No se encontr'):
raise NoOrdenesEncontradas
elif id == -200 and 'Se rechaza por PLD' in error:
raise PldRejected(**resp['resultado'])
else:
raise StpmexException(**resp['resultado'])
def _raise_description_exc(resp: Dict) -> NoReturn:
id = resp['id']
desc = resp['descripcion']
if id == 0 and 'Cuenta en revisión' in desc:
        # STP returns this response when an account is
        # registered. No exception is raised because every
        # account goes through this status.
...
elif id == 1 and desc == 'rfc/curp invalido':
raise InvalidRfcOrCurp(**resp)
elif id == 1 and re.match(r'El campo \w+ es invalido', desc):
raise InvalidField(**resp)
elif id == 3 and desc == 'Cuenta Duplicada':
raise DuplicatedAccount(**resp)
elif id == 5 and re.match(r'El campo .* obligatorio \w+', desc):
raise MandatoryField(**resp)
else:
raise StpmexException(**resp)
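# Hedged usage sketch (not part of the original module). The empresa name, PEM
# contents, passphrase and endpoint path below are placeholders, not values
# defined by this library:
#
#     client = Client(
#         empresa='MI_EMPRESA',
#         priv_key=open('priv_key.pem').read(),
#         priv_key_passphrase='secret',
#         demo=True,
#     )
#     resultado = client.post('/hypothetical/endpoint', {'campo': 'valor'})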
| 2.0625 | 2 |
aql/tests/types/aql_test_list_types.py | menify/sandbox | 0 | 2443 | import sys
import os.path
import timeit
sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') ))
from aql_tests import skip, AqlTestCase, runLocalTests
from aql.util_types import UniqueList, SplitListType, List, ValueListType
#//===========================================================================//
class TestListTypes( AqlTestCase ):
def test_unique_list(self):
ul = UniqueList( [1,2,3,2,1,3] ); ul.selfTest()
self.assertEqual( ul, [2,3,1])
self.assertEqual( list(ul), [1,2,3])
ul = UniqueList()
ul.append( 1 ); ul.selfTest()
ul.append( 3 ); ul.selfTest()
ul.append( 1 ); ul.selfTest()
ul.append( 2 ); ul.selfTest()
ul.append( 3 ); ul.selfTest()
ul.append( 1 ); ul.selfTest()
self.assertEqual( list(ul), [1,3,2])
ul.append_front( 2 ); ul.selfTest()
self.assertEqual( list(ul), [2,1,3])
ul.extend( [4,1,2,2,5] ); ul.selfTest()
self.assertEqual( list(ul), [2,1,3,4,5])
ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest()
self.assertEqual( list(ul), [1,2,3,5,4])
self.assertEqual( list(ul), [1,2,3,5,4])
ul.remove( 1 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,5,4])
ul.remove( 5 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,4])
ul.remove( 55 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,4])
self.assertEqual( ul.pop(), 4 ); ul.selfTest()
self.assertEqual( ul.pop_front(), 2 ); ul.selfTest()
self.assertEqual( ul.pop_front(), 3 ); ul.selfTest()
ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest()
self.assertEqual( list(ul), [1,2,3,4,5])
ul -= [2,2,2,4,33]; ul.selfTest()
self.assertEqual( list(ul), [1,3,5])
self.assertEqual( ul[0], 1)
self.assertEqual( ul[2], 5)
self.assertEqual( ul[1], 3)
self.assertIn( 1, ul)
self.assertEqual( list(reversed(ul)), [5,3,1])
ul.reverse(); ul.selfTest()
self.assertEqual( ul, [5,3,1] )
ul.reverse(); ul.selfTest()
self.assertEqual( str(ul), "[1, 3, 5]" )
self.assertEqual( ul, UniqueList([1, 3, 5]) )
self.assertEqual( ul, UniqueList(ul) )
self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) )
self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] )
#//===========================================================================//
def test_splitlist(self):
l = SplitListType( List, ", \t\n\r" )("1,2, 3,,, \n\r\t4")
self.assertEqual( l, ['1','2','3','4'] )
self.assertEqual( l, "1,2,3,4" )
self.assertEqual( l, "1 2 3 4" )
self.assertEqual( str(l), "1,2,3,4" )
l += "7, 8"
self.assertEqual( l, ['1','2','3','4','7','8'] )
l -= "2, 3"
self.assertEqual( l, ['1','4','7','8'] )
l -= "5"
self.assertEqual( l, ['1','4','7','8'] )
l.extend_front( "10,12" )
self.assertEqual( l, ['10','12','1','4','7','8'] )
l.extend( "0,-1" )
self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] )
#//===========================================================================//
def test_valuelist(self):
l = SplitListType( ValueListType( List, int ), ", \t\n\r" )("1,2, 3,,, \n\r\t4")
self.assertEqual( l, [1,2,3,4] )
self.assertEqual( l, "1,2,3,4" )
self.assertEqual( l, "1 2 3 4" )
self.assertEqual( str(l), "1,2,3,4" )
l += [7, 8]
self.assertEqual( l, ['1','2','3','4','7','8'] )
l += 78
self.assertEqual( l, ['1','2','3','4','7','8', 78] )
l -= 78
self.assertEqual( l, ['1','2','3','4','7','8'] )
l -= "2, 3"
self.assertEqual( l, ['1','4','7','8'] )
l -= "5"
self.assertEqual( l, ['1','4','7','8'] )
l.extend_front( "10,12" )
self.assertEqual( l, ['10','12','1','4','7','8'] )
l.extend( "0,-1" )
self.assertEqual( l, [10,12,1,4,7,8,0,-1] )
l[0] = "5"
self.assertEqual( l, [5,12,1,4,7,8,0,-1] )
#//===========================================================================//
def test_list(self):
l = List([1,2,3,4])
self.assertEqual( l, [1,2,3,4] )
l += [7, 8]
self.assertEqual( l, [1,2,3,4,7,8] )
l += 78
self.assertEqual( l, [1,2,3,4,7,8,78] )
l -= 78
self.assertEqual( l, [1,2,3,4,7,8] )
l -= [2, 3]
self.assertEqual( l, [1,4,7,8] )
l -= 5
self.assertEqual( l, [1,4,7,8] )
l.extend_front( [10,12] )
self.assertEqual( l, [10,12,1,4,7,8] )
l.extend( [0,-1] )
self.assertEqual( l, [10,12,1,4,7,8, 0, -1] )
#//===========================================================================//
if __name__ == "__main__":
runLocalTests()
| 2.4375 | 2 |
logger_application/logger.py | swatishayna/OnlineEDAAutomation | 1 | 2444 | <reponame>swatishayna/OnlineEDAAutomation<filename>logger_application/logger.py
from datetime import datetime
from src.utils import uploaded_file
import os
class App_Logger:
def __init__(self):
pass
def log(self, file_object, email, log_message, log_writer_id):
self.now = datetime.now()
self.date = self.now.date()
self.current_time = self.now.strftime("%H:%M:%S")
file_object.write(
email+ "_eda_" + log_writer_id + "\t\t" +str(self.date) + "/" + str(self.current_time) + "\t\t" +email+ "\t\t" +log_message +"\n")
| 2.8125 | 3 |
metasync/params.py | dstarikov/metavault | 1 | 2445 | # config params
KB = 1024
MB = 1024*KB
GB = 1024*MB
# name of meta root dir
META_DIR = ".metasync"
# batching time for daemon
SYNC_WAIT = 3
# blob size
BLOB_UNIT = 32*MB
# Increase of Paxos proposal number
PAXOS_PNUM_INC = 10
# authentication directory
import os
AUTH_DIR = os.path.join(os.path.expanduser("~"), ".metasync")
| 1.429688 | 1 |
py/tests/test_valid_parentheses.py | Dragonway/LeetCode | 0 | 2446 | import unittest
from py.tests.utils import test
from py import valid_parentheses as vp
class TestValidParentheses(unittest.TestCase):
@test(vp.Solution.is_valid)
def test_valid_parentheses(self) -> None:
test("()", result=True)
test("()[]{}", result=True)
test("(]", result=False)
test("([)]", result=False)
test("{[]}", result=True)
test("", result=True)
test(")()", result=False)
test("(())((())))", result=False)
| 3.15625 | 3 |
hitnet/hitnet.py | AchintyaSrivastava/HITNET-Stereo-Depth-estimation | 38 | 2447 | <reponame>AchintyaSrivastava/HITNET-Stereo-Depth-estimation
import tensorflow as tf
import numpy as np
import time
import cv2
from hitnet.utils_hitnet import *
drivingStereo_config = CameraConfig(0.546, 1000)
class HitNet():
def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config):
self.fps = 0
self.timeLastPrediction = time.time()
self.frameCounter = 0
self.camera_config = camera_config
# Initialize model
self.model = self.initialize_model(model_path, model_type)
def __call__(self, left_img, right_img):
return self.estimate_disparity(left_img, right_img)
def initialize_model(self, model_path, model_type):
self.model_type = model_type
with tf.io.gfile.GFile(model_path, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
loaded = graph_def.ParseFromString(f.read())
# Wrap frozen graph to ConcreteFunctions
if self.model_type == ModelType.flyingthings:
model = wrap_frozen_graph(graph_def=graph_def,
inputs="input:0",
outputs=["reference_output_disparity:0","secondary_output_disparity:0"])
else:
model = wrap_frozen_graph(graph_def=graph_def,
inputs="input:0",
outputs="reference_output_disparity:0")
return model
def estimate_disparity(self, left_img, right_img):
input_tensor = self.prepare_input(left_img, right_img)
# Perform inference on the image
if self.model_type == ModelType.flyingthings:
left_disparity, right_disparity = self.inference(input_tensor)
self.disparity_map = left_disparity
else:
self.disparity_map = self.inference(input_tensor)
return self.disparity_map
def get_depth(self):
return self.camera_config.f*self.camera_config.baseline/self.disparity_map
def prepare_input(self, left_img, right_img):
if (self.model_type == ModelType.eth3d):
# Shape (1, None, None, 2)
left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
left_img = np.expand_dims(left_img,2)
right_img = np.expand_dims(right_img,2)
combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
else:
# Shape (1, None, None, 6)
left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB)
right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB)
combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32)
def inference(self, input_tensor):
output = self.model(input_tensor)
return np.squeeze(output)
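# Hedged usage sketch (not part of the original module): the frozen-graph path
# and the stereo image files below are placeholders.
#
#     hitnet_depth = HitNet("models/eth3d.pb", ModelType.eth3d)
#     left_img = cv2.imread("left.png")
#     right_img = cv2.imread("right.png")
#     disparity_map = hitnet_depth(left_img, right_img)
#     depth_map = hitnet_depth.get_depth()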
| 2.359375 | 2 |
fobi_custom/plugins/form_elements/fields/intercept/household_tenure/fobi_form_elements.py | datamade/just-spaces | 6 | 2448 | from django import forms
from fobi.base import FormFieldPlugin, form_element_plugin_registry
from .forms import HouseholdTenureForm
class HouseholdTenurePlugin(FormFieldPlugin):
"""HouseholdTenurePlugin."""
uid = "household_tenure"
name = "What year did you move into your current address?"
form = HouseholdTenureForm
group = "Intercept" # Group to which the plugin belongs to
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
field_kwargs = {
'required': self.data.required,
'label': self.data.label,
'widget': forms.widgets.NumberInput(attrs={}),
}
return [(self.data.name, forms.IntegerField, field_kwargs)]
form_element_plugin_registry.register(HouseholdTenurePlugin)
| 2.15625 | 2 |
utils/scripts/OOOlevelGen/src/sprites/__init__.py | fullscreennl/monkeyswipe | 0 | 2449 | <gh_stars>0
__all__ = ['EnemyBucketWithStar',
'Nut',
'Beam',
'Enemy',
'Friend',
'Hero',
'Launcher',
'Rotor',
'SpikeyBuddy',
'Star',
'Wizard',
'EnemyEquipedRotor',
'CyclingEnemyObject',
'Joints',
'Bomb',
'Contacts']
| 1.1875 | 1 |
code/trainer.py | mazzaAnt/StackGAN-v2 | 1 | 2450 | <reponame>mazzaAnt/StackGAN-v2<gh_stars>1-10
from __future__ import print_function
from six.moves import range
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision.utils as vutils
import numpy as np
import os
import time
from PIL import Image, ImageFont, ImageDraw
from copy import deepcopy
from miscc.config import cfg
from miscc.utils import mkdir_p
from CaptionDatasets import *
from tensorboard import summary
from tensorboard import FileWriter
from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3
# ################## Shared functions ###################
def compute_mean_covariance(img):
batch_size = img.size(0)
channel_num = img.size(1)
height = img.size(2)
width = img.size(3)
num_pixels = height * width
# batch_size * channel_num * 1 * 1
mu = img.mean(2, keepdim=True).mean(3, keepdim=True)
# batch_size * channel_num * num_pixels
img_hat = img - mu.expand_as(img)
img_hat = img_hat.view(batch_size, channel_num, num_pixels)
# batch_size * num_pixels * channel_num
img_hat_transpose = img_hat.transpose(1, 2)
# batch_size * channel_num * channel_num
covariance = torch.bmm(img_hat, img_hat_transpose)
covariance = covariance / num_pixels
return mu, covariance
def KL_loss(mu, logvar):
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
if m.bias is not None:
m.bias.data.fill_(0.0)
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_G_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
def compute_inception_score(predictions, num_splits=1):
# print('predictions', predictions.shape)
scores = []
for i in range(num_splits):
istart = i * predictions.shape[0] // num_splits
iend = (i + 1) * predictions.shape[0] // num_splits
part = predictions[istart:iend, :]
kl = part * \
(np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
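# Hedged example (not part of the original module): `preds` stands in for the
# softmax outputs of shape (num_samples, num_classes) gathered from INCEPTION_V3.
#
#     preds = np.random.dirichlet(np.ones(1000), size=500)
#     is_mean, is_std = compute_inception_score(preds, num_splits=10)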
def negative_log_posterior_probability(predictions, num_splits=1):
# print('predictions', predictions.shape)
scores = []
for i in range(num_splits):
istart = i * predictions.shape[0] // num_splits
iend = (i + 1) * predictions.shape[0] // num_splits
part = predictions[istart:iend, :]
result = -1. * np.log(np.max(part, 1))
result = np.mean(result)
scores.append(result)
return np.mean(scores), np.std(scores)
def load_network(gpus):
netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=gpus)
print(netG)
netsD = []
if cfg.TREE.BRANCH_NUM > 0:
netsD.append(D_NET64())
if cfg.TREE.BRANCH_NUM > 1:
netsD.append(D_NET128())
if cfg.TREE.BRANCH_NUM > 2:
netsD.append(D_NET256())
if cfg.TREE.BRANCH_NUM > 3:
netsD.append(D_NET512())
if cfg.TREE.BRANCH_NUM > 4:
netsD.append(D_NET1024())
# TODO: if cfg.TREE.BRANCH_NUM > 5:
for i in range(len(netsD)):
netsD[i].apply(weights_init)
netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
# print(netsD[i])
print('# of netsD', len(netsD))
count = 0
if cfg.TRAIN.NET_G != '':
state_dict = torch.load(cfg.TRAIN.NET_G)
netG.load_state_dict(state_dict)
print('Load ', cfg.TRAIN.NET_G)
istart = cfg.TRAIN.NET_G.rfind('_') + 1
iend = cfg.TRAIN.NET_G.rfind('.')
count = cfg.TRAIN.NET_G[istart:iend]
count = int(count) + 1
if cfg.TRAIN.NET_D != '':
for i in range(len(netsD)):
print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))
state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
netsD[i].load_state_dict(state_dict)
inception_model = INCEPTION_V3()
if cfg.CUDA:
netG.cuda()
for i in range(len(netsD)):
netsD[i].cuda()
inception_model = inception_model.cuda()
inception_model.eval()
return netG, netsD, len(netsD), inception_model, count
def define_optimizers(netG, netsD):
optimizersD = []
num_Ds = len(netsD)
for i in range(num_Ds):
opt = optim.Adam(netsD[i].parameters(),
lr=cfg.TRAIN.DISCRIMINATOR_LR,
betas=(0.5, 0.999))
optimizersD.append(opt)
# G_opt_paras = []
# for p in netG.parameters():
# if p.requires_grad:
# G_opt_paras.append(p)
optimizerG = optim.Adam(netG.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
return optimizerG, optimizersD
def save_model(netG, avg_param_G, netsD, epoch, model_dir):
load_params(netG, avg_param_G)
torch.save(
netG.state_dict(),
'%s/netG_%d.pth' % (model_dir, epoch))
for i in range(len(netsD)):
netD = netsD[i]
torch.save(
netD.state_dict(),
'%s/netD%d.pth' % (model_dir, i))
print('Save G/Ds models.')
def save_real(imgs_tcpu, image_dir):
num = cfg.TRAIN.VIS_COUNT
# The range of real_img (i.e., self.imgs_tcpu[i][0:num])
# is changed to [0, 1] by function vutils.save_image
real_img = imgs_tcpu[-1][0:num]
vutils.save_image(
real_img, '%s/real_samples.png' % (image_dir),
normalize=True)
real_img_set = vutils.make_grid(real_img).numpy()
real_img_set = np.transpose(real_img_set, (1, 2, 0))
real_img_set = real_img_set * 255
real_img_set = real_img_set.astype(np.uint8)
sup_real_img = summary.image('real_img', real_img_set)
def save_img_results(imgs_tcpu, fake_imgs, num_imgs,
count, image_dir, summary_writer):
num = cfg.TRAIN.VIS_COUNT
# The range of real_img (i.e., self.imgs_tcpu[i][0:num])
# is changed to [0, 1] by function vutils.save_image
real_img = imgs_tcpu[-1][0:num]
vutils.save_image(
real_img, '%s/real_samples.png' % (image_dir),
normalize=True)
real_img_set = vutils.make_grid(real_img).numpy()
real_img_set = np.transpose(real_img_set, (1, 2, 0))
real_img_set = real_img_set * 255
real_img_set = real_img_set.astype(np.uint8)
sup_real_img = summary.image('real_img', real_img_set)
summary_writer.add_summary(sup_real_img, count)
for i in range(num_imgs):
fake_img = fake_imgs[i][0:num]
# The range of fake_img.data (i.e., self.fake_imgs[i][0:num])
        # is still [-1, 1]...
vutils.save_image(
fake_img.data, '%s/count_%09d_fake_samples_%d.png' %
(image_dir, count, i), normalize=True)
fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy()
fake_img_set = np.transpose(fake_img_set, (1, 2, 0))
fake_img_set = (fake_img_set + 1) * 255 / 2
fake_img_set = fake_img_set.astype(np.uint8)
sup_fake_img = summary.image('fake_img%d' % i, fake_img_set)
summary_writer.add_summary(sup_fake_img, count)
summary_writer.flush()
# ################# Text to image task############################ #
class condGANTrainer(object):
def __init__(self, output_dir, data_loader, imsize):
if cfg.TRAIN.FLAG:
self.model_dir = os.path.join(output_dir, 'Model')
self.image_dir = os.path.join(output_dir, 'Image')
self.log_dir = os.path.join(output_dir, 'Log')
mkdir_p(self.model_dir)
mkdir_p(self.image_dir)
mkdir_p(self.log_dir)
self.summary_writer = FileWriter(self.log_dir)
s_gpus = cfg.GPU_ID.split(',')
self.gpus = [int(ix) for ix in s_gpus]
self.num_gpus = len(self.gpus)
torch.cuda.set_device(self.gpus[0])
cudnn.benchmark = True
self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
self.max_epoch = cfg.TRAIN.MAX_EPOCH
self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
self.data_loader = data_loader
self.num_batches = len(self.data_loader)
def prepare_data(self, data):
imgs, w_imgs, t_embedding, _ = data
real_vimgs, wrong_vimgs = [], []
if cfg.CUDA:
vembedding = Variable(t_embedding).cuda()
else:
vembedding = Variable(t_embedding)
for i in range(self.num_Ds):
if cfg.CUDA:
real_vimgs.append(Variable(imgs[i]).cuda())
wrong_vimgs.append(Variable(w_imgs[i]).cuda())
else:
real_vimgs.append(Variable(imgs[i]))
wrong_vimgs.append(Variable(w_imgs[i]))
return imgs, real_vimgs, wrong_vimgs, vembedding
def train_Dnet(self, idx, count):
flag = count % 100
batch_size = self.real_imgs[0].size(0)
criterion, mu = self.criterion, self.mu
netD, optD = self.netsD[idx], self.optimizersD[idx]
real_imgs = self.real_imgs[idx]
wrong_imgs = self.wrong_imgs[idx]
fake_imgs = self.fake_imgs[idx]
#
netD.zero_grad()
# Forward
real_labels = self.real_labels[:batch_size]
fake_labels = self.fake_labels[:batch_size]
# for real
real_logits = netD(real_imgs, mu.detach())
wrong_logits = netD(wrong_imgs, mu.detach())
fake_logits = netD(fake_imgs.detach(), mu.detach())
#
errD_real = criterion(real_logits[0], real_labels)
errD_wrong = criterion(wrong_logits[0], fake_labels)
errD_fake = criterion(fake_logits[0], fake_labels)
if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(real_logits[1], real_labels)
errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(wrong_logits[1], real_labels)
errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(fake_logits[1], fake_labels)
#
errD_real = errD_real + errD_real_uncond
errD_wrong = errD_wrong + errD_wrong_uncond
errD_fake = errD_fake + errD_fake_uncond
#
errD = errD_real + errD_wrong + errD_fake
else:
errD = errD_real + 0.5 * (errD_wrong + errD_fake)
# backward
errD.backward()
# update parameters
optD.step()
# log
if flag == 0:
summary_D = summary.scalar('D_loss%d' % idx, errD.item())
self.summary_writer.add_summary(summary_D, count)
return errD
def train_Gnet(self, count):
self.netG.zero_grad()
errG_total = 0
flag = count % 100
batch_size = self.real_imgs[0].size(0)
criterion, mu, logvar = self.criterion, self.mu, self.logvar
real_labels = self.real_labels[:batch_size]
for i in range(self.num_Ds):
outputs = self.netsD[i](self.fake_imgs[i], mu)
errG = criterion(outputs[0], real_labels)
if len(outputs) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\
criterion(outputs[1], real_labels)
errG = errG + errG_patch
errG_total = errG_total + errG
if flag == 0:
summary_D = summary.scalar('G_loss%d' % i, errG.item())
self.summary_writer.add_summary(summary_D, count)
# Compute color consistency losses
if cfg.TRAIN.COEFF.COLOR_LOSS > 0:
if self.num_Ds > 1:
mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1])
mu2, covariance2 = \
compute_mean_covariance(self.fake_imgs[-2].detach())
like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
nn.MSELoss()(covariance1, covariance2)
errG_total = errG_total + like_mu2 + like_cov2
if flag == 0:
sum_mu = summary.scalar('G_like_mu2', like_mu2.item())
self.summary_writer.add_summary(sum_mu, count)
sum_cov = summary.scalar('G_like_cov2', like_cov2.item())
self.summary_writer.add_summary(sum_cov, count)
if self.num_Ds > 2:
mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2])
mu2, covariance2 = \
compute_mean_covariance(self.fake_imgs[-3].detach())
like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
nn.MSELoss()(covariance1, covariance2)
errG_total = errG_total + like_mu1 + like_cov1
if flag == 0:
sum_mu = summary.scalar('G_like_mu1', like_mu1.item())
self.summary_writer.add_summary(sum_mu, count)
sum_cov = summary.scalar('G_like_cov1', like_cov1.item())
self.summary_writer.add_summary(sum_cov, count)
kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL
errG_total = errG_total + kl_loss
# Postpone the backward propagation
# errG_total.backward()
# self.optimizerG.step()
return kl_loss, errG_total
def train(self):
self.netG, self.netsD, self.num_Ds,\
self.inception_model, start_count = load_network(self.gpus)
avg_param_G = copy_G_params(self.netG)
self.optimizerG, self.optimizersD = \
define_optimizers(self.netG, self.netsD)
self.criterion = nn.BCELoss()
self.SATcriterion = nn.CrossEntropyLoss()
self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1))
self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0))
self.gradient_one = torch.FloatTensor([1.0])
self.gradient_half = torch.FloatTensor([0.5])
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(self.batch_size, nz))
fixed_noise = Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1))
# Data parameters
data_folder = 'birds_output' # folder with data files saved by create_input_files.py
data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name shared by data files
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Show, Attend, and Tell Dataloader
train_loader = torch.utils.data.DataLoader(
CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])),
batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS), pin_memory=True)
if cfg.CUDA:
self.criterion.cuda()
self.SATcriterion.cuda() # Compute SATloss
self.real_labels = self.real_labels.cuda()
self.fake_labels = self.fake_labels.cuda()
self.gradient_one = self.gradient_one.cuda()
self.gradient_half = self.gradient_half.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
predictions = []
count = start_count
start_epoch = start_count // (self.num_batches)
for epoch in range(start_epoch, self.max_epoch):
start_t = time.time()
# for step, data in enumerate(self.data_loader, 0):
for step, data in enumerate(zip(self.data_loader, train_loader), 0):
data_1 = data[0]
_, caps, caplens = data[1]
data = data_1
#######################################################
# (0) Prepare training data
######################################################
self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \
self.txt_embedding = self.prepare_data(data)
# Testing line for real samples
if epoch == start_epoch and step == 0:
print ('Checking real samples at first...')
save_real(self.imgs_tcpu, self.image_dir)
#######################################################
# (1) Generate fake images
######################################################
noise.data.normal_(0, 1)
self.fake_imgs, self.mu, self.logvar = \
self.netG(noise, self.txt_embedding)
# len(self.fake_imgs) = NUM_BRANCHES
# self.fake_imgs[0].shape = [batch_size, 3, 64, 64]
# self.fake_imgs[1].shape = [batch_size, 3, 128, 128]
# self.fake_imgs[2].shape = [batch_size, 3, 256, 256]
#######################################################
# (*) Forward fake images to SAT
######################################################
from SATmodels import Encoder, DecoderWithAttention
from torch.nn.utils.rnn import pack_padded_sequence
fine_tune_encoder = False
# Read word map
word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
with open(word_map_file, 'r') as j:
word_map = json.load(j)
# Define the encoder/decoder structure for SAT model
decoder = DecoderWithAttention(attention_dim=512,
embed_dim=512,
decoder_dim=512,
vocab_size=len(word_map),
dropout=0.5).cuda()
decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
lr=4e-4)
encoder = Encoder().cuda()
encoder.fine_tune(fine_tune_encoder)
encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),
lr=1e-4) if fine_tune_encoder else None
SATloss = 0
# Compute the SAT loss after forwarding the SAT model
for idx in range(len(self.fake_imgs)):
img = encoder(self.fake_imgs[idx])
scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(img, caps, caplens)
targets = caps_sorted[:, 1:]
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True).cuda()
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True).cuda()
SATloss += self.SATcriterion(scores, targets) + 1 * ((1. - alphas.sum(dim=1)) ** 2).mean()
# Set zero_grad for encoder/decoder
decoder_optimizer.zero_grad()
if encoder_optimizer is not None:
encoder_optimizer.zero_grad()
#######################################################
# (2) Update D network
######################################################
errD_total = 0
for i in range(self.num_Ds):
errD = self.train_Dnet(i, count)
errD_total += errD
#######################################################
# (3) Update G network: maximize log(D(G(z)))
######################################################
kl_loss, errG_total = self.train_Gnet(count)
for p, avg_p in zip(self.netG.parameters(), avg_param_G):
avg_p.mul_(0.999).add_(0.001, p.data)
# Combine with G and SAT first, then back propagation
errG_total += SATloss
errG_total.backward()
self.optimizerG.step()
#######################################################
# (*) Update SAT network:
######################################################
# Update weights
decoder_optimizer.step()
if encoder_optimizer is not None:
encoder_optimizer.step()
#######################################################
# (*) Prediction and Inception score:
######################################################
pred = self.inception_model(self.fake_imgs[-1].detach())
predictions.append(pred.data.cpu().numpy())
if count % 100 == 0:
summary_D = summary.scalar('D_loss', errD_total.item())
summary_G = summary.scalar('G_loss', errG_total.item())
summary_KL = summary.scalar('KL_loss', kl_loss.item())
self.summary_writer.add_summary(summary_D, count)
self.summary_writer.add_summary(summary_G, count)
self.summary_writer.add_summary(summary_KL, count)
count += 1
#######################################################
# (*) Save Images/Log/Model per SNAPSHOT_INTERVAL:
######################################################
if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
# Save images
backup_para = copy_G_params(self.netG)
load_params(self.netG, avg_param_G)
#
self.fake_imgs, _, _ = self.netG(fixed_noise, self.txt_embedding)
save_img_results(self.imgs_tcpu, self.fake_imgs, self.num_Ds,
count, self.image_dir, self.summary_writer)
#
load_params(self.netG, backup_para)
# Compute inception score
if len(predictions) > 500:
predictions = np.concatenate(predictions, 0)
mean, std = compute_inception_score(predictions, 10)
# print('mean:', mean, 'std', std)
m_incep = summary.scalar('Inception_mean', mean)
self.summary_writer.add_summary(m_incep, count)
#
mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions, 10)
m_nlpp = summary.scalar('NLPP_mean', mean_nlpp)
self.summary_writer.add_summary(m_nlpp, count)
#
predictions = []
end_t = time.time()
print('''[%d/%d][%d]
Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f Time: %.2fs
''' # D(real): %.4f D(wrong):%.4f D(fake) %.4f
% (epoch, self.max_epoch, self.num_batches,
errD_total.item(), errG_total.item(),
kl_loss.item(), end_t - start_t))
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
self.summary_writer.close()
def save_superimages(self, images_list, filenames,
save_dir, split_dir, imsize):
batch_size = images_list[0].size(0)
num_sentences = len(images_list)
for i in range(batch_size):
s_tmp = '%s/super/%s/%s' %\
(save_dir, split_dir, filenames[i])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
#
savename = '%s_%d.png' % (s_tmp, imsize)
super_img = []
for j in range(num_sentences):
img = images_list[j][i]
# print(img.size())
img = img.view(1, 3, imsize, imsize)
# print(img.size())
super_img.append(img)
# break
super_img = torch.cat(super_img, 0)
vutils.save_image(super_img, savename, nrow=10, normalize=True)
def save_singleimages(self, images, filenames,
save_dir, split_dir, sentenceID, imsize):
for i in range(images.size(0)):
s_tmp = '%s/single_samples/%s/%s' %\
(save_dir, split_dir, filenames[i])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
fullpath = '%s_%d_sentence%d.png' % (s_tmp, imsize, sentenceID)
# range from [-1, 1] to [0, 255]
img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte()
ndarr = img.permute(1, 2, 0).data.cpu().numpy()
im = Image.fromarray(ndarr)
im.save(fullpath)
def evaluate(self, split_dir):
if cfg.TRAIN.NET_G == '':
            print('Error: the path for models is not found!')
else:
# Build and load the generator
if split_dir == 'test':
split_dir = 'valid'
netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
print(netG)
# state_dict = torch.load(cfg.TRAIN.NET_G)
state_dict = \
torch.load(cfg.TRAIN.NET_G,
map_location=lambda storage, loc: storage)
netG.load_state_dict(state_dict)
print('Load ', cfg.TRAIN.NET_G)
# the path to save generated images
s_tmp = cfg.TRAIN.NET_G
istart = s_tmp.rfind('_') + 1
iend = s_tmp.rfind('.')
iteration = int(s_tmp[istart:iend])
s_tmp = s_tmp[:s_tmp.rfind('/')]
save_dir = '%s/iteration%d' % (s_tmp, iteration)
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(self.batch_size, nz))
if cfg.CUDA:
netG.cuda()
noise = noise.cuda()
# switch to evaluate mode
netG.eval()
for step, data in enumerate(self.data_loader, 0):
imgs, t_embeddings, filenames = data
if cfg.CUDA:
t_embeddings = Variable(t_embeddings).cuda()
else:
t_embeddings = Variable(t_embeddings)
# print(t_embeddings[:, 0, :], t_embeddings.size(1))
embedding_dim = t_embeddings.size(1)
batch_size = imgs[0].size(0)
noise.data.resize_(batch_size, nz)
noise.data.normal_(0, 1)
fake_img_list = []
for i in range(embedding_dim):
fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :])
if cfg.TEST.B_EXAMPLE:
# fake_img_list.append(fake_imgs[0].data.cpu())
# fake_img_list.append(fake_imgs[1].data.cpu())
fake_img_list.append(fake_imgs[2].data.cpu())
else:
self.save_singleimages(fake_imgs[-1], filenames,
save_dir, split_dir, i, 256)
# self.save_singleimages(fake_imgs[-2], filenames,
# save_dir, split_dir, i, 128)
# self.save_singleimages(fake_imgs[-3], filenames,
# save_dir, split_dir, i, 64)
# break
if cfg.TEST.B_EXAMPLE:
# self.save_superimages(fake_img_list, filenames,
# save_dir, split_dir, 64)
# self.save_superimages(fake_img_list, filenames,
# save_dir, split_dir, 128)
self.save_superimages(fake_img_list, filenames,
save_dir, split_dir, 256)
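# Hedged usage sketch (not part of the original module): `output_dir`,
# `dataloader` and `imsize` are placeholders, and cfg must already be populated
# from a config file before the trainer is built.
#
#     algo = condGANTrainer(output_dir, dataloader, imsize)
#     if cfg.TRAIN.FLAG:
#         algo.train()
#     else:
#         algo.evaluate(split_dir='test')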
| 1.789063 | 2 |
spletni_vmesnik.py | LeaHolc/recepcija | 1 | 2451 | from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file
import bottle
import controller
from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna
import datetime as dt
@bottle.get('/')
def root():
redirect('/domov')
@bottle.get('/domov')
def index():
parcele = dobi_parcele_za_prikaz(dt.date.today())
return template("domov", parcele=parcele, hide_header_back=True)
@bottle.get("/parcela/<id_parcele>")
def parcela(id_parcele):
    'Check the current status of the parcela'
rez, gostje = dobi_info_parcele(id_parcele, dt.date.today())
if rez is not None:
stanje = "Parcela je trenutno zasedena"
else:
stanje = "Parcela je trenutno na voljo"
return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje)
@bottle.get("/naredi-rezervacijo/<id_parcele>")
def nova_rezervacija(id_parcele=None):
print(id_parcele)
today = dt.date.today()
tomorrow = today + dt.timedelta(days=1)
return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow)
@bottle.post("/naredi-rezervacijo")
def naredi_novo_rezervacijo():
" V modelu naredi novo rezervacijo in ji doda prvega gosta"
# Preberemo lastnosti iz forme
    ime = request.forms.ime
    priimek = request.forms.priimek
    emso = request.forms.emso
    drzava = request.forms.drzava
    id_parcele = request.forms.id_parcele
    od = request.forms.zacetek
    do = request.forms.konec
print(ime, priimek)
try:
datum_od = dt.datetime.fromisoformat(od).date()
datum_do = dt.datetime.fromisoformat(do).date()
except Exception as e:
print(e)
print("Napaka pri pretvorbi datumov")
return redirect("/naredi-rezervacijo")
rezervacija = naredi_rezervacijo(id_parcele)
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {
"EMSO":emso,
"ime":ime,
"priimek":priimek,
"drzava":drzava,
}, datum_od, datum_do)
return redirect(f"/parcela/{id_parcele}")
@bottle.get("/dodaj-gosta/<id_rezervacije>")
def get_dodaj_gosta_na_rezervacijo(id_rezervacije):
today = dt.date.today()
tomorrow = today + dt.timedelta(days=1)
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
return template("dodajanje_gosta", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow)
@bottle.post("/dodaj-gosta-na-rezervacijo")
def post_dodaj_gosta_na_rezervacijo():
" V modelu rezervaciji doda gosta"
# Preberemo lastnosti iz forme
ime = request.forms.ime
priimek = request.forms.priimek
    emso = request.forms.emso
    drzava = request.forms.drzava
    id_rezervacije = request.forms.rez
    od = request.forms.zacetek
    do = request.forms.konec
try:
datum_od = dt.datetime.fromisoformat(od).date()
datum_do = dt.datetime.fromisoformat(do).date()
except Exception as e:
print(e)
print("Napaka pri pretvorbi datumov")
return redirect("/dodaj-gosta")
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {
"EMSO":emso,
"ime":ime,
"priimek":priimek,
"drzava":drzava,
},datum_od,datum_do)
print(id_rezervacije)
return redirect(f"/parcela/{rezervacija.id_parcele}")
@bottle.get("/predracun/<id_rezervacije>")
def predracun(id_rezervacije):
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
today = dt.date.today()
gostje = rezervacija.gostje
sestevek, postavke = dobi_postavke_racuna(rezervacija)
slovar_cen = {}
slovar_kolicin = {}
for gost in gostje:
slovar_kolicin[gost] = len(gost.nocitve)
slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')
return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)
@bottle.get("/zakljuci/<id_rezervacije>")
def racun(id_rezervacije):
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
today = dt.date.today()
gostje = rezervacija.gostje
sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today())
slovar_cen = {}
slovar_kolicin = {}
for gost in gostje:
slovar_kolicin[gost] = len(gost.nocitve)
slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')
return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)
@bottle.error(404)
def napaka404(a):
return template("error", sporocilo="Stran ne obstaja!", naslov="404")
@bottle.error(500)
def napaka500(a):
return template("error", sporocilo="Napaka streznika!", naslov="500")
bottle.run(reloader=True, debug=True)
| 2.40625 | 2 |
espnet/nets/pytorch_backend/transducer/initializer.py | magictron/espnet | 2 | 2452 | <filename>espnet/nets/pytorch_backend/transducer/initializer.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parameter initialization for transducer RNN/Transformer parts."""
import six
from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters
from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one
from espnet.nets.pytorch_backend.transformer.initializer import initialize
def initializer(model, args):
"""Initialize transducer model.
Args:
model (torch.nn.Module): transducer instance
args (Namespace): argument Namespace containing options
"""
if args.dtype != "transformer":
if args.etype == "transformer":
initialize(model.encoder, args.transformer_init)
lecun_normal_init_parameters(model.dec)
else:
lecun_normal_init_parameters(model)
model.dec.embed.weight.data.normal_(0, 1)
for l in six.moves.range(len(model.dec.decoder)):
set_forget_bias_to_one(model.dec.decoder[l].bias_ih)
else:
if args.etype == "transformer":
initialize(model, args.transformer_init)
else:
lecun_normal_init_parameters(model.encoder)
initialize(model.decoder, args.transformer_init)
| 2.40625 | 2 |
evaluate.py | adelmassimo/EM-Algorithm-for-MMPP | 0 | 2453 | <filename>evaluate.py
import model
import numpy as np
import datasetReader as df
import main
# Number of traces loaded T
T = 1
# Generate traces
traces_factory = df.DatasetFactory()
traces_factory.createDataset(T)
traces = traces_factory.traces
P0 = np.matrix("[ .02 0;"
"0 0 0.5;"
"0 0 0]")
P1 = np.matrix("[0.1 0 0;"
"0 0.5 0;"
"0 0 0.9]")
M = np.matrix("[0.25 0 0;"
"0 0.23 0;"
"0 0 0.85]")
def backward_likelihood(i, trace):
N = model.N
M = len( trace )
likelihoods = np.ones((N, 1))
if i < M:
P = main.randomization(P0, model.uniformization_rate, trace[i][0])
# P = stored_p_values[i, :, :]
likelihoods = np.multiply(
P.dot( model.P1 ).dot( backward_likelihood(i+1, trace) ),
model.M[:, trace[i][1]] )
if likelihoods.sum() != 0:
likelihoods = likelihoods / likelihoods.sum()
return likelihoods | 2.6875 | 3 |
Imaging/Core/Testing/Python/TestHSVToRGB.py | forestGzh/VTK | 1,755 | 2454 | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Use the painter to draw using colors.
# This is not a pipeline object. It will support pipeline objects.
# Please do not use this object directly.
imageCanvas = vtk.vtkImageCanvasSource2D()
imageCanvas.SetNumberOfScalarComponents(3)
imageCanvas.SetScalarTypeToUnsignedChar()
imageCanvas.SetExtent(0,320,0,320,0,0)
imageCanvas.SetDrawColor(0,0,0)
imageCanvas.FillBox(0,511,0,511)
# r, g, b
imageCanvas.SetDrawColor(255,0,0)
imageCanvas.FillBox(0,50,0,100)
imageCanvas.SetDrawColor(128,128,0)
imageCanvas.FillBox(50,100,0,100)
imageCanvas.SetDrawColor(0,255,0)
imageCanvas.FillBox(100,150,0,100)
imageCanvas.SetDrawColor(0,128,128)
imageCanvas.FillBox(150,200,0,100)
imageCanvas.SetDrawColor(0,0,255)
imageCanvas.FillBox(200,250,0,100)
imageCanvas.SetDrawColor(128,0,128)
imageCanvas.FillBox(250,300,0,100)
# intensity scale
imageCanvas.SetDrawColor(5,5,5)
imageCanvas.FillBox(0,50,110,210)
imageCanvas.SetDrawColor(55,55,55)
imageCanvas.FillBox(50,100,110,210)
imageCanvas.SetDrawColor(105,105,105)
imageCanvas.FillBox(100,150,110,210)
imageCanvas.SetDrawColor(155,155,155)
imageCanvas.FillBox(150,200,110,210)
imageCanvas.SetDrawColor(205,205,205)
imageCanvas.FillBox(200,250,110,210)
imageCanvas.SetDrawColor(255,255,255)
imageCanvas.FillBox(250,300,110,210)
# saturation scale
imageCanvas.SetDrawColor(245,0,0)
imageCanvas.FillBox(0,50,220,320)
imageCanvas.SetDrawColor(213,16,16)
imageCanvas.FillBox(50,100,220,320)
imageCanvas.SetDrawColor(181,32,32)
imageCanvas.FillBox(100,150,220,320)
imageCanvas.SetDrawColor(149,48,48)
imageCanvas.FillBox(150,200,220,320)
imageCanvas.SetDrawColor(117,64,64)
imageCanvas.FillBox(200,250,220,320)
imageCanvas.SetDrawColor(85,80,80)
imageCanvas.FillBox(250,300,220,320)
convert = vtk.vtkImageRGBToHSV()
convert.SetInputConnection(imageCanvas.GetOutputPort())
convertBack = vtk.vtkImageHSVToRGB()
convertBack.SetInputConnection(convert.GetOutputPort())
cast = vtk.vtkImageCast()
cast.SetInputConnection(convertBack.GetOutputPort())
cast.SetOutputScalarTypeToFloat()
cast.ReleaseDataFlagOff()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(convertBack.GetOutputPort())
#viewer SetInputConnection [imageCanvas GetOutputPort]
viewer.SetColorWindow(256)
viewer.SetColorLevel(127.5)
viewer.SetSize(320,320)
viewer.Render()
# --- end of script --
| 2.21875 | 2 |
kelas_2b/echa.py | barizraihan/belajarpython | 0 | 2455 | <reponame>barizraihan/belajarpython
import csv
class echa:
def werehousing(self):
with open('kelas_2b/echa.csv', 'r') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
for row in csv_reader:
print("menampilkan data barang:", row[0], row[1], row[2], row[3], row[4])
| 3.1875 | 3 |
tests/test_handler_surface_distance.py | dyollb/MONAI | 2,971 | 2456 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Tuple
import numpy as np
import torch
from ignite.engine import Engine
from monai.handlers import SurfaceDistance
def create_spherical_seg_3d(
radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)
) -> np.ndarray:
"""
Return a 3D image with a sphere inside. Voxel values will be
1 inside the sphere, and 0 elsewhere.
Args:
radius: radius of sphere (in terms of number of voxels, can be partial)
centre: location of sphere centre.
im_shape: shape of image to create
See also:
:py:meth:`~create_test_image_3d`
"""
# Create image
image = np.zeros(im_shape, dtype=np.int32)
spy, spx, spz = np.ogrid[
-centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]
]
circle = (spx * spx + spy * spy + spz * spz) <= radius * radius
image[circle] = 1
image[~circle] = 0
return image
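# Hedged example (not part of the original tests): a quick sanity check of the
# helper above; the radius, centre and shape values are arbitrary.
#
#     seg = create_spherical_seg_3d(radius=10, centre=(20, 20, 20), im_shape=(40, 40, 40))
#     assert seg.shape == (40, 40, 40) and set(np.unique(seg)) == {0, 1}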
sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)
# test input a list of channel-first tensor
sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]
sampler_sphere_zeros = torch.zeros_like(sampler_sphere)
TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]
TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]
TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]
TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]
class TestHandlerSurfaceDistance(unittest.TestCase):
# TODO test multi node Surface Distance
def test_compute(self):
sur_metric = SurfaceDistance(include_background=True)
def _val_func(engine, batch):
pass
engine = Engine(_val_func)
sur_metric.attach(engine, "surface_distance")
y_pred, y = TEST_SAMPLE_1
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4)
y_pred, y = TEST_SAMPLE_2
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4)
y_pred, y = TEST_SAMPLE_3
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
y_pred, y = TEST_SAMPLE_4
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
def test_shape_mismatch(self):
sur_metric = SurfaceDistance(include_background=True)
with self.assertRaises((AssertionError, ValueError)):
y_pred = TEST_SAMPLE_1[0]
y = torch.ones((1, 1, 10, 10, 10))
sur_metric.update([y_pred, y])
if __name__ == "__main__":
unittest.main()
| 2.078125 | 2 |
benchmarks/eval.py | rom1mouret/anoflows | 0 | 2457 | #!/usr/bin/env python3
import sys
import logging
import yaml
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.impute import SimpleImputer
from anoflows.hpo import find_best_flows
from data_loading import load_data
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) == 1:
logging.error("YAML data specification missing from the command line arguments")
exit(1)
spec_file = sys.argv[1]
df, spec = load_data(spec_file)
max_rows = min(len(df), spec.get("max_rows", 40000))
novelty_detection = spec.get("novelty", True)
normal_classes = spec["normal_classes"]
precision = defaultdict(list)
for rounds in range(spec.get("rounds", 1)):
# random sampling
df = df.sample(n=max_rows, replace=False)
label_col = spec["label_column"]
y = df[label_col].values
other = df.drop(label_col, inplace=False, axis=1)
X = other.values
# imputing
X = SimpleImputer(copy=False).fit_transform(X)
# train/test split
X_train, X_test, y_train, y_test = \
train_test_split(X, y, shuffle=False, test_size=0.5)
if novelty_detection:
keep = np.where(np.isin(y_train, normal_classes))[0]
X_train = X_train[keep, :]
y_train = y_train[keep]
# training
#flows, loss = find_best_flows(X_train, device='cpu', n_trials=1)
from anoflows.anoflow_bagging import AnoFlowBagging
flows = AnoFlowBagging()
flows.fit(X_train)
iforest = IsolationForest().fit(X_train)
# prediction
pred = {
"anoflows": flows.likelihood(X_test),
"iforest": iforest.decision_function(X_test)
}
# evaluation
y_true = np.where(np.isin(y_test, spec["anomaly_classes"]))[0]
ref = np.zeros(len(y_test))
ref[y_true] = 1
k = len(y_true)
for name, y_pred in pred.items():
anomaly_indices = y_pred.argsort()[:k]
prec = ref[anomaly_indices].sum() / k
logging.info("%s: %.1f%% (%d anomalies / %d rows)" % (name, 100*prec, k, len(y_test)))
precision[name].append(prec)
logging.info("* SUMMARY %s", spec_file)
for name, prec in precision.items():
prec = 100 * np.array(prec)
mean = np.mean(prec)
std = np.std(prec)
logging.info("%s; mean=%.1f%% std=%.1f%%" % (name, mean, std))
| 2.40625 | 2 |
pydantic/version.py | jamescurtin/pydantic | 1 | 2458 | <reponame>jamescurtin/pydantic
__all__ = ['VERSION', 'version_info']
VERSION = '1.4a1'
def version_info() -> str:
import platform
import sys
from importlib import import_module
from pathlib import Path
from .main import compiled
optional_deps = []
for p in ('typing-extensions', 'email-validator', 'devtools'):
try:
import_module(p.replace('-', '_'))
except ImportError:
continue
optional_deps.append(p)
info = {
'pydantic version': VERSION,
'pydantic compiled': compiled,
'install path': Path(__file__).resolve().parent,
'python version': sys.version,
'platform': platform.platform(),
'optional deps. installed': optional_deps,
}
return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items())
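# Hedged usage sketch (not part of the original module):
#
#     from pydantic.version import VERSION, version_info
#     print(VERSION)          # e.g. '1.4a1'
#     print(version_info())   # multi-line environment summary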
| 2.125 | 2 |
spire/core/registry.py | siq/spire | 0 | 2459 | <filename>spire/core/registry.py
from scheme import Structure
__all__ = ('Configurable', 'Registry')
class Configurable(object):
"""A sentry class which indicates that subclasses can establish a configuration chain."""
class Registry(object):
"""The unit registry."""
dependencies = {}
schemas = {}
units = {}
@classmethod
def is_configurable(cls, obj):
return (obj is not Configurable and issubclass(obj, Configurable) and
Configurable not in obj.__bases__)
@classmethod
def purge(cls):
cls.schemas = {}
cls.units = {}
@classmethod
def register_dependency(cls, dependency):
token = dependency.token
if not token:
return
if token not in cls.dependencies:
cls.dependencies[token] = type(dependency)
if not dependency.configurable:
return
configuration = dependency.unit.configuration
if token in cls.schemas:
structure = cls.schemas[token]
if configuration.required and not dependency.optional and not structure.required:
structure.required = True
else:
schema = dependency.construct_schema(generic=True, name=token)
if dependency.optional:
schema = schema.clone(required=False)
cls.schemas[token] = schema
@classmethod
def register_unit(cls, unit):
cls.units[unit.identity] = unit
if cls.is_configurable(unit):
queue = [(unit, [unit.identity], None)]
while queue:
subject, tokens, dependency = queue.pop(0)
if subject.configuration:
token = '/'.join(tokens)
if dependency:
structure = dependency.construct_schema(name=token)
if dependency.token and structure.required:
structure = structure.clone(required=False)
else:
structure = subject.configuration.schema.clone(required=False,
name=token)
cls.schemas[token] = structure
for attr, subdependency in subject.dependencies.iteritems():
queue.append((subdependency.unit, tokens + [attr], subdependency))
| 2.515625 | 3 |
oslo_devsupport/model/__init__.py | berrange/oslo.devsupport | 0 | 2460 | <filename>oslo_devsupport/model/__init__.py
from .command import *
from .database import *
from .entrypoint import *
from .group import *
from .http import *
from .messaging import *
from .method import *
from .operation import *
from .stack import *
from .threads import *
| 1.1875 | 1 |
scripts/extract.py | nng555/fairseq | 2 | 2461 | #!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extracts random constraints from reference files."""
import argparse
import random
import sys
from sacrebleu import extract_ngrams
def get_phrase(words, index, length):
assert index < len(words) - length + 1
phr = " ".join(words[index : index + length])
for i in range(index, index + length):
words.pop(index)
return phr
def main(args):
if args.seed:
random.seed(args.seed)
for line in sys.stdin:
constraints = []
def add_constraint(constraint):
constraints.append(constraint)
source = line.rstrip()
if "\t" in line:
source, target = line.split("\t")
if args.add_sos:
target = f"<s> {target}"
if args.add_eos:
target = f"{target} </s>"
if len(target.split()) >= args.len:
words = [target]
num = args.number
choices = {}
for i in range(num):
if len(words) == 0:
break
segmentno = random.choice(range(len(words)))
segment = words.pop(segmentno)
tokens = segment.split()
phrase_index = random.choice(range(len(tokens)))
choice = " ".join(
tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
)
for j in range(
phrase_index, min(len(tokens), phrase_index + args.len)
):
tokens.pop(phrase_index)
if phrase_index > 0:
words.append(" ".join(tokens[0:phrase_index]))
if phrase_index + 1 < len(tokens):
words.append(" ".join(tokens[phrase_index:]))
choices[target.find(choice)] = choice
# mask out with spaces
target = target.replace(choice, " " * len(choice), 1)
for key in sorted(choices.keys()):
add_constraint(choices[key])
print(source, *constraints, sep="\t")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
parser.add_argument(
"--add-sos", default=False, action="store_true", help="add <s> token"
)
parser.add_argument(
"--add-eos", default=False, action="store_true", help="add </s> token"
)
parser.add_argument("--seed", "-s", default=0, type=int)
args = parser.parse_args()
    main(args)
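    # Hedged CLI sketch (not part of the original script): the script reads
    # "source<TAB>target" lines on stdin and prints the source followed by the
    # sampled constraint phrases, e.g.
    #
    #     echo -e "ein haus\ta house" | python extract.py --number 1 --len 1 --seed 1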
| 2.890625 | 3 |
AppImageBuilder/commands/file.py | gouchi/appimage-builder | 0 | 2462 | <filename>AppImageBuilder/commands/file.py
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
from .command import Command
class FileError(RuntimeError):
pass
class File(Command):
def __init__(self):
super().__init__('file')
self.log_stdout = False
self.log_command = False
def query(self, path):
self._run(['file', '-b', '--exclude', 'ascii', path])
if self.return_code != 0:
raise FileError('\n'.join(self.stderr))
return '\n'.join(self.stdout)
def is_executable_elf(self, path):
output = self.query(path)
result = ('ELF' in output) and ('executable' in output)
return result
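# Hedged usage sketch (not part of the original module): the path below is a
# placeholder and the `file` binary must be available on the system.
#
#     helper = File()
#     print(helper.query("/usr/bin/env"))
#     print(helper.is_executable_elf("/usr/bin/env"))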
| 2.40625 | 2 |
text_selection/analyse_zenon_scrape.py | dainst/chronoi-corpus-processing | 0 | 2463 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import furl
import json
import re
import sys
from collections import defaultdict
def filter_records_without_url(records: []) -> []:
return [r for r in records if any(r.get("urls"))]
def build_furl(url: str) -> furl.furl:
try:
furl_obj = furl.furl(url)
if not furl_obj.host:
furl_obj = furl.furl("http://" + url)
return furl_obj
except ValueError:
return furl.furl("https://invalid-url.xyz")
def determine_host(url: str) -> str:
furl_obj = build_furl(url)
return re.sub(r"^www[0-9]*\.", "", furl_obj.host)
def build_hosts_to_urls(records: []) -> {str: {str}}:
result = defaultdict(set)
for record in records:
for url in record.get("urls"):
host = determine_host(url.get("url"))
result[host].add(url.get("url"))
return result
def print_most_common_url_hosts(hosts_to_urls: {}, n: int):
hosts = [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n]
hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h]))
for host in hosts:
print("% 6d\t%s" % (len(hosts_to_urls[host]), host))
def print_urls_for_host(hosts_to_urls: {}, host: str):
urls = hosts_to_urls.get(host, [])
for url in urls:
print(url)
if not any(urls):
print(f"No urls for host: '{host}'", file=sys.stderr)
def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str):
# It should be ok, to only pattern match the hosts here...
ids1 = {r.get("id") for r in records if record_has_matching_url(r, pattern1)}
ids2 = {r.get("id") for r in records if record_has_matching_url(r, pattern2)}
ids_both = ids1.intersection(ids2)
for host, number in {pattern1: len(ids1), pattern2: len(ids2), "both": len(ids_both)}.items():
print(f"{host}: {number}")
def record_has_matching_url(record: {}, pattern: str) -> bool:
return any(record_get_urls_matching(record, pattern))
def record_get_urls_matching(record: {}, pattern: str) -> [{}]:
result = []
for url in record.get("urls"):
if any(re.findall(pattern, url.get("url"))):
result.append(url)
return result
def record_remove_urls_not_matching(record: {}, pattern: str):
record["urls"] = record_get_urls_matching(record, pattern)
def earliest_year(year_strings: [str]) -> str:
years = []
for year_s in year_strings:
try:
years.append(int(year_s))
except ValueError:
print(f"Not a string that is a year: '{year_s}'", file=sys.stderr)
continue
return str(sorted(years)[0]) if any(years) else ""
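# Illustrative example (hedged): non-numeric entries are skipped with a warning on
# stderr, so e.g. earliest_year(["2001", "1999", "n.d."]) would return "1999".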
def main(args: argparse.Namespace):
with open(args.scrape_file, "r") as file:
records = json.load(file)
records = filter_records_without_url(records)
# filter urls by the user-provided filter list
if args.desc_filters:
with open(args.desc_filters, "r") as file:
filters = file.read().splitlines()
for record in records:
record["urls"] = [url for url in record.get("urls") if url.get("desc") not in filters]
records = filter_records_without_url(records)
# print unique hosts or urls, then exit
if args.print_host_urls or args.print_common_hosts >= 0:
hosts_to_urls = build_hosts_to_urls(records)
if args.print_common_hosts >= 0:
print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts)
elif args.print_host_urls:
print_urls_for_host(hosts_to_urls, host=args.print_host_urls)
exit(0)
# check in how many records the two given hosts co-occur, then exit
if args.patterns_cooccur:
host1, host2 = args.patterns_cooccur.split(",")
print_how_often_url_patterns_cooccur(records, host1, host2)
exit(0)
# do some selection based on a url pattern, remove all non-matching urls from the record
if args.select_by_url:
pattern = args.select_by_url
records = [r for r in records if record_has_matching_url(r, pattern)]
for record in records:
record_remove_urls_not_matching(record, pattern)
# sort the records by id, to be extra sure, that we get the same order every time this is called
# print each line as a csv column
records = sorted(records, key=lambda r: r.get("id"))
writer = csv.writer(sys.stdout, delimiter=",", quoting=csv.QUOTE_ALL)
for record in records:
to_print = []
if args.print_id:
to_print.append(record.get("id", ""))
if args.print_url:
to_print.append(record.get("urls")[0].get("url") if any(record.get("urls")) else "")
if args.print_pub_date:
to_print.append(earliest_year(record.get("publicationDates", [])))
if args.print_languages:
to_print.append("|".join(record.get("languages", [])))
writer.writerow(to_print)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Process a file with zenon json records and print some information about them.")
parser.add_argument("scrape_file", type=str, help="The file that contains the zenon dumps as json.")
parser.add_argument("--desc-filters", type=str, help="A file to filter urls by. Excludes urls with 'desc' fields matching a line in the file.")
# these are arguments to print some specific information
parser.add_argument("--print-common-hosts", type=int, default=-1, help="Print hosts that appear more than n times in the records urls, then exit.")
parser.add_argument("--print-host-urls", type=str, help="Print all urls for the host, then exit.")
parser.add_argument("--patterns-cooccur", type=str, help="Format: 'pattern1,pattern2', print how often these occur in single records url fields, then exit.")
# these are meant to work together select by a url pattern then print information about the records
parser.add_argument("--select-by-url", type=str, help="Give a pattern for a url to select records by.")
parser.add_argument("--print-url", action="store_true", help="Print the first of each urls for the selected records. (Ignores other urls present on the records if --select-url is given.)")
parser.add_argument("--print-pub-date", action="store_true", help="Print the earliest publication year for each of the selected records.")
parser.add_argument("--print-id", action="store_true", help="Print the selected records' ids")
parser.add_argument("--print-languages", action="store_true", help="Print the selected records' languages")
main(parser.parse_args())
| 2.90625 | 3 |
src/python/twitter/pants/targets/java_antlr_library.py | wfarner/commons | 1 | 2464 | # ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = '<NAME>'
from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary
class JavaAntlrLibrary(ExportableJvmLibrary):
"""Defines a target that builds java stubs from an Antlr grammar file."""
def __init__(self,
name,
sources,
provides = None,
dependencies = None,
excludes = None,
compiler = 'antlr3'):
"""name: The name of this module target, addressable via pants via the portion of the spec
following the colon
sources: A list of paths containing the Antlr source files this module's jar is compiled from
provides: An optional Dependency object indicating the The ivy artifact to export
dependencies: An optional list of Dependency objects specifying the binary (jar) dependencies of
this module.
excludes: An optional list of dependency exclude patterns to filter all of this module's
transitive dependencies against.
compiler: The name of the compiler used to compile the ANTLR files.
Currently only supports 'antlr3' and 'antlr4'"""
ExportableJvmLibrary.__init__(self,
name,
sources,
provides,
dependencies,
excludes)
self.add_labels('codegen')
if compiler not in ['antlr3', 'antlr4']:
raise ValueError("Illegal value for 'compiler': {}".format(compiler))
self.compiler = compiler
def _as_jar_dependency(self):
return ExportableJvmLibrary._as_jar_dependency(self).with_sources()
| 1.734375 | 2 |
bigml/tests/create_pca_steps_bck.py | devs-cloud/python_ml | 0 | 2465 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2018-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime, timedelta
from world import world
from nose.tools import eq_, assert_less
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from read_pca_steps import i_get_the_pca
#@step(r'the pca name is "(.*)"')
def i_check_pca_name(step, name):
pca_name = world.pca['name']
eq_(name, pca_name)
#@step(r'I create a PCA from a dataset$')
def i_create_a_pca_from_dataset(step):
dataset = world.dataset.get('resource')
resource = world.api.create_pca(dataset, {'name': 'new PCA'})
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.pca = resource['object']
world.pcas.append(resource['resource'])
#@step(r'I create a PCA from a dataset$')
def i_create_a_pca_with_params(step, params):
params = json.loads(params)
dataset = world.dataset.get('resource')
resource = world.api.create_pca(dataset, params)
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.pca = resource['object']
world.pcas.append(resource['resource'])
def i_create_a_pca(step):
i_create_a_pca_from_dataset(step)
#@step(r'I update the PCA name to "(.*)"$')
def i_update_pca_name(step, name):
resource = world.api.update_pca(world.pca['resource'],
{'name': name})
world.status = resource['code']
eq_(world.status, HTTP_ACCEPTED)
world.location = resource['location']
world.pca = resource['object']
#@step(r'I wait until the PCA status code is either (\d) or (-\d) less than (\d+)')
def wait_until_pca_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
delta = int(secs) * world.delta
pca_id = world.pca['resource']
i_get_the_pca(step, pca_id)
status = get_status(world.pca)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert_less(datetime.utcnow() - start, timedelta(seconds=delta))
i_get_the_pca(step, pca_id)
status = get_status(world.pca)
eq_(status['code'], int(code1))
#@step(r'I wait until the PCA is ready less than (\d+)')
def the_pca_is_finished_in_less_than(step, secs):
wait_until_pca_status_code_is(step, FINISHED, FAULTY, secs)
| 2.234375 | 2 |
config.py | Pasmikh/quiz_please_bot | 0 | 2466 | <filename>config.py
days_of_week = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday']
operation = ''
options = ['Info', 'Check-in/Out', 'Edit games', 'Back']
admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname']
avail_days = []
TOKEN = 'bot_<PASSWORD>'
group_id = id_of_group_chat | 1.640625 | 2 |
Chapter 8/sandwich-maker.py | ostin-r/automate-boring-stuff-solutions | 4 | 2467 | '''
<NAME> 2/20/21
sandwich-maker.py uses pyinputplus to validate user input for sandwich preferences
'''
import pyinputplus as ip
def get_cost(food_name):
'''gets the cost of items in sandwich_builder'''
food_dict = {
'sourdough':1.75,
'rye':2.0,
'wheat':1.50,
'white':1.25,
'chicken':2.0,
'turkey':1.50,
'ham':2.0,
'tofu':1.25,
'cheddar':2.0,
'swiss':2.5,
'mozzarella':2.5,
'yes':0.25, # toppings return 'yes' in sandwich_builder(), so I made them all cost 0.25
'no':0 # saying no to a topping costs nothing
}
return food_dict[food_name]
def sandwich_builder():
print('Enter your sandwich preferences below:\n')
bread_prompt = 'What bread type would you like? (sourdough, rye, wheat, or white)\n'
bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt)
protein_prompt = 'What type of protein would you like? (chicken, turkey, ham, or tofu)\n'
protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt)
mayo = ip.inputYesNo(prompt='Would you like mayo?\n')
mustard = ip.inputYesNo(prompt='Would you like mustard?\n')
tomato = ip.inputYesNo(prompt='Would you like tomato?\n')
lettuce = ip.inputYesNo(prompt='Would you like lettuce?\n')
like_cheese = ip.inputYesNo(prompt='Do you like cheese on your sandwich?\n')
    if like_cheese == 'yes':
cheese_prompt = 'What kind of cheese would you like? (cheddar, swiss, mozzarella)\n'
cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt)
sandwich = []
cost = 0
sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce])
for item in sandwich:
cost += get_cost(item)
else:
sandwich = []
cost = 0
sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce])
for item in sandwich:
cost += get_cost(item)
how_many_prompt = 'How many sandwiches would you like?\n'
how_many = ip.inputInt(min=1, prompt=how_many_prompt)
print('\nFinal cost: ${}'.format(round(cost * how_many * 1.06, 2)))
sandwich_builder() | 3.953125 | 4 |
tests/core/test_headerupdater.py | My-Novel-Management/storybuilderunite | 1 | 2468 | <filename>tests/core/test_headerupdater.py
# -*- coding: utf-8 -*-
'''
HeaderUpdater class test
========================
'''
import unittest
from tests.testutils import print_testtitle, validate_with_fail
from builder.commands.scode import SCode, SCmd
from builder.containers.chapter import Chapter
from builder.containers.episode import Episode
from builder.containers.scene import Scene
from builder.containers.story import Story
from builder.core import headerupdater as hd
class HeaderUpdaterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print_testtitle(hd.__name__, 'HeaderUpdater class')
def test_instance(self):
tmp = hd.HeaderUpdater()
self.assertIsInstance(tmp, hd.HeaderUpdater)
def test_title_of(self):
data = [
# (src, expect, exp_opt)
(True, Story('test',), ('test',), 1),
]
def checker(src, expect, exp_opt):
tmp = hd.HeaderUpdater()._title_of(src)
self.assertIsInstance(tmp, SCode)
self.assertEqual(tmp.cmd, SCmd.TAG_TITLE)
self.assertEqual(tmp.script, expect)
self.assertEqual(tmp.option, exp_opt)
validate_with_fail(self, 'title_of', checker, data)
def test_outline_of(self):
data = [
# (src, expect)
(True, Story('test',outline='apple'), ('apple',)),
]
def checker(src, expect):
tmp = hd.HeaderUpdater()._outline_of(src)
self.assertIsInstance(tmp, SCode)
self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT)
self.assertEqual(tmp.script, expect)
validate_with_fail(self, 'outline_of', checker, data)
def test_end_of(self):
data = [
# (src, expect)
(True, Chapter('test',), SCmd.END_CHAPTER),
]
validate_with_fail(self, 'end_of',
lambda src, expect: self.assertEqual(
hd.HeaderUpdater()._end_of(src).cmd, expect),
data)
| 2.65625 | 3 |
dotsDB/test_vlen_datasets.py | aernesto/Lab_DotsDB_Utilities | 1 | 2469 | <gh_stars>1-10
import numpy as np
import h5py
filename = "test_vlen_datasets_np_bool.h5"
rows = [np.array([np.True_, np.False_]),
np.array([np.True_, np.True_, np.False_])]
f = h5py.File(filename, 'x') # create file, fails if exists
vlen_data_type = h5py.special_dtype(vlen=np.bool_)
dset = f.create_dataset("vlen_matrix", (2,),
compression="gzip",
compression_opts=9,
fletcher32=True,
dtype=vlen_data_type)
for r in range(len(rows)):
dset[r] = rows[r]
f.flush()
f.close()
f = h5py.File(filename, 'r')
dsetr = f["vlen_matrix"]
for r in range(dsetr.shape[0]):
print(dsetr[r])
| 2.625 | 3 |
utils.py | g4idrijs/CardiacUltrasoundPhaseEstimation | 1 | 2470 | import os, time
import numpy as np
import scipy.signal
import scipy.misc
import scipy.ndimage.filters
import matplotlib.pyplot as plt
import PIL
from PIL import ImageDraw
import angles
import cv2
import SimpleITK as sitk
def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255),
resizeAmount=None):
if resizeAmount is not None:
imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount,
fy=resizeAmount)
imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB)
if len(strAnnotation) > 0:
cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN,
2.0, textColor, thickness=2)
cv2.imshow(strName, imDisp)
def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255),
resizeAmount=None):
if resizeAmount is not None:
imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount,
fy=resizeAmount)
if len(strAnnotation) > 0:
cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN,
2.0, textColor, thickness=2)
cv2.imshow(strName, imDisp)
def mplotShowImage(imInput):
plt.imshow(imInput, cmap=plt.cm.gray)
plt.grid(False)
plt.xticks(())
plt.yticks(())
def normalizeArray(a):
return np.single(0.0 + a - a.min()) / (a.max() - a.min())
def AddTextOnImage(imInput, strText, loc=(2, 2), color=255):
imInputPIL = PIL.Image.fromarray(imInput)
d = ImageDraw.Draw(imInputPIL)
d.text(loc, strText, fill=color)
return np.asarray(imInputPIL)
def AddTextOnVideo(imVideo, strText, loc=(2, 2)):
imVideoOut = np.zeros_like(imVideo)
for i in range(imVideo.shape[2]):
imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :, i], strText, loc)
return imVideoOut
def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None):
if not isinstance(imVideo, list):
imVideo = [imVideo]
strWindowName = [strWindowName]
# find max number of frames
maxFrames = 0
for vid in range(len(imVideo)):
if imVideo[vid].shape[-1] > maxFrames:
maxFrames = imVideo[vid].shape[2]
# display video
blnLoop = True
fid = 0
while True:
for vid in range(len(imVideo)):
curVideoFid = fid % imVideo[vid].shape[2]
imCur = imVideo[vid][:, :, curVideoFid]
# resize image if requested
if resizeAmount:
imCur = scipy.misc.imresize(imCur, resizeAmount)
# show image
cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid + 1))
# look for "esc" key
k = cv2.waitKey(waitTime) & 0xff
if blnLoop:
if k == 27:
break
elif k == ord(' '):
blnLoop = False
else:
fid = (fid + 1) % maxFrames
else:
if k == 27: # escape
break
elif k == ord(' '): # space
blnLoop = True
elif k == 81: # left arrow
fid = (fid - 1) % maxFrames
elif k == 83: # right arrow
fid = (fid + 1) % maxFrames
for vid in range(len(imVideo)):
cv2.destroyWindow(strWindowName[vid])
def normalizeArray(a, bounds=None):
if bounds is None:
return (0.0 + a - a.min()) / (a.max() - a.min())
else:
b = (0.0 + a - bounds[0]) / (bounds[1] - bounds[0])
b[b < 0] = bounds[0]
b[b > bounds[1]] = bounds[1]
return b
def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None):
vidseq = cv2.VideoCapture(dataFilePath)
print vidseq, vidseq.isOpened()
# print metadata
metadata = {}
numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT)
print '\tFRAME_COUNT = ', numFrames
metadata['FRAME_COUNT'] = numFrames
frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT)
if frameHeight > 0:
print '\tFRAME HEIGHT = ', frameHeight
metadata['FRAME_HEIGHT'] = frameHeight
frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH)
if frameWidth > 0:
print '\tFRAME WIDTH = ', frameWidth
metadata['FRAME_WIDTH'] = frameWidth
fps = vidseq.get(cv2.CAP_PROP_FPS)
if fps > 0:
print '\tFPS = ', fps
metadata['FPS'] = fps
fmt = vidseq.get(cv2.CAP_PROP_FORMAT)
if fmt > 0:
        print '\tFORMAT = ', fmt
metadata['FORMAT'] = fmt
vmode = vidseq.get(cv2.CAP_PROP_MODE)
if vmode > 0:
        print '\tMODE = ', vmode
        metadata['MODE'] = vmode
# smooth if wanted
if sigmaSmooth:
wSmooth = 4 * sigmaSmooth + 1
print metadata
# read video frames
imInput = []
fid = 0
prevPercent = 0
print '\n'
while True:
valid_object, frame = vidseq.read()
if not valid_object:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if resizeAmount:
frame = scipy.misc.imresize(frame, resizeAmount)
if sigmaSmooth:
frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0)
imInput.append(frame)
# update progress
fid += 1
curPercent = np.floor(100.0 * fid / numFrames)
if curPercent > prevPercent:
prevPercent = curPercent
print '%.2d%%' % curPercent,
print '\n'
imInput = np.dstack(imInput)
vidseq.release()
return (imInput, metadata)
def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False):
# start timer
tStart = time.time()
# write video
# fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4
fourcc = cv2.VideoWriter_fourcc(*list(codec))
height, width = imVideo.shape[:2]
writer = cv2.VideoWriter(filename, fourcc, fps=fps,
frameSize=(width, height), isColor=isColor)
print writer.isOpened()
numFrames = imVideo.shape[-1]
for fid in range(numFrames):
if isColor:
writer.write(imVideo[:, :, :, fid].astype('uint8'))
else:
writer.write(imVideo[:, :, fid].astype('uint8'))
# end timer
tEnd = time.time()
print 'Writing video {} took {} seconds'.format(filename, tEnd - tStart)
# release
writer.release()
def writeVideoAsTiffStack(imVideo, strFilePrefix):
# start timer
tStart = time.time()
for fid in range(imVideo.shape[2]):
plt.imsave(strFilePrefix + '.%.3d.tif' % (fid + 1), imVideo[:, :, fid])
# end timer
tEnd = time.time()
print 'Writing video {} took {} seconds'.format(strFilePrefix,
tEnd - tStart)
def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None):
plt.imshow(im.max(axis))
if title:
plt.title(title)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
def convertFromRFtoBMode(imInputRF):
return np.abs(scipy.signal.hilbert(imInputRF, axis=0))
def normalizeAngles(angleList, angle_range):
return np.array(
[angles.normalize(i, angle_range[0], angle_range[1]) for i in
angleList])
def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs):
for ext in saveext:
plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs)
def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)):
for ext in saveext:
plt.imsave(os.path.join(saveDir, fileName + ext), im)
def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame,
maxFrame, splineOrder):
tZoom = np.float(numOutFrames) / (maxFrame - minFrame + 1)
return scipy.ndimage.interpolation.zoom(
imInput[:, :, minFrame:maxFrame + 1], (1, 1, tZoom), order=splineOrder)
def ncorr(imA, imB):
imA = (imA - imA.mean()) / imA.std()
imB = (imB - imB.mean()) / imB.std()
return np.mean(imA * imB)
def vis_checkerboard(im1, im2):
im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1),
sitk.GetImageFromArray(im2))
return sitk.GetArrayFromImage(im_chk)
def fig2data(fig):
"""
@brief Convert a Matplotlib figure to a 4D numpy array with
RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
# canvas.tostring_argb give pixmap in ARGB mode.
# Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf | 2.421875 | 2 |
weasyl/emailer.py | akash143143/weasyl | 0 | 2471 | <filename>weasyl/emailer.py
from __future__ import absolute_import
import re
from email.mime.text import MIMEText
from smtplib import SMTP
from weasyl import define, macro
EMAIL_ADDRESS = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+\Z")
def normalize_address(address):
"""
Converts an e-mail address to a consistent representation.
Returns None if the given address is not considered valid.
"""
address = address.strip()
if not EMAIL_ADDRESS.match(address):
return None
local, domain = address.split("@", 1)
return "%s@%s" % (local, domain.lower())
def send(mailto, subject, content):
"""Send an e-mail.
`mailto` must be a normalized e-mail address to send this e-mail to. The
system email will be designated as the sender.
"""
message = MIMEText(content.strip())
message["To"] = mailto
message["From"] = macro.MACRO_EMAIL_ADDRESS
message["Subject"] = subject
# smtp.sendmail() only converts CR and LF (produced by MIMEText and our templates) to CRLF in Python 3. In Python 2, we need this:
msg_crlf = re.sub(r"\r\n|[\r\n]", "\r\n", message.as_string())
smtp = SMTP(define.config_read_setting('host', "localhost", section='smtp'))
try:
smtp.sendmail(
from_addr=macro.MACRO_EMAIL_ADDRESS,
to_addrs=[mailto],
msg=msg_crlf,
)
finally:
smtp.quit()
define.metric('increment', 'emails')
| 3 | 3 |
tests/test_missing_process.py | ricklupton/sphinx_probs_rdf | 1 | 2472 | <filename>tests/test_missing_process.py
import pytest
from rdflib import Graph, Namespace, Literal
from rdflib.namespace import RDF, RDFS
from sphinx_probs_rdf.directives import PROBS
SYS = Namespace("http://example.org/system/")
@pytest.mark.sphinx(
'probs_rdf', testroot='missing',
confoverrides={'probs_rdf_system_prefix': str(SYS)})
def test_builder_reports_warning_for_missing_process(app, status, warning):
app.builder.build_all()
assert "build succeeded" not in status.getvalue()
warnings = warning.getvalue().strip()
assert 'WARNING: Requested child "http://example.org/system/Missing" of "http://example.org/system/ErrorMissingProcess" is not a Process' in warnings
| 2.15625 | 2 |
analysis_functionarcademix.py | thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis | 0 | 2473 | <reponame>thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
#analysis function for three level game
def stat_analysis(c1,c2,c3):
#ask question for viewing analysis of game
analysis=input('\nDo you want to see your game analysis? (Yes/No) ')
if analysis=='Yes':
levels=['Level 1','Level 2','Level 3']
#calculating the score of levels
l1_score= c1*10
l2_score= c2*10
l3_score= c3*10
level_score=[l1_score,l2_score,l3_score]
#plot bar chart
plt.bar(levels,level_score,color='blue',edgecolor='black')
plt.title('Levelwise Scores',fontsize=16)#add title
plt.xlabel('Levels')#set x-axis label
plt.ylabel('Scores')#set y-axis label
plt.show()
print('\nDescriptive Statistics of Scores:')
#find mean value
print('\nMean: ',statistics.mean(level_score))
#find median value
        print('\nMedian: ',statistics.median(level_score))
#Mode calculation
#create numPy array of values with only one mode
arr_val = np.array(level_score)
#find unique values in array along with their counts
vals, uni_val_counts = np.unique(arr_val, return_counts=True)
#find mode
        mode_value = np.argwhere(uni_val_counts == np.max(uni_val_counts))
print('\nMode: ',vals[mode_value].flatten().tolist())
#find variance
print('\nVariance: ',np.var(level_score))
#find standard deviation
print('\nStandard Deviation: ',statistics.stdev(level_score))
print('\nGood Bye.See you later!!!')
elif analysis=='No':
print('\nGood Bye.See you later!!!')
else:
print('Invalid value enter')
stat_analysis(c1,c2,c3)
| 3.640625 | 4 |
Hello_Cone.py | TechnoTanuki/Python_BMP | 3 | 2474 | notice = """
Cone Demo
-----------------------------------
| Copyright 2022 by <NAME> |
| [<EMAIL>] |
|-----------------------------------|
| We make absolutely no warranty |
| of any kind, expressed or implied |
|-----------------------------------|
| This graphics library outputs |
| to a bitmap file. |
-----------------------------------
"""
from Python_BMP.BITMAPlib import(
newBMP,
centercoord,
plot3Dsolid,
getRGBfactors,
rotvec3D,
conevertandsurface,
saveBMP
)
import subprocess as proc
from os import path
def main():
print(notice)
imgedt = 'mspaint' # replace with another editor if Unix
rootdir = path.dirname(__file__) # get path of this script
mx = my = 250 # x=y square bmp
file = 'HelloCone.bmp' # some random file name as string
bmp = newBMP(mx, my, 24) # RGB bmp
cenpt = centercoord(bmp) # helper method to get center of a bitmap
cf = getRGBfactors() # color info with presets
d, translationvector = 400, [0, 0, 200] # be careful with these variables or object goes offscreen
isSolid = True # toggle solid or outline
showoutline = False # can show outline even if solid
cf = getRGBfactors() # color list
color = cf['brightyellow'] # color of solid
outlinecolor = 0 # outline color
rotation = rotvec3D(25,240,70) # rotation vector (x,y,z) in degrees
vcen = (1,0,0) # x y z coords
r = 40 # radius of cone
zlen = 40 # height of cone
deganglestep = 5 # how finely we tile flat surfaces around the cone
obj3D = conevertandsurface(vcen, r, zlen, deganglestep)# A solid is defined by vertices and surfaces
plot3Dsolid(bmp, obj3D, isSolid, color,
showoutline, outlinecolor,
rotation, translationvector, d, cenpt)
saveBMP(file, bmp) # save file
print('Saved to %s in %s\nAll done close %s to finish' % \
(file, rootdir, imgedt))
ret = proc.call([imgedt, file])
if __name__=="__main__":
main()
| 2.1875 | 2 |
analysis/training_curve_6D.py | AndrewKirby2/data_synthesis | 0 | 2475 | <reponame>AndrewKirby2/data_synthesis
""" Plot a training curve for the 6D data simulator of CT*
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
import sys
sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis')
from GP_machine_learning.GP_machine_learning_functions import *
from regular_array_sampling.functions import regular_array_monte_carlo
# create array to store results for plotting
rmse = np.ones((25, 2))
noise = 0.01
# create array of sampled regular array layouts
#cand_points = regular_array_monte_carlo(10000)
# create testing points
X_test, y_test = create_testing_points_regular(noise)
n = 0
n_target = 0
n_train = 0
while n_train < 200:
n_target = 100 +100*n
# create training points
X_train, y_train, n_train = \
create_training_points_irregular(n_target, noise)
# fit GP regression and calculate rmse
kernel = 1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1])
pipe = Pipeline([('scaler', StandardScaler()),
('gp', GaussianProcessRegressor(kernel=kernel,
n_restarts_optimizer=20))])
pipe.fit(X_train, y_train)
y_predict = pipe.predict(X_test)
mse = mean_squared_error(y_test, y_predict)
# report rmse
print(n_train, np.sqrt(mse))
rmse[n, 0] = n_train
rmse[n, 1] = np.sqrt(mse)
n += 1
plt.scatter(rmse[:, 0], rmse[:, 1])
plt.yscale('log')
plt.ylim([1e-3, 1e-1])
plt.xlim([0, 200])
plt.title('Training curve RBF - 6D 1% noise - irregular array training - max change halved')
plt.ylabel('RMSE')
plt.xlabel('Training points')
plt.savefig('analysis/GP_machine_learning_plots/\
gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png')
| 3.078125 | 3 |
website/raspac.py | tpudlik/RaspAC | 28 | 2476 | import sqlite3
import subprocess, datetime
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
from tquery import get_latest_record
from config import *
app = Flask(__name__)
app.config.from_object(__name__)
# DB helper functions
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def init_db():
"""Initializes the sqlite3 database. This function must be imported and
executed from the Python interpreter before the application is first run."""
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
# Auto-open and close DB when serving requests
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/', methods=['GET', 'POST'])
def welcome_page():
if 'username' in session and session['username']:
return redirect(url_for('submit_page'))
error = None
if request.method == 'POST': # someone's logging in
if not request.form['username'] in app.config['USERNAMES']:
error = 'username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'password'
else: # successful login
session['username'] = request.form['username']
flash('Hi ' + session['username'] + '!')
return redirect(url_for('submit_page'))
return render_template('welcome_page.html', commands=command_history(),
error=error, last_record=last_record())
@app.route('/submit', methods=['GET', 'POST'])
def submit_page():
error = None
if not session.get('username'):
abort(401)
if request.method == 'POST': # command is being issued to AC
user_mode = request.form['mode']
user_temperature = request.form['temperature']
validation_codes = validate_AC_command(user_mode, user_temperature)
if (validation_codes['mode_error'] or
validation_codes['temperature_error']):
error=validation_codes
else:
subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac',
validation_codes['command']])
g.db.execute('insert into commands (command, ts, user) values (?, ?, ?)',
[validation_codes['command'],
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
session['username']])
g.db.commit()
flash('Command submitted')
return render_template('submit_page.html', commands=command_history(),
error=error, last_record=last_record())
@app.route('/logout')
def logout():
    session.pop('username', None)
flash('You were logged out')
return redirect(url_for('welcome_page'))
def validate_AC_command(user_mode, user_temperature):
"""Validates and sanitizes user-input command; translates command
into irsend call."""
codes = dict()
if user_mode not in app.config['ACMODES']:
codes['mode_error'] = True
else:
codes['mode_error'] = False
    if user_mode != 'off' and user_temperature not in app.config['ACTEMPERATURES']:
codes['temperature_error'] = True
else:
codes['temperature_error'] = False
if not codes['mode_error'] and not codes['temperature_error']:
codes['mode'] = user_mode
codes['temperature'] = user_temperature
if codes['mode'] == 'off':
command_postfix = 'off'
elif codes['mode'] == 'heat':
command_postfix = 'heat' + codes['temperature']
else:
command_postfix = codes['temperature']
codes['command'] = command_postfix
return codes
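# Illustrative example (hedged; assumes 'heat' and 'off' are in ACMODES and '72'
# is in ACTEMPERATURES in config.py):
#   validate_AC_command('heat', '72') -> {..., 'command': 'heat72'}
#   validate_AC_command('off', '')    -> {..., 'command': 'off'}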
def command_history():
"""Returns a list of dictionaries, each containing a command issued
to the AC previously. The list is ordered chronologically, from newest
to oldest."""
cur = g.db.execute('select command, ts, user from commands order by id desc')
command_history = []
for row in cur.fetchall():
if row[0][0] == 'h':
cmd = 'heat to ' + row[0][4:]
elif row[0] == 'off':
cmd = 'off'
else:
cmd = 'cool to ' + row[0]
command_history.append(dict(command=cmd, ts=row[1], user=row[2]))
return command_history
def last_record():
"""Returns the last temperature and humidity record data.
The returned object is a dict with keys ts, fahrenheit, celsius and
humidity.
"""
db_record = get_latest_record()
out_record = dict()
out_record['date'] = db_record[0].strftime("%Y-%m-%d")
out_record['time'] = db_record[0].strftime("%H:%M")
out_record['celsius'] = db_record[1]
out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32))
out_record['humidity'] = int(round(db_record[2]))
return out_record
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 2.53125 | 3 |
tests/util_test.py | NLESC-JCER/pyspectra | 1 | 2477 | """Helper functions to tests."""
import numpy as np
def norm(vs: np.array) -> float:
"""Compute the norm of a vector."""
return np.sqrt(np.dot(vs, vs))
def create_random_matrix(size: int) -> np.array:
"""Create a numpy random matrix."""
return np.random.normal(size=size ** 2).reshape(size, size)
def create_symmetic_matrix(size: int) -> np.array:
"""Create a numpy symmetric matrix."""
xs = create_random_matrix(size)
return xs + xs.T
def check_eigenpairs(
matrix: np.ndarray, eigenvalues: np.ndarray,
eigenvectors: np.ndarray) -> bool:
"""Check that the eigenvalue equation holds."""
for i, value in enumerate(eigenvalues):
residue = np.dot(
matrix, eigenvectors[:, i]) - value * eigenvectors[:, i]
assert norm(residue) < 1e-8
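# Illustrative usage (hedged sketch): numpy's eigh on a random symmetric matrix
# should satisfy the eigenvalue equation checked above.
#   xs = create_symmetic_matrix(5)
#   values, vectors = np.linalg.eigh(xs)
#   check_eigenpairs(xs, values, vectors)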
| 3.265625 | 3 |
solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py | leetcode-notebook/wonz | 12 | 2478 | <filename>solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py<gh_stars>10-100
from typing import List
class Solution:
def findRepeatNumber(self, nums: List[int]) -> int:
        # solution one: hash table
n = len(nums)
flag = [False for i in range(n)]
for i in range(n):
if flag[nums[i]] == False:
flag[nums[i]] = True
else:
return nums[i]
return -1
        # solution two: sorting
nums.sort()
pre = nums[0]
for i in range(1, len(nums)):
if pre == nums[i]:
return nums[i]
else:
pre = nums[i]
return -1
        # solution three: use each index as a pigeonhole (swap values into place)
n = len(nums)
for i in range(n):
if nums[i] == i:
continue
            # duplicate found
elif nums[nums[i]] == nums[i]:
return nums[i]
            # swap
else:
nums[nums[i]], nums[i] = nums[i], nums[nums[i]]
return -1
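# Illustrative trace (added for clarity) of solution three on the sample input [2, 3, 1, 0, 2, 5, 3]:
#   i=0: nums[0]=2, nums[2]=1 -> swap -> [1, 3, 2, 0, 2, 5, 3]
#   i=1: nums[1]=3, nums[3]=0 -> swap -> [1, 0, 2, 3, 2, 5, 3]
#   i=2, i=3: values already match their indices -> continue
#   i=4: nums[4]=2 and nums[nums[4]]=nums[2]=2 -> duplicate found, return 2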
if __name__ == "__main__":
nums = [2, 3, 1, 0, 2, 5, 3]
print(Solution().findRepeatNumber(nums)) | 3.578125 | 4 |
examples/test_network.py | Charles-Peeke/gwu_nn | 4 | 2479 | <reponame>Charles-Peeke/gwu_nn<gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from gwu_nn.gwu_network import GWUNetwork
from gwu_nn.layers import Dense
from gwu_nn.activation_layers import Sigmoid
np.random.seed(8)
num_obs = 8000
# Create our features to draw from two distinct 2D normal distributions
x1 = np.random.multivariate_normal([0, 0], [[1, .75],[.75, 1]], num_obs)
x2 = np.random.multivariate_normal([3, 8], [[1, .25],[.25, 1]], num_obs)
# Stack our inputs into one feature space
X = np.vstack((x1, x2))
print(X.shape)
y = np.hstack((np.zeros(num_obs), np.ones(num_obs)))
print(y.shape)
# colors = ['red'] * num_obs + ['blue'] * num_obs
# plt.figure(figsize=(12,8))
# plt.scatter(X[:, 0], X[:, 1], c = colors, alpha = 0.5)
# Lets randomly split things into training and testing sets so we don't cheat
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Create our model
network = GWUNetwork()
network.add(Dense(2, 1, True, 'sigmoid'))
network.add(Sigmoid())
#network.set_loss('mse')
network.compile('log_loss', 0.001)
network.fit(X_train, y_train, epochs=100)
from scipy.special import logit
colors = ['red'] * num_obs + ['blue'] * num_obs
plt.figure(figsize=(12, 8))
plt.scatter(X[:, 0], X[:, 1], c=colors, alpha=0.5)
# Range of our X values
start_x1 = -5
end_x1 = 7
weights = network.layers[0].weights.reshape(-1).tolist()
bias = network.layers[0].bias[0][0]
start_y = (bias + start_x1 * weights[0] - logit(0.5)) / - weights[1]
end_y = (bias + end_x1 * weights[0] - logit(0.5)) / -weights[1]
plt.plot([start_x1, end_x1], [start_y, end_y], color='grey') | 2.875 | 3 |
scattering/van_hove.py | XiaoboLinlin/scattering | 0 | 2480 | <reponame>XiaoboLinlin/scattering
import itertools as it
import numpy as np
import mdtraj as md
from progressbar import ProgressBar
from scattering.utils.utils import get_dt
from scattering.utils.constants import get_form_factor
def compute_van_hove(trj, chunk_length, water=False,
r_range=(0, 1.0), bin_width=0.005, n_bins=None,
self_correlation=True, periodic=True, opt=True, partial=False):
"""Compute the partial van Hove function of a trajectory
Parameters
----------
trj : mdtraj.Trajectory
trajectory on which to compute the Van Hove function
chunk_length : int
length of time between restarting averaging
water : bool
use X-ray form factors for water that account for polarization
r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
Minimum and maximum radii.
bin_width : float, optional, default=0.005
Width of the bins in nanometers.
n_bins : int, optional, default=None
The number of bins. If specified, this will override the `bin_width`
parameter.
self_correlation : bool, default=True
Whether or not to include the self-self correlations
Returns
-------
r : numpy.ndarray
r positions generated by histogram binning
g_r_t : numpy.ndarray
Van Hove function at each time and position
"""
n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0])
unique_elements = list(set([a.element for a in trj.top.atoms if a.element.mass > 0]))
partial_dict = dict()
for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
print('doing {0} and {1} ...'.format(elem1, elem2))
r, g_r_t_partial = compute_partial_van_hove(trj=trj,
chunk_length=chunk_length,
selection1='element {}'.format(elem1.symbol),
selection2='element {}'.format(elem2.symbol),
r_range=r_range,
bin_width=bin_width,
n_bins=n_bins,
self_correlation=self_correlation,
periodic=periodic,
opt=opt)
partial_dict[(elem1, elem2)] = g_r_t_partial
if partial:
return partial_dict
norm = 0
g_r_t = None
for key, val in partial_dict.items():
elem1, elem2 = key
concentration1 = trj.atom_slice(trj.top.select('element {}'.format(elem1.symbol))).n_atoms / n_physical_atoms
concentration2 = trj.atom_slice(trj.top.select('element {}'.format(elem2.symbol))).n_atoms / n_physical_atoms
form_factor1 = get_form_factor(element_name=elem1.symbol, water=water)
form_factor2 = get_form_factor(element_name=elem2.symbol, water=water)
coeff = form_factor1 * concentration1 * form_factor2 * concentration2
if g_r_t is None:
g_r_t = np.zeros_like(val)
g_r_t += val * coeff
norm += coeff
# Reshape g_r_t to better represent the discretization in both r and t
g_r_t_final = np.empty(shape=(chunk_length, len(r)))
for i in range(chunk_length):
g_r_t_final[i, :] = np.mean(g_r_t[i::chunk_length], axis=0)
g_r_t_final /= norm
t = trj.time[:chunk_length]
return r, t, g_r_t_final
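# Illustrative usage (hedged sketch; the trajectory and topology file names are
# assumptions, not part of this module):
#   import mdtraj as md
#   trj = md.load('trajectory.xtc', top='topology.pdb')
#   r, t, g_r_t = compute_van_hove(trj, chunk_length=200, water=True)
#   # g_r_t has shape (chunk_length, len(r)); row 0 corresponds to zero time offset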
def compute_partial_van_hove(trj, chunk_length=10, selection1=None, selection2=None,
r_range=(0, 1.0), bin_width=0.005, n_bins=200,
self_correlation=True, periodic=True, opt=True):
"""Compute the partial van Hove function of a trajectory
Parameters
----------
trj : mdtraj.Trajectory
trajectory on which to compute the Van Hove function
chunk_length : int
length of time between restarting averaging
selection1 : str
selection to be considered, in the style of MDTraj atom selection
selection2 : str
selection to be considered, in the style of MDTraj atom selection
r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
Minimum and maximum radii.
bin_width : float, optional, default=0.005
Width of the bins in nanometers.
n_bins : int, optional, default=None
The number of bins. If specified, this will override the `bin_width`
parameter.
self_correlation : bool, default=True
Whether or not to include the self-self correlations
Returns
-------
r : numpy.ndarray
r positions generated by histogram binning
g_r_t : numpy.ndarray
Van Hove function at each time and position
"""
unique_elements = (
set([a.element for a in trj.atom_slice(trj.top.select(selection1)).top.atoms]),
set([a.element for a in trj.atom_slice(trj.top.select(selection2)).top.atoms]),
)
if any([len(val) > 1 for val in unique_elements]):
raise UserWarning(
'Multiple elements found in a selection(s). Results may not be '
'direcitly comprable to scattering experiments.'
)
# Don't need to store it, but this serves to check that dt is constant
dt = get_dt(trj)
pairs = trj.top.select_pairs(selection1=selection1, selection2=selection2)
n_chunks = int(trj.n_frames / chunk_length)
g_r_t = None
pbar = ProgressBar()
for i in pbar(range(n_chunks)):
times = list()
for j in range(chunk_length):
times.append([chunk_length*i, chunk_length*i+j])
r, g_r_t_frame = md.compute_rdf_t(
traj=trj,
pairs=pairs,
times=times,
r_range=r_range,
bin_width=bin_width,
n_bins=n_bins,
period_length=chunk_length,
self_correlation=self_correlation,
periodic=periodic,
opt=opt,
)
if g_r_t is None:
g_r_t = np.zeros_like(g_r_t_frame)
g_r_t += g_r_t_frame
return r, g_r_t
| 2.453125 | 2 |
nn_benchmark/networks/__init__.py | QDucasse/nn_benchmark | 18 | 2481 | # -*- coding: utf-8 -*-
# nn_benchmark
# author - <NAME>
# https://github.com/QDucasse
# <EMAIL>
from __future__ import absolute_import
__all__ = ["lenet","lenet5","quant_lenet5",
"quant_cnv", "quant_tfc",
"mobilenetv1","quant_mobilenetv1",
"vggnet", "quant_vggnet",
"common", "alexnet", "quant_alexnet"]
from .alexnet import *
from .lenet import *
from .lenet5 import *
from .mobilenetv1 import *
from .quant_mobilenetv1 import *
from .quant_alexnet import *
from .quant_lenet5 import *
from .quant_cnv import *
from .quant_tfc import *
from .vggnet import *
from .quant_vggnet import *
from .common import *
| 1.171875 | 1 |
Section1_Basics/contours.py | NeeharikaDva/opencv_course | 0 | 2482 | #pylint:disable=no-member
import cv2 as cv
import numpy as np
img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg')
cv.imshow('Cats', img)
#
blank = np.zeros(img.shape[:2], dtype='uint8')
cv.imshow('Blank', blank)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
#
blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)
cv.imshow('Blur', blur)
canny = cv.Canny(blur, 125, 175)
cv.imshow('Canny Edges', canny)
#
ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
cv.imshow('Thresh', thresh)
#
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours)} contour(s) found!')
#
cv.drawContours(blank, contours, -1, (200,120,100), 1)
cv.imshow('Contours Drawn', blank)
cv.waitKey(0) | 2.828125 | 3 |
mmdet/ops/fcosr_tools/__init__.py | RangeKing/FCOSR | 38 | 2483 | from . import fcosr_tools
__all__ = ['fcosr_tools'] | 1.117188 | 1 |
health_care/health_care/doctype/practitioner/practitioner.py | Jnalis/frappe-health-care | 0 | 2484 | # Copyright (c) 2022, Juve and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class Practitioner(Document):
def before_save(self):
self.practitioner_full_name = f'{self.first_name} {self.second_name or ""}'
| 1.992188 | 2 |
install-hooks.py | JustasGau/DonjinKrawler | 0 | 2485 | <reponame>JustasGau/DonjinKrawler
import sys
from os import path
import urllib; from urllib.request import urlretrieve
from subprocess import call
def install_hooks(directory):
checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar'
preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit'
checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1]
basePath = path.abspath(directory)
print("Downloading checkstyle to %s..." % basePath + "/.git/hooks/" + checkstyleName)
urlretrieve(checkstyleUrl, basePath + "/.git/hooks/" + checkstyleName)
print("Downloading pre-commit script to %s" % basePath + "/.git/hooks/pre-commit")
urlretrieve(preCommitUrl, basePath + "/.git/hooks/pre-commit")
with open(basePath + '/.git/config', 'a+') as gitConfig:
if ("[checkstyle]" not in gitConfig.read()):
print("Adding git configurations to .git/config")
gitConfig.write("[checkstyle]\n")
gitConfig.write("jar = %s\n" % (basePath + "/.git/hooks/" + checkstyleName))
gitConfig.write("checkfile = %s\n" % (basePath + "/checkstyle_config.xml"))
print("Changing permissions for pre-commit. Has to run as root, enter password plz")
call(["sudo", "chmod", "+x", (basePath + "/.git/hooks/pre-commit")])
if __name__ == "__main__":
if (len(sys.argv) < 2):
print("Enter a directory to install hooks")
else:
if (path.exists(sys.argv[1])):
install_hooks(sys.argv[1])
| 2.40625 | 2 |
09_MicroServer_Cookies/micro_server.py | Rockfish/PythonCourse | 0 | 2486 | """
Micro webapp based on WebOb, Jinja2, WSGI with a simple router
"""
import os
import hmac
import hashlib
import mimetypes
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from webob import Request
from webob import Response
from jinja2 import Environment, FileSystemLoader
class MicroServer(object):
"""Small web server."""
def __init__(self):
"""Initializes the class and configures the paths
and the Jinja2 environment so it can find and render pages."""
if self.static_root is None:
self.static_root = 'static'
if self.templates_root is None:
self.templates_root = 'templates'
if self.routes is None:
self.routes = {}
# Set up the paths and environment for Jinja. This is how it finds the templates.
self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root)
self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path))
# Figure out what directory the server is running it as save the path.
# The path will be used later to find the site's resources.
self.current_dir = os.path.dirname(os.path.realpath(__file__))
def __call__(self, environ, start_response):
"""This method is called by the HTTPServer when
there is a request to be handled."""
# Create the WebOb Request and Response objects for
# used to read the request and write the response.
self.request = Request(environ)
self.response = Response()
# Find a handler for the path if there is one.
handler = self.routes.get(self.request.path_info)
# If there is call it. If not call the static handler.
if handler:
handler()
else:
self.static()
return self.response(environ, start_response)
def static(self, resource=''):
"""Handles request for static pages. It is the default handler."""
# Build a file path using either the resource parameter or the path in the request.
if resource:
file_path = os.path.join(self.current_dir, self.static_root, resource)
else:
file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:])
print("File path:", file_path)
# Try to open the file. If we can then guess its type and write its
# content to the response object to send it to the client.
# If we can't find the file then return an error to the client.
try:
file_type = mimetypes.guess_type(file_path)[0]
self.response.content_type = file_type
data = open(file_path, 'rb').read()
self.response.body_file.write(data)
except Exception as e:
self.response.status = 404
self.response.write(str(e))
def render_template(self, template_name, template_values={}):
"""Renders Jinja2 templates into HTML"""
# Find the template and render it to HTML
# then write it to the response object to send it to the client.
template = self.env.get_template(template_name)
html = template.render(template_values)
self.response.write(html)
def get_signature(self, passphrase, *parts):
"""Creates a hash from strings based on a passphrase."""
cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1)
for part in parts:
cookiehash.update(part.encode())
return cookiehash.hexdigest()
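    # Illustrative usage (hedged example): sign a value before storing it in a
    # cookie so it can be verified on a later request, e.g.
    #   sig = self.get_signature('my-secret-passphrase', 'alice')
    #   self.response.set_cookie('user', 'alice|' + sig)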
def run(self, port):
"""Starts the HTTP server and tells it what port to listen on"""
# Create the WSGI HTTP server. Set the port it should listen on.
# And start the server.
        server = WSGIServer(('', port), WSGIRequestHandler)
        server.set_app(self)
        print("Serving on http://localhost:%s/ ..." % port)
server.serve_forever()
| 3.140625 | 3 |
apps/addons/management/commands/jetpackers.py | clouserw/olympia | 1 | 2487 | import logging
from django.core import mail
from django.conf import settings
from django.core.management.base import BaseCommand
import amo.utils
from users.models import UserProfile
log = logging.getLogger('z.mailer')
FROM = settings.DEFAULT_FROM_EMAIL
class Command(BaseCommand):
help = "Send the email for bug 662571"
def handle(self, *args, **options):
sendmail()
def sendmail():
addrs = set(UserProfile.objects.values_list('email', flat=True)
# whoa
.filter(addons__versions__files__jetpack_version__isnull=False))
log.info('There are %d emails to send.' % len(addrs))
count = 0
for addr in addrs:
count += 1
try:
mail.send_mail(SUBJECT, MSG, FROM, [addr])
log.info('%s. DONE: %s' % (count, addr))
        except Exception as e:
log.info('%s. FAIL: %s (%s)' % (count, addr, e))
SUBJECT = 'Instructions for Automatic Upgrade to Add-on SDK 1.0'
MSG = """\
Hello Mozilla Add-ons Developer!
With the final version of the Add-on SDK only a week away, we wanted to
get in touch with all add-on developers who have existing SDK-based
(Jetpack) add-ons. We would like you to know that going forward AMO
will be auto-updating add-ons with new versions of the Add-on SDK upon
release.
To ensure that your add-on(s) are auto-updated with the 1.0 final
version of the SDK, we would ask that you download the latest release
candidate build -
https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.tar.gz,
https://ftp.mozilla.org/pub/mozilla.org/labs/jetpack/addon-sdk-1.0rc2.zip
- and update your add-on(s) on AMO. After the 1.0 release, we will scan
our add-ons database and automatically upgrade any SDK-based add-ons we
find that are using verions 1.0RC2 or greater to the 1.0 final version
of the SDK. Any add-ons we find using versions of the SDK below 1.0RC2
will not be auto-updated and you will need to upgrade them to the 1.0
version of the SDK manually.
Thank you for participating in the early stages of the Add-on SDK's
development. Feedback and engagement from developers like you are the
foundations for success in our open source community!
Sincerely,
The Mozilla Add-ons Team
"""
| 2.046875 | 2 |
astroplan/constraints.py | edose/astroplan | 160 | 2488 | <gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Specify and constraints to determine which targets are observable for
an observer.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Standard library
from abc import ABCMeta, abstractmethod
import datetime
import time
import warnings
# Third-party
from astropy.time import Time
import astropy.units as u
from astropy.coordinates import get_body, get_sun, get_moon, Galactic, SkyCoord
from astropy import table
import numpy as np
from numpy.lib.stride_tricks import as_strided
# Package
from .moon import moon_illumination
from .utils import time_grid_from_range
from .target import get_skycoord
__all__ = ["AltitudeConstraint", "AirmassConstraint", "AtNightConstraint",
"is_observable", "is_always_observable", "time_grid_from_range",
"GalacticLatitudeConstraint", "SunSeparationConstraint",
"MoonSeparationConstraint", "MoonIlluminationConstraint",
"LocalTimeConstraint", "PrimaryEclipseConstraint",
"SecondaryEclipseConstraint", "Constraint", "TimeConstraint",
"observability_table", "months_observable", "max_best_rescale",
"min_best_rescale", "PhaseConstraint", "is_event_observable"]
_current_year = time.localtime().tm_year # needed for backward compatibility
_current_year_time_range = Time( # needed for backward compatibility
[str(_current_year) + '-01-01',
str(_current_year) + '-12-31']
)
def _make_cache_key(times, targets):
"""
Make a unique key to reference this combination of ``times`` and ``targets``.
Often, we wish to store expensive calculations for a combination of
``targets`` and ``times`` in a cache on an ``observer``` object. This
routine will provide an appropriate, hashable, key to store these
calculations in a dictionary.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
targets : `~astropy.coordinates.SkyCoord`
Target or list of targets.
Returns
-------
cache_key : tuple
A hashable tuple for use as a cache key
"""
# make a tuple from times
try:
timekey = tuple(times.jd) + times.shape
except BaseException: # must be scalar
timekey = (times.jd,)
# make hashable thing from targets coords
try:
if hasattr(targets, 'frame'):
# treat as a SkyCoord object. Accessing the longitude
# attribute of the frame data should be unique and is
# quicker than accessing the ra attribute.
targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape
else:
# assume targets is a string.
targkey = (targets,)
except BaseException:
targkey = (targets.frame.data.lon,)
return timekey + targkey
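# Illustrative example (hedged; 'cache' and 'expensive_altaz_result' are placeholder
# names): the key is just a hashable tuple derived from the times and target
# coordinates, suitable for use in a plain dict cache, e.g.
#   times = Time(['2020-01-01 00:00', '2020-01-01 01:00'])
#   targets = SkyCoord(ra=[10, 20]*u.deg, dec=[40, 50]*u.deg)
#   cache[_make_cache_key(times, targets)] = expensive_altaz_result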
def _get_altaz(times, observer, targets, force_zero_pressure=False):
"""
Calculate alt/az for ``target`` at times linearly spaced between
the two times in ``time_range`` with grid spacing ``time_resolution``
for ``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets.
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``.
force_zero_pressure : bool
Forcefully use 0 pressure.
Returns
-------
altaz_dict : dict
Dictionary containing two key-value pairs. (1) 'times' contains the
times for the alt/az computations, (2) 'altaz' contains the
corresponding alt/az coordinates at those times.
"""
if not hasattr(observer, '_altaz_cache'):
observer._altaz_cache = {}
# convert times, targets to tuple for hashing
aakey = _make_cache_key(times, targets)
if aakey not in observer._altaz_cache:
try:
if force_zero_pressure:
observer_old_pressure = observer.pressure
observer.pressure = 0
altaz = observer.altaz(times, targets, grid_times_targets=False)
observer._altaz_cache[aakey] = dict(times=times,
altaz=altaz)
finally:
if force_zero_pressure:
observer.pressure = observer_old_pressure
return observer._altaz_cache[aakey]
def _get_moon_data(times, observer, force_zero_pressure=False):
"""
Calculate moon altitude az and illumination for an array of times for
``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint.
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``.
force_zero_pressure : bool
Forcefully use 0 pressure.
Returns
-------
moon_dict : dict
Dictionary containing three key-value pairs. (1) 'times' contains the
times for the computations, (2) 'altaz' contains the
corresponding alt/az coordinates at those times and (3) contains
the moon illumination for those times.
"""
if not hasattr(observer, '_moon_cache'):
observer._moon_cache = {}
# convert times to tuple for hashing
aakey = _make_cache_key(times, 'moon')
if aakey not in observer._moon_cache:
try:
if force_zero_pressure:
observer_old_pressure = observer.pressure
observer.pressure = 0
altaz = observer.moon_altaz(times)
illumination = np.array(moon_illumination(times))
observer._moon_cache[aakey] = dict(times=times,
illum=illumination,
altaz=altaz)
finally:
if force_zero_pressure:
observer.pressure = observer_old_pressure
return observer._moon_cache[aakey]
def _get_meridian_transit_times(times, observer, targets):
"""
Calculate next meridian transit for an array of times for ``targets`` and
``observer``.
Cache the result on the ``observer`` object.
Parameters
----------
times : `~astropy.time.Time`
Array of times on which to test the constraint
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
Returns
-------
time_dict : dict
Dictionary containing a key-value pair. 'times' contains the
meridian_transit times.
"""
if not hasattr(observer, '_meridian_transit_cache'):
observer._meridian_transit_cache = {}
# convert times to tuple for hashing
aakey = _make_cache_key(times, targets)
if aakey not in observer._meridian_transit_cache:
meridian_transit_times = observer.target_meridian_transit_time(times, targets)
observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times)
return observer._meridian_transit_cache[aakey]
class Constraint(object):
"""
Abstract class for objects defining observational constraints.
"""
__metaclass__ = ABCMeta
def __call__(self, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour,
grid_times_targets=False):
"""
Compute the constraint for this class
Parameters
----------
observer : `~astroplan.Observer`
the observation location from which to apply the constraints
targets : sequence of `~astroplan.Target`
The targets on which to apply the constraints.
times : `~astropy.time.Time`
The times to compute the constraint.
            If ``times`` is given, ``time_range`` and
            ``time_grid_resolution`` are ignored.
time_range : `~astropy.time.Time` (length = 2)
Lower and upper bounds on time sequence.
time_grid_resolution : `~astropy.units.quantity`
Time-grid spacing
grid_times_targets : bool
if True, grids the constraint result with targets along the first
index and times along the second. Otherwise, we rely on broadcasting
the shapes together using standard numpy rules.
Returns
-------
constraint_result : 1D or 2D array of float or bool
The constraints. If 2D with targets along the first index and times along
the second.
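        Examples
        --------
        A minimal usage sketch; ``observer``, ``targets`` and ``time_range``
        are placeholders assumed to be defined elsewhere::
            constraint = AltitudeConstraint(min=30*u.deg)
            constraint(observer, targets, time_range=time_range,
                       grid_times_targets=True)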
"""
if times is None and time_range is not None:
times = time_grid_from_range(time_range,
time_resolution=time_grid_resolution)
if grid_times_targets:
targets = get_skycoord(targets)
# TODO: these broadcasting operations are relatively slow
# but there is potential for huge speedup if the end user
# disables gridding and re-shapes the coords themselves
# prior to evaluating multiple constraints.
if targets.isscalar:
# ensure we have a (1, 1) shape coord
targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis]
else:
targets = targets[..., np.newaxis]
times, targets = observer._preprocess_inputs(times, targets, grid_times_targets=False)
result = self.compute_constraint(times, observer, targets)
# make sure the output has the same shape as would result from
# broadcasting times and targets against each other
if targets is not None:
            # broadcasting times v targets is slow due to
            # the complex nature of these objects. We make
            # two simple numpy arrays of the same shapes and
            # broadcast these to find the correct output shape
shp1, shp2 = times.shape, targets.shape
x = np.array([1])
a = as_strided(x, shape=shp1, strides=[0] * len(shp1))
b = as_strided(x, shape=shp2, strides=[0] * len(shp2))
output_shape = np.broadcast(a, b).shape
if output_shape != np.array(result).shape:
result = np.broadcast_to(result, output_shape)
return result
@abstractmethod
def compute_constraint(self, times, observer, targets):
"""
Actually do the real work of computing the constraint. Subclasses
override this.
Parameters
----------
times : `~astropy.time.Time`
The times to compute the constraint
observer : `~astroplan.Observer`
            the observation location from which to apply the constraints
targets : sequence of `~astroplan.Target`
The targets on which to apply the constraints.
Returns
-------
constraint_result : 2D array of float or bool
The constraints, with targets along the first index and times along
the second.
"""
# Should be implemented on each subclass of Constraint
raise NotImplementedError
class AltitudeConstraint(Constraint):
"""
Constrain the altitude of the target.
.. note::
        This can misbehave if you try to constrain negative altitudes, as
        the `~astropy.coordinates.AltAz` frame tends to mishandle negative
        altitudes.
Parameters
----------
min : `~astropy.units.Quantity` or `None`
Minimum altitude of the target (inclusive). `None` indicates no limit.
max : `~astropy.units.Quantity` or `None`
Maximum altitude of the target (inclusive). `None` indicates no limit.
boolean_constraint : bool
If True, the constraint is treated as a boolean (True for within the
limits and False for outside). If False, the constraint returns a
float on [0, 1], where 0 is the min altitude and 1 is the max.
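    Examples
    --------
    A minimal usage sketch, constraining targets to altitudes between
    20 and 85 degrees (assumes ``astropy.units`` is imported as ``u``)::
        AltitudeConstraint(min=20*u.deg, max=85*u.deg)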
"""
def __init__(self, min=None, max=None, boolean_constraint=True):
if min is None:
self.min = -90*u.deg
else:
self.min = min
if max is None:
self.max = 90*u.deg
else:
self.max = max
self.boolean_constraint = boolean_constraint
def compute_constraint(self, times, observer, targets):
cached_altaz = _get_altaz(times, observer, targets)
alt = cached_altaz['altaz'].alt
if self.boolean_constraint:
lowermask = self.min <= alt
uppermask = alt <= self.max
return lowermask & uppermask
else:
return max_best_rescale(alt, self.min, self.max)
class AirmassConstraint(AltitudeConstraint):
"""
Constrain the airmass of a target.
In the current implementation the airmass is approximated by the secant of
the zenith angle.
.. note::
The ``max`` and ``min`` arguments appear in the order (max, min)
in this initializer to support the common case for users who care
about the upper limit on the airmass (``max``) and not the lower
limit.
Parameters
----------
max : float or `None`
Maximum airmass of the target. `None` indicates no limit.
min : float or `None`
Minimum airmass of the target. `None` indicates no limit.
    boolean_constraint : bool
        If True, the constraint is treated as a boolean (True for within the
        limits and False for outside). If False, the constraint returns a
        float on [0, 1], where the ``min`` airmass maps to 1 and the ``max``
        airmass maps to 0.
Examples
--------
To create a constraint that requires the airmass be "better than 2",
i.e. at a higher altitude than airmass=2::
AirmassConstraint(2)
"""
def __init__(self, max=None, min=1, boolean_constraint=True):
self.min = min
self.max = max
self.boolean_constraint = boolean_constraint
def compute_constraint(self, times, observer, targets):
cached_altaz = _get_altaz(times, observer, targets)
secz = cached_altaz['altaz'].secz.value
if self.boolean_constraint:
if self.min is None and self.max is not None:
mask = secz <= self.max
elif self.max is None and self.min is not None:
mask = self.min <= secz
elif self.min is not None and self.max is not None:
mask = (self.min <= secz) & (secz <= self.max)
else:
raise ValueError("No max and/or min specified in "
"AirmassConstraint.")
return mask
else:
if self.max is None:
raise ValueError("Cannot have a float AirmassConstraint if max is None.")
else:
mx = self.max
mi = 1 if self.min is None else self.min
# values below 1 should be disregarded
return min_best_rescale(secz, mi, mx, less_than_min=0)
class AtNightConstraint(Constraint):
"""
    Constrain the Sun to be below ``max_solar_altitude``.
    """
    @u.quantity_input(max_solar_altitude=u.deg)
def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True):
"""
Parameters
----------
max_solar_altitude : `~astropy.units.Quantity`
The altitude of the sun below which it is considered to be "night"
(inclusive).
force_pressure_zero : bool (optional)
Force the pressure to zero for solar altitude calculations. This
avoids errors in the altitude of the Sun that can occur when the
Sun is below the horizon and the corrections for atmospheric
refraction return nonsense values.
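        Examples
        --------
        A minimal usage sketch, requiring astronomical darkness::
            AtNightConstraint.twilight_astronomical()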
"""
self.max_solar_altitude = max_solar_altitude
self.force_pressure_zero = force_pressure_zero
@classmethod
def twilight_civil(cls, **kwargs):
"""
Consider nighttime as time between civil twilights (-6 degrees).
"""
return cls(max_solar_altitude=-6*u.deg, **kwargs)
@classmethod
def twilight_nautical(cls, **kwargs):
"""
Consider nighttime as time between nautical twilights (-12 degrees).
"""
return cls(max_solar_altitude=-12*u.deg, **kwargs)
@classmethod
def twilight_astronomical(cls, **kwargs):
"""
Consider nighttime as time between astronomical twilights (-18 degrees).
"""
return cls(max_solar_altitude=-18*u.deg, **kwargs)
def _get_solar_altitudes(self, times, observer, targets):
if not hasattr(observer, '_altaz_cache'):
observer._altaz_cache = {}
aakey = _make_cache_key(times, 'sun')
if aakey not in observer._altaz_cache:
try:
if self.force_pressure_zero:
observer_old_pressure = observer.pressure
observer.pressure = 0
# find solar altitude at these times
altaz = observer.altaz(times, get_sun(times))
altitude = altaz.alt
# cache the altitude
observer._altaz_cache[aakey] = dict(times=times,
altitude=altitude)
finally:
if self.force_pressure_zero:
observer.pressure = observer_old_pressure
else:
altitude = observer._altaz_cache[aakey]['altitude']
return altitude
def compute_constraint(self, times, observer, targets):
solar_altitude = self._get_solar_altitudes(times, observer, targets)
mask = solar_altitude <= self.max_solar_altitude
return mask
class GalacticLatitudeConstraint(Constraint):
"""
Constrain the distance between the Galactic plane and some targets.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable Galactic latitude of target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
            Maximum acceptable Galactic latitude of target (inclusive).
`None` indicates no limit.
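        Examples
        --------
        A minimal usage sketch, keeping targets at least 10 degrees away from
        the Galactic plane (assumes ``astropy.units`` is imported as ``u``)::
            GalacticLatitudeConstraint(min=10*u.deg)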
"""
self.min = min
self.max = max
def compute_constraint(self, times, observer, targets):
separation = abs(targets.transform_to(Galactic).b)
if self.min is None and self.max is not None:
mask = self.max >= separation
elif self.max is None and self.min is not None:
mask = self.min <= separation
elif self.min is not None and self.max is not None:
mask = ((self.min <= separation) & (separation <= self.max))
else:
raise ValueError("No max and/or min specified in "
"GalacticLatitudeConstraint.")
return mask
class SunSeparationConstraint(Constraint):
"""
Constrain the distance between the Sun and some targets.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable separation between Sun and target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
Maximum acceptable separation between Sun and target (inclusive).
`None` indicates no limit.
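        Examples
        --------
        A minimal usage sketch, requiring at least 30 degrees of separation
        from the Sun (assumes ``astropy.units`` is imported as ``u``)::
            SunSeparationConstraint(min=30*u.deg)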
"""
self.min = min
self.max = max
def compute_constraint(self, times, observer, targets):
# use get_body rather than get sun here, since
# it returns the Sun's coordinates in an observer
# centred frame, so the separation is as-seen
# by the observer.
# 'get_sun' returns ICRS coords.
sun = get_body('sun', times, location=observer.location)
solar_separation = sun.separation(targets)
if self.min is None and self.max is not None:
mask = self.max >= solar_separation
elif self.max is None and self.min is not None:
mask = self.min <= solar_separation
elif self.min is not None and self.max is not None:
mask = ((self.min <= solar_separation) &
(solar_separation <= self.max))
else:
raise ValueError("No max and/or min specified in "
"SunSeparationConstraint.")
return mask
class MoonSeparationConstraint(Constraint):
"""
Constrain the distance between the Earth's moon and some targets.
"""
def __init__(self, min=None, max=None, ephemeris=None):
"""
Parameters
----------
min : `~astropy.units.Quantity` or `None` (optional)
Minimum acceptable separation between moon and target (inclusive).
`None` indicates no limit.
max : `~astropy.units.Quantity` or `None` (optional)
Maximum acceptable separation between moon and target (inclusive).
`None` indicates no limit.
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
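        Examples
        --------
        A minimal usage sketch, requiring at least 40 degrees of separation
        from the Moon (assumes ``astropy.units`` is imported as ``u``)::
            MoonSeparationConstraint(min=40*u.deg)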
"""
self.min = min
self.max = max
self.ephemeris = ephemeris
def compute_constraint(self, times, observer, targets):
        # removed the location argument here, which causes small <1 deg
        # inaccuracies, but it is needed until astropy PR #5897 is released,
        # which should be in astropy 1.3.2
moon = get_moon(times,
ephemeris=self.ephemeris)
# note to future editors - the order matters here
# moon.separation(targets) is NOT the same as targets.separation(moon)
# the former calculates the separation in the frame of the moon coord
# which is GCRS, and that is what we want.
moon_separation = moon.separation(targets)
if self.min is None and self.max is not None:
mask = self.max >= moon_separation
elif self.max is None and self.min is not None:
mask = self.min <= moon_separation
elif self.min is not None and self.max is not None:
mask = ((self.min <= moon_separation) &
(moon_separation <= self.max))
else:
raise ValueError("No max and/or min specified in "
"MoonSeparationConstraint.")
return mask
class MoonIlluminationConstraint(Constraint):
"""
Constrain the fractional illumination of the Earth's moon.
Constraint is also satisfied if the Moon has set.
"""
def __init__(self, min=None, max=None, ephemeris=None):
"""
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
`~astropy.coordinates.solar_system_ephemeris` (which is
set to 'builtin' by default).
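        Examples
        --------
        A minimal usage sketch, requiring the Moon to be at most 30 per cent
        illuminated (or below the horizon)::
            MoonIlluminationConstraint(max=0.3)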
"""
self.min = min
self.max = max
self.ephemeris = ephemeris
@classmethod
def dark(cls, min=None, max=0.25, **kwargs):
"""
initialize a `~astroplan.constraints.MoonIlluminationConstraint`
with defaults of no minimum and a maximum of 0.25
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
"""
return cls(min, max, **kwargs)
@classmethod
def grey(cls, min=0.25, max=0.65, **kwargs):
"""
initialize a `~astroplan.constraints.MoonIlluminationConstraint`
with defaults of a minimum of 0.25 and a maximum of 0.65
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
"""
return cls(min, max, **kwargs)
@classmethod
def bright(cls, min=0.65, max=None, **kwargs):
"""
initialize a `~astroplan.constraints.MoonIlluminationConstraint`
with defaults of a minimum of 0.65 and no maximum
Parameters
----------
min : float or `None` (optional)
Minimum acceptable fractional illumination (inclusive). `None`
indicates no limit.
max : float or `None` (optional)
Maximum acceptable fractional illumination (inclusive). `None`
indicates no limit.
"""
return cls(min, max, **kwargs)
def compute_constraint(self, times, observer, targets):
# first is the moon up?
cached_moon = _get_moon_data(times, observer)
moon_alt = cached_moon['altaz'].alt
moon_down_mask = moon_alt < 0
moon_up_mask = moon_alt >= 0
illumination = cached_moon['illum']
if self.min is None and self.max is not None:
mask = (self.max >= illumination) | moon_down_mask
elif self.max is None and self.min is not None:
mask = (self.min <= illumination) & moon_up_mask
elif self.min is not None and self.max is not None:
mask = ((self.min <= illumination) &
(illumination <= self.max)) & moon_up_mask
else:
            raise ValueError("No max and/or min specified in "
                             "MoonIlluminationConstraint.")
return mask
class LocalTimeConstraint(Constraint):
"""
Constrain the observable hours.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~datetime.time`
Earliest local time (inclusive). `None` indicates no limit.
max : `~datetime.time`
Latest local time (inclusive). `None` indicates no limit.
Examples
--------
Constrain the observations to targets that are observable between
23:50 and 04:08 local time:
>>> from astroplan import Observer
>>> from astroplan.constraints import LocalTimeConstraint
>>> import datetime as dt
>>> subaru = Observer.at_site("Subaru", timezone="US/Hawaii")
>>> # bound times between 23:50 and 04:08 local Hawaiian time
>>> constraint = LocalTimeConstraint(min=dt.time(23,50), max=dt.time(4,8))
"""
self.min = min
self.max = max
if self.min is None and self.max is None:
raise ValueError("You must at least supply either a minimum or a maximum time.")
if self.min is not None:
if not isinstance(self.min, datetime.time):
raise TypeError("Time limits must be specified as datetime.time objects.")
if self.max is not None:
if not isinstance(self.max, datetime.time):
raise TypeError("Time limits must be specified as datetime.time objects.")
def compute_constraint(self, times, observer, targets):
timezone = None
# get timezone from time objects, or from observer
if self.min is not None:
timezone = self.min.tzinfo
elif self.max is not None:
timezone = self.max.tzinfo
if timezone is None:
timezone = observer.timezone
if self.min is not None:
min_time = self.min
else:
            min_time = datetime.time(0, 0, 0)
if self.max is not None:
max_time = self.max
else:
max_time = datetime.time(23, 59, 59)
# If time limits occur on same day:
if min_time < max_time:
try:
mask = np.array([min_time <= t.time() <= max_time for t in times.datetime])
            except BaseException:  # use np.bool_ so shape queries don't cause problems
mask = np.bool_(min_time <= times.datetime.time() <= max_time)
# If time boundaries straddle midnight:
else:
try:
mask = np.array([(t.time() >= min_time) or
(t.time() <= max_time) for t in times.datetime])
except BaseException:
mask = np.bool_((times.datetime.time() >= min_time) or
(times.datetime.time() <= max_time))
return mask
class TimeConstraint(Constraint):
"""Constrain the observing time to be within certain time limits.
An example use case for this class would be to associate an acceptable
time range with a specific observing block. This can be useful if not
all observing blocks are valid over the time limits used in calls
to `is_observable` or `is_always_observable`.
"""
def __init__(self, min=None, max=None):
"""
Parameters
----------
min : `~astropy.time.Time`
Earliest time (inclusive). `None` indicates no limit.
max : `~astropy.time.Time`
Latest time (inclusive). `None` indicates no limit.
Examples
--------
Constrain the observations to targets that are observable between
2016-03-28 and 2016-03-30:
>>> from astroplan import Observer
>>> from astropy.time import Time
>>> subaru = Observer.at_site("Subaru")
>>> t1 = Time("2016-03-28T12:00:00")
>>> t2 = Time("2016-03-30T12:00:00")
>>> constraint = TimeConstraint(t1,t2)
"""
self.min = min
self.max = max
if self.min is None and self.max is None:
raise ValueError("You must at least supply either a minimum or a "
"maximum time.")
if self.min is not None:
if not isinstance(self.min, Time):
raise TypeError("Time limits must be specified as "
"astropy.time.Time objects.")
if self.max is not None:
if not isinstance(self.max, Time):
raise TypeError("Time limits must be specified as "
"astropy.time.Time objects.")
def compute_constraint(self, times, observer, targets):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
min_time = Time("1950-01-01T00:00:00") if self.min is None else self.min
max_time = Time("2120-01-01T00:00:00") if self.max is None else self.max
mask = np.logical_and(times > min_time, times < max_time)
return mask
class PrimaryEclipseConstraint(Constraint):
"""
Constrain observations to times during primary eclipse.
"""
def __init__(self, eclipsing_system):
"""
Parameters
----------
eclipsing_system : `~astroplan.periodic.EclipsingSystem`
System which must be in primary eclipse.
"""
self.eclipsing_system = eclipsing_system
def compute_constraint(self, times, observer=None, targets=None):
mask = self.eclipsing_system.in_primary_eclipse(times)
return mask
class SecondaryEclipseConstraint(Constraint):
"""
Constrain observations to times during secondary eclipse.
"""
def __init__(self, eclipsing_system):
"""
Parameters
----------
eclipsing_system : `~astroplan.periodic.EclipsingSystem`
System which must be in secondary eclipse.
"""
self.eclipsing_system = eclipsing_system
def compute_constraint(self, times, observer=None, targets=None):
mask = self.eclipsing_system.in_secondary_eclipse(times)
return mask
class PhaseConstraint(Constraint):
"""
Constrain observations to times in some range of phases for a periodic event
(e.g.~transiting exoplanets, eclipsing binaries).
"""
def __init__(self, periodic_event, min=None, max=None):
"""
Parameters
----------
periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass
System on which to compute the phase. For example, the system
could be an eclipsing or non-eclipsing binary, or exoplanet system.
min : float (optional)
Minimum phase (inclusive) on interval [0, 1). Default is zero.
max : float (optional)
Maximum phase (inclusive) on interval [0, 1). Default is one.
Examples
--------
To constrain observations on orbital phases between 0.4 and 0.6,
>>> from astroplan import PeriodicEvent
>>> from astropy.time import Time
>>> import astropy.units as u
>>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day)
>>> constraint = PhaseConstraint(binary, min=0.4, max=0.6)
The minimum and maximum phase must be described on the interval [0, 1).
To constrain observations on orbital phases between 0.6 and 1.2, for
example, you should subtract one from the second number:
>>> constraint = PhaseConstraint(binary, min=0.6, max=0.2)
"""
self.periodic_event = periodic_event
        if ((min is not None and (min < 0 or min > 1)) or
                (max is not None and (max < 0 or max > 1))):
            raise ValueError('The minimum and maximum of the PhaseConstraint '
                             'must be within the interval [0, 1).')
self.min = min if min is not None else 0.0
self.max = max if max is not None else 1.0
def compute_constraint(self, times, observer=None, targets=None):
phase = self.periodic_event.phase(times)
mask = np.where(self.max > self.min,
(phase >= self.min) & (phase <= self.max),
(phase >= self.min) | (phase <= self.max))
return mask
def is_always_observable(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
    A function to determine whether ``targets`` are always observable
    throughout ``time_range`` given the constraints in ``constraints`` for a
    particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
    always_observable : list
        List of booleans of same length as ``targets`` for whether or not each
        target is always observable in the time range given the constraints.
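    Examples
    --------
    A minimal sketch; ``observer``, ``targets`` and ``time_range`` are
    placeholders assumed to be defined elsewhere::
        constraints = [AltitudeConstraint(min=30*u.deg),
                       AtNightConstraint.twilight_astronomical()]
        is_always_observable(constraints, observer, targets,
                             time_range=time_range)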
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
return np.all(constraint_arr, axis=1)
def is_observable(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
    Determines if the ``targets`` are observable during ``time_range`` given
    the constraints in ``constraints`` for a particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
ever_observable : list
List of booleans of same length as ``targets`` for whether or not each
target is ever observable in the time range given the constraints.
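    Examples
    --------
    A minimal sketch; ``observer``, ``targets`` and ``time_range`` are
    placeholders assumed to be defined elsewhere::
        constraints = [AirmassConstraint(2), AtNightConstraint.twilight_civil()]
        is_observable(constraints, observer, targets, time_range=time_range)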
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
return np.any(constraint_arr, axis=1)
def is_event_observable(constraints, observer, target, times=None,
times_ingress_egress=None):
"""
Determines if the ``target`` is observable at each time in ``times``, given
constraints in ``constraints`` for a particular ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target
times : `~astropy.time.Time` (optional)
Array of mid-event times on which to test the constraints
times_ingress_egress : `~astropy.time.Time` (optional)
Array of ingress and egress times for ``N`` events, with shape
(``N``, 2).
Returns
-------
event_observable : `~numpy.ndarray`
Array of booleans of same length as ``times`` for whether or not the
target is ever observable at each time, given the constraints.
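    Examples
    --------
    A minimal sketch; ``observer``, ``target`` and the mid-event ``times``
    are placeholders assumed to be defined elsewhere::
        constraints = [AltitudeConstraint(min=30*u.deg)]
        is_event_observable(constraints, observer, target, times=times)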
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
if times is not None:
applied_constraints = [constraint(observer, target, times=times,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
else:
times_ing = times_ingress_egress[:, 0]
times_egr = times_ingress_egress[:, 1]
applied_constraints_ing = [constraint(observer, target, times=times_ing,
grid_times_targets=True)
for constraint in constraints]
applied_constraints_egr = [constraint(observer, target, times=times_egr,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and(np.logical_and.reduce(applied_constraints_ing),
np.logical_and.reduce(applied_constraints_egr))
return constraint_arr
def months_observable(constraints, observer, targets,
time_range=_current_year_time_range,
time_grid_resolution=0.5*u.hour):
"""
    Determines which months the specified ``targets`` are observable for a
    specific ``observer``, given the supplied ``constraints``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence
If ``time_range`` is not specified, defaults to current year (localtime)
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
observable_months : list
List of sets of unique integers representing each month that a target is
observable, one set per target. These integers are 1-based so that
January maps to 1, February maps to 2, etc.
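    Examples
    --------
    A minimal sketch; ``observer`` and ``targets`` are placeholders assumed
    to be defined elsewhere::
        constraints = [AltitudeConstraint(min=40*u.deg)]
        months_observable(constraints, observer, targets)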
"""
# TODO: This method could be sped up a lot by dropping to the trigonometric
# altitude calculations.
if not hasattr(constraints, '__len__'):
constraints = [constraints]
times = time_grid_from_range(time_range, time_grid_resolution)
applied_constraints = [constraint(observer, targets,
times=times,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
months_observable = []
for target, observable in zip(targets, constraint_arr):
s = set([t.datetime.month for t in times[observable]])
months_observable.append(s)
return months_observable
def observability_table(constraints, observer, targets, times=None,
time_range=None, time_grid_resolution=0.5*u.hour):
"""
    Creates a table with information about observability for all the
    ``targets`` over the requested ``time_range``, given the constraints in
    ``constraints`` for ``observer``.
Parameters
----------
constraints : list or `~astroplan.constraints.Constraint`
Observational constraint(s)
observer : `~astroplan.Observer`
The observer who has constraints ``constraints``
targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`}
Target or list of targets
times : `~astropy.time.Time` (optional)
Array of times on which to test the constraint
time_range : `~astropy.time.Time` (optional)
Lower and upper bounds on time sequence, with spacing
``time_resolution``. This will be passed as the first argument into
`~astroplan.time_grid_from_range`. If a single (scalar) time, the table
will be for a 24 hour period centered on that time.
time_grid_resolution : `~astropy.units.Quantity` (optional)
If ``time_range`` is specified, determine whether constraints are met
between test times in ``time_range`` by checking constraint at
linearly-spaced times separated by ``time_resolution``. Default is 0.5
hours.
Returns
-------
observability_table : `~astropy.table.Table`
A Table containing the observability information for each of the
``targets``. The table contains four columns with information about the
        target and its observability: ``'target name'``, ``'ever observable'``,
``'always observable'``, and ``'fraction of time observable'``. The
column ``'time observable'`` will also be present if the ``time_range``
is given as a scalar. It also contains metadata entries ``'times'``
(with an array of all the times), ``'observer'`` (the
`~astroplan.Observer` object), and ``'constraints'`` (containing the
supplied ``constraints``).
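    Examples
    --------
    A minimal sketch; ``observer``, ``targets`` and ``time_range`` are
    placeholders assumed to be defined elsewhere::
        constraints = [AltitudeConstraint(min=30*u.deg),
                       AtNightConstraint.twilight_astronomical()]
        observability_table(constraints, observer, targets,
                            time_range=time_range)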
"""
if not hasattr(constraints, '__len__'):
constraints = [constraints]
is_24hr_table = False
if hasattr(time_range, 'isscalar') and time_range.isscalar:
time_range = (time_range-12*u.hour, time_range+12*u.hour)
is_24hr_table = True
applied_constraints = [constraint(observer, targets, times=times,
time_range=time_range,
time_grid_resolution=time_grid_resolution,
grid_times_targets=True)
for constraint in constraints]
constraint_arr = np.logical_and.reduce(applied_constraints)
colnames = ['target name', 'ever observable', 'always observable',
'fraction of time observable']
target_names = [target.name for target in targets]
ever_obs = np.any(constraint_arr, axis=1)
always_obs = np.all(constraint_arr, axis=1)
frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1]
tab = table.Table(names=colnames, data=[target_names, ever_obs, always_obs,
frac_obs])
if times is None and time_range is not None:
times = time_grid_from_range(time_range,
time_resolution=time_grid_resolution)
if is_24hr_table:
tab['time observable'] = tab['fraction of time observable'] * 24*u.hour
tab.meta['times'] = times.datetime
tab.meta['observer'] = observer
tab.meta['constraints'] = constraints
return tab
def min_best_rescale(vals, min_val, max_val, less_than_min=1):
"""
rescales an input array ``vals`` to be a score (between zero and one),
where the ``min_val`` goes to one, and the ``max_val`` goes to zero.
Parameters
----------
vals : array-like
the values that need to be rescaled to be between 0 and 1
    min_val : float
        best value cared about (rescales to 1)
    max_val : float
        worst acceptable value (rescales to 0)
less_than_min : 0 or 1
what is returned for ``vals`` below ``min_val``. (in some cases
anything less than ``min_val`` should also return one,
in some cases it should return zero)
Returns
-------
array of floats between 0 and 1 inclusive rescaled so that
``vals`` equal to ``max_val`` equal 0 and those equal to
``min_val`` equal 1
Examples
--------
    rescale airmasses to be between 0 and 1, with the best airmass (1)
    going to 1 and the worst (2.25) going to 0. All values outside the
    range should return 0.
>>> from astroplan.constraints import min_best_rescale
>>> import numpy as np
>>> airmasses = np.array([1, 1.5, 2, 3, 0])
>>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0) # doctest: +FLOAT_CMP
array([ 1. , 0.6, 0.2, 0. , 0. ])
"""
rescaled = (vals - max_val) / (min_val - max_val)
below = vals < min_val
above = vals > max_val
rescaled[below] = less_than_min
rescaled[above] = 0
return rescaled
def max_best_rescale(vals, min_val, max_val, greater_than_max=1):
"""
rescales an input array ``vals`` to be a score (between zero and one),
where the ``max_val`` goes to one, and the ``min_val`` goes to zero.
Parameters
----------
vals : array-like
the values that need to be rescaled to be between 0 and 1
min_val : float
worst acceptable value (rescales to 0)
max_val : float
best value cared about (rescales to 1)
greater_than_max : 0 or 1
what is returned for ``vals`` above ``max_val``. (in some cases
anything higher than ``max_val`` should also return one,
in some cases it should return zero)
Returns
-------
array of floats between 0 and 1 inclusive rescaled so that
``vals`` equal to ``min_val`` equal 0 and those equal to
``max_val`` equal 1
Examples
--------
rescale an array of altitudes to be between 0 and 1,
with the best (60) going to 1 and worst (35) going to
0. For values outside the range, the rescale should
return 0 below 35 and 1 above 60.
>>> from astroplan.constraints import max_best_rescale
>>> import numpy as np
>>> altitudes = np.array([20, 30, 40, 45, 55, 70])
>>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP
array([ 0. , 0. , 0.2, 0.4, 0.8, 1. ])
"""
rescaled = (vals - min_val) / (max_val - min_val)
below = vals < min_val
above = vals > max_val
rescaled[below] = 0
rescaled[above] = greater_than_max
return rescaled
| 1.820313 | 2 |
backend/views.py | Raulios/django-blog | 0 | 2489 | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.http import HttpResponseRedirect
from core.models import Post, Category, Tag
from backend.forms import PostForm, CategoryForm, TagForm
# Create your views here.
@login_required()
def index(request):
context = {}
context['nav_active'] = 'index'
return render(request, 'backend/index.html', context)
@login_required()
def posts(request):
context = {}
context['nav_active'] = 'posts'
post_list = Post.objects.all()
paginator = Paginator(list(reversed(post_list)), 10)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context['posts'] = posts
return render(request, 'backend/posts.html', context)
@login_required()
def add_post(request):
context = {}
context['nav_active'] = 'posts'
form = PostForm()
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
form.save()
messages.success(request, 'Post created.')
return HttpResponseRedirect(reverse('user_panel_posts'))
context['form'] = form
return render(request, 'backend/edit_post.html', context)
@login_required()
def edit_post(request, post_id):
context = {}
context['nav_active'] = 'posts'
post = Post.objects.get(pk=post_id)
context['post'] = post
form = PostForm(instance=post)
if request.method == 'POST':
form = PostForm(request.POST, request.FILES, instance=post)
if form.is_valid():
form.save()
messages.success(request, 'Post updated.')
return HttpResponseRedirect(reverse('user_panel_posts'))
context['form'] = form
return render(request, 'backend/edit_post.html', context)
@login_required()
def delete_post(request, post_id):
context = {}
context['nav_active'] = 'posts'
post = Post.objects.get(pk=post_id)
post.delete()
messages.success(request, 'Post deleted.')
return HttpResponseRedirect(reverse('user_panel_posts'))
@login_required()
def categories(request):
context = {}
context['nav_active'] = 'categories'
categories_list = Category.objects.all()
paginator = Paginator(list(reversed(categories_list)), 10)
page = request.GET.get('page')
try:
categories = paginator.page(page)
except PageNotAnInteger:
categories = paginator.page(1)
except EmptyPage:
categories = paginator.page(paginator.num_pages)
context['categories'] = categories
return render(request, 'backend/categories.html', context)
@login_required()
def add_category(request):
context = {}
context['nav_active'] = 'categories'
form = CategoryForm()
if request.method == 'POST':
form = CategoryForm(request.POST, request.FILES)
if form.is_valid():
form.save()
messages.success(request, 'Category created.')
return HttpResponseRedirect(reverse('user_panel_categories'))
context['form'] = form
return render(request, 'backend/edit_category.html', context)
@login_required()
def edit_category(request, category_id):
context = {}
context['nav_active'] = 'categories'
category = Category.objects.get(pk=category_id)
context['category'] = category
form = CategoryForm(instance=category)
if request.method == 'POST':
form = CategoryForm(request.POST, request.FILES, instance=category)
if form.is_valid():
form.save()
messages.success(request, 'Category updated.')
return HttpResponseRedirect(reverse('user_panel_categories'))
context['form'] = form
return render(request, 'backend/edit_category.html', context)
@login_required()
def delete_category(request, category_id):
context = {}
context['nav_active'] = 'categories'
category = Category.objects.get(pk=category_id)
category.delete()
messages.success(request, 'Category deleted.')
return HttpResponseRedirect(reverse('user_panel_categories'))
@login_required()
def tags(request):
context = {}
context['nav_active'] = 'tags'
tags_list = Tag.objects.all()
paginator = Paginator(list(reversed(tags_list)), 10)
page = request.GET.get('page')
try:
tags = paginator.page(page)
except PageNotAnInteger:
tags = paginator.page(1)
except EmptyPage:
tags = paginator.page(paginator.num_pages)
context['tags'] = tags
return render(request, 'backend/tags.html', context)
@login_required()
def add_tag(request):
context = {}
context['nav_active'] = 'tags'
form = TagForm()
if request.method == 'POST':
form = TagForm(request.POST, request.FILES)
if form.is_valid():
form.save()
messages.success(request, 'Tag created.')
return HttpResponseRedirect(reverse('user_panel_tags'))
context['form'] = form
return render(request, 'backend/edit_tag.html', context)
@login_required()
def edit_tag(request, tag_id):
context = {}
context['nav_active'] = 'tags'
tag = Tag.objects.get(pk=tag_id)
context['tag'] = tag
form = TagForm(instance=tag)
if request.method == 'POST':
form = TagForm(request.POST, request.FILES, instance=tag)
if form.is_valid():
form.save()
messages.success(request, 'Tag updated.')
return HttpResponseRedirect(reverse('user_panel_tags'))
context['form'] = form
return render(request, 'backend/edit_tag.html', context)
@login_required()
def delete_tag(request, tag_id):
context = {}
context['nav_active'] = 'tags'
tag = Tag.objects.get(pk=tag_id)
tag.delete()
messages.success(request, 'Tag deleted.')
return HttpResponseRedirect(reverse('user_panel_tags')) | 2.125 | 2 |
tiktorch/server/session/process.py | FynnBe/tiktorch | 0 | 2490 | import dataclasses
import io
import multiprocessing as _mp
import uuid
import zipfile
from concurrent.futures import Future
from multiprocessing.connection import Connection
from typing import List, Optional, Tuple
import numpy
from tiktorch import log
from tiktorch.rpc import Shutdown
from tiktorch.rpc import mp as _mp_rpc
from tiktorch.rpc.mp import MPServer
from tiktorch.server.reader import eval_model_zip
from .backend import base
from .rpc_interface import IRPCModelSession
@dataclasses.dataclass
class ModelInfo:
# TODO: Test for model info
name: str
input_axes: str
output_axes: str
valid_shapes: List[List[Tuple[str, int]]]
halo: List[Tuple[str, int]]
offset: List[Tuple[str, int]]
scale: List[Tuple[str, float]]
class ModelSessionProcess(IRPCModelSession):
def __init__(self, model_zip: bytes, devices: List[str]) -> None:
with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file:
self._model = eval_model_zip(model_file, devices)
self._datasets = {}
self._worker = base.SessionBackend(self._model)
def forward(self, input_tensor: numpy.ndarray) -> Future:
res = self._worker.forward(input_tensor)
return res
def create_dataset(self, mean, stddev):
id_ = uuid.uuid4().hex
self._datasets[id_] = {"mean": mean, "stddev": stddev}
return id_
def get_model_info(self) -> ModelInfo:
return ModelInfo(
self._model.name,
self._model.input_axes,
self._model.output_axes,
valid_shapes=[self._model.input_shape],
halo=self._model.halo,
scale=self._model.scale,
offset=self._model.offset,
)
def shutdown(self) -> Shutdown:
self._worker.shutdown()
return Shutdown()
def _run_model_session_process(
conn: Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None
):
try:
# from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
except ModuleNotFoundError:
pass # probably running on windows
if log_queue:
log.configure(log_queue)
session_proc = ModelSessionProcess(model_zip, devices)
srv = MPServer(session_proc, conn)
srv.listen()
def start_model_session_process(
model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None
) -> Tuple[_mp.Process, IRPCModelSession]:
client_conn, server_conn = _mp.Pipe()
proc = _mp.Process(
target=_run_model_session_process,
name="ModelSessionProcess",
kwargs={"conn": server_conn, "devices": devices, "log_queue": log_queue, "model_zip": model_zip},
)
proc.start()
return proc, _mp_rpc.create_client(IRPCModelSession, client_conn)
| 2.171875 | 2 |
openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py | dangerstudios/OpenPype | 0 | 2491 | from openpype.modules.ftrack.lib import BaseEvent
from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY
from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import (
SyncToAvalonEvent
)
class DelAvalonIdFromNew(BaseEvent):
'''
This event removes AvalonId from custom attributes of new entities
Result:
- 'Copy->Pasted' entities won't have same AvalonID as source entity
Priority of this event must be less than SyncToAvalon event
'''
priority = SyncToAvalonEvent.priority - 1
ignore_me = True
def launch(self, session, event):
created = []
entities = event['data']['entities']
for entity in entities:
try:
entity_id = entity['entityId']
if entity.get('action', None) == 'add':
id_dict = entity['changes']['id']
if id_dict['new'] is not None and id_dict['old'] is None:
created.append(id_dict['new'])
elif (
entity.get('action', None) == 'update' and
CUST_ATTR_ID_KEY in entity['keys'] and
entity_id in created
):
ftrack_entity = session.get(
self._get_entity_type(entity),
entity_id
)
cust_attrs = ftrack_entity["custom_attributes"]
if cust_attrs[CUST_ATTR_ID_KEY]:
cust_attrs[CUST_ATTR_ID_KEY] = ""
session.commit()
except Exception:
session.rollback()
continue
def register(session):
'''Register plugin. Called when used as an plugin.'''
DelAvalonIdFromNew(session).register()
| 1.679688 | 2 |
tests/workflow/test_workflow_ingest_accepted_submission.py | elifesciences/elife-bot | 17 | 2492 | import unittest
import tests.settings_mock as settings_mock
from tests.activity.classes_mock import FakeLogger
from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission
class TestWorkflowIngestAcceptedSubmission(unittest.TestCase):
def setUp(self):
self.workflow = workflow_IngestAcceptedSubmission(
settings_mock, FakeLogger(), None, None, None, None
)
def test_init(self):
self.assertEqual(self.workflow.name, "IngestAcceptedSubmission")
| 2.59375 | 3 |
go/token/views.py | lynnUg/vumi-go | 0 | 2493 | from urllib import urlencode
import urlparse
from django.shortcuts import Http404, redirect
from django.contrib.auth.views import logout
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from vumi.utils import load_class_by_string
from go.base.utils import vumi_api
def token(request, token):
# We only need the redis manager here, but it's saner to get a whole
# vumi_api and not worry about all the setup magic.
api = vumi_api()
token_data = api.token_manager.get(token)
if not token_data:
raise Http404
user_id = int(token_data['user_id'])
redirect_to = token_data['redirect_to']
system_token = token_data['system_token']
# If we're authorized and we're the same user_id then redirect to
# where we need to be
if not user_id or request.user.id == user_id:
path, _, qs = redirect_to.partition('?')
params = urlparse.parse_qs(qs)
# since the token can be custom we prepend the size of the user_token
# to the token being forwarded so the view handling the `redirect_to`
# can lookup the token and verify the system token.
params.update({'token': '%s-%s%s' % (len(token), token, system_token)})
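        # e.g. (illustrative values): a token 'abc123' combined with system
        # token 'sys456' is forwarded as '6-abc123sys456'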
return redirect('%s?%s' % (path, urlencode(params)))
# If we got here then we need authentication and the user's either not
# logged in or is logged in with a wrong account.
if request.user.is_authenticated():
logout(request)
messages.info(request, 'Wrong account for this token.')
return redirect('%s?%s' % (reverse('auth_login'), urlencode({
'next': reverse('token', kwargs={'token': token}),
})))
@login_required
def token_task(request):
api = request.user_api.api
token = request.GET.get('token')
token_data = api.token_manager.verify_get(token)
if not token_data:
raise Http404
params = token_data['extra_params']
callback_name = params['callback_name']
callback_args = params['callback_args']
callback_kwargs = params['callback_kwargs']
return_to = params['return_to']
message = params['message']
message_level = params['message_level']
callback = load_class_by_string(callback_name)
callback(*callback_args, **callback_kwargs)
messages.add_message(request, message_level, message)
return redirect(return_to)
| 2.125 | 2 |
typogrify/templatetags/typogrify_tags.py | tylerbutler/typogrify | 0 | 2494 | from typogrify.filters import amp, caps, initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError
from functools import wraps
from django.conf import settings
from django import template
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
register = template.Library()
def make_safe(f):
"""
A function wrapper to make typogrify play nice with django's
unicode support.
"""
@wraps(f)
def wrapper(text):
text = force_unicode(text)
f.is_safe = True
out = text
try:
out = f(text)
except TypogrifyError, e:
if settings.DEBUG:
raise e
return text
return mark_safe(out)
wrapper.is_safe = True
return wrapper
register.filter('amp', make_safe(amp))
register.filter('caps', make_safe(caps))
register.filter('initial_quotes', make_safe(initial_quotes))
register.filter('smartypants', make_safe(smartypants))
register.filter('titlecase', make_safe(titlecase))
register.filter('typogrify', make_safe(typogrify))
register.filter('widont', make_safe(widont))
| 2.265625 | 2 |
bvbabel/vmr.py | carbrock/bvbabel | 7 | 2495 | """Read, write, create Brainvoyager VMR file format."""
import struct
import numpy as np
from bvbabel.utils import (read_variable_length_string,
write_variable_length_string)
# =============================================================================
def read_vmr(filename):
"""Read Brainvoyager VMR file.
Parameters
----------
filename : string
Path to file.
Returns
-------
header : dictionary
Pre-data and post-data headers.
data : 3D numpy.array
Image data.
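    Examples
    --------
    A minimal sketch (the filename is a placeholder)::
        header, data = read_vmr("subject1.vmr")
        print(header["DimX"], data.shape)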
"""
header = dict()
with open(filename, 'rb') as f:
# ---------------------------------------------------------------------
# VMR Pre-Data Header
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): VMR files contain anatomical 3D data sets,
# typically containing the whole brain (head) of subjects. The
# intensity values are stored as a series of bytes. See the V16 format
# for a version storing each intensity value with two bytes (short
# integers). The VMR format contains a small header followed by the
# actual data followed by a second, more extensive, header. The current
# version of VMR files is "4", which is only slightly different from
# version 3 (as indicated below). Version 3 added offset values to
# format 2 in order to represent large data sets efficiently, e.g. in
# the context of advanced segmentation processing. Compared to the
# original file version "1", file versions 2 and higher contain
# additional header information after the actual data ("post-data
# header"). This allows to read VMR data sets with minimal header
# checking if the extended information is not needed. The information
# in the post-data header contains position information (if available)
# and stores a series of spatial transformations, which might have been
# performed to the original data set ("history record"). The
# post-header data can be probably ignored for custom routines, but is
# important in BrainVoyager QX for spatial transformation and
# coregistration routines as well as for proper visualization.
# Expected binary data: unsigned short int (2 bytes)
data, = struct.unpack('<H', f.read(2))
header["File version"] = data
data, = struct.unpack('<H', f.read(2))
header["DimX"] = data
data, = struct.unpack('<H', f.read(2))
header["DimY"] = data
data, = struct.unpack('<H', f.read(2))
header["DimZ"] = data
# ---------------------------------------------------------------------
# VMR Data
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): Each data element (intensity value) is
# represented in 1 byte. The data is organized in three loops:
# DimZ
# DimY
# DimX
#
# The axes terminology follows the internal BrainVoyager (BV) format.
# The mapping to Talairach axes is as follows:
# BV (X front -> back) [axis 2 after np.reshape] = Y in Tal space
# BV (Y top -> bottom) [axis 1 after np.reshape] = Z in Tal space
# BV (Z left -> right) [axis 0 after np.reshape] = X in Tal space
# Expected binary data: unsigned char (1 byte)
data_img = np.zeros((header["DimZ"] * header["DimY"] * header["DimX"]),
dtype="<B")
for i in range(data_img.size):
data_img[i], = struct.unpack('<B', f.read(1))
data_img = np.reshape(
data_img, (header["DimZ"], header["DimY"], header["DimX"]))
data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal
data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes
# ---------------------------------------------------------------------
# VMR Post-Data Header
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): The first four entries of the post-data
# header are new since file version "3" and contain offset values for
# each dimension as well as a value indicating the size of a cube with
# iso-dimensions to which the data set will be internally "expanded"
# for certain operations. The axes labels are in terms of
# BrainVoyager's internal format. These four entries are followed by
# scan position information from the original file headers, e.g. from
# DICOM files. The coordinate axes labels in these entries are not in
# terms of BrainVoyager's internal conventions but follow the DICOM
        # standard. Then optionally follows a section listing spatial
        # transformations which may have been performed to create the
        # current VMR (e.g. ACPC transformation). Finally, additional
        # information further describes the data set, including the assumed
# left-right convention, the reference space (e.g. Talairach after
# normalization) and voxel resolution.
if header["File version"] >= 3:
# NOTE(Developer Guide 2.6): These four entries have been added in
# file version "3" with BrainVoyager QX 1.7. All other entries are
# identical to file version "2".
# Expected binary data: short int (2 bytes)
data, = struct.unpack('<h', f.read(2))
header["OffsetX"] = data
data, = struct.unpack('<h', f.read(2))
header["OffsetY"] = data
data, = struct.unpack('<h', f.read(2))
header["OffsetZ"] = data
data, = struct.unpack('<h', f.read(2))
header["FramingCubeDim"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PosInfosVerified"] = data
data, = struct.unpack('<i', f.read(4))
header["CoordinateSystem"] = data
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterX"] = data # First slice center X coordinate
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterY"] = data # First slice center Y coordinate
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterZ"] = data # First slice center Z coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterX"] = data # Last slice center X coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterY"] = data # Last slice center Y coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterZ"] = data # Last slice center Z coordinate
data, = struct.unpack('<f', f.read(4))
header["RowDirX"] = data # Slice row direction vector X component
data, = struct.unpack('<f', f.read(4))
header["RowDirY"] = data # Slice row direction vector Y component
data, = struct.unpack('<f', f.read(4))
header["RowDirZ"] = data # Slice row direction vector Z component
data, = struct.unpack('<f', f.read(4))
header["ColDirX"] = data # Slice column direction vector X component
data, = struct.unpack('<f', f.read(4))
header["ColDirY"] = data # Slice column direction vector Y component
data, = struct.unpack('<f', f.read(4))
header["ColDirZ"] = data # Slice column direction vector Z component
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["NRows"] = data # Nr of rows of slice image matrix
data, = struct.unpack('<i', f.read(4))
header["NCols"] = data # Nr of columns of slice image matrix
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["FoVRows"] = data # Field of view extent in row direction [mm]
data, = struct.unpack('<f', f.read(4))
header["FoVCols"] = data # Field of view extent in column dir. [mm]
data, = struct.unpack('<f', f.read(4))
header["SliceThickness"] = data # Slice thickness [mm]
data, = struct.unpack('<f', f.read(4))
header["GapThickness"] = data # Gap thickness [mm]
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["NrOfPastSpatialTransformations"] = data
if header["NrOfPastSpatialTransformations"] != 0:
# NOTE(Developer Guide 2.6): For each past transformation, the
# information specified in the following table is stored. The
# "type of transformation" is a value determining how many
# subsequent values define the transformation:
# "1": Rigid body+scale (3 translation, 3 rotation, 3 scale)
# "2": Affine transformation (16 values, 4x4 matrix)
# "4": Talairach transformation
# "5": Un-Talairach transformation (1 - 5 -> BV axes)
header["PastTransformation"] = []
for i in range(header["NrOfPastSpatialTransformations"]):
header["PastTransformation"].append(dict())
# Expected binary data: variable-length string
data = read_variable_length_string(f)
header["PastTransformation"][i]["Name"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PastTransformation"][i]["Type"] = data
# Expected binary data: variable-length string
data = read_variable_length_string(f)
header["PastTransformation"][i]["SourceFileName"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PastTransformation"][i]["NrOfValues"] = data
# Store transformation values as a list
trans_values = []
for j in range(header["PastTransformation"][i]["NrOfValues"]):
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
trans_values.append(data)
header["PastTransformation"][i]["Values"] = trans_values
# Expected binary data: char (1 byte)
data, = struct.unpack('<B', f.read(1))
header["LeftRightConvention"] = data # modified in v4
data, = struct.unpack('<B', f.read(1))
header["ReferenceSpaceVMR"] = data # new in v4
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeX"] = data # Voxel resolution along X axis
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeY"] = data # Voxel resolution along Y axis
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeZ"] = data # Voxel resolution along Z axis
# Expected binary data: char (1 byte)
data, = struct.unpack('<B', f.read(1))
header["VoxelResolutionVerified"] = data
data, = struct.unpack('<B', f.read(1))
header["VoxelResolutionInTALmm"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MinValue"] = data # 16-bit data min intensity
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MeanValue"] = data # 16-bit data mean intensity
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MaxValue"] = data # 16-bit data max intensity
return header, data_img
# =============================================================================
def write_vmr(filename, header, data_img):
"""Protocol to write Brainvoyager VMR file.
Parameters
----------
filename : string
Output filename.
header : dictionary
Header of VMR file.
data_img : numpy.array, 3D
Image.
"""
with open(filename, 'wb') as f:
# ---------------------------------------------------------------------
# VMR Pre-Data Header
# ---------------------------------------------------------------------
# Expected binary data: unsigned short int (2 bytes)
data = header["File version"]
f.write(struct.pack('<H', data))
data = header["DimX"]
f.write(struct.pack('<H', data))
data = header["DimY"]
f.write(struct.pack('<H', data))
data = header["DimZ"]
f.write(struct.pack('<H', data))
# ---------------------------------------------------------------------
# VMR Data
# ---------------------------------------------------------------------
# Convert axes from Nifti standard back to BV standard
data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes
data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal
# Expected binary data: unsigned char (1 byte)
data_img = data_img.flatten()
for i in range(data_img.size):
f.write(struct.pack('<B', data_img[i]))
# ---------------------------------------------------------------------
# VMR Post-Data Header
# ---------------------------------------------------------------------
if header["File version"] >= 3:
# Expected binary data: short int (2 bytes)
data = header["OffsetX"]
f.write(struct.pack('<h', data))
data = header["OffsetY"]
f.write(struct.pack('<h', data))
data = header["OffsetZ"]
f.write(struct.pack('<h', data))
data = header["FramingCubeDim"]
f.write(struct.pack('<h', data))
# Expected binary data: int (4 bytes)
data = header["PosInfosVerified"]
f.write(struct.pack('<i', data))
data = header["CoordinateSystem"]
f.write(struct.pack('<i', data))
# Expected binary data: float (4 bytes)
data = header["Slice1CenterX"]
f.write(struct.pack('<f', data))
data = header["Slice1CenterY"]
f.write(struct.pack('<f', data))
data = header["Slice1CenterZ"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterX"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterY"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterZ"]
f.write(struct.pack('<f', data))
data = header["RowDirX"]
f.write(struct.pack('<f', data))
data = header["RowDirY"]
f.write(struct.pack('<f', data))
data = header["RowDirZ"]
f.write(struct.pack('<f', data))
data = header["ColDirX"]
f.write(struct.pack('<f', data))
data = header["ColDirY"]
f.write(struct.pack('<f', data))
data = header["ColDirZ"]
f.write(struct.pack('<f', data))
# Expected binary data: int (4 bytes)
data = header["NRows"]
f.write(struct.pack('<i', data))
data = header["NCols"]
f.write(struct.pack('<i', data))
# Expected binary data: float (4 bytes)
data = header["FoVRows"]
f.write(struct.pack('<f', data))
data = header["FoVCols"]
f.write(struct.pack('<f', data))
data = header["SliceThickness"]
f.write(struct.pack('<f', data))
data = header["GapThickness"]
f.write(struct.pack('<f', data))
# Expected binary data: int (4 bytes)
data = header["NrOfPastSpatialTransformations"]
f.write(struct.pack('<i', data))
if header["NrOfPastSpatialTransformations"] != 0:
for i in range(header["NrOfPastSpatialTransformations"]):
# Expected binary data: variable-length string
data = header["PastTransformation"][i]["Name"]
write_variable_length_string(f, data)
# Expected binary data: int (4 bytes)
data = header["PastTransformation"][i]["Type"]
f.write(struct.pack('<i', data))
# Expected binary data: variable-length string
data = header["PastTransformation"][i]["SourceFileName"]
write_variable_length_string(f, data)
# Expected binary data: int (4 bytes)
data = header["PastTransformation"][i]["NrOfValues"]
f.write(struct.pack('<i', data))
# Transformation values are stored as a list
trans_values = header["PastTransformation"][i]["Values"]
for j in range(header["PastTransformation"][i]["NrOfValues"]):
# Expected binary data: float (4 bytes)
f.write(struct.pack('<f', trans_values[j]))
# Expected binary data: char (1 byte)
data = header["LeftRightConvention"]
f.write(struct.pack('<B', data))
data = header["ReferenceSpaceVMR"]
f.write(struct.pack('<B', data))
# Expected binary data: float (4 bytes)
data = header["VoxelSizeX"]
f.write(struct.pack('<f', data))
data = header["VoxelSizeY"]
f.write(struct.pack('<f', data))
data = header["VoxelSizeZ"]
f.write(struct.pack('<f', data))
# Expected binary data: char (1 byte)
data = header["VoxelResolutionVerified"]
f.write(struct.pack('<B', data))
data = header["VoxelResolutionInTALmm"]
f.write(struct.pack('<B', data))
# Expected binary data: int (4 bytes)
data = header["VMROrigV16MinValue"]
f.write(struct.pack('<i', data))
data = header["VMROrigV16MeanValue"]
f.write(struct.pack('<i', data))
data = header["VMROrigV16MaxValue"]
f.write(struct.pack('<i', data))
print("VMR saved.")
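# Hedged usage sketch (not part of the original module): the paths below are
# placeholders, and the reader defined above is assumed to be exposed as
# `read_vmr`; a simple round trip reads a VMR file and writes an identical copy.
# if __name__ == "__main__":
#     header, data_img = read_vmr("/tmp/example.vmr")
#     write_vmr("/tmp/example_copy.vmr", header, data_img)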
| 3.078125 | 3 |
example/image-classification/test_score.py | Vikas-kum/incubator-mxnet | 399 | 2496 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
test pretrained models
"""
from __future__ import print_function
import mxnet as mx
from common import find_mxnet, modelzoo
from score import score
VAL_DATA='data/val-5k-256.rec'
def download_data():
return mx.test_utils.download(
'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA)
def test_imagenet1k_resnet(**kwargs):
models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152']
accs = [.77, .78]
for (m, g) in zip(models, accs):
acc = mx.metric.create('acc')
(speed,) = score(model=m, data_val=VAL_DATA,
rgb_mean='0,0,0', metrics=acc, **kwargs)
r = acc.get()[1]
print('Tested %s, acc = %f, speed = %f img/sec' % (m, r, speed))
assert r > g and r < g + .1
def test_imagenet1k_inception_bn(**kwargs):
acc = mx.metric.create('acc')
m = 'imagenet1k-inception-bn'
g = 0.75
(speed,) = score(model=m,
data_val=VAL_DATA,
rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs)
r = acc.get()[1]
print('Tested %s acc = %f, speed = %f img/sec' % (m, r, speed))
assert r > g and r < g + .1
if __name__ == '__main__':
gpus = mx.test_utils.list_gpus()
assert len(gpus) > 0
batch_size = 16 * len(gpus)
gpus = ','.join([str(i) for i in gpus])
kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500}
download_data()
test_imagenet1k_resnet(**kwargs)
test_imagenet1k_inception_bn(**kwargs)
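# Hedged usage note (not in the original script): assuming at least one GPU is
# visible (the assert above enforces this), the checks are typically run directly:
#   python example/image-classification/test_score.py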
| 1.867188 | 2 |
verticapy/vcolumn.py | vertica/vertica_ml_python | 7 | 2497 | # (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage of Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to do all of the above. The idea is simple: instead of moving
# data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, re, decimal, warnings, datetime
from collections.abc import Iterable
from typing import Union
# VerticaPy Modules
import verticapy
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
##
#
# __ __ ______ ______ __ __ __ __ __ __ __
# /\ \ / / /\ ___\ /\ __ \ /\ \ /\ \/\ \ /\ "-./ \ /\ "-.\ \
# \ \ \'/ \ \ \____ \ \ \/\ \ \ \ \____ \ \ \_\ \ \ \ \-./\ \ \ \ \-. \
# \ \__| \ \_____\ \ \_____\ \ \_____\ \ \_____\ \ \_\ \ \_\ \ \_\\"\_\
# \/_/ \/_____/ \/_____/ \/_____/ \/_____/ \/_/ \/_/ \/_/ \/_/
#
#
# ---#
class vColumn(str_sql):
"""
---------------------------------------------------------------------------
Python object that stores all user transformations. If the vDataFrame
represents the entire relation, a vColumn can be seen as one column of that
relation. vColumns simplify several processes with their abstractions.
Parameters
----------
alias: str
vColumn alias.
transformations: list, optional
List of the different transformations. Each transformation must be similar
to the following: (function, type, category)
parent: vDataFrame, optional
Parent of the vColumn. One vDataFrame can have multiple child vColumns
whereas one vColumn can only have one parent.
catalog: dict, optional
Catalog where each key corresponds to an aggregation. vColumns will memorize
the already computed aggregations to gain in performance. The catalog will
be updated when the parent vDataFrame is modified.
Attributes
----------
alias, str : vColumn alias.
catalog, dict : Catalog of pre-computed aggregations.
parent, vDataFrame : Parent of the vColumn.
transformations, str : List of the different transformations.
"""
#
# Special Methods
#
# ---#
def __init__(
self, alias: str, transformations: list = [], parent=None, catalog: dict = {}
):
self.parent, self.alias, self.transformations = (
parent,
alias,
[elem for elem in transformations],
)
self.catalog = {
"cov": {},
"pearson": {},
"spearman": {},
"spearmand": {},
"kendall": {},
"cramer": {},
"biserial": {},
"regr_avgx": {},
"regr_avgy": {},
"regr_count": {},
"regr_intercept": {},
"regr_r2": {},
"regr_slope": {},
"regr_sxx": {},
"regr_sxy": {},
"regr_syy": {},
}
for elem in catalog:
self.catalog[elem] = catalog[elem]
# ---#
def __getitem__(self, index):
if isinstance(index, slice):
assert index.step in (1, None), ValueError(
"vColumn doesn't allow slicing having steps different than 1."
)
index_stop = index.stop
index_start = index.start
if not (isinstance(index_start, int)):
index_start = 0
if index_start < 0:
index_start += self.parent.shape()[0]
if isinstance(index_stop, int):
if index_stop < 0:
index_stop += self.parent.shape()[0]
limit = index_stop - index_start
if limit <= 0:
limit = 0
limit = " LIMIT {}".format(limit)
else:
limit = ""
query = "(SELECT {} FROM {}{} OFFSET {}{}) VERTICAPY_SUBTABLE".format(
self.alias,
self.parent.__genSQL__(),
self.parent.__get_last_order_by__(),
index_start,
limit,
)
return vDataFrameSQL(query)
elif isinstance(index, int):
cast = "::float" if self.category() == "float" else ""
if index < 0:
index += self.parent.shape()[0]
query = "SELECT {}{} FROM {}{} OFFSET {} LIMIT 1".format(
self.alias,
cast,
self.parent.__genSQL__(),
self.parent.__get_last_order_by__(),
index,
)
return executeSQL(
query=query,
title="Getting the vColumn element.",
method="fetchfirstelem",
)
else:
return getattr(self, index)
# ---#
def __len__(self):
return int(self.count())
# ---#
def __nonzero__(self):
return self.count() > 0
# ---#
def __repr__(self):
return self.head(limit=verticapy.options["max_rows"]).__repr__()
# ---#
def _repr_html_(self):
return self.head(limit=verticapy.options["max_rows"])._repr_html_()
# ---#
def __setattr__(self, attr, val):
self.__dict__[attr] = val
#
# Methods
#
# ---#
def aad(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'aad' (Average Absolute Deviation).
Returns
-------
float
aad
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["aad"]).values[self.alias][0]
# ---#
def abs(self):
"""
---------------------------------------------------------------------------
Applies the absolute value function to the input vColumn.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
return self.apply(func="ABS({})")
# ---#
def add(self, x: float):
"""
---------------------------------------------------------------------------
Adds the input element to the vColumn.
Parameters
----------
x: float
If the vColumn type is date like (date, datetime ...), the parameter 'x'
will represent the number of seconds, otherwise it will represent a number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("x", x, [int, float])])
if self.isdate():
return self.apply(func="TIMESTAMPADD(SECOND, {}, {})".format(x, "{}"))
else:
return self.apply(func="{} + ({})".format("{}", x))
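# Hedged usage sketch (not in the original source); `vdf` is assumed to be an
# existing vDataFrame:
#   vdf["price"].add(10)         # numerical column: price -> price + 10
#   vdf["order_date"].add(3600)  # date-like column: shifts the value by 3600 seconds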
# ---#
def add_copy(self, name: str):
"""
---------------------------------------------------------------------------
Adds a copy vColumn to the parent vDataFrame.
Parameters
----------
name: str
Name of the copy.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.eval : Evaluates a customized expression.
"""
check_types([("name", name, [str])])
name = quote_ident(name.replace('"', "_"))
assert name.replace('"', ""), EmptyParameter(
"The parameter 'name' must not be empty"
)
assert not (self.parent.is_colname_in(name)), NameError(
f"A vColumn has already the alias {name}.\nBy changing the parameter 'name', you'll be able to solve this issue."
)
new_vColumn = vColumn(
name,
parent=self.parent,
transformations=[item for item in self.transformations],
catalog=self.catalog,
)
setattr(self.parent, name, new_vColumn)
setattr(self.parent, name[1:-1], new_vColumn)
self.parent._VERTICAPY_VARIABLES_["columns"] += [name]
self.parent.__add_to_history__(
"[Add Copy]: A copy of the vColumn {} named {} was added to the vDataFrame.".format(
self.alias, name
)
)
return self.parent
# ---#
def aggregate(self, func: list):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using the input functions.
Parameters
----------
func: list
List of the different aggregation.
aad : average absolute deviation
approx_unique : approximative cardinality
count : number of non-missing elements
cvar : conditional value at risk
dtype : vColumn type
iqr : interquartile range
kurtosis : kurtosis
jb : Jarque-Bera index
mad : median absolute deviation
max : maximum
mean : average
median : median
min : minimum
mode : most frequent element
percent : percent of non-missing elements
q% : q quantile (ex: 50% for the median)
prod : product
range : difference between the max and the min
sem : standard error of the mean
skewness : skewness
sum : sum
std : standard deviation
topk : kth most frequent element (ex: top1 for the mode)
topk_percent : kth most frequent element density
unique : cardinality (count distinct)
var : variance
Other aggregations may work if they are supported by
the DB version you are using.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame.analytic : Adds a new vColumn to the vDataFrame by using an advanced
analytical function on a specific vColumn.
"""
return self.parent.aggregate(func=func, columns=[self.alias]).transpose()
agg = aggregate
# ---#
def apply(self, func: str, copy_name: str = ""):
"""
---------------------------------------------------------------------------
Applies a function to the vColumn.
Parameters
----------
func: str,
Function in pure SQL used to transform the vColumn.
The function variable must be composed of two flower brackets {}. For
example to apply the function: x -> x^2 + 2 use "POWER({}, 2) + 2".
copy_name: str, optional
If not empty, a copy will be created using the input Name.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.apply : Applies functions to the input vColumns.
vDataFrame.applymap : Applies a function to all the vColumns.
vDataFrame.eval : Evaluates a customized expression.
"""
if isinstance(func, str_sql):
func = str(func)
check_types([("func", func, [str]), ("copy_name", copy_name, [str])])
try:
try:
ctype = get_data_types(
"SELECT {} AS apply_test_feature FROM {} WHERE {} IS NOT NULL LIMIT 0".format(
func.replace("{}", self.alias),
self.parent.__genSQL__(),
self.alias,
),
"apply_test_feature",
)
except:
ctype = get_data_types(
"SELECT {} AS apply_test_feature FROM {} WHERE {} IS NOT NULL LIMIT 0".format(
func.replace("{}", self.alias),
self.parent.__genSQL__(),
self.alias,
),
"apply_test_feature",
)
category = get_category_from_vertica_type(ctype=ctype)
all_cols, max_floor = self.parent.get_columns(), 0
for column in all_cols:
try:
if (quote_ident(column) in func) or (
re.search(
re.compile("\\b{}\\b".format(column.replace('"', ""))), func
)
):
max_floor = max(
len(self.parent[column].transformations), max_floor
)
except:
pass
max_floor -= len(self.transformations)
if copy_name:
self.add_copy(name=copy_name)
for k in range(max_floor):
self.parent[copy_name].transformations += [
("{}", self.ctype(), self.category())
]
self.parent[copy_name].transformations += [(func, ctype, category)]
self.parent[copy_name].catalog = self.catalog
self.parent.__add_to_history__(
"[Apply]: The vColumn '{}' was transformed with the func 'x -> {}'.".format(
copy_name.replace('"', ""), func.replace("{}", "x"),
)
)
else:
for k in range(max_floor):
self.transformations += [("{}", self.ctype(), self.category())]
self.transformations += [(func, ctype, category)]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
self.parent.__add_to_history__(
"[Apply]: The vColumn '{}' was transformed with the func 'x -> {}'.".format(
self.alias.replace('"', ""), func.replace("{}", "x"),
)
)
return self.parent
except Exception as e:
raise QueryError(
"{}\nError when applying the func 'x -> {}' to '{}'".format(
e, func.replace("{}", "x"), self.alias.replace('"', "")
)
)
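# Hedged usage sketch (not in the original source); `vdf` is assumed to be an
# existing vDataFrame. The "{}" placeholder stands for the column itself:
#   vdf["age"].apply("POWER({}, 2) + 2")              # transforms the column in place
#   vdf["age"].apply("LOG({})", copy_name="age_log")  # keeps the original, adds a transformed copy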
# ---#
def apply_fun(self, func: str, x: float = 2):
"""
---------------------------------------------------------------------------
Applies a default function to the vColumn.
Parameters
----------
func: str
Function to use to transform the vColumn.
abs : absolute value
acos : trigonometric inverse cosine
asin : trigonometric inverse sine
atan : trigonometric inverse tangent
cbrt : cube root
ceil : value up to the next whole number
cos : trigonometric cosine
cosh : hyperbolic cosine
cot : trigonometric cotangent
exp : exponential function
floor : value down to the next whole number
ln : natural logarithm
log : logarithm
log10 : base 10 logarithm
mod : remainder of a division operation
pow : number raised to the power of another number
round : rounds a value to a specified number of decimal places
sign : arithmetic sign
sin : trigonometric sine
sinh : hyperbolic sine
sqrt : arithmetic square root
tan : trigonometric tangent
tanh : hyperbolic tangent
x: int/float, optional
If the function has two arguments (example, power or mod), 'x' represents
the second argument.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the vColumn.
"""
check_types(
[
(
"func",
func,
[
"abs",
"acos",
"asin",
"atan",
"cbrt",
"ceil",
"cos",
"cosh",
"cot",
"exp",
"floor",
"ln",
"log",
"log10",
"mod",
"pow",
"round",
"sign",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
],
),
("x", x, [int, float]),
]
)
if func not in ("log", "mod", "pow", "round"):
expr = "{}({})".format(func.upper(), "{}")
else:
expr = "{}({}, {})".format(func.upper(), "{}", x)
return self.apply(func=expr)
# ---#
def astype(self, dtype: str):
"""
---------------------------------------------------------------------------
Converts the vColumn to the input type.
Parameters
----------
dtype: str
New type.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.astype : Converts the vColumns to the input type.
"""
check_types([("dtype", dtype, [str])])
try:
query = "SELECT {}::{} AS {} FROM {} WHERE {} IS NOT NULL LIMIT 20".format(
self.alias, dtype, self.alias, self.parent.__genSQL__(), self.alias
)
executeSQL(query, title="Testing the Type casting.")
self.transformations += [
(
"{}::{}".format("{}", dtype),
dtype,
get_category_from_vertica_type(ctype=dtype),
)
]
self.parent.__add_to_history__(
"[AsType]: The vColumn {} was converted to {}.".format(
self.alias, dtype
)
)
return self.parent
except Exception as e:
raise ConversionError(
"{}\nThe vColumn {} can not be converted to {}".format(
e, self.alias, dtype
)
)
# ---#
def avg(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'avg' (Average).
Returns
-------
float
average
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["avg"]).values[self.alias][0]
mean = avg
# ---#
def bar(
self,
method: str = "density",
of: str = "",
max_cardinality: int = 6,
nbins: int = 0,
h: float = 0,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the bar chart of the vColumn based on an aggregation.
Parameters
----------
method: str, optional
The method to use to aggregate the data.
count : Number of elements.
density : Percentage of the distribution.
mean : Average of the vColumn 'of'.
min : Minimum of the vColumn 'of'.
max : Maximum of the vColumn 'of'.
sum : Sum of the vColumn 'of'.
q% : q Quantile of the vColumn 'of' (ex: 50% to get the median).
It can also be a customized aggregation (ex: AVG(column1) + 5).
of: str, optional
The vColumn to use to compute the aggregation.
max_cardinality: int, optional
Maximum number of the vColumn distinct elements to be used as categorical
(No h will be picked or computed)
nbins: int, optional
Number of nbins. If empty, an optimized number of nbins will be computed.
h: float, optional
Interval width of the bar. If empty, an optimized h will be computed.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame[].hist : Draws the histogram of the vColumn based on an aggregation.
"""
check_types(
[
("method", method, [str]),
("of", of, [str]),
("max_cardinality", max_cardinality, [int, float]),
("nbins", nbins, [int, float]),
("h", h, [int, float]),
]
)
if of:
self.parent.are_namecols_in(of)
of = self.parent.format_colnames(of)
from verticapy.plot import bar
return bar(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds)
# ---#
def boxplot(
self,
by: str = "",
h: float = 0,
max_cardinality: int = 8,
cat_priority: list = [],
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the box plot of the vColumn.
Parameters
----------
by: str, optional
vColumn to use to partition the data.
h: float, optional
Interval width if the vColumn is numerical or of type date like. Optimized
h will be computed if the parameter is empty or invalid.
max_cardinality: int, optional
Maximum number of vColumn distinct elements to be used as categorical.
The less frequent elements will be gathered together to create a new
category : 'Others'.
cat_priority: list, optional
List of the different categories to consider when drawing the box plot.
The other categories will be filtered.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.boxplot : Draws the Box Plot of the input vColumns.
"""
if isinstance(cat_priority, str) or not (isinstance(cat_priority, Iterable)):
cat_priority = [cat_priority]
check_types(
[
("by", by, [str]),
("max_cardinality", max_cardinality, [int, float]),
("h", h, [int, float]),
("cat_priority", cat_priority, [list]),
]
)
if by:
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
from verticapy.plot import boxplot
return boxplot(self, by, h, max_cardinality, cat_priority, ax=ax, **style_kwds)
# ---#
def category(self):
"""
---------------------------------------------------------------------------
Returns the category of the vColumn. The category will be one of the following:
date / int / float / text / binary / spatial / uuid / undefined
Returns
-------
str
vColumn category.
See Also
--------
vDataFrame[].ctype : Returns the vColumn database type.
"""
return self.transformations[-1][2]
# ---#
def clip(self, lower=None, upper=None):
"""
---------------------------------------------------------------------------
Clips the vColumn by transforming the values lesser than the lower bound to
the lower bound itself and the values higher than the upper bound to the upper
bound itself.
Parameters
----------
lower: float, optional
Lower bound.
upper: float, optional
Upper bound.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].fill_outliers : Fills the vColumn outliers using the input method.
"""
check_types([("lower", lower, [float, int]), ("upper", upper, [float, int])])
assert (lower != None) or (upper != None), ParameterError(
"At least 'lower' or 'upper' must have a numerical value"
)
lower_when = (
"WHEN {} < {} THEN {} ".format("{}", lower, lower)
if (isinstance(lower, (float, int)))
else ""
)
upper_when = (
"WHEN {} > {} THEN {} ".format("{}", upper, upper)
if (isinstance(upper, (float, int)))
else ""
)
func = "(CASE {}{}ELSE {} END)".format(lower_when, upper_when, "{}")
self.apply(func=func)
return self.parent
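# Hedged usage sketch (not in the original source); `vdf` is assumed to be an
# existing vDataFrame:
#   vdf["temperature"].clip(lower=-10, upper=45)  # values outside [-10, 45] are capped to the bounds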
# ---#
def count(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'count' (Number of non-Missing elements).
Returns
-------
int
number of non-Missing elements.
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["count"]).values[self.alias][0]
# ---#
def cut(
self,
breaks: list,
labels: list = [],
include_lowest: bool = True,
right: bool = True,
):
"""
---------------------------------------------------------------------------
Discretizes the vColumn using the input list.
Parameters
----------
breaks: list
List of values used to cut the vColumn.
labels: list, optional
Labels used to name the new categories. If empty, names will be generated.
include_lowest: bool, optional
If set to True, the lowest element of the list will be included.
right: bool, optional
How the intervals should be closed. If set to True, the intervals will be
closed on the right.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types(
[
("breaks", breaks, [list]),
("labels", labels, [list]),
("include_lowest", include_lowest, [bool]),
("right", right, [bool]),
]
)
assert self.isnum() or self.isdate(), TypeError(
"cut only works on numerical / date-like vColumns."
)
assert len(breaks) >= 2, ParameterError(
"Length of parameter 'breaks' must be greater or equal to 2."
)
assert len(breaks) == len(labels) + 1 or not (labels), ParameterError(
"Length of parameter breaks must be equal to the length of parameter 'labels' + 1 or parameter 'labels' must be empty."
)
conditions, column = [], self.alias
for idx in range(len(breaks) - 1):
first_elem, second_elem = breaks[idx], breaks[idx + 1]
if right:
op1, op2, close_l, close_r = "<", "<=", "]", "]"
else:
op1, op2, close_l, close_r = "<=", "<", "[", "["
if idx == 0 and include_lowest:
op1, close_l = "<=", "["
elif idx == 0:
op1, close_l = "<", "]"
if labels:
label = labels[idx]
else:
label = f"{close_l}{first_elem};{second_elem}{close_r}"
conditions += [
f"'{first_elem}' {op1} {column} AND {column} {op2} '{second_elem}' THEN '{label}'"
]
expr = "CASE WHEN " + " WHEN ".join(conditions) + " END"
self.apply(func=expr)
return self.parent
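# Hedged usage sketch (not in the original source); `vdf` is assumed to be an
# existing vDataFrame. With the defaults (right=True, include_lowest=True):
#   vdf["age"].cut(breaks=[0, 18, 65, 120], labels=["child", "adult", "senior"])
#   # -> 'child' for 0 <= age <= 18, 'adult' for 18 < age <= 65, 'senior' for 65 < age <= 120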
# ---#
def ctype(self):
"""
---------------------------------------------------------------------------
Returns the vColumn DB type.
Returns
-------
str
vColumn DB type.
"""
return self.transformations[-1][1].lower()
dtype = ctype
# ---#
def date_part(self, field: str):
"""
---------------------------------------------------------------------------
Extracts a specific TS field from the vColumn (only if the vColumn type is
date like). The vColumn will be transformed.
Parameters
----------
field: str
The field to extract. It must be one of the following:
CENTURY / DAY / DECADE / DOQ / DOW / DOY / EPOCH / HOUR / ISODOW / ISOWEEK /
ISOYEAR / MICROSECONDS / MILLENNIUM / MILLISECONDS / MINUTE / MONTH / QUARTER /
SECOND / TIME ZONE / TIMEZONE_HOUR / TIMEZONE_MINUTE / WEEK / YEAR
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].slice : Slices the vColumn using a time series rule.
"""
return self.apply(func="DATE_PART('{}', {})".format(field, "{}"))
# ---#
def decode(self, *argv):
"""
---------------------------------------------------------------------------
Encodes the vColumn using a user-defined encoding.
Parameters
----------
argv: object
Any amount of expressions.
The expression generated will look like:
even: CASE ... WHEN vColumn = argv[2 * i] THEN argv[2 * i + 1] ... END
odd : CASE ... WHEN vColumn = argv[2 * i] THEN argv[2 * i + 1] ... ELSE argv[n] END
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.case_when : Creates a new feature by evaluating some conditions.
vDataFrame[].discretize : Discretizes the vColumn.
vDataFrame[].label_encode : Encodes the vColumn with Label Encoding.
vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.
vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response.
"""
import verticapy.stats as st
return self.apply(func=st.decode(str_sql("{}"), *argv))
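# Hedged usage sketch (not in the original source); `vdf` is assumed to be an
# existing vDataFrame. With an odd number of arguments, the last one is the default:
#   vdf["sex"].decode("M", "Male", "F", "Female", "Unknown")
#   # -> CASE WHEN sex = 'M' THEN 'Male' WHEN sex = 'F' THEN 'Female' ELSE 'Unknown' END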
# ---#
def density(
self,
by: str = "",
bandwidth: float = 1.0,
kernel: str = "gaussian",
nbins: int = 200,
xlim: tuple = None,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the vColumn Density Plot.
Parameters
----------
by: str, optional
vColumn to use to partition the data.
bandwidth: float, optional
The bandwidth of the kernel.
kernel: str, optional
The method used for the plot.
gaussian : Gaussian kernel.
logistic : Logistic kernel.
sigmoid : Sigmoid kernel.
silverman : Silverman kernel.
nbins: int, optional
Maximum number of points to use to evaluate the approximate density function.
Increasing this parameter will increase the precision but will also increase
the time of the learning and scoring phases.
xlim: tuple, optional
Set the x limits of the current axes.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame[].hist : Draws the histogram of the vColumn based on an aggregation.
"""
check_types(
[
("by", by, [str]),
("kernel", kernel, ["gaussian", "logistic", "sigmoid", "silverman"]),
("bandwidth", bandwidth, [int, float]),
("nbins", nbins, [float, int]),
]
)
if by:
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
from verticapy.plot import gen_colors
from matplotlib.lines import Line2D
colors = gen_colors()
if not xlim:
xmin = self.min()
xmax = self.max()
else:
xmin, xmax = xlim
custom_lines = []
columns = self.parent[by].distinct()
for idx, column in enumerate(columns):
param = {"color": colors[idx % len(colors)]}
ax = self.parent.search(
"{} = '{}'".format(self.parent[by].alias, column)
)[self.alias].density(
bandwidth=bandwidth,
kernel=kernel,
nbins=nbins,
xlim=(xmin, xmax),
ax=ax,
**updated_dict(param, style_kwds, idx),
)
custom_lines += [
Line2D(
[0],
[0],
color=updated_dict(param, style_kwds, idx)["color"],
lw=4,
),
]
ax.set_title("KernelDensity")
ax.legend(
custom_lines,
columns,
title=by,
loc="center left",
bbox_to_anchor=[1, 0.5],
)
ax.set_xlabel(self.alias)
return ax
kernel = kernel.lower()
from verticapy.learn.neighbors import KernelDensity
schema = verticapy.options["temp_schema"]
if not (schema):
schema = "public"
name = gen_tmp_name(schema=schema, name="kde")
if isinstance(xlim, (tuple, list)):
xlim_tmp = [xlim]
else:
xlim_tmp = []
model = KernelDensity(
name,
bandwidth=bandwidth,
kernel=kernel,
nbins=nbins,
xlim=xlim_tmp,
store=False,
)
try:
result = model.fit(self.parent.__genSQL__(), [self.alias]).plot(
ax=ax, **style_kwds
)
model.drop()
return result
except:
model.drop()
raise
# ---#
def describe(
self, method: str = "auto", max_cardinality: int = 6, numcol: str = ""
):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using multiple statistical aggregations:
min, max, median, unique... depending on the input method.
Parameters
----------
method: str, optional
The describe method.
auto : Sets the method to 'numerical' if the vColumn is numerical
, 'categorical' otherwise.
categorical : Uses only categorical aggregations during the computation.
cat_stats : Computes statistics of a numerical column for each vColumn
category. In this case, the parameter 'numcol' must be defined.
numerical : Uses popular numerical aggregations during the computation.
max_cardinality: int, optional
Cardinality threshold to use to determine if the vColumn will be considered
as categorical.
numcol: str, optional
Numerical vColumn to use when the parameter method is set to 'cat_stats'.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
check_types(
[
("method", method, ["auto", "numerical", "categorical", "cat_stats"]),
("max_cardinality", max_cardinality, [int, float]),
("numcol", numcol, [str]),
]
)
method = method.lower()
assert (method != "cat_stats") or (numcol), ParameterError(
"The parameter 'numcol' must be a vDataFrame column if the method is 'cat_stats'"
)
distinct_count, is_numeric, is_date = (
self.nunique(),
self.isnum(),
self.isdate(),
)
if (is_date) and not (method == "categorical"):
result = self.aggregate(["count", "min", "max"])
index = result.values["index"]
result = result.values[self.alias]
elif (method == "cat_stats") and (numcol != ""):
numcol = self.parent.format_colnames(numcol)
assert self.parent[numcol].category() in ("float", "int"), TypeError(
"The column 'numcol' must be numerical"
)
cast = "::int" if (self.parent[numcol].isbool()) else ""
query, cat = [], self.distinct()
if len(cat) == 1:
lp, rp = "(", ")"
else:
lp, rp = "", ""
for category in cat:
tmp_query = """SELECT
'{0}' AS 'index',
COUNT({1}) AS count,
100 * COUNT({1}) / {2} AS percent,
AVG({3}{4}) AS mean,
STDDEV({3}{4}) AS std,
MIN({3}{4}) AS min,
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.1) AS 'approx_10%',
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.25) AS 'approx_25%',
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.5) AS 'approx_50%',
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.75) AS 'approx_75%',
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.9) AS 'approx_90%',
MAX({3}{4}) AS max
FROM vdf_table""".format(
category, self.alias, self.parent.shape()[0], numcol, cast,
)
tmp_query += (
" WHERE {} IS NULL".format(self.alias)
if (category in ("None", None))
else " WHERE {} = '{}'".format(
bin_spatial_to_str(self.category(), self.alias), category,
)
)
query += [lp + tmp_query + rp]
query = "WITH vdf_table AS (SELECT * FROM {}) {}".format(
self.parent.__genSQL__(), " UNION ALL ".join(query)
)
title = "Describes the statics of {} partitioned by {}.".format(
numcol, self.alias
)
values = to_tablesample(query, title=title).values
elif (
((distinct_count < max_cardinality + 1) and (method != "numerical"))
or not (is_numeric)
or (method == "categorical")
):
query = """(SELECT
{0} || '',
COUNT(*)
FROM vdf_table
GROUP BY {0}
ORDER BY COUNT(*) DESC
LIMIT {1})""".format(
self.alias, max_cardinality
)
if distinct_count > max_cardinality:
query += (
"UNION ALL (SELECT 'Others', SUM(count) FROM (SELECT COUNT(*) AS count"
" FROM vdf_table WHERE {0} IS NOT NULL GROUP BY {0} ORDER BY COUNT(*)"
" DESC OFFSET {1}) VERTICAPY_SUBTABLE) ORDER BY count DESC"
).format(self.alias, max_cardinality + 1)
query = "WITH vdf_table AS (SELECT * FROM {}) {}".format(
self.parent.__genSQL__(), query
)
query_result = executeSQL(
query=query,
title="Computing the descriptive statistics of {}.".format(self.alias),
method="fetchall",
)
result = [distinct_count, self.count()] + [item[1] for item in query_result]
index = ["unique", "count"] + [item[0] for item in query_result]
else:
result = (
self.parent.describe(
method="numerical", columns=[self.alias], unique=False
)
.transpose()
.values[self.alias]
)
result = [distinct_count] + result
index = [
"unique",
"count",
"mean",
"std",
"min",
"approx_25%",
"approx_50%",
"approx_75%",
"max",
]
if method != "cat_stats":
values = {
"index": ["name", "dtype"] + index,
"value": [self.alias, self.ctype()] + result,
}
if ((is_date) and not (method == "categorical")) or (
method == "is_numeric"
):
self.parent.__update_catalog__({"index": index, self.alias: result})
for elem in values:
for i in range(len(values[elem])):
if isinstance(values[elem][i], decimal.Decimal):
values[elem][i] = float(values[elem][i])
return tablesample(values)
# ---#
def discretize(
self,
method: str = "auto",
h: float = 0,
nbins: int = -1,
k: int = 6,
new_category: str = "Others",
RFmodel_params: dict = {},
response: str = "",
return_enum_trans: bool = False,
):
"""
---------------------------------------------------------------------------
Discretizes the vColumn using the input method.
Parameters
----------
method: str, optional
The method to use to discretize the vColumn.
auto : Uses method 'same_width' for numerical vColumns, cast
the other types to varchar.
same_freq : Computes bins with the same number of elements.
same_width : Computes regular width bins.
smart : Uses the Random Forest on a response column to find the most
relevant interval to use for the discretization.
topk : Keeps the topk most frequent categories and merge the other
into one unique category.
h: float, optional
The interval size used to convert the vColumn. If this parameter
is equal to 0, an optimized interval will be computed.
nbins: int, optional
Number of bins used for the discretization (must be > 1)
k: int, optional
The integer k of the 'topk' method.
new_category: str, optional
The name of the merging category when using the 'topk' method.
RFmodel_params: dict, optional
Dictionary of the Random Forest model parameters used to compute the best splits
when 'method' is set to 'smart'. A RF Regressor will be trained if the response
is numerical (except ints and bools), a RF Classifier otherwise.
Example: Write {"n_estimators": 20, "max_depth": 10} to train a Random Forest with
20 trees and a maximum depth of 10.
response: str, optional
Response vColumn when method is set to 'smart'.
return_enum_trans: bool, optional
Returns the transformation instead of the vDataFrame parent and do not apply
it. This parameter is very useful for testing to be able to look at the final
transformation.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].decode : Encodes the vColumn with user defined Encoding.
vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.
vDataFrame[].label_encode : Encodes the vColumn with Label Encoding.
vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response.
"""
check_types(
[
("RFmodel_params", RFmodel_params, [dict]),
("return_enum_trans", return_enum_trans, [bool]),
("h", h, [int, float]),
("response", response, [str]),
("nbins", nbins, [int, float]),
(
"method",
method,
["auto", "smart", "same_width", "same_freq", "topk"],
),
("return_enum_trans", return_enum_trans, [bool]),
]
)
method = method.lower()
if self.isnum() and method == "smart":
schema = verticapy.options["temp_schema"]
if not (schema):
schema = "public"
tmp_view_name = gen_tmp_name(schema=schema, name="view")
tmp_model_name = gen_tmp_name(schema=schema, name="model")
assert nbins >= 2, ParameterError(
"Parameter 'nbins' must be greater or equals to 2 in case of discretization using the method 'smart'."
)
assert response, ParameterError(
"Parameter 'response' can not be empty in case of discretization using the method 'smart'."
)
self.parent.are_namecols_in(response)
response = self.parent.format_colnames(response)
drop(tmp_view_name, method="view")
self.parent.to_db(tmp_view_name)
from verticapy.learn.ensemble import (
RandomForestClassifier,
RandomForestRegressor,
)
drop(tmp_model_name, method="model")
if self.parent[response].category() == "float":
model = RandomForestRegressor(tmp_model_name)
else:
model = RandomForestClassifier(tmp_model_name)
model.set_params({"n_estimators": 20, "max_depth": 8, "nbins": 100})
model.set_params(RFmodel_params)
parameters = model.get_params()
try:
model.fit(tmp_view_name, [self.alias], response)
query = [
"(SELECT READ_TREE(USING PARAMETERS model_name = '{}', tree_id = {}, format = 'tabular'))".format(
tmp_model_name, i
)
for i in range(parameters["n_estimators"])
]
query = "SELECT split_value FROM (SELECT split_value, MAX(weighted_information_gain) FROM ({}) VERTICAPY_SUBTABLE WHERE split_value IS NOT NULL GROUP BY 1 ORDER BY 2 DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY split_value::float".format(
" UNION ALL ".join(query), nbins - 1
)
result = executeSQL(
query=query,
title="Computing the optimized histogram nbins using Random Forest.",
method="fetchall",
)
result = [elem[0] for elem in result]
except:
drop(tmp_view_name, method="view")
drop(tmp_model_name, method="model")
raise
drop(tmp_view_name, method="view")
drop(tmp_model_name, method="model")
result = [self.min()] + result + [self.max()]
elif method == "topk":
assert k >= 2, ParameterError(
"Parameter 'k' must be greater or equals to 2 in case of discretization using the method 'topk'"
)
distinct = self.topk(k).values["index"]
trans = (
"(CASE WHEN {} IN ({}) THEN {} || '' ELSE '{}' END)".format(
bin_spatial_to_str(self.category()),
", ".join(
[
"'{}'".format(str(elem).replace("'", "''"))
for elem in distinct
]
),
bin_spatial_to_str(self.category()),
new_category.replace("'", "''"),
),
"varchar",
"text",
)
elif self.isnum() and method == "same_freq":
assert nbins >= 2, ParameterError(
"Parameter 'nbins' must be greater or equals to 2 in case of discretization using the method 'same_freq'"
)
count = self.count()
nb = int(float(count / int(nbins)))
assert nb != 0, Exception(
"Not enough values to compute the Equal Frequency discretization"
)
total, query, nth_elems = nb, [], []
while total < int(float(count / int(nbins))) * int(nbins):
nth_elems += [str(total)]
total += nb
where = "WHERE _verticapy_row_nb_ IN ({})".format(
", ".join(["1"] + nth_elems + [str(count)])
)
query = "SELECT {} FROM (SELECT {}, ROW_NUMBER() OVER (ORDER BY {}) AS _verticapy_row_nb_ FROM {} WHERE {} IS NOT NULL) VERTICAPY_SUBTABLE {}".format(
self.alias,
self.alias,
self.alias,
self.parent.__genSQL__(),
self.alias,
where,
)
result = executeSQL(
query=query,
title="Computing the equal frequency histogram bins.",
method="fetchall",
)
result = [elem[0] for elem in result]
elif self.isnum() and method in ("same_width", "auto"):
if not (h) or h <= 0:
if nbins <= 0:
h = self.numh()
else:
h = (self.max() - self.min()) * 1.01 / nbins
if h > 0.01:
h = round(h, 2)
elif h > 0.0001:
h = round(h, 4)
elif h > 0.000001:
h = round(h, 6)
if self.category() == "int":
h = int(max(math.floor(h), 1))
floor_end = -1 if (self.category() == "int") else ""
if (h > 1) or (self.category() == "float"):
trans = (
"'[' || FLOOR({} / {}) * {} || ';' || (FLOOR({} / {}) * {} + {}{}) || ']'".format(
"{}", h, h, "{}", h, h, h, floor_end
),
"varchar",
"text",
)
else:
trans = ("FLOOR({}) || ''", "varchar", "text")
else:
trans = ("{} || ''", "varchar", "text")
if (self.isnum() and method == "same_freq") or (
self.isnum() and method == "smart"
):
n = len(result)
trans = "(CASE "
for i in range(1, n):
trans += "WHEN {} BETWEEN {} AND {} THEN '[{};{}]' ".format(
"{}", result[i - 1], result[i], result[i - 1], result[i]
)
trans += " ELSE NULL END)"
trans = (trans, "varchar", "text")
if return_enum_trans:
return trans
else:
self.transformations += [trans]
sauv = {}
for elem in self.catalog:
sauv[elem] = self.catalog[elem]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
try:
if "count" in sauv:
self.catalog["count"] = sauv["count"]
self.catalog["percent"] = (
100 * sauv["count"] / self.parent.shape()[0]
)
except:
pass
self.parent.__add_to_history__(
"[Discretize]: The vColumn {} was discretized.".format(self.alias)
)
return self.parent
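# Hedged usage sketches (not in the original source); `vdf` is assumed to be an
# existing vDataFrame:
#   vdf["age"].discretize(method="same_width", h=10)    # 10-year-wide bins
#   vdf["age"].discretize(method="same_freq", nbins=4)  # bins with (roughly) equal counts
#   vdf["city"].discretize(method="topk", k=5, new_category="Others")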
# ---#
def distinct(self, **kwargs):
"""
---------------------------------------------------------------------------
Returns the distinct categories of the vColumn.
Returns
-------
list
Distinct categories of the vColumn.
See Also
--------
vDataFrame.topk : Returns the vColumn most frequent elements.
"""
if "agg" not in kwargs:
query = "SELECT {} AS {} FROM {} WHERE {} IS NOT NULL GROUP BY {} ORDER BY {}".format(
bin_spatial_to_str(self.category(), self.alias),
self.alias,
self.parent.__genSQL__(),
self.alias,
self.alias,
self.alias,
)
else:
query = "SELECT {} FROM (SELECT {} AS {}, {} AS verticapy_agg FROM {} WHERE {} IS NOT NULL GROUP BY 1) x ORDER BY verticapy_agg DESC".format(
self.alias,
bin_spatial_to_str(self.category(), self.alias),
self.alias,
kwargs["agg"],
self.parent.__genSQL__(),
self.alias,
)
query_result = executeSQL(
query=query,
title="Computing the distinct categories of {}.".format(self.alias),
method="fetchall",
)
return [item for sublist in query_result for item in sublist]
# ---#
def div(self, x: float):
"""
---------------------------------------------------------------------------
Divides the vColumn by the input element.
Parameters
----------
x: float
Input number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("x", x, [int, float])])
assert x != 0, ValueError("Division by 0 is forbidden !")
return self.apply(func="{} / ({})".format("{}", x))
# ---#
def drop(self, add_history: bool = True):
"""
---------------------------------------------------------------------------
Drops the vColumn from the vDataFrame. Dropping a vColumn means simply
not selecting it in the final generated SQL code.
Note: Dropping a vColumn can make the vDataFrame "heavier" if it is used
to compute other vColumns.
Parameters
----------
add_history: bool, optional
If set to True, the information will be stored in the vDataFrame history.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.drop: Drops the input vColumns from the vDataFrame.
"""
check_types([("add_history", add_history, [bool])])
try:
parent = self.parent
force_columns = [
column for column in self.parent._VERTICAPY_VARIABLES_["columns"]
]
force_columns.remove(self.alias)
executeSQL(
"SELECT * FROM {} LIMIT 10".format(
self.parent.__genSQL__(force_columns=force_columns)
),
print_time_sql=False,
)
self.parent._VERTICAPY_VARIABLES_["columns"].remove(self.alias)
delattr(self.parent, self.alias)
except:
self.parent._VERTICAPY_VARIABLES_["exclude_columns"] += [self.alias]
if add_history:
self.parent.__add_to_history__(
"[Drop]: vColumn {} was deleted from the vDataFrame.".format(self.alias)
)
return parent
# ---#
def drop_outliers(
self, threshold: float = 4.0, use_threshold: bool = True, alpha: float = 0.05
):
"""
---------------------------------------------------------------------------
Drops outliers in the vColumn.
Parameters
----------
threshold: float, optional
Uses the Gaussian distribution to identify outliers. After normalizing
the data (Z-Score), if the absolute value of the record is greater than
the threshold, it will be considered as an outlier.
use_threshold: bool, optional
Uses the threshold instead of the 'alpha' parameter.
alpha: float, optional
Number representing the outliers threshold. Values lesser than
quantile(alpha) or greater than quantile(1-alpha) will be dropped.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.fill_outliers : Fills the outliers in the vColumn.
vDataFrame.outliers : Adds a new vColumn labeled with 0 and 1
(1 meaning global outlier).
"""
check_types(
[
("alpha", alpha, [int, float]),
("use_threshold", use_threshold, [bool]),
("threshold", threshold, [int, float]),
]
)
if use_threshold:
result = self.aggregate(func=["std", "avg"]).transpose().values
self.parent.filter(
"ABS({} - {}) / {} < {}".format(
self.alias, result["avg"][0], result["std"][0], threshold
)
)
else:
p_alpha, p_1_alpha = (
self.parent.quantile([alpha, 1 - alpha], [self.alias])
.transpose()
.values[self.alias]
)
self.parent.filter(
"({} BETWEEN {} AND {})".format(self.alias, p_alpha, p_1_alpha)
)
return self.parent
# ---#
def dropna(self):
"""
---------------------------------------------------------------------------
Filters the vDataFrame where the vColumn is missing.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.filter: Filters the data using the input expression.
"""
self.parent.filter("{} IS NOT NULL".format(self.alias))
return self.parent
# ---#
def fill_outliers(
self,
method: str = "winsorize",
threshold: float = 4.0,
use_threshold: bool = True,
alpha: float = 0.05,
):
"""
---------------------------------------------------------------------------
Fills the vColumns outliers using the input method.
Parameters
----------
method: str, optional
Method to use to fill the vColumn outliers.
mean : Replaces the upper and lower outliers by their respective
average.
null : Replaces the outliers by the NULL value.
winsorize : Clips the vColumn using as lower bound quantile(alpha) and as
upper bound quantile(1-alpha) if 'use_threshold' is set to False else
the lower and upper ZScores.
threshold: float, optional
Uses the Gaussian distribution to define the outliers. After normalizing the
data (Z-Score), if the absolute value of the record is greater than the
threshold it will be considered as an outlier.
use_threshold: bool, optional
Uses the threshold instead of the 'alpha' parameter.
alpha: float, optional
Number representing the outliers threshold. Values lesser than quantile(alpha)
or greater than quantile(1-alpha) will be filled.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].drop_outliers : Drops outliers in the vColumn.
vDataFrame.outliers : Adds a new vColumn labeled with 0 and 1
(1 meaning global outlier).
"""
if isinstance(method, str):
method = method.lower()
check_types(
[
("method", method, ["winsorize", "null", "mean"]),
("alpha", alpha, [int, float]),
("use_threshold", use_threshold, [bool]),
("threshold", threshold, [int, float]),
]
)
if use_threshold:
result = self.aggregate(func=["std", "avg"]).transpose().values
p_alpha, p_1_alpha = (
-threshold * result["std"][0] + result["avg"][0],
threshold * result["std"][0] + result["avg"][0],
)
else:
query = "SELECT PERCENTILE_CONT({}) WITHIN GROUP (ORDER BY {}) OVER (), PERCENTILE_CONT(1 - {}) WITHIN GROUP (ORDER BY {}) OVER () FROM {} LIMIT 1".format(
alpha, self.alias, alpha, self.alias, self.parent.__genSQL__()
)
p_alpha, p_1_alpha = executeSQL(
query=query,
title="Computing the quantiles of {}.".format(self.alias),
method="fetchrow",
)
if method == "winsorize":
self.clip(lower=p_alpha, upper=p_1_alpha)
elif method == "null":
self.apply(
func="(CASE WHEN ({} BETWEEN {} AND {}) THEN {} ELSE NULL END)".format(
"{}", p_alpha, p_1_alpha, "{}"
)
)
elif method == "mean":
query = "WITH vdf_table AS (SELECT * FROM {}) (SELECT AVG({}) FROM vdf_table WHERE {} < {}) UNION ALL (SELECT AVG({}) FROM vdf_table WHERE {} > {})".format(
self.parent.__genSQL__(),
self.alias,
self.alias,
p_alpha,
self.alias,
self.alias,
p_1_alpha,
)
mean_alpha, mean_1_alpha = [
item[0]
for item in executeSQL(
query=query,
title="Computing the average of the {}'s lower and upper outliers.".format(
self.alias
),
method="fetchall",
)
]
if mean_alpha == None:
mean_alpha = "NULL"
if mean_1_alpha == None:
mean_alpha = "NULL"
self.apply(
func="(CASE WHEN {} < {} THEN {} WHEN {} > {} THEN {} ELSE {} END)".format(
"{}", p_alpha, mean_alpha, "{}", p_1_alpha, mean_1_alpha, "{}"
)
)
return self.parent
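# Hedged usage sketch (not in the original source); `vdf` is assumed to be an
# existing vDataFrame:
#   vdf["salary"].fill_outliers(method="winsorize", use_threshold=False, alpha=0.05)
#   # clips values below quantile(0.05) and above quantile(0.95)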
# ---#
def fillna(
self,
val=None,
method: str = "auto",
expr: str = "",
by: list = [],
order_by: list = [],
):
"""
---------------------------------------------------------------------------
Fills missing elements in the vColumn with a user-specified rule.
Parameters
----------
val: int/float/str, optional
Value to use to impute the vColumn.
method: str, optional
Method to use to impute the missing values.
auto : Mean for the numerical and Mode for the categorical vColumns.
bfill : Backward propagation of the next non-missing element (constant interpolation).
ffill : Forward propagation of the previous non-missing element (constant interpolation).
mean : Average.
median : median.
mode : mode (most frequent element).
0ifnull : 0 when the vColumn is null, 1 otherwise.
expr: str, optional
SQL expression.
by: list, optional
vColumns used in the partition.
order_by: list, optional
List of the vColumns to use to sort the data when using TS methods.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].dropna : Drops the vColumn missing values.
"""
if isinstance(by, str):
by = [by]
if isinstance(order_by, str):
order_by = [order_by]
check_types(
[
(
"method",
method,
[
"auto",
"mode",
"0ifnull",
"mean",
"avg",
"median",
"ffill",
"pad",
"bfill",
"backfill",
],
),
("expr", expr, [str]),
("by", by, [list]),
("order_by", order_by, [list]),
]
)
method = method.lower()
self.parent.are_namecols_in([elem for elem in order_by] + by)
by = self.parent.format_colnames(by)
if method == "auto":
method = "mean" if (self.isnum() and self.nunique(True) > 6) else "mode"
total = self.count()
if (method == "mode") and (val == None):
val = self.mode(dropna=True)
if val == None:
warning_message = "The vColumn {} has no mode (only missing values).\nNothing was filled.".format(
self.alias
)
warnings.warn(warning_message, Warning)
return self.parent
if isinstance(val, str):
val = val.replace("'", "''")
if val != None:
new_column = "COALESCE({}, '{}')".format("{}", val)
elif expr:
new_column = "COALESCE({}, {})".format("{}", expr)
elif method == "0ifnull":
new_column = "DECODE({}, NULL, 0, 1)"
elif method in ("mean", "avg", "median"):
fun = "MEDIAN" if (method == "median") else "AVG"
if by == []:
if fun == "AVG":
val = self.avg()
elif fun == "MEDIAN":
val = self.median()
new_column = "COALESCE({}, {})".format("{}", val)
elif (len(by) == 1) and (self.parent[by[0]].nunique() < 50):
try:
if fun == "MEDIAN":
fun = "APPROXIMATE_MEDIAN"
query = "SELECT {}, {}({}) FROM {} GROUP BY {};".format(
by[0], fun, self.alias, self.parent.__genSQL__(), by[0]
)
result = executeSQL(
query,
title="Computing the different aggregations.",
method="fetchall",
)
for idx, elem in enumerate(result):
result[idx][0] = (
"NULL"
if (elem[0] == None)
else "'{}'".format(str(elem[0]).replace("'", "''"))
)
result[idx][1] = "NULL" if (elem[1] == None) else str(elem[1])
new_column = "COALESCE({}, DECODE({}, {}, NULL))".format(
"{}",
by[0],
", ".join(
["{}, {}".format(elem[0], elem[1]) for elem in result]
),
)
executeSQL(
"SELECT {} FROM {} LIMIT 1".format(
new_column.format(self.alias), self.parent.__genSQL__()
),
print_time_sql=False,
)
except:
new_column = "COALESCE({}, {}({}) OVER (PARTITION BY {}))".format(
"{}", fun, "{}", ", ".join(by)
)
else:
new_column = "COALESCE({}, {}({}) OVER (PARTITION BY {}))".format(
"{}", fun, "{}", ", ".join(by)
)
elif method in ("ffill", "pad", "bfill", "backfill"):
assert order_by, ParameterError(
"If the method is in ffill|pad|bfill|backfill then 'order_by' must be a list of at least one element to use to order the data"
)
desc = "" if (method in ("ffill", "pad")) else " DESC"
partition_by = (
"PARTITION BY {}".format(
", ".join([quote_ident(column) for column in by])
)
if (by)
else ""
)
order_by_ts = ", ".join([quote_ident(column) + desc for column in order_by])
new_column = "COALESCE({}, LAST_VALUE({} IGNORE NULLS) OVER ({} ORDER BY {}))".format(
"{}", "{}", partition_by, order_by_ts
)
if method in ("mean", "median") or isinstance(val, float):
category, ctype = "float", "float"
elif method == "0ifnull":
category, ctype = "int", "bool"
else:
category, ctype = self.category(), self.ctype()
copy_trans = [elem for elem in self.transformations]
total = self.count()
if method not in ["mode", "0ifnull"]:
max_floor = 0
all_partition = by
if method in ["ffill", "pad", "bfill", "backfill"]:
all_partition += [elem for elem in order_by]
for elem in all_partition:
if len(self.parent[elem].transformations) > max_floor:
max_floor = len(self.parent[elem].transformations)
max_floor -= len(self.transformations)
for k in range(max_floor):
self.transformations += [("{}", self.ctype(), self.category())]
self.transformations += [(new_column, ctype, category)]
try:
sauv = {}
for elem in self.catalog:
sauv[elem] = self.catalog[elem]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
total = abs(self.count() - total)
except Exception as e:
self.transformations = [elem for elem in copy_trans]
raise QueryError("{}\nAn Error happened during the filling.".format(e))
if total > 0:
try:
if "count" in sauv:
self.catalog["count"] = int(sauv["count"]) + total
self.catalog["percent"] = (
100 * (int(sauv["count"]) + total) / self.parent.shape()[0]
)
except:
pass
total = int(total)
conj = "s were " if total > 1 else " was "
if verticapy.options["print_info"]:
print("{} element{}filled.".format(total, conj))
self.parent.__add_to_history__(
"[Fillna]: {} {} missing value{} filled.".format(
total, self.alias, conj,
)
)
else:
if verticapy.options["print_info"]:
print("Nothing was filled.")
self.transformations = [elem for elem in copy_trans]
for elem in sauv:
self.catalog[elem] = sauv[elem]
return self.parent
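    # Illustrative usage sketch (not from the original source; assumes a vDataFrame
    # `vdf` with a numerical "age", a categorical "sex", and time-series columns
    # "price"/"date"):
    #   vdf["age"].fillna(method="mean", by=["sex"])            # per-category average
    #   vdf["price"].fillna(method="ffill", order_by=["date"])  # forward fill over time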
# ---#
def geo_plot(self, *args, **kwargs):
"""
---------------------------------------------------------------------------
Draws the Geospatial object.
Parameters
----------
*args / **kwargs
Any optional parameter to pass to the geopandas plot function.
For more information, see:
https://geopandas.readthedocs.io/en/latest/docs/reference/api/
geopandas.GeoDataFrame.plot.html
Returns
-------
ax
Matplotlib axes object
"""
columns = [self.alias]
check = True
if len(args) > 0:
column = args[0]
elif "column" in kwargs:
column = kwargs["column"]
else:
check = False
if check:
self.parent.are_namecols_in(column)
column = self.parent.format_colnames(column)
columns += [column]
if not ("cmap" in kwargs):
from verticapy.plot import gen_cmap
kwargs["cmap"] = gen_cmap()[0]
else:
if not ("color" in kwargs):
from verticapy.plot import gen_colors
kwargs["color"] = gen_colors()[0]
if not ("legend" in kwargs):
kwargs["legend"] = True
if not ("figsize" in kwargs):
kwargs["figsize"] = (14, 10)
return self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs)
# ---#
def get_dummies(
self,
prefix: str = "",
prefix_sep: str = "_",
drop_first: bool = True,
use_numbers_as_suffix: bool = False,
):
"""
---------------------------------------------------------------------------
Encodes the vColumn with the One-Hot Encoding algorithm.
Parameters
----------
prefix: str, optional
Prefix of the dummies.
prefix_sep: str, optional
        Prefix delimiter of the dummies.
drop_first: bool, optional
Drops the first dummy to avoid the creation of correlated features.
use_numbers_as_suffix: bool, optional
Uses numbers as suffix instead of the vColumns categories.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].decode : Encodes the vColumn with user defined Encoding.
vDataFrame[].discretize : Discretizes the vColumn.
vDataFrame[].label_encode : Encodes the vColumn with Label Encoding.
vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response.
"""
check_types(
[
("prefix", prefix, [str]),
("prefix_sep", prefix_sep, [str]),
("drop_first", drop_first, [bool]),
("use_numbers_as_suffix", use_numbers_as_suffix, [bool]),
]
)
distinct_elements = self.distinct()
if distinct_elements not in ([0, 1], [1, 0]) or self.isbool():
all_new_features = []
prefix = (
self.alias.replace('"', "") + prefix_sep.replace('"', "_")
if not (prefix)
else prefix.replace('"', "_") + prefix_sep.replace('"', "_")
)
n = 1 if drop_first else 0
for k in range(len(distinct_elements) - n):
name = (
'"{}{}"'.format(prefix, k)
if (use_numbers_as_suffix)
else '"{}{}"'.format(
prefix, str(distinct_elements[k]).replace('"', "_")
)
)
assert not (self.parent.is_colname_in(name)), NameError(
f"A vColumn has already the alias of one of the dummies ({name}).\n"
"It can be the result of using previously the method on the vColumn "
"or simply because of ambiguous columns naming.\nBy changing one of "
"the parameters ('prefix', 'prefix_sep'), you'll be able to solve this "
"issue."
)
for k in range(len(distinct_elements) - n):
name = (
'"{}{}"'.format(prefix, k)
if (use_numbers_as_suffix)
else '"{}{}"'.format(
prefix, str(distinct_elements[k]).replace('"', "_")
)
)
name = (
name.replace(" ", "_")
.replace("/", "_")
.replace(",", "_")
.replace("'", "_")
)
expr = "DECODE({}, '{}', 1, 0)".format(
"{}", str(distinct_elements[k]).replace("'", "''")
)
transformations = self.transformations + [(expr, "bool", "int")]
new_vColumn = vColumn(
name,
parent=self.parent,
transformations=transformations,
catalog={
"min": 0,
"max": 1,
"count": self.parent.shape()[0],
"percent": 100.0,
"unique": 2,
"approx_unique": 2,
"prod": 0,
},
)
setattr(self.parent, name, new_vColumn)
setattr(self.parent, name.replace('"', ""), new_vColumn)
self.parent._VERTICAPY_VARIABLES_["columns"] += [name]
all_new_features += [name]
conj = "s were " if len(all_new_features) > 1 else " was "
self.parent.__add_to_history__(
"[Get Dummies]: One hot encoder was applied to the vColumn {}\n{} feature{}created: {}".format(
self.alias, len(all_new_features), conj, ", ".join(all_new_features)
)
+ "."
)
return self.parent
one_hot_encode = get_dummies
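    # Illustrative usage sketch (assumes a categorical vColumn "color" with the
    # categories 'red', 'green', 'blue'): the call below would add dummy vColumns
    # such as "color_green" and "color_blue", dropping the first category to avoid
    # collinearity:
    #   vdf["color"].get_dummies(drop_first=True)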
# ---#
def head(self, limit: int = 5):
"""
---------------------------------------------------------------------------
Returns the head of the vColumn.
Parameters
----------
limit: int, optional
Number of elements to display.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
    vDataFrame[].tail : Returns the tail of the vColumn.
"""
return self.iloc(limit=limit)
# ---#
def hist(
self,
method: str = "density",
of: str = "",
max_cardinality: int = 6,
nbins: int = 0,
h: float = 0,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the histogram of the vColumn based on an aggregation.
Parameters
----------
method: str, optional
The method to use to aggregate the data.
count : Number of elements.
density : Percentage of the distribution.
mean : Average of the vColumn 'of'.
min : Minimum of the vColumn 'of'.
max : Maximum of the vColumn 'of'.
sum : Sum of the vColumn 'of'.
q% : q Quantile of the vColumn 'of' (ex: 50% to get the median).
        It can also be a customized aggregation (ex: AVG(column1) + 5).
of: str, optional
The vColumn to use to compute the aggregation.
max_cardinality: int, optional
Maximum number of the vColumn distinct elements to be used as categorical
(No h will be picked or computed)
nbins: int, optional
Number of bins. If empty, an optimized number of bins will be computed.
h: float, optional
Interval width of the bar. If empty, an optimized h will be computed.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame[].bar : Draws the Bar Chart of vColumn based on an aggregation.
"""
check_types(
[
("method", method, [str]),
("of", of, [str]),
("max_cardinality", max_cardinality, [int, float]),
("h", h, [int, float]),
("nbins", nbins, [int, float]),
]
)
if of:
self.parent.are_namecols_in(of)
of = self.parent.format_colnames(of)
from verticapy.plot import hist
return hist(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds)
# ---#
def iloc(self, limit: int = 5, offset: int = 0):
"""
---------------------------------------------------------------------------
Returns a part of the vColumn (delimited by an offset and a limit).
Parameters
----------
limit: int, optional
Number of elements to display.
offset: int, optional
Number of elements to skip.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].head : Returns the head of the vColumn.
vDataFrame[].tail : Returns the tail of the vColumn.
"""
check_types([("limit", limit, [int, float]), ("offset", offset, [int, float])])
if offset < 0:
offset = max(0, self.parent.shape()[0] - limit)
title = "Reads {}.".format(self.alias)
tail = to_tablesample(
"SELECT {} AS {} FROM {}{} LIMIT {} OFFSET {}".format(
bin_spatial_to_str(self.category(), self.alias),
self.alias,
self.parent.__genSQL__(),
self.parent.__get_last_order_by__(),
limit,
offset,
),
title=title,
)
tail.count = self.parent.shape()[0]
tail.offset = offset
tail.dtype[self.alias] = self.ctype()
tail.name = self.alias
return tail
# ---#
def isbool(self):
"""
---------------------------------------------------------------------------
Returns True if the vColumn is boolean, False otherwise.
Returns
-------
bool
True if the vColumn is boolean.
See Also
--------
vDataFrame[].isdate : Returns True if the vColumn category is date.
vDataFrame[].isnum : Returns True if the vColumn is numerical.
"""
return self.ctype().lower() in ("bool", "boolean")
# ---#
def isdate(self):
"""
---------------------------------------------------------------------------
Returns True if the vColumn category is date, False otherwise.
Returns
-------
bool
True if the vColumn category is date.
See Also
--------
vDataFrame[].isbool : Returns True if the vColumn is boolean.
vDataFrame[].isnum : Returns True if the vColumn is numerical.
"""
return self.category() == "date"
# ---#
def isin(self, val: list, *args):
"""
---------------------------------------------------------------------------
Looks if some specific records are in the vColumn and it returns the new
vDataFrame of the search.
Parameters
----------
val: list
        List of the different records. For example, to check if Badr and Fouad
        are in the vColumn, you can write the following list: ["Fouad", "Badr"].
Returns
-------
vDataFrame
The vDataFrame of the search.
See Also
--------
vDataFrame.isin : Looks if some specific records are in the vDataFrame.
"""
if isinstance(val, str) or not (isinstance(val, Iterable)):
val = [val]
val += list(args)
check_types([("val", val, [list])])
val = {self.alias: val}
return self.parent.isin(val)
# ---#
def isnum(self):
"""
---------------------------------------------------------------------------
Returns True if the vColumn is numerical, False otherwise.
Returns
-------
bool
True if the vColumn is numerical.
See Also
--------
vDataFrame[].isbool : Returns True if the vColumn is boolean.
vDataFrame[].isdate : Returns True if the vColumn category is date.
"""
return self.category() in ("float", "int")
# ---#
def iv_woe(self, y: str, nbins: int = 10):
"""
---------------------------------------------------------------------------
Computes the Information Value (IV) / Weight Of Evidence (WOE) Table. It tells
the predictive power of an independent variable in relation to the dependent
variable.
Parameters
----------
y: str
Response vColumn.
nbins: int, optional
Maximum number of nbins used for the discretization (must be > 1)
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame.iv_woe : Computes the Information Value (IV) Table.
"""
check_types([("y", y, [str]), ("nbins", nbins, [int])])
self.parent.are_namecols_in(y)
y = self.parent.format_colnames(y)
assert self.parent[y].nunique() == 2, TypeError(
"vColumn {} must be binary to use iv_woe.".format(y)
)
response_cat = self.parent[y].distinct()
response_cat.sort()
assert response_cat == [0, 1], TypeError(
"vColumn {} must be binary to use iv_woe.".format(y)
)
self.parent[y].distinct()
trans = self.discretize(
method="same_width" if self.isnum() else "topk",
nbins=nbins,
k=nbins,
new_category="Others",
return_enum_trans=True,
)[0].replace("{}", self.alias)
query = "SELECT {} AS {}, {} AS ord, {}::int AS {} FROM {}".format(
trans, self.alias, self.alias, y, y, self.parent.__genSQL__(),
)
query = "SELECT {}, MIN(ord) AS ord, SUM(1 - {}) AS non_events, SUM({}) AS events FROM ({}) x GROUP BY 1".format(
self.alias, y, y, query,
)
query = "SELECT {}, ord, non_events, events, non_events / NULLIFZERO(SUM(non_events) OVER ()) AS pt_non_events, events / NULLIFZERO(SUM(events) OVER ()) AS pt_events FROM ({}) x".format(
self.alias, query,
)
query = "SELECT {} AS index, non_events, events, pt_non_events, pt_events, CASE WHEN non_events = 0 OR events = 0 THEN 0 ELSE ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS woe, CASE WHEN non_events = 0 OR events = 0 THEN 0 ELSE (pt_non_events - pt_events) * ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS iv FROM ({}) x ORDER BY ord".format(
self.alias, query,
)
title = "Computing WOE & IV of {} (response = {}).".format(self.alias, y)
result = to_tablesample(query, title=title)
result.values["index"] += ["total"]
result.values["non_events"] += [sum(result["non_events"])]
result.values["events"] += [sum(result["events"])]
result.values["pt_non_events"] += [""]
result.values["pt_events"] += [""]
result.values["woe"] += [""]
result.values["iv"] += [sum(result["iv"])]
return result
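    # Illustrative usage sketch (assumes a binary 0/1 response vColumn "churn"):
    # the call below discretizes the current vColumn into at most 10 bins and
    # returns the WOE/IV table, with the total IV reported in the last row:
    #   vdf["tenure"].iv_woe(y="churn", nbins=10)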
# ---#
def kurtosis(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'kurtosis'.
Returns
-------
float
kurtosis
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["kurtosis"]).values[self.alias][0]
kurt = kurtosis
# ---#
def label_encode(self):
"""
---------------------------------------------------------------------------
Encodes the vColumn using a bijection from the different categories to
[0, n - 1] (n being the vColumn cardinality).
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].decode : Encodes the vColumn with a user defined Encoding.
vDataFrame[].discretize : Discretizes the vColumn.
vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.
vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response.
"""
if self.category() in ["date", "float"]:
warning_message = (
"label_encode is only available for categorical variables."
)
warnings.warn(warning_message, Warning)
else:
distinct_elements = self.distinct()
expr = ["DECODE({}"]
text_info = "\n"
for k in range(len(distinct_elements)):
expr += [
"'{}', {}".format(str(distinct_elements[k]).replace("'", "''"), k)
]
text_info += "\t{} => {}".format(distinct_elements[k], k)
expr = ", ".join(expr) + ", {})".format(len(distinct_elements))
self.transformations += [(expr, "int", "int")]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
self.catalog["count"] = self.parent.shape()[0]
self.catalog["percent"] = 100
self.parent.__add_to_history__(
"[Label Encoding]: Label Encoding was applied to the vColumn {} using the following mapping:{}".format(
self.alias, text_info
)
)
return self.parent
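    # Illustrative usage sketch: `vdf["segment"].label_encode()` maps each category
    # to its position in `distinct()` (0, 1, 2, ...); the mapping is a bijection,
    # not an ordinal encoding, so it should not be interpreted as a ranking.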
# ---#
def mad(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'mad' (median absolute deviation).
Returns
-------
float
mad
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["mad"]).values[self.alias][0]
# ---#
def max(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'max' (Maximum).
Returns
-------
float/str
maximum
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["max"]).values[self.alias][0]
# ---#
def mean_encode(self, response: str):
"""
---------------------------------------------------------------------------
Encodes the vColumn using the average of the response partitioned by the
different vColumn categories.
Parameters
----------
response: str
Response vColumn.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].decode : Encodes the vColumn using a user-defined encoding.
vDataFrame[].discretize : Discretizes the vColumn.
vDataFrame[].label_encode : Encodes the vColumn with Label Encoding.
vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.
"""
check_types([("response", response, [str])])
self.parent.are_namecols_in(response)
response = self.parent.format_colnames(response)
assert self.parent[response].isnum(), TypeError(
"The response column must be numerical to use a mean encoding"
)
max_floor = len(self.parent[response].transformations) - len(
self.transformations
)
for k in range(max_floor):
self.transformations += [("{}", self.ctype(), self.category())]
self.transformations += [
("AVG({}) OVER (PARTITION BY {})".format(response, "{}"), "int", "float")
]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
self.parent.__add_to_history__(
"[Mean Encode]: The vColumn {} was transformed using a mean encoding with {} as Response Column.".format(
self.alias, response
)
)
if verticapy.options["print_info"]:
print("The mean encoding was successfully done.")
return self.parent
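    # Illustrative usage sketch (assumes a numerical response "price"): the call
    # below replaces each category of the current vColumn with the average response
    # of its partition (target/mean encoding):
    #   vdf["neighborhood"].mean_encode(response="price")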
# ---#
def median(
self, approx: bool = True,
):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'median'.
Parameters
----------
approx: bool, optional
If set to True, the approximate median is returned. By setting this
parameter to False, the function's performance can drastically decrease.
Returns
-------
float/str
median
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.quantile(0.5, approx=approx)
# ---#
def memory_usage(self):
"""
---------------------------------------------------------------------------
Returns the vColumn memory usage.
Returns
-------
float
vColumn memory usage (byte)
See Also
--------
vDataFrame.memory_usage : Returns the vDataFrame memory usage.
"""
import sys
total = (
sys.getsizeof(self)
+ sys.getsizeof(self.alias)
+ sys.getsizeof(self.transformations)
+ sys.getsizeof(self.catalog)
)
for elem in self.catalog:
total += sys.getsizeof(elem)
return total
# ---#
def min(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'min' (Minimum).
Returns
-------
float/str
minimum
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["min"]).values[self.alias][0]
# ---#
def mode(self, dropna: bool = False, n: int = 1):
"""
---------------------------------------------------------------------------
    Returns the nth most frequent element.
Parameters
----------
dropna: bool, optional
If set to True, NULL values will not be considered during the computation.
n: int, optional
Integer corresponding to the offset. For example, if n = 1 then this
method will return the mode of the vColumn.
Returns
-------
str/float/int
        vColumn nth most frequent element.
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
check_types([("dropna", dropna, [bool]), ("n", n, [int, float])])
if n == 1:
pre_comp = self.parent.__get_catalog_value__(self.alias, "top")
if pre_comp != "VERTICAPY_NOT_PRECOMPUTED":
if not (dropna) and (pre_comp != None):
return pre_comp
assert n >= 1, ParameterError("Parameter 'n' must be greater or equal to 1")
where = " WHERE {} IS NOT NULL ".format(self.alias) if (dropna) else " "
result = executeSQL(
"SELECT {} FROM (SELECT {}, COUNT(*) AS _verticapy_cnt_ FROM {}{}GROUP BY {} ORDER BY _verticapy_cnt_ DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY _verticapy_cnt_ ASC LIMIT 1".format(
self.alias, self.alias, self.parent.__genSQL__(), where, self.alias, n
),
title="Computing the mode.",
method="fetchall",
)
top = None if not (result) else result[0][0]
if not (dropna):
n = "" if (n == 1) else str(int(n))
if isinstance(top, decimal.Decimal):
top = float(top)
self.parent.__update_catalog__(
{"index": ["top{}".format(n)], self.alias: [top]}
)
return top
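    # Illustrative usage sketch: `vdf["city"].mode()` returns the most frequent
    # value (NULLs included), while `vdf["city"].mode(dropna=True, n=2)` returns
    # the second most frequent non-NULL value.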
# ---#
def mul(self, x: float):
"""
---------------------------------------------------------------------------
Multiplies the vColumn by the input element.
Parameters
----------
x: float
Input number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("x", x, [int, float])])
return self.apply(func="{} * ({})".format("{}", x))
# ---#
def nlargest(self, n: int = 10):
"""
---------------------------------------------------------------------------
Returns the n largest vColumn elements.
Parameters
----------
n: int, optional
Offset.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].nsmallest : Returns the n smallest elements in the vColumn.
"""
check_types([("n", n, [int, float])])
query = "SELECT * FROM {} WHERE {} IS NOT NULL ORDER BY {} DESC LIMIT {}".format(
self.parent.__genSQL__(), self.alias, self.alias, n
)
title = "Reads {} {} largest elements.".format(self.alias, n)
return to_tablesample(query, title=title)
# ---#
def normalize(
self, method: str = "zscore", by: list = [], return_trans: bool = False
):
"""
---------------------------------------------------------------------------
Normalizes the input vColumns using the input method.
Parameters
----------
method: str, optional
Method to use to normalize.
zscore : Normalization using the Z-Score (avg and std).
(x - avg) / std
robust_zscore : Normalization using the Robust Z-Score (median and mad).
(x - median) / (1.4826 * mad)
minmax : Normalization using the MinMax (min and max).
(x - min) / (max - min)
by: list, optional
vColumns used in the partition.
    return_trans: bool, optional
        If set to True, the method will return the transformation used instead of
        the parent vDataFrame. This parameter is mainly used for testing purposes.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.outliers : Computes the vDataFrame Global Outliers.
"""
if isinstance(by, str):
by = [by]
check_types(
[
("method", method, ["zscore", "robust_zscore", "minmax"]),
("by", by, [list]),
("return_trans", return_trans, [bool]),
]
)
method = method.lower()
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
nullifzero, n = 1, len(by)
if self.isbool():
warning_message = "Normalize doesn't work on booleans".format(self.alias)
warnings.warn(warning_message, Warning)
elif self.isnum():
if method == "zscore":
if n == 0:
nullifzero = 0
avg, stddev = self.aggregate(["avg", "std"]).values[self.alias]
if stddev == 0:
warning_message = "Can not normalize {} using a Z-Score - The Standard Deviation is null !".format(
self.alias
)
warnings.warn(warning_message, Warning)
return self
elif (n == 1) and (self.parent[by[0]].nunique() < 50):
try:
result = executeSQL(
"SELECT {}, AVG({}), STDDEV({}) FROM {} GROUP BY {}".format(
by[0],
self.alias,
self.alias,
self.parent.__genSQL__(),
by[0],
),
title="Computing the different categories to normalize.",
method="fetchall",
)
for i in range(len(result)):
if result[i][2] == None:
pass
elif math.isnan(result[i][2]):
result[i][2] = None
avg = "DECODE({}, {}, NULL)".format(
by[0],
", ".join(
[
"{}, {}".format(
"'{}'".format(str(elem[0]).replace("'", "''"))
if elem[0] != None
else "NULL",
elem[1] if elem[1] != None else "NULL",
)
for elem in result
if elem[1] != None
]
),
)
stddev = "DECODE({}, {}, NULL)".format(
by[0],
", ".join(
[
"{}, {}".format(
"'{}'".format(str(elem[0]).replace("'", "''"))
if elem[0] != None
else "NULL",
elem[2] if elem[2] != None else "NULL",
)
for elem in result
if elem[2] != None
]
),
)
executeSQL(
"SELECT {}, {} FROM {} LIMIT 1".format(
avg, stddev, self.parent.__genSQL__()
),
print_time_sql=False,
)
except:
avg, stddev = (
"AVG({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
"STDDEV({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
)
else:
avg, stddev = (
"AVG({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
"STDDEV({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
)
if return_trans:
return "({} - {}) / {}({})".format(
self.alias, avg, "NULLIFZERO" if (nullifzero) else "", stddev
)
else:
final_transformation = [
(
"({} - {}) / {}({})".format(
"{}", avg, "NULLIFZERO" if (nullifzero) else "", stddev
),
"float",
"float",
)
]
elif method == "robust_zscore":
if n > 0:
warning_message = "The method 'robust_zscore' is available only if the parameter 'by' is empty\nIf you want to normalize by grouping by elements, please use a method in zscore|minmax"
warnings.warn(warning_message, Warning)
return self
mad, med = self.aggregate(["mad", "approx_median"]).values[self.alias]
mad *= 1.4826
if mad != 0:
if return_trans:
return "({} - {}) / ({})".format(self.alias, med, mad)
else:
final_transformation = [
(
"({} - {}) / ({})".format("{}", med, mad),
"float",
"float",
)
]
else:
warning_message = "Can not normalize {} using a Robust Z-Score - The MAD is null !".format(
self.alias
)
warnings.warn(warning_message, Warning)
return self
elif method == "minmax":
if n == 0:
nullifzero = 0
cmin, cmax = self.aggregate(["min", "max"]).values[self.alias]
if cmax - cmin == 0:
warning_message = "Can not normalize {} using the MIN and the MAX. MAX = MIN !".format(
self.alias
)
warnings.warn(warning_message, Warning)
return self
elif n == 1:
try:
result = executeSQL(
"SELECT {}, MIN({}), MAX({}) FROM {} GROUP BY {}".format(
by[0],
self.alias,
self.alias,
self.parent.__genSQL__(),
by[0],
),
title="Computing the different categories {} to normalize.".format(
by[0]
),
method="fetchall",
)
cmin = "DECODE({}, {}, NULL)".format(
by[0],
", ".join(
[
"{}, {}".format(
"'{}'".format(str(elem[0]).replace("'", "''"))
if elem[0] != None
else "NULL",
elem[1] if elem[1] != None else "NULL",
)
for elem in result
if elem[1] != None
]
),
)
cmax = "DECODE({}, {}, NULL)".format(
by[0],
", ".join(
[
"{}, {}".format(
"'{}'".format(str(elem[0]).replace("'", "''"))
if elem[0] != None
else "NULL",
elem[2] if elem[2] != None else "NULL",
)
for elem in result
if elem[2] != None
]
),
)
executeSQL(
"SELECT {}, {} FROM {} LIMIT 1".format(
cmax, cmin, self.parent.__genSQL__()
),
print_time_sql=False,
)
except:
cmax, cmin = (
"MAX({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
"MIN({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
)
else:
cmax, cmin = (
"MAX({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
"MIN({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
)
if return_trans:
return "({} - {}) / {}({} - {})".format(
self.alias,
cmin,
"NULLIFZERO" if (nullifzero) else "",
cmax,
cmin,
)
else:
final_transformation = [
(
"({} - {}) / {}({} - {})".format(
"{}",
cmin,
"NULLIFZERO" if (nullifzero) else "",
cmax,
cmin,
),
"float",
"float",
)
]
if method != "robust_zscore":
max_floor = 0
for elem in by:
if len(self.parent[elem].transformations) > max_floor:
max_floor = len(self.parent[elem].transformations)
max_floor -= len(self.transformations)
for k in range(max_floor):
self.transformations += [("{}", self.ctype(), self.category())]
self.transformations += final_transformation
sauv = {}
for elem in self.catalog:
sauv[elem] = self.catalog[elem]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
try:
if "count" in sauv:
self.catalog["count"] = sauv["count"]
self.catalog["percent"] = (
100 * sauv["count"] / self.parent.shape()[0]
)
for elem in sauv:
if "top" in elem:
if "percent" in elem:
self.catalog[elem] = sauv[elem]
elif elem == None:
self.catalog[elem] = None
elif method == "robust_zscore":
self.catalog[elem] = (sauv[elem] - sauv["approx_50%"]) / (
1.4826 * sauv["mad"]
)
elif method == "zscore":
self.catalog[elem] = (sauv[elem] - sauv["mean"]) / sauv[
"std"
]
elif method == "minmax":
self.catalog[elem] = (sauv[elem] - sauv["min"]) / (
sauv["max"] - sauv["min"]
)
except:
pass
if method == "robust_zscore":
self.catalog["median"] = 0
self.catalog["mad"] = 1 / 1.4826
elif method == "zscore":
self.catalog["mean"] = 0
self.catalog["std"] = 1
elif method == "minmax":
self.catalog["min"] = 0
self.catalog["max"] = 1
self.parent.__add_to_history__(
"[Normalize]: The vColumn '{}' was normalized with the method '{}'.".format(
self.alias, method
)
)
else:
raise TypeError("The vColumn must be numerical for Normalization")
return self.parent
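    # Illustrative usage sketch (assumes a numerical vColumn "salary" and a
    # categorical "dept"):
    #   vdf["salary"].normalize(method="zscore")               # (x - avg) / std
    #   vdf["salary"].normalize(method="robust_zscore")        # (x - median) / (1.4826 * mad)
    #   vdf["salary"].normalize(method="minmax", by=["dept"])  # per-department min-max
    # Note that 'robust_zscore' does not support the 'by' partition.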
# ---#
def nsmallest(self, n: int = 10):
"""
---------------------------------------------------------------------------
Returns the n smallest elements in the vColumn.
Parameters
----------
n: int, optional
Offset.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].nlargest : Returns the n largest vColumn elements.
"""
check_types([("n", n, [int, float])])
query = "SELECT * FROM {} WHERE {} IS NOT NULL ORDER BY {} ASC LIMIT {}".format(
self.parent.__genSQL__(), self.alias, self.alias, n
)
title = "Reads {} {} smallest elements.".format(n, self.alias)
return to_tablesample(query, title=title)
# ---#
def numh(self, method: str = "auto"):
"""
---------------------------------------------------------------------------
Computes the optimal vColumn bar width.
Parameters
----------
method: str, optional
Method to use to compute the optimal h.
auto : Combination of Freedman Diaconis and Sturges.
freedman_diaconis : Freedman Diaconis [2 * IQR / n ** (1 / 3)]
sturges : Sturges [CEIL(log2(n)) + 1]
Returns
-------
float
optimal bar width.
"""
check_types(
[("method", method, ["sturges", "freedman_diaconis", "fd", "auto"])]
)
method = method.lower()
if method == "auto":
pre_comp = self.parent.__get_catalog_value__(self.alias, "numh")
if pre_comp != "VERTICAPY_NOT_PRECOMPUTED":
return pre_comp
assert self.isnum() or self.isdate(), ParameterError(
"numh is only available on type numeric|date"
)
if self.isnum():
result = (
self.parent.describe(
method="numerical", columns=[self.alias], unique=False
)
.transpose()
.values[self.alias]
)
count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = (
result[0],
result[3],
result[4],
result[6],
result[7],
)
elif self.isdate():
min_date = self.min()
table = "(SELECT DATEDIFF('second', '{}'::timestamp, {}) AS {} FROM {}) VERTICAPY_OPTIMAL_H_TABLE".format(
min_date, self.alias, self.alias, self.parent.__genSQL__()
)
query = "SELECT COUNT({}) AS NAs, MIN({}) AS min, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.25) AS Q1, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.75) AS Q3, MAX({}) AS max FROM {}".format(
self.alias, self.alias, self.alias, self.alias, self.alias, table
)
result = executeSQL(
query,
title="Different aggregations to compute the optimal h.",
method="fetchrow",
)
count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = result
sturges = max(
float(vColumn_max - vColumn_min) / int(math.floor(math.log(count, 2) + 2)),
1e-99,
)
fd = max(2.0 * (vColumn_075 - vColumn_025) / (count) ** (1.0 / 3.0), 1e-99)
if method.lower() == "sturges":
best_h = sturges
elif method.lower() in ("freedman_diaconis", "fd"):
best_h = fd
else:
best_h = max(sturges, fd)
self.parent.__update_catalog__({"index": ["numh"], self.alias: [best_h]})
if self.category() == "int":
best_h = max(math.floor(best_h), 1)
return best_h
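    # Worked example with illustrative numbers (not from the original source): for
    # count = 1000, min = 0, max = 100, Q1 = 25 and Q3 = 75, the rules above give
    #   sturges = (100 - 0) / floor(log2(1000) + 2) = 100 / 11 ~ 9.09
    #   fd      = 2 * (75 - 25) / 1000 ** (1 / 3)   = 100 / 10 = 10.0
    # and method="auto" keeps the larger of the two, i.e. 10.0.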
# ---#
def nunique(self, approx: bool = True):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'unique' (cardinality).
Parameters
----------
approx: bool, optional
If set to True, the approximate cardinality is returned. By setting
this parameter to False, the function's performance can drastically
decrease.
Returns
-------
int
vColumn cardinality (or approximate cardinality).
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
check_types([("approx", approx, [bool])])
if approx:
return self.aggregate(func=["approx_unique"]).values[self.alias][0]
else:
return self.aggregate(func=["unique"]).values[self.alias][0]
# ---#
def pie(
self,
method: str = "density",
of: str = "",
max_cardinality: int = 6,
h: float = 0,
pie_type: str = "auto",
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the pie chart of the vColumn based on an aggregation.
Parameters
----------
method: str, optional
The method to use to aggregate the data.
count : Number of elements.
density : Percentage of the distribution.
mean : Average of the vColumn 'of'.
min : Minimum of the vColumn 'of'.
max : Maximum of the vColumn 'of'.
sum : Sum of the vColumn 'of'.
q% : q Quantile of the vColumn 'of' (ex: 50% to get the median).
        It can also be a customized aggregation (ex: AVG(column1) + 5).
of: str, optional
The vColumn to use to compute the aggregation.
max_cardinality: int, optional
Maximum number of the vColumn distinct elements to be used as categorical
(No h will be picked or computed)
h: float, optional
Interval width of the bar. If empty, an optimized h will be computed.
pie_type: str, optional
The type of pie chart.
auto : Regular pie chart.
donut : Donut chart.
rose : Rose chart.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.donut : Draws the donut chart of the vColumn based on an aggregation.
"""
if isinstance(pie_type, str):
pie_type = pie_type.lower()
check_types(
[
("method", method, [str]),
("of", of, [str]),
("max_cardinality", max_cardinality, [int, float]),
("h", h, [int, float]),
("pie_type", pie_type, ["auto", "donut", "rose"]),
]
)
donut = True if pie_type == "donut" else False
rose = True if pie_type == "rose" else False
if of:
self.parent.are_namecols_in(of)
of = self.parent.format_colnames(of)
from verticapy.plot import pie
return pie(
            self, method, of, max_cardinality, h, donut, rose, ax=ax, **style_kwds,
)
# ---#
def plot(
self,
ts: str,
by: str = "",
start_date: Union[str, datetime.datetime, datetime.date] = "",
end_date: Union[str, datetime.datetime, datetime.date] = "",
area: bool = False,
step: bool = False,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the Time Series of the vColumn.
Parameters
----------
ts: str
TS (Time Series) vColumn to use to order the data. The vColumn type must be
date like (date, datetime, timestamp...) or numerical.
by: str, optional
vColumn to use to partition the TS.
    start_date: str / date, optional
        Input Start Date. For example, time = '03-11-1993' will filter the data
        when 'ts' is earlier than November 3rd, 1993.
    end_date: str / date, optional
        Input End Date. For example, time = '03-11-1993' will filter the data
        when 'ts' is later than November 3rd, 1993.
area: bool, optional
If set to True, draw an Area Plot.
step: bool, optional
If set to True, draw a Step Plot.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.plot : Draws the time series.
"""
check_types(
[
("ts", ts, [str]),
("by", by, [str]),
("start_date", start_date, [str, datetime.datetime, datetime.date]),
("end_date", end_date, [str, datetime.datetime, datetime.date]),
("area", area, [bool]),
("step", step, [bool]),
]
)
self.parent.are_namecols_in(ts)
ts = self.parent.format_colnames(ts)
if by:
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
from verticapy.plot import ts_plot
return ts_plot(
self, ts, by, start_date, end_date, area, step, ax=ax, **style_kwds,
)
# ---#
def product(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'product'.
Returns
-------
float
product
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(func=["prod"]).values[self.alias][0]
prod = product
# ---#
def quantile(self, x: float, approx: bool = True):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using an input 'quantile'.
Parameters
----------
x: float
A float between 0 and 1 that represents the quantile.
For example: 0.25 represents Q1.
approx: bool, optional
If set to True, the approximate quantile is returned. By setting this
parameter to False, the function's performance can drastically decrease.
Returns
-------
float
quantile (or approximate quantile).
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
check_types([("x", x, [int, float], ("approx", approx, [bool]))])
prefix = "approx_" if approx else ""
return self.aggregate(func=[prefix + "{}%".format(x * 100)]).values[self.alias][
0
]
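    # Illustrative usage sketch: `vdf["age"].quantile(0.25)` computes the
    # approximate first quartile (an 'approx_<q>%' aggregation under the hood),
    # while `vdf["age"].quantile(0.9, approx=False)` computes the exact 90%
    # quantile at a higher computational cost.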
# ---#
def range_plot(
self,
ts: str,
q: tuple = (0.25, 0.75),
start_date: Union[str, datetime.datetime, datetime.date] = "",
end_date: Union[str, datetime.datetime, datetime.date] = "",
plot_median: bool = False,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the range plot of the vColumn. The aggregations used are the median
and two input quantiles.
Parameters
----------
ts: str
TS (Time Series) vColumn to use to order the data. The vColumn type must be
date like (date, datetime, timestamp...) or numerical.
q: tuple, optional
Tuple including the 2 quantiles used to draw the Plot.
    start_date: str / date, optional
        Input Start Date. For example, time = '03-11-1993' will filter the data
        when 'ts' is earlier than November 3rd, 1993.
    end_date: str / date, optional
        Input End Date. For example, time = '03-11-1993' will filter the data
        when 'ts' is later than November 3rd, 1993.
plot_median: bool, optional
If set to True, the Median will be drawn.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.plot : Draws the time series.
"""
check_types(
[
("ts", ts, [str]),
("q", q, [tuple]),
(
"start_date",
start_date,
[str, datetime.datetime, datetime.date, int, float],
),
(
"end_date",
end_date,
[str, datetime.datetime, datetime.date, int, float],
),
("plot_median", plot_median, [bool]),
]
)
self.parent.are_namecols_in(ts)
ts = self.parent.format_colnames(ts)
from verticapy.plot import range_curve_vdf
return range_curve_vdf(
self, ts, q, start_date, end_date, plot_median, ax=ax, **style_kwds,
)
# ---#
def rename(self, new_name: str):
"""
---------------------------------------------------------------------------
Renames the vColumn by dropping the current vColumn and creating a copy with
the specified name.
\u26A0 Warning : SQL code generation will be slower if the vDataFrame has been
transformed multiple times, so it's better practice to use
this method when first preparing your data.
Parameters
----------
new_name: str
The new vColumn alias.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.add_copy : Creates a copy of the vColumn.
"""
check_types([("new_name", new_name, [str])])
old_name = quote_ident(self.alias)
new_name = new_name.replace('"', "")
assert not (self.parent.is_colname_in(new_name)), NameError(
f"A vColumn has already the alias {new_name}.\nBy changing the parameter 'new_name', you'll be able to solve this issue."
)
self.add_copy(new_name)
parent = self.drop(add_history=False)
parent.__add_to_history__(
"[Rename]: The vColumn {} was renamed '{}'.".format(old_name, new_name)
)
return parent
# ---#
def round(self, n: int):
"""
---------------------------------------------------------------------------
    Rounds the vColumn by keeping only the input number of digits after the decimal point.
Parameters
----------
n: int
        Number of digits to keep after the decimal point.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("n", n, [int, float])])
return self.apply(func="ROUND({}, {})".format("{}", n))
# ---#
def sem(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'sem' (standard error of mean).
Returns
-------
float
sem
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["sem"]).values[self.alias][0]
# ---#
def skewness(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'skewness'.
Returns
-------
float
skewness
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["skewness"]).values[self.alias][0]
skew = skewness
# ---#
def slice(self, length: int, unit: str = "second", start: bool = True):
"""
---------------------------------------------------------------------------
Slices and transforms the vColumn using a time series rule.
Parameters
----------
length: int
Slice size.
unit: str, optional
        Slice size unit. For example, it can be 'minute', 'hour'...
start: bool, optional
If set to True, the record will be sliced using the floor of the slicing
instead of the ceiling.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].date_part : Extracts a specific TS field from the vColumn.
"""
check_types(
[
("length", length, [int, float]),
("unit", unit, [str]),
("start", start, [bool]),
]
)
start_or_end = "START" if (start) else "END"
return self.apply(
func="TIME_SLICE({}, {}, '{}', '{}')".format(
"{}", length, unit.upper(), start_or_end
)
)
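    # Illustrative usage sketch (assumes a timestamp vColumn "ts"): the call below
    # floors every record to the start of its 30-minute time slice via Vertica's
    # TIME_SLICE function:
    #   vdf["ts"].slice(length=30, unit="minute", start=True)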
# ---#
def spider(
self,
by: str = "",
method: str = "density",
of: str = "",
max_cardinality: Union[int, tuple] = (6, 6),
h: Union[int, float, tuple] = (None, None),
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the spider plot of the input vColumn based on an aggregation.
Parameters
----------
by: str, optional
vColumn to use to partition the data.
method: str, optional
The method to use to aggregate the data.
count : Number of elements.
density : Percentage of the distribution.
mean : Average of the vColumn 'of'.
min : Minimum of the vColumn 'of'.
max : Maximum of the vColumn 'of'.
sum : Sum of the vColumn 'of'.
q% : q Quantile of the vColumn 'of' (ex: 50% to get the median).
        It can also be a customized aggregation (ex: AVG(column1) + 5).
of: str, optional
The vColumn to use to compute the aggregation.
    h: int/float/tuple, optional
        Interval width of the bars of the vColumn and of the 'by' vColumn. It is
        only valid if the vColumns are numerical. Optimized h will be computed
        if the parameter is empty or invalid.
    max_cardinality: int/tuple, optional
        Maximum number of distinct elements of the vColumn and of the 'by' vColumn
        to be used as categorical (No h will be picked or computed)
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.bar : Draws the Bar Chart of the input vColumns based on an aggregation.
"""
check_types(
[
("by", by, [str]),
("method", method, [str]),
("of", of, [str]),
("max_cardinality", max_cardinality, [list]),
("h", h, [list, float, int]),
]
)
if by:
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
columns = [self.alias, by]
else:
columns = [self.alias]
if of:
self.parent.are_namecols_in(of)
of = self.parent.format_colnames(of)
from verticapy.plot import spider as spider_plot
return spider_plot(
self.parent, columns, method, of, max_cardinality, h, ax=ax, **style_kwds,
)
# ---#
def std(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'std' (Standard Deviation).
Returns
-------
float
std
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["stddev"]).values[self.alias][0]
stddev = std
# ---#
def store_usage(self):
"""
---------------------------------------------------------------------------
Returns the vColumn expected store usage (unit: b).
Returns
-------
int
vColumn expected store usage.
See Also
--------
vDataFrame.expected_store_usage : Returns the vDataFrame expected store usage.
"""
pre_comp = self.parent.__get_catalog_value__(self.alias, "store_usage")
if pre_comp != "VERTICAPY_NOT_PRECOMPUTED":
return pre_comp
store_usage = executeSQL(
"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}".format(
bin_spatial_to_str(self.category(), self.alias),
self.parent.__genSQL__(),
),
title="Computing the Store Usage of the vColumn {}.".format(self.alias),
method="fetchfirstelem",
)
self.parent.__update_catalog__(
{"index": ["store_usage"], self.alias: [store_usage]}
)
return store_usage
# ---#
def str_contains(self, pat: str):
"""
---------------------------------------------------------------------------
Verifies if the regular expression is in each of the vColumn records.
The vColumn will be transformed.
Parameters
----------
pat: str
Regular expression.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_count : Computes the number of matches for the regular expression
in each record of the vColumn.
vDataFrame[].extract : Extracts the regular expression in each record of the
vColumn.
vDataFrame[].str_replace : Replaces the regular expression matches in each of the
vColumn records by an input value.
vDataFrame[].str_slice : Slices the vColumn.
"""
check_types([("pat", pat, [str])])
return self.apply(
func="REGEXP_COUNT({}, '{}') > 0".format("{}", pat.replace("'", "''"))
)
# ---#
def str_count(self, pat: str):
"""
---------------------------------------------------------------------------
Computes the number of matches for the regular expression in each record of
the vColumn. The vColumn will be transformed.
Parameters
----------
pat: str
regular expression.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_contains : Verifies if the regular expression is in each of the
vColumn records.
vDataFrame[].extract : Extracts the regular expression in each record of the
vColumn.
vDataFrame[].str_replace : Replaces the regular expression matches in each of the
vColumn records by an input value.
vDataFrame[].str_slice : Slices the vColumn.
"""
check_types([("pat", pat, [str])])
return self.apply(
func="REGEXP_COUNT({}, '{}')".format("{}", pat.replace("'", "''"))
)
# ---#
def str_extract(self, pat: str):
"""
---------------------------------------------------------------------------
Extracts the regular expression in each record of the vColumn.
The vColumn will be transformed.
Parameters
----------
pat: str
regular expression.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_contains : Verifies if the regular expression is in each of the
vColumn records.
vDataFrame[].str_count : Computes the number of matches for the regular expression
in each record of the vColumn.
vDataFrame[].str_replace : Replaces the regular expression matches in each of the
vColumn records by an input value.
vDataFrame[].str_slice : Slices the vColumn.
"""
check_types([("pat", pat, [str])])
return self.apply(
func="REGEXP_SUBSTR({}, '{}')".format("{}", pat.replace("'", "''"))
)
# ---#
def str_replace(self, to_replace: str, value: str = ""):
"""
---------------------------------------------------------------------------
    Replaces the regular expression matches in each of the vColumn records with an
input value. The vColumn will be transformed.
Parameters
----------
to_replace: str
Regular expression to replace.
value: str, optional
New value.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_contains : Verifies if the regular expression is in each of the
vColumn records.
vDataFrame[].str_count : Computes the number of matches for the regular expression
in each record of the vColumn.
vDataFrame[].extract : Extracts the regular expression in each record of the
vColumn.
vDataFrame[].str_slice : Slices the vColumn.
"""
check_types([("to_replace", to_replace, [str]), ("value", value, [str])])
return self.apply(
func="REGEXP_REPLACE({}, '{}', '{}')".format(
"{}", to_replace.replace("'", "''"), value.replace("'", "''")
)
)
# ---#
def str_slice(self, start: int, step: int):
"""
---------------------------------------------------------------------------
Slices the vColumn. The vColumn will be transformed.
Parameters
----------
start: int
Start of the slicing.
step: int
Size of the slicing.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_contains : Verifies if the regular expression is in each of the
vColumn records.
vDataFrame[].str_count : Computes the number of matches for the regular expression
in each record of the vColumn.
vDataFrame[].extract : Extracts the regular expression in each record of the
vColumn.
vDataFrame[].str_replace : Replaces the regular expression matches in each of the
vColumn records by an input value.
"""
check_types([("start", start, [int, float]), ("step", step, [int, float])])
return self.apply(func="SUBSTR({}, {}, {})".format("{}", start, step))
# ---#
def sub(self, x: float):
"""
---------------------------------------------------------------------------
Subtracts the input element from the vColumn.
Parameters
----------
x: float
If the vColumn type is date like (date, datetime ...), the parameter 'x'
will represent the number of seconds, otherwise it will represent a number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("x", x, [int, float])])
if self.isdate():
return self.apply(func="TIMESTAMPADD(SECOND, -({}), {})".format(x, "{}"))
else:
return self.apply(func="{} - ({})".format("{}", x))
# ---#
def sum(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'sum'.
Returns
-------
float
sum
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["sum"]).values[self.alias][0]
# ---#
def tail(self, limit: int = 5):
"""
---------------------------------------------------------------------------
Returns the tail of the vColumn.
Parameters
----------
limit: int, optional
Number of elements to display.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].head : Returns the head of the vColumn.
"""
return self.iloc(limit=limit, offset=-1)
# ---#
def topk(self, k: int = -1, dropna: bool = True):
"""
---------------------------------------------------------------------------
    Returns the k most frequent elements and their distribution as a percentage.
Parameters
----------
k: int, optional
        Number of the most frequent elements to return.
dropna: bool, optional
If set to True, NULL values will not be considered during the computation.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].describe : Computes the vColumn descriptive statistics.
"""
check_types([("k", k, [int, float]), ("dropna", dropna, [bool])])
topk = "" if (k < 1) else "LIMIT {}".format(k)
dropna = " WHERE {} IS NOT NULL".format(self.alias) if (dropna) else ""
query = "SELECT {} AS {}, COUNT(*) AS _verticapy_cnt_, 100 * COUNT(*) / {} AS percent FROM {}{} GROUP BY {} ORDER BY _verticapy_cnt_ DESC {}".format(
bin_spatial_to_str(self.category(), self.alias),
self.alias,
self.parent.shape()[0],
self.parent.__genSQL__(),
dropna,
self.alias,
topk,
)
result = executeSQL(
query,
title="Computing the top{} categories of {}.".format(
k if k > 0 else "", self.alias
),
method="fetchall",
)
values = {
"index": [item[0] for item in result],
"count": [int(item[1]) for item in result],
"percent": [float(round(item[2], 3)) for item in result],
}
return tablesample(values)
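    # Illustrative usage sketch: `vdf["country"].topk(k=3)` returns the three most
    # frequent countries with their counts and percentages; with the default k = -1,
    # every category is returned.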
# ---#
def value_counts(self, k: int = 30):
"""
---------------------------------------------------------------------------
    Returns the k most frequent elements, how often they occur, and other
    statistical information.
Parameters
----------
k: int, optional
        Number of the most frequent elements to return.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].describe : Computes the vColumn descriptive statistics.
"""
return self.describe(method="categorical", max_cardinality=k)
# ---#
def var(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'var' (Variance).
Returns
-------
float
var
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["variance"]).values[self.alias][0]
variance = var
| 1.890625 | 2 |
booktags/flaskapp/book/views.py | MagicSword/Booktags | 0 | 2498 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
    book/views.py
    ~~~~~~~~~~~~~
    View routes for the book blueprint of the Booktags Flask application.
:copyright: 2019 Miller
:license: BSD-3-Clause
"""
# Known bugs that can't be fixed here:
# - synopsis() cannot be prevented from clobbering existing
# loaded modules.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
from flask import render_template, redirect, request, url_for, flash,jsonify,current_app
from flask_login import login_user, logout_user, login_required, current_user
from . import book
from flask_sqlalchemy import get_debug_queries
from sqlalchemy.sql.expression import cast
from datatables import ColumnDT, DataTables
from .. import auth
from .. import db
from .forms import EditBookForm, HackmdMeta
# from booktags.db.basemodels import Book
from booktags.flaskapp.model.models import BookMain
# --------------------------------------------------------- common routines
@book.after_app_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']:
current_app.logger.warning(
'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
% (query.statement, query.parameters, query.duration,
query.context))
return response
@book.route('/', methods=['GET', 'POST'])
def index():
# books=BookMain.get_all_book()
query = BookMain.query
page = request.args.get('page', 1, type=int)
pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate(
page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'],
error_out=False)
books = pagination.items
return render_template('book/index.html',books=books,pagination=pagination)
# @book.route('/list/', methods=['GET', 'POST'])
# def list_book():
# """
#
# :param field: col name
# :param order: asc or desc
# :return: renew query
# """
# books = BookMain.get_all_book()
# return render_template('book/list_book.html',books=books)
@book.route("/list")
def list_book():
"""List users with DataTables <= 1.10.x."""
return render_template('book/list_book.html')
@book.route('/data', methods=['GET', 'POST'])
def data():
"""Return server side data."""
# defining columns
# - explicitly cast date to string, so string searching the date
# will search a date formatted equal to how it is presented
# in the table
columns = [
# ColumnDT(cast(BookMain.id, db.Integer)),
ColumnDT(BookMain.id),
ColumnDT(BookMain.isbn),
ColumnDT(BookMain.title_short),
ColumnDT(BookMain.title),
ColumnDT(BookMain.catalogue),
ColumnDT(BookMain.cutter),
ColumnDT(BookMain.pub_year),
ColumnDT(BookMain.copy_info)
# ColumnDT(BookMain.get_link),
# ColumnDT(BookMain.note),
# ColumnDT(BookMain.reprint),
# ColumnDT(BookMain.removed),
# ColumnDT(BookMain.keepsite)
]
# defining the initial query depending on your purpose
query = db.session.query().select_from(BookMain)
# GET parameters
params = request.args.to_dict()
# instantiating a DataTable for the query and table needed
rowTable = DataTables(params, query, columns)
# returns what is needed by DataTable
return jsonify(rowTable.output_result())
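# Hedged sketch of the exchange this endpoint serves (field names follow the
# DataTables server-side protocol; the URL assumes the blueprint is mounted
# under /book, and the exact payload depends on the installed
# sqlalchemy-datatables version):
#   request:  GET /book/data?draw=1&start=0&length=10&search[value]=...
#   response: {"draw": 1, "recordsTotal": N, "recordsFiltered": M, "data": [...]}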
@book.route('/get/<int:id>', methods=['GET', 'POST'])
def get_book(id):
    return f"Hello book index : {id}"
@book.route('/post/', methods=['GET', 'POST'])
def post_book():
    """
    Post a new book entry.
    :return: redirect to the index after saving, otherwise the edit form
    """
    books = BookMain.query.all()
    # Suggest the next id based on the last stored record (1 for an empty table).
    next_id = int(books[-1].id) + 1 if books else 1
    form = EditBookForm()
    if form.validate_on_submit():
        # Build a fresh model instance to hold the submitted data.
        book = BookMain()
        book.id = form.id.data
        book.isbn = form.isbn.data
        book.title_short = form.title_short.data
        book.title = form.title.data
        book.catalogue = form.catalogue.data
        book.cutter = form.cutter.data
        book.pub_year = form.pub_year.data
        book.copy_info = form.copy_info.data
        book.get_link = form.get_link.data
        book.note = form.note.data
        book.reprint = form.reprint.data
        book.removed = form.removed.data
        book.keepsite = form.keepsite.data
        db.session.add(book)
        db.session.commit()
        flash('Your book data has been added.', 'success')
        return redirect(url_for('book.index'))
    form.id.data = next_id
    return render_template('book/edit_book.html', form=form)
@book.route('/edit/<int:id>', methods=['GET', 'POST'])
def edit_book(id):
"""
edit , put book data
:param id:
:return:
"""
form = EditBookForm()
book = BookMain.query.filter_by(id=id).first_or_404()
if form.validate_on_submit():
# book.id = form.id.data
book.isbn = form.isbn.data
book.title_short = form.title_short.data
book.title = form.title.data
book.catalogue = form.catalogue.data
book.cutter = form.cutter.data
book.pub_year = form.pub_year.data
book.copy_info = form.copy_info.data
book.get_link = form.get_link.data
book.note = form.note.data
book.reprint = form.reprint.data
book.removed = form.removed.data
book.keepsite = form.keepsite.data
db.session.add(book)
db.session.commit()
flash('Your book data has been updated.', 'success')
return redirect(url_for('book.index'))
form.id.data = book.id
form.isbn.data = book.isbn
form.title_short.data = book.title_short
form.title.data = book.title
form.catalogue.data = book.catalogue
form.cutter.data = book.cutter
form.pub_year.data = book.pub_year
form.copy_info.data = book.copy_info
form.get_link.data = book.get_link
form.note.data = book.note
form.reprint.data = book.reprint
form.removed.data = book.removed
form.keepsite.data = book.keepsite
return render_template('book/edit_book.html', form=form)
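# Sketch of an alternative using WTForms helpers (assumes the form field names
# match the BookMain attributes, as they do above):
#   form = EditBookForm(obj=book)    # pre-fill the form from the model
#   form.populate_obj(book)          # copy submitted fields back onto the model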
@book.route('/del/<int:id>', methods=['GET', 'POST'])
def del_book(id):
return f"Hello book index: del {id}"
@book.route('/hackmdmeta', methods=['GET', 'POST'])
def hackmd_meta():
"""
:return:
"""
from booktags.vendor.hackmd_meta import get_hackmdmeta
form = HackmdMeta()
if form.validate_on_submit():
booksn = str(form.booksn.data)
# print(f"booksn is : {booksn}")
temp = get_hackmdmeta(booksn)
# print(temp)
form.body.data = temp
# flash('Your book data has been updated.', 'success')
# return redirect(url_for('book.hackmd_meta'))
return render_template('book/hackmd_meta.html',form=form)
if __name__ == '__main__':
pass
| 2.125 | 2 |
narwhallet/core/kws/http/enumerations/mediatypes.py | Snider/narwhallet | 3 | 2499 | from enum import Enum
class content_type(Enum):
# https://www.iana.org/assignments/media-types/media-types.xhtml
css = 'text/css'
gif = 'image/gif'
htm = 'text/html'
html = 'text/html'
    ico = 'image/vnd.microsoft.icon'
jpg = 'image/jpeg'
jpeg = 'image/jpeg'
js = 'application/javascript'
png = 'image/png'
txt = 'text/plain; charset=us-ascii'
json = 'application/json'
svg = 'image/svg+xml'
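
# Hedged usage sketch (not part of the original module): look up a media type
# by file extension via the enum above; the octet-stream fallback is an assumption.
def guess_content_type(filename: str) -> str:
    ext = filename.rsplit('.', 1)[-1].lower()
    try:
        return content_type[ext].value
    except KeyError:
        return 'application/octet-stream'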
| 2.6875 | 3 |