# Source: andyh616/mne-python -- mne/tests/test_epochs.py
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
from nose.tools import (assert_true, assert_equal, assert_raises,
assert_not_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import numpy as np
import copy as cp
import warnings
from scipy import fftpack
import matplotlib
from mne import (io, Epochs, read_events, pick_events, read_epochs,
equalize_channels, pick_types, pick_channels, read_evokeds,
write_evokeds)
from mne.epochs import (
bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
EpochsArray, concatenate_epochs, _BaseEpochs)
from mne.utils import (_TempDir, requires_pandas, slow_test,
clean_warning_registry, run_tests_if_main,
requires_scipy_version)
from mne.io.meas_info import create_info
from mne.io.proj import _has_eeg_average_ref_proj
from mne.event import merge_events
from mne.io.constants import FIFF
from mne.externals.six import text_type
from mne.externals.six.moves import zip, cPickle as pickle
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
def _get_data():
raw = io.Raw(raw_fname, add_eeg_ref=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
return raw, events, picks
reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
flat = dict(grad=1e-15, mag=1e-15)
clean_warning_registry() # really clean warning stack
def test_reject():
"""Test epochs rejection
"""
raw, events, picks = _get_data()
# cull the list just to contain the relevant event
events = events[events[:, 2] == event_id, :]
selection = np.arange(3)
drop_log = [[]] * 3 + [['MEG 2443']] * 4
assert_raises(TypeError, pick_types, raw)
picks_meg = pick_types(raw.info, meg=True, eeg=False)
assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject='foo')
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks_meg, preload=False, reject=dict(eeg=1.))
assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject=dict(foo=1.))
data_7 = dict()
keep_idx = [0, 1, 2]
for preload in (True, False):
for proj in (True, False, 'delayed'):
# no rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
assert_raises(ValueError, epochs.drop_bad_epochs, reject='foo')
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.selection, np.arange(len(events)))
assert_array_equal(epochs.drop_log, [[]] * 7)
if proj not in data_7:
data_7[proj] = epochs.get_data()
assert_array_equal(epochs.get_data(), data_7[proj])
# with rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection post-hoc
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.get_data(), data_7[proj])
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_equal(len(epochs), len(epochs.get_data()))
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection twice
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject_part, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 1)
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# ensure that thresholds must become more stringent, not less
assert_raises(ValueError, epochs.drop_bad_epochs, reject_part)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
epochs.drop_bad_epochs(flat=dict(mag=1.))
assert_equal(len(epochs), 0)
assert_raises(ValueError, epochs.drop_bad_epochs,
flat=dict(mag=0.))
# rejection of subset of trials (ensure array ownership)
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=None, preload=preload)
epochs = epochs[:-1]
epochs.drop_bad_epochs(reject=reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
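# Hedged usage sketch (not collected as a test): how per-channel-type
# peak-to-peak rejection and flatness thresholds are passed to Epochs.
# The threshold values below are illustrative assumptions, not the ones
# used by the tests in this module.
def _sketch_reject_usage():
    raw, events, picks = _get_data()
    reject_demo = dict(grad=4000e-13, mag=4e-12, eeg=40e-6, eog=250e-6)
    flat_demo = dict(grad=1e-15, mag=1e-15)
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    reject=reject_demo, flat=flat_demo, preload=False)
    epochs.drop_bad_epochs()  # apply the thresholds and populate drop_log
    return epochs.drop_log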
def test_decim():
"""Test epochs decimation
"""
# First with EpochsArray
n_epochs, n_channels, n_times = 5, 10, 20
dec_1, dec_2 = 2, 3
decim = dec_1 * dec_2
sfreq = 1000.
sfreq_new = sfreq / decim
data = np.random.randn(n_epochs, n_channels, n_times)
events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
info = create_info(n_channels, sfreq, 'eeg')
info['lowpass'] = sfreq_new / float(decim)
epochs = EpochsArray(data, info, events)
data_epochs = epochs.decimate(decim, copy=True).get_data()
data_epochs_2 = epochs.decimate(dec_1).decimate(dec_2).get_data()
assert_array_equal(data_epochs, data[:, :, ::decim])
assert_array_equal(data_epochs, data_epochs_2)
# Now let's do it with some real data
raw, events, picks = _get_data()
sfreq_new = raw.info['sfreq'] / decim
raw.info['lowpass'] = sfreq_new / 4. # suppress aliasing warnings
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=False)
assert_raises(ValueError, epochs.decimate, -1)
expected_data = epochs.get_data()[:, :, ::decim]
expected_times = epochs.times[::decim]
for preload in (True, False):
# at init
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
preload=preload)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload).decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload).decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload).decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload)
epochs.preload_data()
epochs.decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
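# Hedged sketch (not collected as a test): decimation keeps every n-th sample,
# so info['sfreq'] becomes sfreq / decim and times shrinks accordingly. The
# factor of 4 is an arbitrary illustrative choice.
def _sketch_decimate_usage():
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True)
    sfreq_orig = epochs.info['sfreq']
    epochs.decimate(4)  # in-place; may warn if the data are not low-passed
    assert epochs.info['sfreq'] == sfreq_orig / 4.
    return epochs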
def test_base_epochs():
"""Test base epochs class
"""
raw = _get_data()[0]
epochs = _BaseEpochs(raw.info, None, np.ones((1, 3), int),
event_id, tmin, tmax)
assert_raises(NotImplementedError, epochs.get_data)
    # events with non-integer values
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3), float), event_id, tmin, tmax)
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3, 2), int), event_id, tmin, tmax)
@requires_scipy_version('0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.savgol_filter, 10.)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
data = np.abs(fftpack.fft(epochs.get_data()))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
epochs.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(epochs.get_data()))
# decent in pass-band
assert_allclose(np.mean(data[:, :, match_mask], 0),
np.mean(data_filt[:, :, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, :, mismatch_mask]) >
np.mean(data_filt[:, :, mismatch_mask]) * 5)
def test_epochs_hash():
"""Test epoch hashing
"""
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.__hash__)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs))
epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
epochs_2._data[0, 0, 0] -= 1
assert_not_equal(hash(epochs), hash(epochs_2))
def test_event_ordering():
"""Test event order"""
raw, events = _get_data()[:2]
events2 = events.copy()
np.random.shuffle(events2)
for ii, eve in enumerate([events, events2]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, eve, event_id, tmin, tmax,
baseline=(None, 0), reject=reject, flat=flat)
assert_equal(len(w), ii)
if ii > 0:
assert_true('chronologically' in '%s' % w[-1].message)
def test_epochs_bad_baseline():
"""Test Epochs initialization with bad baseline parameters
"""
raw, events = _get_data()[:2]
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
def test_epoch_combine_ids():
"""Test combining event ids in epochs compared to events
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
'd': 4, 'e': 5, 'f': 32},
tmin, tmax, picks=picks, preload=False)
events_new = merge_events(events, [1, 2], 12)
epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
assert_equal(epochs_new['ab'].name, 'ab')
assert_array_equal(events_new, epochs_new.events)
# should probably add test + functionality for non-replacement XXX
def test_epoch_multi_ids():
"""Test epoch selection via multiple/partial keys
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
'b/d': 4, 'a_b': 5},
tmin, tmax, picks=picks, preload=False)
epochs_regular = epochs[['a', 'b']]
epochs_multi = epochs[['a/b/a', 'a/b/b']]
assert_array_equal(epochs_regular.events, epochs_multi.events)
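# Hedged sketch (not collected as a test): '/'-separated event names act as
# tags, so a partial key selects every condition containing that tag. The
# condition names below are illustrative; event ids 1-3 exist in the test
# events file used above.
def _sketch_tag_selection():
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, {'aud/left': 1, 'aud/right': 2, 'vis/left': 3},
                    tmin, tmax, picks=picks)
    return epochs['aud']  # matches both 'aud/left' and 'aud/right'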
def test_read_epochs_bad_events():
"""Test epochs when events are at the beginning or the end of the file
"""
raw, events, picks = _get_data()
# Event at the beginning
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
assert_true(repr(epochs)) # test repr
epochs.drop_bad_epochs()
assert_true(repr(epochs))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
# Event at the end
epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
assert evoked
warnings.resetwarnings()
@slow_test
def test_read_write_epochs():
"""Test epochs from raw files with IO as fif file
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test-epo.fif')
temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
baseline = (None, 0)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, preload=True)
epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, preload=True)
assert_true(epochs_no_bl.baseline is None)
evoked = epochs.average()
data = epochs.get_data()
# Bad tmin/tmax parameters
assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
baseline=None)
epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
None, tmin, tmax, picks=picks,
baseline=(None, 0))
assert_array_equal(data, epochs_no_id.get_data())
eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, exclude='bads')
eog_ch_names = [raw.ch_names[k] for k in eog_picks]
epochs.drop_channels(eog_ch_names)
epochs_no_bl.drop_channels(eog_ch_names)
assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
epochs.get_data().shape[1])
data_no_eog = epochs.get_data()
assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
# test decim kwarg
with warnings.catch_warnings(record=True) as w:
# decim with lowpass
warnings.simplefilter('always')
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 1)
# decim without lowpass
lowpass = raw.info['lowpass']
raw.info['lowpass'] = None
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 2)
raw.info['lowpass'] = lowpass
data_dec = epochs_dec.get_data()
assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
atol=1e-12)
evoked_dec = epochs_dec.average()
assert_allclose(evoked.data[:, epochs_dec._decim_slice],
evoked_dec.data, rtol=1e-12)
n = evoked.data.shape[1]
n_dec = evoked_dec.data.shape[1]
n_dec_min = n // 4
assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
# test IO
epochs.save(temp_fname)
epochs_no_bl.save(temp_fname_no_bl)
epochs_read = read_epochs(temp_fname)
epochs_no_bl_read = read_epochs(temp_fname_no_bl)
assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
epochs_no_bl_read.apply_baseline(baseline)
assert_true(epochs_no_bl_read.baseline == baseline)
assert_true(str(epochs_read).startswith('<Epochs'))
assert_array_equal(epochs_no_bl_read.times, epochs.times)
assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
assert_array_almost_equal(epochs.get_data(), epochs_no_bl_read.get_data())
assert_array_equal(epochs_read.times, epochs.times)
assert_array_almost_equal(epochs_read.average().data, evoked.data)
assert_equal(epochs_read.proj, epochs.proj)
bmin, bmax = epochs.baseline
if bmin is None:
bmin = epochs.times[0]
if bmax is None:
bmax = epochs.times[-1]
baseline = (bmin, bmax)
assert_array_almost_equal(epochs_read.baseline, baseline)
assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
assert_equal(epochs_read.event_id, epochs.event_id)
epochs.event_id.pop('1')
epochs.event_id.update({'a:a': 1}) # test allow for ':' in key
epochs.save(op.join(tempdir, 'foo-epo.fif'))
epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
assert_equal(epochs_read2.event_id, epochs.event_id)
# add reject here so some of the epochs get dropped
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
epochs.save(temp_fname)
# ensure bad events are not saved
epochs_read3 = read_epochs(temp_fname)
assert_array_equal(epochs_read3.events, epochs.events)
data = epochs.get_data()
assert_true(epochs_read3.events.shape[0] == data.shape[0])
# test copying loaded one (raw property)
epochs_read4 = epochs_read3.copy()
assert_array_almost_equal(epochs_read4.get_data(), data)
# test equalizing loaded one (drop_log property)
epochs_read4.equalize_event_counts(epochs.event_id)
epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
epochs.save(temp_fname)
epochs_read5 = read_epochs(temp_fname)
assert_array_equal(epochs_read5.selection, epochs.selection)
assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
# Test that one can drop channels on read file
epochs_read5.drop_channels(epochs_read5.ch_names[:1])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
epochs.save(epochs_badname)
read_epochs(epochs_badname)
assert_true(len(w) == 2)
# test loading epochs with missing events
epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax, picks=picks,
on_missing='ignore')
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_equal(set(epochs.event_id.keys()),
set(text_type(x) for x in epochs_read.event_id.keys()))
# test saving split epoch files
epochs.save(temp_fname, split_size='7MB')
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_array_equal(epochs.selection, epochs_read.selection)
assert_equal(epochs.drop_log, epochs_read.drop_log)
# Test that having a single time point works
epochs.preload_data()
epochs.crop(0, 0, copy=False)
assert_equal(len(epochs.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_equal(len(epochs_read.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
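# Hedged IO sketch (not collected as a test): epochs round-trip through
# Epochs.save and read_epochs; the '-epo.fif' suffix avoids the bad-filename
# warning exercised above. The temporary directory is created only for
# illustration.
def _sketch_epochs_io():
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True)
    tempdir = _TempDir()
    fname = op.join(tempdir, 'sketch-epo.fif')
    epochs.save(fname)
    return read_epochs(fname)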
def test_epochs_proj():
"""Test handling projection (apply proj in Raw or in Epochs)
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(all(p['active'] is True for p in epochs.info['projs']))
evoked = epochs.average()
assert_true(all(p['active'] is True for p in evoked.info['projs']))
data = epochs.get_data()
raw_proj = io.Raw(raw_fname, proj=True)
epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
picks=this_picks, baseline=(None, 0), proj=False)
data_no_proj = epochs_no_proj.get_data()
assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
evoked_no_proj = epochs_no_proj.average()
assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
assert_true(epochs_no_proj.proj is True) # as projs are active from Raw
assert_array_almost_equal(data, data_no_proj, decimal=8)
# make sure we can exclude avg ref
this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=True)
assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=False)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# make sure we don't add avg ref when a custom ref has been applied
raw.info['custom_ref_applied'] = True
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# From GH#2200:
# This has no problem
proj = raw.info['projs']
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=False)
epochs.info['projs'] = []
data = epochs.copy().add_proj(proj).apply_proj().get_data()
# save and reload data
fname_epo = op.join(tempdir, 'temp-epo.fif')
epochs.save(fname_epo) # Save without proj added
epochs_read = read_epochs(fname_epo)
epochs_read.add_proj(proj)
epochs_read.apply_proj() # This used to bomb
data_2 = epochs_read.get_data() # Let's check the result
assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
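# Hedged SSP sketch (not collected as a test): with proj=False the projection
# vectors stay inactive until apply_proj() is called; here they are taken from
# the raw file itself, mirroring the GH#2200 pattern above.
def _sketch_proj_usage():
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    proj=False, preload=True)
    epochs.add_proj(raw.info['projs'], remove_existing=True)
    epochs.apply_proj()
    return epochs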
def test_evoked_arithmetic():
"""Test arithmetic of evoked data
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked1 = epochs1.average()
epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked2 = epochs2.average()
epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = epochs.average()
evoked_sum = evoked1 + evoked2
assert_array_equal(evoked.data, evoked_sum.data)
assert_array_equal(evoked.times, evoked_sum.times)
assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave))
evoked_diff = evoked1 - evoked1
assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
def test_evoked_io_from_epochs():
"""Test IO of evoked data made from epochs
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# offset our tmin so we don't get exactly a zero value when decimating
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
picks=picks, baseline=(None, 0), decim=5)
assert_true(len(w) == 1)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
atol=1 / evoked.info['sfreq'])
# now let's do one with negative time
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
# should be equivalent to a cropped original
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.crop(0.099, None)
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_evoked_standard_error():
"""Test calculation and read/write of standard error
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = [epochs.average(), epochs.standard_error()]
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
kind='standard_error')]
for evoked_new in [evoked2, evoked3]:
assert_true(evoked_new[0]._aspect_kind ==
FIFF.FIFFV_ASPECT_AVERAGE)
assert_true(evoked_new[0].kind == 'average')
assert_true(evoked_new[1]._aspect_kind ==
FIFF.FIFFV_ASPECT_STD_ERR)
assert_true(evoked_new[1].kind == 'standard_error')
for ave, ave2 in zip(evoked, evoked_new):
assert_array_almost_equal(ave.data, ave2.data)
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
def test_reject_epochs():
"""Test of epochs rejection
"""
raw, events, picks = _get_data()
events1 = events[events[:, 2] == event_id]
epochs = Epochs(raw, events1,
event_id, tmin, tmax, baseline=(None, 0),
reject=reject, flat=flat)
assert_raises(RuntimeError, len, epochs)
n_events = len(epochs.events)
data = epochs.get_data()
n_clean_epochs = len(data)
# Should match
# mne_process_raw --raw test_raw.fif --projoff \
# --saveavetag -ave --ave test.ave --filteroff
assert_true(n_events > n_clean_epochs)
assert_true(n_clean_epochs == 3)
assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
['MEG 2443'], ['MEG 2443']])
# Ensure epochs are not dropped based on a bad channel
raw_2 = raw.copy()
raw_2.info['bads'] = ['MEG 2443']
reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
reject=reject_crazy, flat=flat)
epochs.drop_bad_epochs()
assert_true(all('MEG 2442' in e for e in epochs.drop_log))
assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
# Invalid reject_tmin/reject_tmax/detrend
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=1., reject_tmax=0)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=tmin - 1, reject_tmax=1.)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=0., reject_tmax=tmax + 1)
epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, flat=flat,
reject_tmin=0., reject_tmax=.1)
data = epochs.get_data()
n_clean_epochs = len(data)
assert_true(n_clean_epochs == 7)
assert_true(len(epochs) == 7)
assert_true(epochs.times[epochs._reject_time][0] >= 0.)
assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
# Invalid data for _is_good_epoch function
epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
(False, ['TOO_SHORT']))
data = epochs[0].get_data()[0]
assert_equal(epochs._is_good_epoch(data), (True, None))
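# Hedged sketch (not collected as a test): reject_tmin / reject_tmax restrict
# the time window in which the peak-to-peak and flatness checks are evaluated.
# The window of 0-100 ms mirrors the test above and is otherwise arbitrary.
def _sketch_reject_window():
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    reject=reject, flat=flat,
                    reject_tmin=0., reject_tmax=0.1)
    epochs.drop_bad_epochs()
    return epochs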
def test_preload_epochs():
"""Test preload of epochs
"""
raw, events, picks = _get_data()
epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
data_preload = epochs_preload.get_data()
epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data = epochs.get_data()
assert_array_equal(data_preload, data)
assert_array_almost_equal(epochs_preload.average().data,
epochs.average().data, 18)
def test_indexing_slicing():
"""Test of indexing and slicing operations
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data_normal = epochs.get_data()
n_good_events = data_normal.shape[0]
# indices for slicing
start_index = 1
end_index = n_good_events - 1
assert((end_index - start_index) > 0)
for preload in [True, False]:
epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=preload,
reject=reject, flat=flat)
if not preload:
epochs2.drop_bad_epochs()
# using slicing
epochs2_sliced = epochs2[start_index:end_index]
data_epochs2_sliced = epochs2_sliced.get_data()
assert_array_equal(data_epochs2_sliced,
data_normal[start_index:end_index])
# using indexing
pos = 0
for idx in range(start_index, end_index):
data = epochs2_sliced[pos].get_data()
assert_array_equal(data[0], data_normal[idx])
pos += 1
# using indexing with an int
data = epochs2[data_epochs2_sliced.shape[0]].get_data()
assert_array_equal(data, data_normal[[idx]])
# using indexing with an array
idx = np.random.randint(0, data_epochs2_sliced.shape[0], 10)
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
# using indexing with a list of indices
idx = [0]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
idx = [0, 1]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
def test_comparison_with_c():
"""Test of average obtained vs C code
"""
raw, events = _get_data()[:2]
c_evoked = read_evokeds(evoked_nf_name, condition=0)
epochs = Epochs(raw, events, event_id, tmin, tmax,
baseline=None, preload=True,
reject=None, flat=None)
evoked = epochs.average()
sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
evoked_data = evoked.data
c_evoked_data = c_evoked.data[sel]
assert_true(evoked.nave == c_evoked.nave)
assert_array_almost_equal(evoked_data, c_evoked_data, 10)
assert_array_almost_equal(evoked.times, c_evoked.times, 12)
def test_crop():
"""Test of crop of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.crop, None, 0.2) # not preloaded
data_normal = epochs.get_data()
epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
with warnings.catch_warnings(record=True) as w:
epochs2.crop(-20, 200)
assert_true(len(w) == 2)
# indices for slicing
tmin_window = tmin + 0.1
tmax_window = tmax - 0.1
tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
assert_true(tmin_window > tmin)
assert_true(tmax_window < tmax)
epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
data3 = epochs3.get_data()
epochs2.crop(tmin_window, tmax_window)
data2 = epochs2.get_data()
assert_array_equal(data2, data_normal[:, :, tmask])
assert_array_equal(data3, data_normal[:, :, tmask])
# test time info is correct
epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
np.ones((1, 3), int), tmin=-0.2)
epochs.crop(-.200, .700)
last_time = epochs.times[-1]
with warnings.catch_warnings(record=True): # not LP filtered
epochs.decimate(10)
assert_allclose(last_time, epochs.times[-1])
def test_resample():
"""Test of resample of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.resample, 100)
epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs = epochs_o.copy()
data_normal = cp.deepcopy(epochs.get_data())
times_normal = cp.deepcopy(epochs.times)
sfreq_normal = epochs.info['sfreq']
# upsample by 2
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, npad=0)
data_up = cp.deepcopy(epochs.get_data())
times_up = cp.deepcopy(epochs.times)
sfreq_up = epochs.info['sfreq']
    # downsample by 2, which should match
epochs.resample(sfreq_normal, npad=0)
data_new = cp.deepcopy(epochs.get_data())
times_new = cp.deepcopy(epochs.times)
sfreq_new = epochs.info['sfreq']
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_true(sfreq_up == 2 * sfreq_normal)
assert_true(sfreq_new == sfreq_normal)
assert_true(len(times_up) == 2 * len(times_normal))
assert_array_almost_equal(times_new, times_normal, 10)
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_array_almost_equal(data_new, data_normal, 5)
# use parallel
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
# test copy flag
epochs = epochs_o.copy()
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=True)
assert_true(epochs_resampled is not epochs)
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=False)
assert_true(epochs_resampled is epochs)
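# Hedged sketch (not collected as a test): resampling preloaded epochs to a
# new rate; the 2x target rate is an arbitrary illustration and npad=0 mirrors
# the calls in the test above.
def _sketch_resample_usage():
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True)
    epochs.resample(2 * epochs.info['sfreq'], npad=0)
    return epochs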
def test_detrend():
"""Test detrending of epochs
"""
raw, events, picks = _get_data()
# test first-order
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=1)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=None)
data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
exclude='bads')
evoked_1 = epochs_1.average()
evoked_2 = epochs_2.average()
evoked_2.detrend(1)
# Due to roundoff these won't be exactly equal, but they should be close
assert_true(np.allclose(evoked_1.data, evoked_2.data,
rtol=1e-8, atol=1e-20))
# test zeroth-order case
for preload in [True, False]:
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, None), preload=preload)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, preload=preload, detrend=0)
a = epochs_1.get_data()
b = epochs_2.get_data()
# All data channels should be almost equal
assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
rtol=1e-16, atol=1e-20))
# There are non-M/EEG channels that should not be equal:
assert_true(not np.allclose(a, b))
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
detrend=2)
def test_bootstrap():
"""Test of bootstrapping of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs2 = bootstrap(epochs, random_state=0)
assert_true(len(epochs2.events) == len(epochs.events))
assert_true(epochs._data.shape == epochs2._data.shape)
def test_epochs_copy():
"""Test copy epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
copied = epochs.copy()
assert_array_equal(epochs._data, copied._data)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
copied = epochs.copy()
data = epochs.get_data()
copied_data = copied.get_data()
assert_array_equal(data, copied_data)
def test_iter_evoked():
"""Test the iterator for epochs -> evoked
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
for ii, ev in enumerate(epochs.iter_evoked()):
x = ev.data
y = epochs.get_data()[ii, :, :]
assert_array_equal(x, y)
def test_subtract_evoked():
"""Test subtraction of Evoked from Epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
    # make sure subtraction fails if data channels are missing
assert_raises(ValueError, epochs.subtract_evoked,
epochs.average(picks[:5]))
    # do the subtraction using the default argument
epochs.subtract_evoked()
# apply SSP now
epochs.apply_proj()
# use preloading and SSP from the start
epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=True)
evoked = epochs2.average()
epochs2.subtract_evoked(evoked)
# this gives the same result
assert_allclose(epochs.get_data(), epochs2.get_data())
# if we compute the evoked response after subtracting it we get zero
zero_evoked = epochs.average()
data = zero_evoked.data
assert_allclose(data, np.zeros_like(data), atol=1e-15)
def test_epoch_eq():
"""Test epoch count equalization and condition combining
"""
raw, events, picks = _get_data()
# equalizing epochs objects
epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
epochs_1.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs_1.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
# equalizing conditions
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, reject=reject)
epochs.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
drop_log1 = deepcopy(epochs.drop_log)
old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
epochs.equalize_event_counts(['a', 'b'], copy=False)
# undo the eq logging
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] == new_shapes[1])
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
# now with two conditions collapsed
old_shapes = new_shapes
epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
assert_true(new_shapes[3] == old_shapes[3])
assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
# now let's combine conditions
old_shapes = new_shapes
epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
{'ab': 1})
combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
caught = 0
for key in ['a', 'b']:
try:
epochs[key]
except KeyError:
caught += 1
    assert_true(caught == 2)
assert_true(not np.any(epochs.events[:, 2] == 1))
assert_true(not np.any(epochs.events[:, 2] == 2))
epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
epochs.events[:, 2] == 34)))
assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
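# Hedged sketch (not collected as a test): equalize trial counts across two
# conditions, then merge them under a new event id. The names 'a'/'b' and the
# id 12 mirror the test above and are otherwise arbitrary.
def _sketch_equalize_and_combine():
    raw, events, picks = _get_data()
    epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
    epochs.drop_bad_epochs()
    epochs.equalize_event_counts(['a', 'b'], copy=False)
    combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
    return epochs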
def test_access_by_name():
"""Test accessing epochs by event name and on_missing for rare events
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# Test various invalid inputs
assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
picks=picks)
assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
picks=picks)
# Test accessing non-existent events (assumes 12345678 does not exist)
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
tmin, tmax)
# Test on_missing
assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
on_missing='foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
nw = len(w)
assert_true(1 <= nw <= 2)
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
assert_equal(len(w), nw)
# Test constructing epochs with a list of ints as events
epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
for k, v in epochs.event_id.items():
assert_equal(int(k), v)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(KeyError, epochs.__getitem__, 'bar')
data = epochs['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
preload=True)
assert_raises(KeyError, epochs.__getitem__, 'bar')
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
for ep in [epochs, epochs2]:
data = ep['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
assert_array_equal(epochs2['a'].events, epochs['a'].events)
epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, preload=True)
assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
[1, 2])
epochs4 = epochs['a']
epochs5 = epochs3['a']
assert_array_equal(epochs4.events, epochs5.events)
# 20 is our tolerance because epochs are written out as floats
assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
epochs6 = epochs3[['a', 'b']]
assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
epochs6.events[:, 2] == 2)))
assert_array_equal(epochs.events, epochs6.events)
assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
# Make sure we preserve names
assert_equal(epochs['a'].name, 'a')
assert_equal(epochs[['a', 'b']]['a'].name, 'a')
@requires_pandas
def test_to_data_frame():
"""Test epochs Pandas exporter"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
assert_raises(ValueError, epochs.to_data_frame, index='qux')
assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
picks=list(range(epochs.info['nchan'])))
# Default index and picks
df2 = epochs.to_data_frame()
assert_equal(df.index.names, df2.index.names)
assert_array_equal(df.columns.values, epochs.ch_names)
data = np.hstack(epochs.get_data())
assert_true((df.columns == epochs.ch_names).all())
assert_array_equal(df.values[:, 0], data[0] * 1e13)
assert_array_equal(df.values[:, 2], data[2] * 1e15)
for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
df = epochs.to_data_frame(index=ind)
        assert_true(df.index.names ==
                    (ind if isinstance(ind, list) else [ind]))
        # test that non-indexed data were present as categorical variables
assert_array_equal(sorted(df.reset_index().columns[:3]),
sorted(['time', 'condition', 'epoch']))
def test_epochs_proj_mixin():
"""Test SSP proj methods from ProjMixin class
"""
raw, events, picks = _get_data()
for proj in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=proj)
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
# test adding / deleting proj
if proj:
epochs.get_data()
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
{'remove_existing': True})
assert_raises(ValueError, epochs.add_proj, 'spam')
assert_raises(ValueError, epochs.del_proj, 0)
else:
projs = deepcopy(epochs.info['projs'])
n_proj = len(epochs.info['projs'])
epochs.del_proj(0)
assert_true(len(epochs.info['projs']) == n_proj - 1)
epochs.add_proj(projs, remove_existing=False)
assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
epochs.add_proj(projs, remove_existing=True)
assert_true(len(epochs.info['projs']) == n_proj)
# catch no-gos.
# wrong proj argument
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='crazy')
# delayed without reject params
assert_raises(RuntimeError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='delayed', reject=None)
for preload in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj='delayed', preload=preload,
add_eeg_ref=True, reject=reject)
epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=True, preload=preload,
add_eeg_ref=True, reject=reject)
assert_allclose(epochs.copy().apply_proj().get_data()[0],
epochs2.get_data()[0], rtol=1e-10, atol=1e-25)
# make sure data output is constant across repeated calls
# e.g. drop bads
assert_array_equal(epochs.get_data(), epochs.get_data())
assert_array_equal(epochs2.get_data(), epochs2.get_data())
# test epochs.next calls
data = epochs.get_data().copy()
data2 = np.array([e for e in epochs])
assert_array_equal(data, data2)
# cross application from processing stream 1 to 2
epochs.apply_proj()
assert_array_equal(epochs._projector, epochs2._projector)
assert_allclose(epochs._data, epochs2.get_data())
# test mixin against manual application
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, proj=False, add_eeg_ref=True)
data = epochs.get_data().copy()
epochs.apply_proj()
assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
def test_delayed_epochs():
"""Test delayed projection
"""
raw, events, picks = _get_data()
events = events[:10]
picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
pick_types(raw.info, meg=False, eeg=False,
ecg=True, eog=True)])
picks = np.sort(picks)
raw.info['lowpass'] = 40. # fake the LP info so no warnings
for preload in (True, False):
for proj in (True, False, 'delayed'):
for decim in (1, 3):
for ii in range(2):
epochs = Epochs(raw, events, event_id, tmin, tmax,
picks=picks, proj=proj, reject=reject,
preload=preload, decim=decim)
if ii == 1:
epochs.preload_data()
picks_data = pick_types(epochs.info, meg=True, eeg=True)
evoked = epochs.average(picks=picks_data)
if proj is True:
evoked.apply_proj()
epochs_data = epochs.get_data().mean(axis=0)[picks_data]
assert_array_equal(evoked.ch_names,
np.array(epochs.ch_names)[picks_data])
assert_allclose(evoked.times, epochs.times)
assert_allclose(evoked.data, epochs_data,
rtol=1e-5, atol=1e-15)
def test_drop_epochs():
"""Test dropping of epochs.
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
events1 = events[events[:, 2] == event_id]
# Bound checks
assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
assert_raises(IndexError, epochs.drop_epochs, [-1])
assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])
# Test selection attribute
assert_array_equal(epochs.selection,
np.where(events[:, 2] == event_id)[0])
assert_equal(len(epochs.drop_log), len(events))
assert_true(all(epochs.drop_log[k] == ['IGNORED']
for k in set(range(len(events))) - set(epochs.selection)))
selection = epochs.selection.copy()
n_events = len(epochs.events)
epochs.drop_epochs([2, 4], reason='d')
assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
assert_equal(len(epochs.drop_log), len(events))
assert_equal([epochs.drop_log[k]
for k in selection[[2, 4]]], [['d'], ['d']])
assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
def test_drop_epochs_mult():
"""Test that subselecting epochs or making less epochs is equivalent"""
raw, events, picks = _get_data()
for preload in [True, False]:
epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
tmin, tmax, picks=picks, reject=reject,
preload=preload)['a']
epochs2 = Epochs(raw, events, {'a': 1},
tmin, tmax, picks=picks, reject=reject,
preload=preload)
if preload:
# In the preload case you cannot know the bads if already ignored
assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
if d1 == ['IGNORED']:
assert_true(d2 == ['IGNORED'])
if d1 != ['IGNORED'] and d1 != []:
assert_true((d2 == d1) or (d2 == ['IGNORED']))
if d1 == []:
assert_true(d2 == [])
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
else:
            # In the non-preload case it should be exactly the same
assert_equal(epochs1.drop_log, epochs2.drop_log)
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
def test_contains():
"""Test membership API"""
raw, events = _get_data()[:2]
tests = [(('mag', False), ('grad', 'eeg')),
(('grad', False), ('mag', 'eeg')),
((False, True), ('grad', 'mag'))]
for (meg, eeg), others in tests:
picks_contains = pick_types(raw.info, meg=meg, eeg=eeg)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
picks=picks_contains, reject=None,
preload=False)
test = 'eeg' if eeg is True else meg
assert_true(test in epochs)
assert_true(not any(o in epochs for o in others))
assert_raises(ValueError, epochs.__contains__, 'foo')
assert_raises(ValueError, epochs.__contains__, 1)
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw, events = _get_data()[:2]
# here without picks to get additional coverage
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
baseline=(None, 0), preload=True)
drop_ch = epochs.ch_names[:3]
ch_names = epochs.ch_names[3:]
ch_names_orig = epochs.ch_names
dummy = epochs.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.drop_channels(drop_ch)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
ch_names = epochs.ch_names[:3]
epochs.preload = False
assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
epochs.preload = True
ch_names_orig = epochs.ch_names
dummy = epochs.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.pick_channels(ch_names)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
# Invalid picks
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=[])
def test_equalize_channels():
"""Test equalization of channels
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=False, preload=True)
epochs2 = epochs1.copy()
ch_names = epochs1.ch_names[2:]
epochs1.drop_channels(epochs1.ch_names[:1])
epochs2.drop_channels(epochs2.ch_names[1:2])
my_comparison = [epochs1, epochs2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_illegal_event_id():
"""Test handling of invalid events ids"""
raw, events, picks = _get_data()
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
tmax, picks=picks, baseline=(None, 0), proj=False)
def test_add_channels_epochs():
"""Test adding channels"""
raw, events, picks = _get_data()
def make_epochs(picks, proj):
return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
reject=None, preload=True, proj=proj, picks=picks)
picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
for proj in (False, True):
epochs = make_epochs(picks=picks, proj=proj)
epochs_meg = make_epochs(picks=picks_meg, proj=proj)
epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
epochs.info._check_consistency()
epochs_meg.info._check_consistency()
epochs_eeg.info._check_consistency()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
data1 = epochs.get_data()
data2 = epochs2.get_data()
data3 = np.concatenate([e.get_data() for e in
[epochs_meg, epochs_eeg]], axis=1)
assert_array_equal(data1.shape, data2.shape)
assert_allclose(data1, data3, atol=1e-25)
assert_allclose(data1, data2, atol=1e-25)
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['meas_date'] += 10
add_channels_epochs([epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs2.info['filename'] = epochs2.info['filename'].upper()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.events[3, 2] -= 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
assert_raises(ValueError, add_channels_epochs,
[epochs_meg, epochs_eeg[:2]])
epochs_meg.info['chs'].pop(0)
epochs_meg.info['ch_names'].pop(0)
epochs_meg.info['nchan'] -= 1
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] = None
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] += 10
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][1]
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['expimenter'] = 'foo'
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.preload = False
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.4
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.5
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.baseline = None
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.event_id['b'] = 2
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
def test_array_epochs():
"""Test creating epochs from array
"""
import matplotlib.pyplot as plt
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data = rng.random_sample((10, 20, 300))
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
events = np.c_[np.arange(1, 600, 60),
np.zeros(10, int),
[1, 2] * 5]
event_id = {'a': 1, 'b': 2}
epochs = EpochsArray(data, info, events, tmin, event_id)
assert_true(str(epochs).startswith('<EpochsArray'))
# From GH#1963
assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
event_id)
assert_raises(ValueError, EpochsArray, data, info, events, tmin,
dict(a=1))
# saving
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
data2 = epochs2.get_data()
assert_allclose(data, data2)
assert_allclose(epochs.times, epochs2.times)
assert_equal(epochs.event_id, epochs2.event_id)
assert_array_equal(epochs.events, epochs2.events)
# plotting
epochs[0].plot()
plt.close('all')
# indexing
assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
assert_equal(len(epochs[:2]), 2)
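    # inject artifacts: epoch 0 (large amplitude) and epoch 1 (flat) fall
    # inside the reject window [0.1, 0.2] s and should be dropped; epochs 2
    # and 3 are modified outside that window and should be kept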
data[0, 5, 150] = 3000
data[1, :, :] = 0
data[2, 5, 210] = 3000
data[3, 5, 260] = 0
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
reject_tmin=0.1, reject_tmax=0.2)
assert_equal(len(epochs), len(events) - 2)
assert_equal(epochs.drop_log[0], ['EEG 006'])
assert_equal(len(epochs.drop_log), 10)
assert_equal(len(epochs.events), len(epochs.selection))
# baseline
data = np.ones((10, 20, 300))
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=-.2, baseline=(None, 0))
ep_data = epochs.get_data()
assert_array_equal(np.zeros_like(ep_data), ep_data)
# one time point
epochs = EpochsArray(data[:, :, :1], info, events=events,
event_id=event_id, tmin=0., baseline=None)
assert_allclose(epochs.times, [0.])
assert_allclose(epochs.get_data(), data[:, :, :1])
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs_read.times, [0.])
assert_allclose(epochs_read.get_data(), data[:, :, :1])
# event as integer (#2435)
mask = (events[:, 2] == 1)
data_1 = data[mask]
events_1 = events[mask]
epochs = EpochsArray(data_1, info, events=events_1, event_id=1,
tmin=-0.2, baseline=(None, 0))
def test_concatenate_epochs():
"""Test concatenate epochs"""
raw, events, picks = _get_data()
epochs = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epochs2 = epochs.copy()
epochs_list = [epochs, epochs2]
epochs_conc = concatenate_epochs(epochs_list)
assert_array_equal(
epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
expected_shape = list(epochs.get_data().shape)
expected_shape[0] *= 2
expected_shape = tuple(expected_shape)
assert_equal(epochs_conc.get_data().shape, expected_shape)
assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
epochs2 = epochs.copy()
epochs2._data = epochs2.get_data()
epochs2.preload = True
assert_raises(
ValueError, concatenate_epochs,
[epochs, epochs2.drop_channels(epochs2.ch_names[:1], copy=True)])
epochs2.times = np.delete(epochs2.times, 1)
assert_raises(
ValueError,
concatenate_epochs, [epochs, epochs2])
assert_equal(epochs_conc._raw, None)
# check if baseline is same for all epochs
epochs2.baseline = (-0.1, None)
assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
def test_add_channels():
"""Test epoch splitting / re-appending channel types
"""
raw, events, picks = _get_data()
epoch_nopre = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epoch = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks, preload=True)
epoch_eeg = epoch.pick_types(meg=False, eeg=True, copy=True)
epoch_meg = epoch.pick_types(meg=True, copy=True)
epoch_stim = epoch.pick_types(meg=False, stim=True, copy=True)
epoch_eeg_meg = epoch.pick_types(meg=True, eeg=True, copy=True)
epoch_new = epoch_meg.add_channels([epoch_eeg, epoch_stim], copy=True)
assert_true(all(ch in epoch_new.ch_names
for ch in epoch_stim.ch_names + epoch_meg.ch_names))
epoch_new = epoch_meg.add_channels([epoch_eeg], copy=True)
    assert_true(all(ch in epoch_new.ch_names
                    for ch in epoch_meg.ch_names + epoch_eeg.ch_names))
assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
assert_true(all(ch not in epoch_new.ch_names
for ch in epoch_stim.ch_names))
# Now test errors
epoch_badsf = epoch_eeg.copy()
epoch_badsf.info['sfreq'] = 3.1415927
epoch_eeg = epoch_eeg.crop(-.1, .1)
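    # not preloaded, mismatched sfreq, mismatched time axes, duplicate
    # channels and a non-list argument should all raise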
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
run_tests_if_main()
| bsd-3-clause | -4,107,061,049,105,148,000 | 40.08596 | 79 | 0.604324 | false |
dkrisman/Traipse | mercurial/portable_hgweb/server.py | 1 | 10801 | # hgweb/server.py - The standalone hg web server.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <[email protected]>
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.
import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
from upmana.mercurial import hg, util, error
from hgweb_mod import hgweb
from hgwebdir_mod import hgwebdir
from upmana.mercurial.i18n import _
def _splitURI(uri):
""" Return path and query splited from uri
Just like CGI environment, the path is unquoted, the query is
not.
"""
if '?' in uri:
path, query = uri.split('?', 1)
else:
path, query = uri, ''
return urllib.unquote(path), query
class _error_logger(object):
def __init__(self, handler):
self.handler = handler
def flush(self):
pass
def write(self, str):
self.writelines(str.split('\n'))
def writelines(self, seq):
for msg in seq:
self.handler.log_error("HG error: %s", msg)
class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
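    # adapts BaseHTTPRequestHandler to the WSGI protocol: each request is
    # turned into a WSGI environ dict and the hgweb(dir) application's
    # response is streamed back via _start_response/_write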
url_scheme = 'http'
def __init__(self, *args, **kargs):
self.protocol_version = 'HTTP/1.1'
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
def _log_any(self, fp, format, *args):
fp.write("%s - - [%s] %s\n" % (self.client_address[0],
self.log_date_time_string(),
format % args))
fp.flush()
def log_error(self, format, *args):
self._log_any(self.server.errorlog, format, *args)
def log_message(self, format, *args):
self._log_any(self.server.accesslog, format, *args)
def do_write(self):
try:
self.do_hgweb()
except socket.error, inst:
if inst[0] != errno.EPIPE:
raise
def do_POST(self):
try:
self.do_write()
except StandardError:
self._start_response("500 Internal Server Error", [])
self._write("Internal Server Error")
tb = "".join(traceback.format_exception(*sys.exc_info()))
self.log_error("Exception happened during processing "
"request '%s':\n%s", self.path, tb)
def do_GET(self):
self.do_POST()
def do_hgweb(self):
path, query = _splitURI(self.path)
env = {}
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['REQUEST_METHOD'] = self.command
env['SERVER_NAME'] = self.server.server_name
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_URI'] = self.path
env['SCRIPT_NAME'] = self.server.prefix
env['PATH_INFO'] = path[len(self.server.prefix):]
env['REMOTE_HOST'] = self.client_address[0]
env['REMOTE_ADDR'] = self.client_address[0]
if query:
env['QUERY_STRING'] = query
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
for header in [h for h in self.headers.keys()
if h not in ('content-type', 'content-length')]:
hkey = 'HTTP_' + header.replace('-', '_').upper()
hval = self.headers.getheader(header)
hval = hval.replace('\n', '').strip()
if hval:
env[hkey] = hval
env['SERVER_PROTOCOL'] = self.request_version
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = self.url_scheme
env['wsgi.input'] = self.rfile
env['wsgi.errors'] = _error_logger(self)
env['wsgi.multithread'] = isinstance(self.server,
SocketServer.ThreadingMixIn)
env['wsgi.multiprocess'] = isinstance(self.server,
SocketServer.ForkingMixIn)
env['wsgi.run_once'] = 0
self.close_connection = True
self.saved_status = None
self.saved_headers = []
self.sent_headers = False
self.length = None
for chunk in self.server.application(env, self._start_response):
self._write(chunk)
def send_headers(self):
if not self.saved_status:
raise AssertionError("Sending headers before "
"start_response() called")
saved_status = self.saved_status.split(None, 1)
saved_status[0] = int(saved_status[0])
self.send_response(*saved_status)
should_close = True
for h in self.saved_headers:
self.send_header(*h)
if h[0].lower() == 'content-length':
should_close = False
self.length = int(h[1])
# The value of the Connection header is a list of case-insensitive
# tokens separated by commas and optional whitespace.
if 'close' in [token.strip().lower() for token in
self.headers.get('connection', '').split(',')]:
should_close = True
if should_close:
self.send_header('Connection', 'close')
self.close_connection = should_close
self.end_headers()
self.sent_headers = True
def _start_response(self, http_status, headers, exc_info=None):
code, msg = http_status.split(None, 1)
code = int(code)
self.saved_status = http_status
bad_headers = ('connection', 'transfer-encoding')
self.saved_headers = [h for h in headers
if h[0].lower() not in bad_headers]
return self._write
def _write(self, data):
if not self.saved_status:
raise AssertionError("data written before start_response() called")
elif not self.sent_headers:
self.send_headers()
if self.length is not None:
if len(data) > self.length:
raise AssertionError("Content-length header sent, but more "
"bytes than specified are being written.")
self.length = self.length - len(data)
self.wfile.write(data)
self.wfile.flush()
class _shgwebhandler(_hgwebhandler):
url_scheme = 'https'
def setup(self):
self.connection = self.request
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
def do_write(self):
from OpenSSL.SSL import SysCallError
try:
super(_shgwebhandler, self).do_write()
except SysCallError, inst:
if inst.args[0] != errno.EPIPE:
raise
def handle_one_request(self):
from OpenSSL.SSL import SysCallError, ZeroReturnError
try:
super(_shgwebhandler, self).handle_one_request()
except (SysCallError, ZeroReturnError):
self.close_connection = True
pass
def create_server(ui, repo):
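    # builds an HTTP(S) server serving either a single repository (hgweb) or
    # a collection (hgwebdir); the web.* config options control the address,
    # port, prefix, IPv6 and the optional SSL certificate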
use_threads = True
def openlog(opt, default):
if opt and opt != '-':
return open(opt, 'a')
return default
if repo is None:
myui = ui
else:
myui = repo.ui
address = myui.config("web", "address", "")
port = int(myui.config("web", "port", 8000))
prefix = myui.config("web", "prefix", "")
if prefix:
prefix = "/" + prefix.strip("/")
use_ipv6 = myui.configbool("web", "ipv6")
webdir_conf = myui.config("web", "webdir_conf")
ssl_cert = myui.config("web", "certificate")
accesslog = openlog(myui.config("web", "accesslog", "-"), sys.stdout)
errorlog = openlog(myui.config("web", "errorlog", "-"), sys.stderr)
if use_threads:
try:
from threading import activeCount
except ImportError:
use_threads = False
if use_threads:
_mixin = SocketServer.ThreadingMixIn
else:
if hasattr(os, "fork"):
_mixin = SocketServer.ForkingMixIn
else:
class _mixin:
pass
class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
# SO_REUSEADDR has broken semantics on windows
if os.name == 'nt':
allow_reuse_address = 0
def __init__(self, *args, **kargs):
BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
self.accesslog = accesslog
self.errorlog = errorlog
self.daemon_threads = True
def make_handler():
if webdir_conf:
hgwebobj = hgwebdir(webdir_conf, ui)
elif repo is not None:
hgwebobj = hgweb(hg.repository(repo.ui, repo.root))
else:
raise error.RepoError(_("There is no Mercurial repository"
" here (.hg not found)"))
return hgwebobj
self.application = make_handler()
if ssl_cert:
try:
from OpenSSL import SSL
ctx = SSL.Context(SSL.SSLv23_METHOD)
except ImportError:
raise util.Abort(_("SSL support is unavailable"))
ctx.use_privatekey_file(ssl_cert)
ctx.use_certificate_file(ssl_cert)
sock = socket.socket(self.address_family, self.socket_type)
self.socket = SSL.Connection(ctx, sock)
self.server_bind()
self.server_activate()
self.addr, self.port = self.socket.getsockname()[0:2]
self.prefix = prefix
self.fqaddr = socket.getfqdn(address)
class IPv6HTTPServer(MercurialHTTPServer):
address_family = getattr(socket, 'AF_INET6', None)
def __init__(self, *args, **kwargs):
if self.address_family is None:
raise error.RepoError(_('IPv6 is not available on this system'))
super(IPv6HTTPServer, self).__init__(*args, **kwargs)
if ssl_cert:
handler = _shgwebhandler
else:
handler = _hgwebhandler
# ugly hack due to python issue5853 (for threaded use)
import mimetypes; mimetypes.init()
try:
if use_ipv6:
return IPv6HTTPServer((address, port), handler)
else:
return MercurialHTTPServer((address, port), handler)
except socket.error, inst:
raise util.Abort(_("cannot start server at '%s:%d': %s")
% (address, port, inst.args[1]))
| gpl-2.0 | -7,759,598,077,244,585,000 | 35.244966 | 80 | 0.560411 | false |
UITools/saleor | saleor/shipping/migrations/0013_auto_20180822_0721.py | 1 | 4293 | # Generated by Django 2.0.3 on 2018-08-22 12:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
import django_measurement.models
import django_prices.models
import saleor.core.weight
class Migration(migrations.Migration):
dependencies = [
('checkout', '0010_auto_20180822_0720'),
('order', '0052_auto_20180822_0720'),
('shipping', '0012_remove_legacy_shipping_methods'),
]
operations = [
migrations.CreateModel(
name='ShippingMethodTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(max_length=10)),
('name', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='ShippingZone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('countries', django_countries.fields.CountryField(max_length=749, multiple=True)),
],
options={
'permissions': (('manage_shipping', 'Manage shipping.'),),
},
),
migrations.AlterUniqueTogether(
name='shippingmethodcountry',
unique_together=set(),
),
migrations.RemoveField(
model_name='shippingmethodcountry',
name='shipping_method',
),
migrations.AlterModelOptions(
name='shippingmethod',
options={},
),
migrations.RemoveField(
model_name='shippingmethod',
name='description',
),
migrations.AddField(
model_name='shippingmethod',
name='maximum_order_price',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, max_digits=12, null=True),
),
migrations.AddField(
model_name='shippingmethod',
name='maximum_order_weight',
field=django_measurement.models.MeasurementField(blank=True, measurement_class='Mass', null=True),
),
migrations.AddField(
model_name='shippingmethod',
name='minimum_order_price',
field=django_prices.models.MoneyField(blank=True, currency=settings.DEFAULT_CURRENCY, decimal_places=2, default=0, max_digits=12, null=True),
),
migrations.AddField(
model_name='shippingmethod',
name='minimum_order_weight',
field=django_measurement.models.MeasurementField(blank=True, default=saleor.core.weight.zero_weight, measurement_class='Mass', null=True),
),
migrations.AddField(
model_name='shippingmethod',
name='price',
field=django_prices.models.MoneyField(currency=settings.DEFAULT_CURRENCY, decimal_places=2, default=0, max_digits=12),
),
migrations.AddField(
model_name='shippingmethod',
name='type',
field=models.CharField(choices=[('price', 'Price based shipping'), ('weight', 'Weight based shipping')], default=None, max_length=30),
preserve_default=False,
),
migrations.DeleteModel(
name='ShippingMethodCountry',
),
migrations.AddField(
model_name='shippingmethodtranslation',
name='shipping_method',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='shipping.ShippingMethod'),
),
migrations.AddField(
model_name='shippingmethod',
name='shipping_zone',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='shipping_methods', to='shipping.ShippingZone'),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='shippingmethodtranslation',
unique_together={('language_code', 'shipping_method')},
),
]
| bsd-3-clause | 1,969,370,652,778,655,500 | 39.885714 | 156 | 0.603075 | false |
RyanSkraba/beam | sdks/python/apache_beam/io/gcp/datastore_write_it_pipeline.py | 1 | 7435 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A job that write Entries into Datastore.
The pipelines behave in the steps below.
1. Create and write Entities to Datastore
2. (Optional) If read limit was provided,
read it and confirm that the expected Entities were read.
3. Query the written Entities and verify result.
4. Delete Entities.
5. Query the written Entities, verify no results.
"""
from __future__ import absolute_import
import argparse
import hashlib
import logging
import uuid
import apache_beam as beam
from apache_beam.io.gcp.datastore.v1.datastoreio import DeleteFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
# Protect against environments where Datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from googledatastore import helper as datastore_helper
from googledatastore import PropertyFilter
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
# pylint: enable=ungrouped-imports
_LOGGER = logging.getLogger(__name__)
def new_pipeline_with_job_name(pipeline_options, job_name, suffix):
"""Create a pipeline with the given job_name and a suffix."""
gcp_options = pipeline_options.view_as(GoogleCloudOptions)
# DirectRunner doesn't have a job name.
if job_name:
gcp_options.job_name = job_name + suffix
return TestPipeline(options=pipeline_options)
class EntityWrapper(object):
"""Create a Cloud Datastore entity from the given string."""
def __init__(self, kind, namespace, ancestor):
self._kind = kind
self._namespace = namespace
self._ancestor = ancestor
def make_entity(self, content):
"""Create entity from given string."""
entity = entity_pb2.Entity()
if self._namespace is not None:
entity.key.partition_id.namespace_id = self._namespace
# All entities created will have the same ancestor
datastore_helper.add_key_path(entity.key, self._kind, self._ancestor,
self._kind, hashlib.sha1(content).hexdigest())
datastore_helper.add_properties(entity, {'content': str(content)})
return entity
def make_ancestor_query(kind, namespace, ancestor):
"""Creates a Cloud Datastore ancestor query."""
ancestor_key = entity_pb2.Key()
datastore_helper.add_key_path(ancestor_key, kind, ancestor)
if namespace is not None:
ancestor_key.partition_id.namespace_id = namespace
query = query_pb2.Query()
query.kind.add().name = kind
datastore_helper.set_property_filter(
query.filter, '__key__', PropertyFilter.HAS_ANCESTOR, ancestor_key)
return query
def run(argv=None):
"""Main entry point."""
parser = argparse.ArgumentParser()
parser.add_argument('--kind',
dest='kind',
default='writereadtest',
help='Datastore Kind')
parser.add_argument('--num_entities',
dest='num_entities',
type=int,
required=True,
help='Number of entities to write')
parser.add_argument('--limit',
dest='limit',
type=int,
help='Limit of number of entities to write')
known_args, pipeline_args = parser.parse_known_args(argv)
pipeline_options = PipelineOptions(pipeline_args)
gcloud_options = pipeline_options.view_as(GoogleCloudOptions)
job_name = gcloud_options.job_name
kind = known_args.kind
num_entities = known_args.num_entities
project = gcloud_options.project
  # a random ancestor key
ancestor = str(uuid.uuid4())
query = make_ancestor_query(kind, None, ancestor)
# Pipeline 1: Create and write the specified number of Entities to the
# Cloud Datastore.
_LOGGER.info('Writing %s entities to %s', num_entities, project)
p = new_pipeline_with_job_name(pipeline_options, job_name, '-write')
# pylint: disable=expression-not-assigned
(p
| 'Input' >> beam.Create(list(range(known_args.num_entities)))
| 'To String' >> beam.Map(str)
| 'To Entity' >> beam.Map(EntityWrapper(kind, None, ancestor).make_entity)
| 'Write to Datastore' >> WriteToDatastore(project))
p.run()
# Optional Pipeline 2: If a read limit was provided, read it and confirm
# that the expected entities were read.
if known_args.limit is not None:
_LOGGER.info('Querying a limited set of %s entities and verifying count.',
known_args.limit)
p = new_pipeline_with_job_name(pipeline_options, job_name, '-verify-limit')
query_with_limit = query_pb2.Query()
query_with_limit.CopyFrom(query)
query_with_limit.limit.value = known_args.limit
entities = p | 'read from datastore' >> ReadFromDatastore(project,
query_with_limit)
assert_that(
entities | beam.combiners.Count.Globally(),
equal_to([known_args.limit]))
p.run()
# Pipeline 3: Query the written Entities and verify result.
_LOGGER.info('Querying entities, asserting they match.')
p = new_pipeline_with_job_name(pipeline_options, job_name, '-verify')
entities = p | 'read from datastore' >> ReadFromDatastore(project, query)
assert_that(
entities | beam.combiners.Count.Globally(),
equal_to([num_entities]))
p.run()
# Pipeline 4: Delete Entities.
_LOGGER.info('Deleting entities.')
p = new_pipeline_with_job_name(pipeline_options, job_name, '-delete')
entities = p | 'read from datastore' >> ReadFromDatastore(project, query)
# pylint: disable=expression-not-assigned
(entities
| 'To Keys' >> beam.Map(lambda entity: entity.key)
| 'Delete keys' >> DeleteFromDatastore(project))
p.run()
# Pipeline 5: Query the written Entities, verify no results.
_LOGGER.info('Querying for the entities to make sure there are none present.')
p = new_pipeline_with_job_name(pipeline_options, job_name, '-verify-deleted')
entities = p | 'read from datastore' >> ReadFromDatastore(project, query)
assert_that(
entities | beam.combiners.Count.Globally(),
equal_to([0]))
p.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| apache-2.0 | -6,609,520,080,084,072,000 | 34.745192 | 80 | 0.698991 | false |
ijmarshall/cochrane-nlp | quality4.py | 1 | 73371 | from tokenizer import sent_tokenizer, word_tokenizer
import biviewer
import pdb
import re
import progressbar
import collections
import string
from unidecode import unidecode
import codecs
import yaml
from pprint import pprint
import numpy as np
import math
import difflib
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.grid_search import GridSearchCV
from sklearn.feature_extraction import DictVectorizer
from sklearn import cross_validation
from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import SGDClassifier
from sklearn.externals import six
from collections import defaultdict
from sklearn.metrics import precision_recall_fscore_support
import random
import operator
from sklearn.cross_validation import KFold
from journalreaders import PdfReader
import cPickle as pickle
from sklearn.metrics import f1_score, make_scorer, fbeta_score, accuracy_score
import nltk
from nltk.corpus import stopwords
REGEX_QUOTE_PRESENT = re.compile("Quote\:")
REGEX_QUOTE = re.compile("\"(.*?)\"") # retrive blocks of text in quotes
REGEX_ELLIPSIS = re.compile("\s*[\[\(]?\s?\.\.+\s?[\]\)]?\s*") # to catch various permutations of "..." and "[...]"
SIMPLE_WORD_TOKENIZER = re.compile("[a-zA-Z]{2,}") # regex of the rule used by sklearn CountVectorizer
CORE_DOMAINS = ["Random sequence generation", "Allocation concealment", "Blinding of participants and personnel",
"Blinding of outcome assessment", "Incomplete outcome data", "Selective reporting"]
# "OTHER" is generated in code, not in the mapping file
# see data/domain_names.txt for various other criteria
# all of these are available via QualityQuoteReader
ALL_DOMAINS = CORE_DOMAINS[:] # will be added to later
RoB_CLASSES = ["YES", "NO", "UNKNOWN"]
STOP_WORDS = set(stopwords.words('english'))
# @TODO move me
domain_str = lambda d: d.lower().replace(" ", "_")
def show_most_informative_features(vectorizer, clf, n=1000):
###
# note that in the multi-class case, clf.coef_ will
# have k weight vectors, which I believe are one per
# each class (i.e., each is a classifier discriminating
# one class versus the rest).
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
if n == 0:
n = len(c_f)/2
top = zip(c_f[:n], c_f[:-(n+1):-1])
print
print "%d most informative features:" % (n, )
out_str = []
for (c1, f1), (c2, f2) in top:
out_str.append("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
feature_str = "\n".join(out_str)
return feature_str
def show_most_informative_features_ynu(vectorizer, clf, n=10):
###
# note that in the multi-class case, clf.coef_ will
# have k weight vectors, which I believe are one per
# each class (i.e., each is a classifier discriminating
# one class versus the rest).
combinations = ["NO vs (YES + UNKNOWN)", "UNKNOWN vs (YES + NO)", "YES vs (NO + UNKNOWN)"]
out_str = []
for i, combination in enumerate(combinations):
out_str.append(combination)
out_str.append("*" * 20)
c_f = sorted(zip(clf.coef_[i], vectorizer.get_feature_names()))
if n == 0:
n = len(c_f)/2
top = zip(c_f[:n], c_f[:-(n+1):-1])
for (c1, f1), (c2, f2) in top:
out_str.append("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
feature_str = "\n".join(out_str)
return feature_str
def load_domain_map(filename="data/domain_names.txt"):
with codecs.open(filename, 'rb', 'utf-8') as f:
raw_data = yaml.load(f)
mapping = {}
for key, value in raw_data.iteritems():
for synonym in value:
mapping[synonym] = key
return mapping
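# domain_names.txt is expected to be a YAML mapping from each canonical
# domain name to a list of synonyms, e.g. (illustrative only):
# Random sequence generation:
# - "Random sequence generation (selection bias)"
# - "Adequate sequence generation?"
# load_domain_map() inverts this into {synonym: canonical name}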
class QualityQuoteReader2():
"""
iterates through Cochrane Risk of Bias information
v2 maintains unique ids for all source studies + does not filter by whether a quote is present
returns list of quotes where they are available, else none
"""
def __init__(self, sent=False, test_mode=False):
self.BiviewerView = collections.namedtuple('BiViewer_View', ['uid', 'cochrane', 'studypdf'])
self.pdfviewer = biviewer.PDFBiViewer()
self.domain_map = load_domain_map()
if test_mode:
self.test_mode_break_point = 500
else:
self.test_mode_break_point = None
def __iter__(self):
"""
run through PDF/Cochrane data
preprocesses PDF text
and maps domain title to one of the core Risk of Bias domains if possible
"""
progress_bar_limit = len(self.pdfviewer) if self.test_mode_break_point is None else self.test_mode_break_point
p = progressbar.ProgressBar(progress_bar_limit, timer=True)
for uid, study in enumerate(self.pdfviewer):
if self.test_mode_break_point and (uid >= self.test_mode_break_point):
break
p.tap()
quality_data = study.cochrane["QUALITY"]
for domain in quality_data:
domain["QUOTES"] = self.preprocess_cochrane(domain["DESCRIPTION"])
try:
domain["DOMAIN"] = self.domain_map[domain["DOMAIN"]] # map domain titles to our core categories
except:
domain["DOMAIN"] = "OTHER"
yield self.BiviewerView(uid=uid, cochrane={"QUALITY": quality_data}, studypdf=self.preprocess_pdf(study.studypdf))
def __len__(self):
return len(self.pdfviewer) if self.test_mode_break_point is None else self.test_mode_break_point
def preprocess_pdf(self, pdftext):
pdftext = unidecode(pdftext)
pdftext = re.sub("\n", " ", pdftext) # preprocessing rule 1
return pdftext
def preprocess_cochrane(self, rawtext):
# regex clean up of cochrane strings
processedtext = unidecode(rawtext)
processedtext = re.sub(" +", " ", processedtext)
# extract all parts in quotes
quotes = REGEX_QUOTE.findall(processedtext)
# then split at any ellipses
quote_parts = []
for quote in quotes:
quote_parts.extend(REGEX_ELLIPSIS.split(quote))
return quote_parts
class PDFMatcher():
"""
matches and generates sent tokens from pdf text
"""
def __init__(self, quotes=None, pdftext=None):
# load a sequence matcher; turn autojunk off (since buggy for long strings)
self.sequencematcher = difflib.SequenceMatcher(None, autojunk=False)
if quotes:
self.quotes = self.load_quotes(quotes)
if pdftext:
self.pdftext = self.load_pdftext(pdftext)
def load_quotes(self, quotes):
self.quotes = quotes
def load_pdftext(self, pdftext):
self.pdftext = pdftext
self.lenpdf = len(pdftext)
self.sequencematcher.set_seq2(self.pdftext)
self.sent_indices = sent_tokenizer.span_tokenize(self.pdftext)
def _overlap(self, t1, t2):
"""
finds out whether two tuples overlap
"""
t1s, t1e = t1
t2s, t2e = t2
# true if either start of t1 is inside t2, or start of t2 is inside t1
return (t2s <= t1s <= t2e) or (t1s <= t2s <= t1e)
def generate_X(self):
X = []
# go through sentence indices
# make X (list of sentences)
for (start_i, end_i) in self.sent_indices:
X.append(self.pdftext[start_i: end_i])
return X
def generate_y(self, min_char_match=20):
"""
returns X: list of sentence strings
y: numpy vector of 1, -1 (for positive/negative examples)
"""
good_match = False # this will be set to True if sufficent matching characters in
# at least one of the parts of the quotes
match_indices = []
# go through quotes, match using difflib
# and keep any matches which are long enough so likely true matches
for quote in self.quotes:
self.sequencematcher.set_seq1(quote)
best_match = self.sequencematcher.find_longest_match(0, len(quote), 0, self.lenpdf)
# only interested in good quality matches
if best_match.size > min_char_match:
good_match = True
match_indices.append((best_match.b, best_match.b + best_match.size)) # add (start_i, end_i) tuples (of PDF indices)
y = []
if not good_match:
# if quality criteria not met, leave here
# (i.e. return empty lists [], [])
return y
# otherwise continue and generate feature and answer vectors
# get indices of sentences (rather than split)
sent_indices = sent_tokenizer.span_tokenize(self.pdftext)
# go through sentence indices
# make X (list of sentences)
# and calculate y, if there is *any* overlap with matched quoted text then
# y = True
for (start_i, end_i) in sent_indices:
# if any overlaps with quotes, then y = True, else False
if any((self._overlap((start_i, end_i), match_tuple) for match_tuple in match_indices)):
y.append(1)
else:
y.append(-1)
return y
class SentenceModel():
"""
predicts whether sentences contain risk of bias information
- uses data from Cochrane quotes only
"""
def __init__(self, test_mode=False):
self.quotereader = QualityQuoteReader2(test_mode=test_mode) # this now runs through all studies
def generate_data(self, uid_filter=None):
"""
tokenizes and processes the raw text from pdfs and cochrane
saves in self.X_list and self.y_list (both dicts)
"""
test_domains = CORE_DOMAINS # may change later to access various "other" domains
# one feature matrix X across all domains
self.X_list = [] # key will be unique id, value will be text
# but multiple y vectors; one for each test domain
self.y_list = {domain: [] for domain in test_domains}
self.y_judgements = {domain: [] for domain in test_domains} # used in subclasses to make hybrid models
self.X_uids = []
self.y_uids = {domain: [] for domain in test_domains}
for uid, cochrane_data, pdf_data in self.quotereader:
if uid_filter is not None and uid not in uid_filter:
continue
matcher = PDFMatcher()
matcher.load_pdftext(pdf_data)
X_study = matcher.generate_X()
self.X_list.extend(X_study)
self.X_uids.extend([uid] * len(X_study))
domains_done_already = [] # for this particular study
# (we're ignoring multiple quotes per domain at the moment and getting the first...)
for domain in cochrane_data["QUALITY"]:
if domain["DOMAIN"] not in test_domains or domain["DOMAIN"] in domains_done_already:
continue # skip if a domain is repeated in a study (though note that this is likely due to different RoB per *outcome* which is ignored here)
if domain["QUOTES"]:
matcher.load_quotes(domain["QUOTES"])
y_study = matcher.generate_y()
self.y_list[domain["DOMAIN"]].extend(y_study)
self.y_uids[domain["DOMAIN"]].extend([uid] * len(y_study))
self.y_judgements[domain["DOMAIN"]].extend([domain["RATING"]] * len(y_study))
domains_done_already.append(domain["DOMAIN"])
self.y = {domain: np.array(self.y_list[domain]) for domain in test_domains}
self.X_uids = np.array(self.X_uids)
self.y_uids = {domain: np.array(self.y_uids[domain]) for domain in test_domains}
self.y_judgements = {domain: np.array(self.y_judgements[domain]) for domain in test_domains}
# self.vectorize()
def vectorize(self):
self.vectorizer = ModularCountVectorizer()
# self.X = self.vectorizer.fit_transform(self.X_list, max_features=50000)
self.X = self.vectorizer.fit_transform(self.X_list)
def load_text(self, filename):
"""
        loads the original text of all PDFs, for debugging and inspecting predicted text from the corpus
NB this is a large file
"""
with open(filename, 'rb') as f:
self.X_list = pickle.load(f)
def __len__(self):
"""
returns the total number of studies (not features)
"""
return len(self.quotereader)
def len_domain(self, domain):
return len(np.unique(self.y_uids[domain]))
def domain_X_filter(self, domain):
"""
returns X_filter for a domain
"""
y_study_uids = np.unique(self.y_uids[domain])
X_filter = np.nonzero([(X_uid in y_study_uids) for X_uid in self.X_uids])[0]
return X_filter
def domain_uids(self, domain):
unique_study_ids = np.unique(self.y_uids[domain])
return unique_study_ids
def X_y_uid_filtered(self, uids, domain):
X_all = self.X_domain_all(domain=domain)
y_all = self.y_domain_all(domain=domain)
filter_ids = np.nonzero([(y_study_id in uids) for y_study_id in self.y_uids[domain]])[0]
X_filtered = X_all[filter_ids]
y_filtered = y_all[filter_ids]
return X_filtered, y_filtered
def get_all_domains(self):
return self.y.keys()
    def X_get_sentence(self, select_sent_id, domain):
        y_study_ids = np.unique(self.y_uids[domain])
        X_filter = np.nonzero([(X_study_id in y_study_ids)
                               for X_study_id in self.X_uids])[0]
        return self.X_list[X_filter[select_sent_id]]
def X_domain_all(self, domain):
"""
retrieve X data for a domain
"""
X_filter = self.domain_X_filter(domain)
return self.X[X_filter]
def y_domain_all(self, domain):
return self.y[domain]
# def X_y_filtered(self, filter_ids, domain):
# X_all = self.X_domain_all(domain=domain)
# y_all = self.y_domain_all(domain=domain)
# # np.unique always returns ordered ids
# unique_study_ids = np.unique(self.y_uids[domain])
# mapped_ids = [unique_study_ids[filter_id] for filter_id in filter_ids]
# filter_ids = np.nonzero([(y_study_id in mapped_ids) for y_study_id in self.y_uids[domain]])[0]
# X_filtered = X_all[filter_ids]
# y_filtered = y_all[filter_ids]
# return X_filtered, y_filtered
class DocumentLevelModel(SentenceModel):
"""
for predicting the risk of bias
as "HIGH", "LOW", or "UNKNOWN" for a document
using a binary bag-of-words as features for each document
"""
def generate_data(self, uid_filter=None, binarize=False):
"""
tokenizes and processes the raw text from pdfs and cochrane
saves in self.X_list and self.y_list (both dicts)
"""
test_domains = CORE_DOMAINS # may change later to access various "other" domains
# one feature matrix X across all domains
self.X_list = [] # key will be unique id, value will be text
# but multiple y vectors; one for each test domain
self.y_list = {domain: [] for domain in test_domains}
self.X_uids = []
self.y_uids = {domain: [] for domain in test_domains}
for uid, cochrane_data, pdf_data in self.quotereader:
if uid_filter is not None and uid not in uid_filter:
continue
X_study = [pdf_data] # this time the X is the whole pdf data
self.X_list.extend(X_study)
self.X_uids.extend([uid] * len(X_study))
domains_done_already = [] # for this particular study
# (we're ignoring multiple quotes per domain at the moment and getting the first...)
for domain in cochrane_data["QUALITY"]:
if domain["DOMAIN"] not in test_domains or domain["DOMAIN"] in domains_done_already:
continue # skip if a domain is repeated in a study (though note that this is likely due to different RoB per *outcome* which is ignored here)
if binarize:
y_study = 1 if domain["RATING"]=="YES" else -1 # binarize
else:
y_study = domain["RATING"]
self.y_list[domain["DOMAIN"]].append(y_study)
self.y_uids[domain["DOMAIN"]].append(uid)
domains_done_already.append(domain["DOMAIN"])
self.y = {domain: np.array(self.y_list[domain]) for domain in test_domains}
self.X_uids = np.array(self.X_uids)
self.y_uids = {domain: np.array(self.y_uids[domain]) for domain in test_domains}
class MultiTaskDocumentModel(DocumentLevelModel):
'''
The idea here is to train a single model across all domains. Basically:
y_ij = sign{(w0 + w_j) * x_i}
for document x_i, where a w_j is learned for each domain and w0 is a shared
weight vector (across domains).
'''
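    # implemented via feature augmentation: each (document, labelled domain)
    # pair gets a plain bag-of-words copy (the shared w0) plus a
    # domain-prefixed interaction copy (the domain-specific w_j), so a single
    # linear model learns both jointly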
def vectorize(self):
self.vectorizer = ModularCountVectorizer()
self.vectorizer.builder_clear()
self.X_mt_labels = [] # which rows correspond to which doc/interactions?
self.y_mt = []
self.uids_to_row_indices = {}
self.row_indices_to_uids, self.row_indices_to_domains = [], []
# number of rows in the 'multi-task' matrix, which
# will vary depending on how many docs have labels
# for how many domains
n_rows = 0 # (equal to len(self.X_mt_labels)
'''
the vectorizer wants all the documents at once,
so here we are going to build them up. we're only
going to add interaction copies for a given document
for those domains that we have an associated label.
'''
docs = []
# which indices in docs correspond to copies for
# which domains?
domains_to_interaction_doc_indices = defaultdict(list)
for i, doc in enumerate(self.X_list):
# `intercept' document
uid = self.X_uids[i]
# add |CORE_DOMAINS| copies for each instance.
for domain in CORE_DOMAINS:
d_str = domain_str(domain)
if uid in self.domain_uids(domain):
# get the label (note that we match up the
# uid to do this)
uids_to_lbls = dict(zip(self.y_uids[domain],
self.y_domain_all(domain=domain)))
#y_index = self.y_uids(domain).index(uid)
#domain_ys = self.y_domain_all(domain=domain)
#self.y_mt.append(domain_ys[y_index])
self.y_mt.append(uids_to_lbls[uid])
# append interaction copy of doc
docs.append(doc)
self.row_indices_to_uids.append(uid)
self.row_indices_to_domains.append(domain)
self.X_mt_labels.append("%s-%s" % (i, d_str))
domains_to_interaction_doc_indices[d_str].append(n_rows)
n_rows += 1
'''
        now actually add documents and interaction copies to
the vectorizer.
'''
#for i, doc in enumerate(self.X_list):
# `intercept' document
self.vectorizer.builder_add_docs(docs)
for domain in CORE_DOMAINS:
d_str = domain_str(domain)
interaction_list = []
for i in xrange(len(docs)):
if i in domains_to_interaction_doc_indices[d_str]:
interaction_list.append(docs[i])
else:
interaction_list.append("")
self.vectorizer.builder_add_docs(interaction_list, prefix=d_str+"-")
# BCW -- attempting to upper bound features!
# note that this will keep the <max_features> most common
# features, regardless of whether or not they are 'interaction'
# features
# self.X = self.vectorizer.builder_fit_transform(max_features=50000)
self.X = self.vectorizer.builder_fit_transform(low=2)
def X_y_uid_filtered(self, uids, domain=None):
X_indices, y = [], []
for i in xrange(self.X.shape[0]):
if domain is None and self.row_indices_to_uids[i] in uids:
# if domain is None, return big multi-task design matrix
# -- you'll want to do this, e.g., for training
X_indices.append(i)
y.append(self.y_mt[i])
elif domain == self.row_indices_to_domains[i] and self.row_indices_to_uids[i] in uids:
# otherwise (when domain is not None), return
# instances for only the target domain
# (e.g., for testing)
X_indices.append(i)
y.append(self.y_mt[i])
return self.X[X_indices], y
class MultiTaskHybridDocumentModel(MultiTaskDocumentModel):
'''
    same as the MultiTaskDocumentModel, except that it additionally folds
    the sentence-level model's high-probability sentences in as features
'''
def vectorize(self):
self.vectorizer = ModularCountVectorizer()
self.vectorizer.builder_clear()
self.X_mt_labels = [] # which rows correspond to which doc/interactions?
self.y_mt = []
self.uids_to_row_indices = {}
self.row_indices_to_uids, self.row_indices_to_domains = [], []
# number of rows in the 'multi-task' matrix, which
# will vary depending on how many docs have labels
# for how many domains
n_rows = 0 # (equal to len(self.X_mt_labels)
'''
the vectorizer wants all the documents at once,
so here we are going to build them up. we're only
going to add interaction copies for a given document
for those domains that we have an associated label.
'''
docs = []
high_prob_sents = defaultdict(list)
# which indices in docs correspond to copies for
# which domains?
domains_to_interaction_doc_indices = defaultdict(list)
for i, doc in enumerate(self.X_list):
# `intercept' document
uid = self.X_uids[i]
# add |CORE_DOMAINS| copies for each instance.
for domain in CORE_DOMAINS:
d_str = domain_str(domain)
if uid in self.domain_uids(domain):
# get the label (note that we match up the
# uid to do this)
uids_to_lbls = dict(zip(self.y_uids[domain],
self.y_domain_all(domain=domain)))
#y_index = self.y_uids(domain).index(uid)
#domain_ys = self.y_domain_all(domain=domain)
#self.y_mt.append(domain_ys[y_index])
self.y_mt.append(uids_to_lbls[uid])
# append interaction copy of doc
docs.append(doc)
high_prob_sents[domain].append(self.get_sent_predictions_for_doc(doc, domain))
for high_prob_domain in CORE_DOMAINS:
if high_prob_domain != domain:
high_prob_sents[high_prob_domain].append("")
self.row_indices_to_uids.append(uid)
self.row_indices_to_domains.append(domain)
self.X_mt_labels.append("%s-%s" % (i, d_str))
domains_to_interaction_doc_indices[d_str].append(n_rows)
n_rows += 1
'''
now actually add documents and interaction copies to
the vectorizer.
'''
#for i, doc in enumerate(self.X_list):
# `intercept' document
self.vectorizer.builder_add_docs(docs)
for domain in CORE_DOMAINS:
d_str = domain_str(domain)
interaction_list, sent_interaction_list = [], []
for i in xrange(len(docs)):
if i in domains_to_interaction_doc_indices[d_str]:
interaction_list.append(docs[i])
sent_interaction_list.append(high_prob_sents[domain][i])
else:
interaction_list.append("")
sent_interaction_list.append("")
self.vectorizer.builder_add_docs(interaction_list, prefix=d_str+"-doc-")
self.vectorizer.builder_add_docs(sent_interaction_list, prefix=d_str+"-sent-")
self.X = self.vectorizer.builder_fit_transform(max_features=200000, low=3)
# self.X = self.vectorizer.builder_fit_transform(max_features=50000)
####
# maybe record the feature indices here that are to receive
# different 'amounts' of regularization
####
def get_sent_predictions_for_doc(self, doc, domain):
# tokenize into sentences
sents = sent_tokenizer.tokenize(doc)
# vectorize the sentences
X_sents = self.sent_vectorizer.transform(sents)
# get predicted 1 / -1 for the sentences
pred_class = self.sent_clfs[domain].predict(X_sents)
# get the sentences which are predicted 1
positive_sents = [sent for sent, pred in zip(sents, pred_class) if pred==1]
# make a single string per doc
rob_sents = " ".join(positive_sents)
return rob_sents
def set_sent_model(self, sent_clfs, sent_vectorizer):
"""
set a model which takes in a list of sentences;
and returns -1 or 1
"""
self.sent_clfs = sent_clfs
self.sent_vectorizer = sent_vectorizer
class HybridDocModel(DocumentLevelModel):
"""
for predicting the risk of bias
as "HIGH", "LOW", or "UNKNOWN" for a document
    using a bag-of-words over the full text, plus up-weighted features from
    the sentences a sentence-level model predicts to be RoB-relevant
"""
def vectorize(self, domain=None):
if domain is None:
raise TypeError("this class requires domain specific vectorization")
self.vectorizer = ModularCountVectorizer()
self.vectorizer.builder_clear()
X_filter = self.domain_X_filter(domain)
predictions = self.get_sent_predictions_for_domain(domain)
self.vectorizer.builder_add_docs([self.X_list[i] for i in X_filter])
self.vectorizer.builder_add_docs(predictions, prefix="high-prob-sent-", weighting=10)
self.X = self.vectorizer.builder_fit_transform()
def get_sent_predictions_for_domain(self, domain):
uids = self.domain_uids(domain)
predictions = []
for uid in uids:
# get the index of the study with specified uid
study_index = np.nonzero(self.X_uids==uid)[0][0]
# tokenize into sentences
sents = sent_tokenizer.tokenize(self.X_list[study_index])
# vectorize the sentences
X_sents = self.sent_vectorizer.transform(sents)
# get predicted 1 / -1 for the sentences
pred_class = self.sent_clf.predict(X_sents)
# get the sentences which are predicted 1
positive_sents = [sent for sent, pred in zip(sents, pred_class) if pred==1]
# make a single string per doc
doc = " ".join(positive_sents)
predictions.append(doc)
return predictions
def set_sent_model(self, doc_clf, doc_vectorizer):
"""
set a model which takes in a list of sentences;
and returns -1 or 1
"""
self.sent_clf = doc_clf
self.sent_vectorizer = doc_vectorizer
def X_y_uid_filtered(self, uids, domain):
X_all = self.X
y_all = self.y_domain_all(domain=domain)
filter_ids = np.nonzero([(y_study_id in uids) for y_study_id in self.y_uids[domain]])[0]
X_filtered = X_all[filter_ids]
y_filtered = y_all[filter_ids]
return X_filtered, y_filtered
# class HybridDocModel2(HybridDocModel):
# """
# for predicting the risk of bias
# as "HIGH", "LOW", or "UNKNOWN" for a document
# using a binary bag-of-words as features for each document
# """
# def vectorize(self, domain=None):
# if domain is None:
# raise TypeError("this class requires domain specific vectorization")
# self.vectorizer = ModularCountVectorizer()
# self.vectorizer.builder_clear()
# X_filter = self.domain_X_filter(domain)
# predictions = self.get_sent_predictions_for_domain(domain)
# # self.vectorizer.builder_add_docs([self.X_list[i] for i in X_filter])
# self.vectorizer.builder_add_docs(predictions, prefix="high-prob-sent-")
# self.X = self.vectorizer.builder_fit_transform()
class HybridModel(SentenceModel):
"""
predicts whether sentences contain risk of bias information
- uses real RoB judgements
"""
def vectorize(self, domain=None, interaction_classes=["YES", "NO"]):
if domain is None:
raise TypeError("this class requires domain specific vectorization")
self.vectorizer = ModularCountVectorizer()
self.vectorizer.builder_clear()
X_filter = self.domain_X_filter(domain)
sents = [self.X_list[i] for i in X_filter]
self.vectorizer.builder_add_docs(sents)
for interaction_class in interaction_classes:
self.vectorizer.builder_add_interaction_features(
sents, self.y_judgements[domain]==interaction_class, prefix="rob-" + interaction_class + "-")
self.X = self.vectorizer.builder_fit_transform()
def X_y_uid_filtered(self, uids, domain):
X_all = self.X
y_all = self.y_domain_all(domain=domain)
filter_ids = np.nonzero([(y_study_id in uids) for y_study_id in self.y_uids[domain]])[0]
X_filtered = X_all[filter_ids]
y_filtered = y_all[filter_ids]
return X_filtered, y_filtered
class HybridModelProbablistic(HybridModel):
"""
predicts whether sentences contain risk of bias information
    - requires a model to be passed in which can predict RoB judgements from
full text document
"""
def vectorize(self, domain=None, interaction_classes=["YES", "NO"], use_vectorizer=None):
if domain is None:
raise TypeError("this class requires domain specific vectorization")
if use_vectorizer is None:
self.vectorizer = ModularCountVectorizer()
else:
self.vectorizer = use_vectorizer
self.vectorizer.builder_clear()
X_filter = self.domain_X_filter(domain)
predictions = self.get_doc_predictions_for_domain(domain)
sents = [self.X_list[i] for i in X_filter]
self.vectorizer.builder_add_docs(sents)
for interaction_class in interaction_classes:
self.vectorizer.builder_add_interaction_features(sents, predictions==interaction_class, prefix="rob-" + interaction_class + "-")
if use_vectorizer is None:
self.X = self.vectorizer.builder_fit_transform()
else:
self.X = self.vectorizer.builder_transform()
def get_doc_predictions_for_domain(self, domain):
uids = self.domain_uids(domain)
predictions = []
for uid in uids:
# get the indices of all sentences in the study with specified uid
X_filter = np.nonzero(self.X_uids==uid)[0]
# make a single string per doc
doc = " ".join([self.X_list[i] for i in X_filter])
# vectorize the docs, then predict using the model
X_doc = self.doc_vectorizer.transform(doc)
prediction = self.doc_clf.predict(X_doc)
# add the same prediction for each sentence
predictions.extend([prediction[0]] * len(X_filter))
return np.array(predictions)
def set_doc_model(self, doc_clf, doc_vectorizer):
"""
set a model which takes in a full text doc;
outputs a doc class "YES", "NO", or "UNKNOWN"
"""
self.doc_clf = doc_clf
self.doc_vectorizer = doc_vectorizer
def _document_frequency(X):
"""Count the number of non-zero values for each feature in csc_matrix X."""
return np.diff(X.indptr)
class ModularCountVectorizer():
"""
Similar to CountVectorizer from sklearn, but allows building up
of feature matrix gradually, and adding prefixes to feature names
(to identify interaction terms)
"""
def __init__(self, *args, **kwargs):
self.data = []
self.vectorizer = DictVectorizer(*args, **kwargs)
def _transform_X_to_dict(self, X, prefix=None, weighting=1):
"""
makes a list of dicts from a document
1. word tokenizes
2. creates {word1:1, word2:1...} dicts
(note all set to '1' since the DictVectorizer we use assumes all missing are 0)
"""
return [self._dict_from_word_list(
            self._word_tokenize(document, prefix=prefix), weighting=weighting) for document in X]
def _word_tokenize(self, text, prefix=None, stopword=True):
"""
simple word tokenizer using the same rule as sklearn
punctuation is ignored, all 2 or more letter characters are a word
"""
stop_word_list = STOP_WORDS if stopword else set()
if prefix:
return [prefix + word.lower() for word in SIMPLE_WORD_TOKENIZER.findall(text)
if not word.lower() in stop_word_list]
else:
return [word.lower() for word in SIMPLE_WORD_TOKENIZER.findall(text)
if not word.lower() in stop_word_list]
def _dict_from_word_list(self, word_list, weighting=1):
return {word: weighting for word in word_list}
def _dictzip(self, dictlist1, dictlist2):
"""
zips together two lists of dicts of the same length
"""
# checks lists must be the same length
if len(dictlist1) != len(dictlist2):
raise IndexError("Unable to combine featuresets with different number of examples")
output = []
for dict1, dict2 in zip(dictlist1, dictlist2):
output.append(dict(dict1.items() + dict2.items()))
# note that this *overwrites* any duplicate keys with the key/value from dictlist2!!
return output
def transform(self, X, prefix=None):
# X is a list of document strings
# word tokenizes each one, then passes to a dict vectorizer
dict_list = self._transform_X_to_dict(X, prefix=prefix)
return self.vectorizer.transform(dict_list)
def fit_transform(self, X, prefix=None, max_features=None, low=None):
# X is a list of document strings
# word tokenizes each one, then passes to a dict vectorizer
dict_list = self._transform_X_to_dict(X, prefix=prefix)
X = self.vectorizer.fit_transform(dict_list)
if max_features is not None or low is not None:
X, removed = self._limit_features(X.tocsc(),
self.vectorizer.vocabulary_, low=low, limit=max_features)
print "pruned %s features!" % len(removed)
X = X.tocsc()
        return X  # don't refit here; that would discard the feature pruning above
def get_feature_names(self):
return self.vectorizer.get_feature_names()
def builder_clear(self):
self.builder = []
self.builder_len = 0
def builder_add_docs(self, X, prefix = None, weighting=1):
#pdb.set_trace()
if not self.builder:
self.builder_len = len(X)
self.builder = self._transform_X_to_dict(X, prefix=prefix, weighting=weighting)
else:
X_dicts = self._transform_X_to_dict(X, prefix=prefix, weighting=weighting)
self.builder = self._dictzip(self.builder, X_dicts)
def builder_add_interaction_features(self, X, interactions, prefix=None):
if prefix is None:
raise TypeError('Prefix is required when adding interaction features')
doc_list = [(sent if interacting else "") for sent, interacting in zip(X, interactions)]
self.builder_add_docs(doc_list, prefix)
def builder_fit_transform(self, max_features=None, low=2):
X = self.vectorizer.fit_transform(self.builder)
if max_features is not None or low is not None:
X, removed = self._limit_features(X.tocsc(),
self.vectorizer.vocabulary_, low=low, limit=max_features)
print "pruned %s features!" % len(removed)
X = X.tocsc()
return X #self.vectorizer.fit_transform(self.builder)
def builder_transform(self):
return self.vectorizer.transform(self.builder)
def _limit_features(self, cscmatrix, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return cscmatrix, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(cscmatrix)
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
# backward compatibility requires us to keep lower indices in ties!
# (and hence to reverse the sort by negating dfs)
mask_inds = (-dfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
return cscmatrix[:, kept_indices], removed_terms
def sentence_prediction_test(class_weight={1: 5, -1:1}, model=SentenceModel(test_mode=True)):
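    # NB: default arguments are evaluated once at definition time, so the
    # default SentenceModel is constructed as soon as this module is imported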
print
print
print
print "Sentence level prediction"
print "=" * 40
print
s = model
print "Model name:\t" + s.__class__.__name__
print s.__doc__
print "class_weight=%s" % (str(class_weight),)
s.generate_data()
s.vectorize()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False, indices=True)
# # tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
# tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 5)]}]
# clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='recall')
print "making scorer"
ftwo_scorer = make_scorer(fbeta_score, beta=2)
tuned_parameters = [{"alpha": np.logspace(-4, -1, 10)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 10)]}]
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring=ftwo_scorer)
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_train, y_train = s.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# if not sample and list_features:
# # not an obvious way to get best features for ensemble
# print show_most_informative_features(s.vectorizer, clf)
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# then train all for most informative features
clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.01, class_weight={1: 5, -1: 1})
X_all = s.X_domain_all(test_domain)
y_all = s.y_domain_all(test_domain)
clf.fit(X_all, y_all)
print show_most_informative_features(s.vectorizer, clf)
def stupid_sentence_prediction_test(model=SentenceModel(test_mode=False)):
print
print
print
print "Sentence level prediction"
print "=" * 40
print
s = model
print "Model name:\t" + s.__class__.__name__
print s.__doc__
# print "class_weight=%s" % (str(class_weight),)
s.generate_data()
s.vectorize()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False, indices=True)
print "making scorer"
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
y_preds = np.array([1] * len(y_test))
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
fold_metric = np.append(fold_metric, accuracy_score(y_test, y_preds))
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f, precision %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2], fold_metric[3])
# if not sample and list_features:
# # not an obvious way to get best features for ensemble
# print show_most_informative_features(s.vectorizer, clf)
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f, precision %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2], summary_metrics[3])
def binary_doc_prediction_test(model=DocumentLevelModel(test_mode=False)):
print
print
print
print "Binary doc prediction"
print "=" * 40
print
s = model
s.generate_data(binarize=True)
s.vectorize()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False, indices=True)
# # tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
# tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 5)]}]
# clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='recall')
# print "making scorer"
# ftwo_scorer = make_scorer(fbeta_score, beta=2)
tuned_parameters = {"alpha": np.logspace(-4, -1, 10), "class_weight": [{1: i, -1: 1} for i in np.logspace(-1, 1, 10)]}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring="f1")
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_train, y_train = s.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# if not sample and list_features:
# # not an obvious way to get best features for ensemble
# print show_most_informative_features(s.vectorizer, clf)
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# then train all for most informative features
clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.01, class_weight={1: 5, -1: 1})
X_all = s.X_domain_all(test_domain)
y_all = s.y_domain_all(test_domain)
clf.fit(X_all, y_all)
print show_most_informative_features(s.vectorizer, clf)
def multitask_document_prediction_test(model=MultiTaskDocumentModel(test_mode=False),
test_domain=CORE_DOMAINS[0]):
print "multitask!"
d = model
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
# d.X_uids contains all the UIds.
# d.y_uids contains a dictionary mapping domains to the UIds for
# which we have labels (in said domain)
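    # (illustrative shapes only -- the real domain strings live in CORE_DOMAINS):
    #   d.y_uids ~ {"blinding": ["uid_12", "uid_40", ...], "randomization": [...], ...}
    #   d.X_uids ~ ["uid_1", "uid_2", ..., "uid_N"]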
#pdb.set_trace()
all_uids = d.X_uids
d.vectorize()
####
# the major change here is we don't loop through the domains!
tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
clf = GridSearchCV(SGDClassifier(loss="log", penalty="L2"),
tuned_parameters, scoring='f1')
kf = KFold(len(all_uids), n_folds=5, shuffle=False) ### TODO 250 is totally random
metrics = defaultdict(list)
for fold_i, (train, test) in enumerate(kf):
print "Training on fold", fold_i,
# note that we do *not* pass in a domain here, because
# we use *all* domain training data
X_train, y_train = d.X_y_uid_filtered(all_uids[train])
print "done!"
clf.fit(X_train, y_train)
print "Testing on fold", fold_i,
for domain in CORE_DOMAINS:
# multitask uses same trained model for all domains, but test on separate test data
X_test, y_test = d.X_y_uid_filtered(all_uids[test], domain)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics[domain].append(fold_metric) # get the scores for positive instances (save them up since all in the wrong order here!)
print "done!"
# then recreate in the right order for printout
for domain in CORE_DOMAINS:
print
print domain
print "*" * 60
print
for fold_i, fold_metric in enumerate(metrics[domain]):
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics[domain], axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def dummy_document_prediction():
print "dummy!"
d = DocumentLevelModel(test_mode=False)
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
d.vectorize()
all_uids = d.X_uids
kf = KFold(len(all_uids), n_folds=5, shuffle=False) ### TODO 250 is totally random
metrics = defaultdict(list)
for fold_i, (train, test) in enumerate(kf):
print "Testing on fold", fold_i,
for domain in CORE_DOMAINS:
# multitask uses same trained model for all domains, but test on separate test data
X_test, y_test = d.X_y_uid_filtered(all_uids[test], domain)
y_preds = np.array(([1] * len(y_test))) # everything gets marked low risk of bias
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics[domain].append(fold_metric) # get the scores for positive instances (save them up since all in the wrong order here!)
print "done!"
# then recreate in the right order for printout
for domain in CORE_DOMAINS:
print
print domain
print "*" * 60
print
for fold_i, fold_metric in enumerate(metrics[domain]):
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics[domain], axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def multitask_hybrid_document_prediction_test(model=MultiTaskHybridDocumentModel(test_mode=True)):
print "multitask! and hybrid! :)"
d = model
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
# d.X_uids contains all the UIds.
# d.y_uids contains a dictionary mapping domains to the UIds for
# which we have labels (in said domain)
#pdb.set_trace()
all_uids = d.X_uids
# d.vectorize()
####
# the major change here is we don't loop through the domains!
tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
clf = GridSearchCV(SGDClassifier(loss="log", penalty="L2"),
tuned_parameters, scoring='f1')
kf = KFold(len(all_uids), n_folds=5, shuffle=False)
metrics = defaultdict(list)
print "...generating sentence data...",
s = SentenceModel(test_mode=False)
s.generate_data()
s.vectorize()
print "done!"
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
for fold_i, (train, test) in enumerate(kf):
sent_clfs = defaultdict(list)
        for domain in CORE_DOMAINS:
            sents_X, sents_y = s.X_domain_all(domain=domain), s.y_domain_all(domain=domain)
            sent_clfs[domain] = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"),
                                             sent_tuned_parameters, scoring='recall')
            # fit this domain's sentence classifier now so it is ready when it is
            # handed to the document model below
            sent_clfs[domain].fit(sents_X, sents_y)
        print "Training on fold", fold_i,
        d.set_sent_model(sent_clfs, s.vectorizer)
        d.vectorize()
        # note that we do *not* pass in a domain here, because
        # we use *all* domain training data
        X_train, y_train = d.X_y_uid_filtered(all_uids[train])
clf.fit(X_train, y_train)
print "done!"
print "Testing on fold", fold_i,
for domain in CORE_DOMAINS:
# multitask uses same trained model for all domains, but test on separate test data
X_test, y_test = d.X_y_uid_filtered(all_uids[test], domain)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics[domain].append(fold_metric) # get the scores for positive instances (save them up since all in the wrong order here!)
print "done!"
# then recreate in the right order for printout
for domain in CORE_DOMAINS:
print
print domain
print "*" * 60
print
for fold_i, fold_metric in enumerate(metrics[domain]):
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics[domain], axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def document_prediction_test(model=DocumentLevelModel(test_mode=False)):
print "Document level prediction"
print "=" * 40
print
d = model
d.generate_data() # some variations use the quote data internally
# for sentence prediction (for additional features)
d.vectorize()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
# f1_prefer_nos = make_scorer(f1_score, pos_label="NO")
tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
clf = GridSearchCV(SGDClassifier(loss="log", penalty="L2"), tuned_parameters, scoring='f1')
# clf = SGDClassifier(loss="hinge", penalty="L2")
domain_uids = d.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_train, y_train = d.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = d.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds, labels=RoB_CLASSES))[:3]
print ('fold %d\t' % (fold_i)) + '\t'.join(RoB_CLASSES)
# for metric_type, scores in zip(["prec.", "recall", "f1"], fold_metric):
# print "%s\t%.2f\t%.2f\t%.2f" % (metric_type, scores[0], scores[1], scores[2])
# print
# print clf.best_params_
#### START CONFUSION
real_no_indices = (y_test=="NO")
print "The actual NOs were predicted as..."
print collections.Counter(y_preds[real_no_indices])
#### END CONFUSION
metrics.append(fold_metric) # get the scores for positive instances
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
mean_scores = np.mean(metrics, axis=0)
print "=" * 40
print 'means \t' + '\t'.join(RoB_CLASSES)
for metric_type, scores in zip(["prec.", "recall", "f1"], mean_scores):
print "%s\t%.2f\t%.2f\t%.2f" % (metric_type, scores[0], scores[1], scores[2])
print
# then train all for most informative features
clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.01)
X_all = d.X_domain_all(test_domain)
y_all = d.y_domain_all(test_domain)
clf.fit(X_all, y_all)
print show_most_informative_features_ynu(d.vectorizer, clf)
def simple_hybrid_prediction_test(model=HybridModel(test_mode=True)):
print "Hybrid prediction"
print "=" * 40
print
s = model
s.generate_data() # some variations use the quote data internally
# for sentence prediction (for additional features)
for test_domain in CORE_DOMAINS:
s.vectorize(test_domain)
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
# tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 5)]}]
# clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
print "making scorer"
ftwo_scorer = make_scorer(fbeta_score, beta=2)
tuned_parameters = [{"alpha": np.logspace(-4, -1, 10)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 10)]}]
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring=ftwo_scorer)
metrics = []
for fold_i, (train, test) in enumerate(kf):
X_train, y_train = s.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# def simple_hybrid_prediction_test(model=HybridModel(test_mode=True)):
# print "Hybrid prediction"
# print "=" * 40
# print
# s = model
# s.generate_data() # some variations use the quote data internally
# # for sentence prediction (for additional features)
# for test_domain in CORE_DOMAINS:
# s.vectorize(test_domain)
# print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
# domain_uids = s.domain_uids(test_domain)
# no_studies = len(domain_uids)
# kf = KFold(no_studies, n_folds=5, shuffle=False)
# tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 5)]}]
# clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
# metrics = []
# for fold_i, (train, test) in enumerate(kf):
# X_train, y_train = s.X_y_uid_filtered(domain_uids[train], test_domain)
# X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
# clf.fit(X_train, y_train)
# y_preds = clf.predict(X_test)
# fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
# metrics.append(fold_metric) # get the scores for positive instances
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# metrics.append(fold_metric) # get the scores for positive instances
# # summary score
# summary_metrics = np.mean(metrics, axis=0)
# print "=" * 40
# print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def true_hybrid_prediction_test(model, test_mode=False):
print "True Hybrid prediction"
print "=" * 40
print
s = model
s.generate_data() # some variations use the quote data internally
# for sentence prediction (for additional features)
s_cheat = HybridModel(test_mode=False)
s_cheat.generate_data()
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = s.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
print "making scorer"
ftwo_scorer = make_scorer(fbeta_score, beta=2)
tuned_parameters = [{"alpha": np.logspace(-4, -1, 10)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 1, 10)]}]
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring=ftwo_scorer)
metrics = []
for fold_i, (train, test) in enumerate(kf):
print "training doc level model with test data, please wait..."
d = DocumentLevelModel(test_mode=False)
d.generate_data(uid_filter=domain_uids[train])
d.vectorize()
doc_X, doc_y = d.X_domain_all(domain=test_domain), d.y_domain_all(domain=test_domain)
doc_tuned_parameters = {"alpha": np.logspace(-4, -1, 10)}
doc_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), doc_tuned_parameters, scoring='f1')
doc_clf.fit(doc_X, doc_y)
s.set_doc_model(doc_clf, d.vectorizer)
s_cheat.vectorize(test_domain)
s.vectorize(test_domain, use_vectorizer=s_cheat.vectorizer)
X_train, y_train = s_cheat.X_y_uid_filtered(domain_uids[train], test_domain)
# train on the *true* labels
X_test, y_test = s.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# summary score
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
def hybrid_doc_prediction_test(model=HybridDocModel(test_mode=True)):
print "Hybrid doc level prediction"
print "=" * 40
print
d = model
d.generate_data() # some variations use the quote data internally
# for sentence prediction (for additional features)
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = d.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
tuned_parameters = {"alpha": np.logspace(-4, -1, 5)}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
metrics = []
for fold_i, (train, test) in enumerate(kf):
s = SentenceModel(test_mode=False)
s.generate_data(uid_filter=domain_uids[train])
s.vectorize()
sents_X, sents_y = s.X_domain_all(domain=test_domain), s.y_domain_all(domain=test_domain)
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
            sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), sent_tuned_parameters, scoring='recall')
sent_clf.fit(sents_X, sents_y)
d.set_sent_model(sent_clf, s.vectorizer)
d.vectorize(test_domain)
X_train, y_train = d.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = d.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds, labels=RoB_CLASSES))[:3]
print ('fold %d\t' % (fold_i)) + '\t'.join(RoB_CLASSES)
for metric_type, scores in zip(["prec.", "recall", "f1"], fold_metric):
print "%s\t%.2f\t%.2f\t%.2f" % (metric_type, scores[0], scores[1], scores[2])
print
metrics.append(fold_metric) # get the scores for positive instances
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
mean_scores = np.mean(metrics, axis=0)
print "=" * 40
print 'means \t' + '\t'.join(RoB_CLASSES)
for metric_type, scores in zip(["prec.", "recall", "f1"], mean_scores):
print "%s\t%.2f\t%.2f\t%.2f" % (metric_type, scores[0], scores[1], scores[2])
print
def binary_hybrid_doc_prediction_test(model=HybridDocModel(test_mode=True)):
print "Binary hybrid doc level prediction"
print "=" * 40
print
d = model
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = d.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
tuned_parameters = {"alpha": np.logspace(-4, -1, 10), "class_weight": [{1: i, -1: 1} for i in np.logspace(-1, 1, 10)]}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
metrics = []
for fold_i, (train, test) in enumerate(kf):
s = SentenceModel(test_mode=True)
s.generate_data(uid_filter=domain_uids[train])
s.vectorize()
sents_X, sents_y = s.X_domain_all(domain=test_domain), s.y_domain_all(domain=test_domain)
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
            sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), sent_tuned_parameters, scoring='recall')
sent_clf.fit(sents_X, sents_y)
d.set_sent_model(sent_clf, s.vectorizer)
d.vectorize(test_domain)
X_train, y_train = d.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = d.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
y_preds = clf.predict(X_test)
fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
metrics.append(fold_metric) # get the scores for positive instances
print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
summary_metrics = np.mean(metrics, axis=0)
print "=" * 40
print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# then train all for most informative features
s = SentenceModel(test_mode=True)
s.generate_data(uid_filter=domain_uids)
s.vectorize()
sents_X, sents_y = s.X_domain_all(domain=test_domain), s.y_domain_all(domain=test_domain)
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
        sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), sent_tuned_parameters, scoring='recall')
sent_clf.fit(sents_X, sents_y)
d.set_sent_model(sent_clf, s.vectorizer)
d.vectorize(test_domain)
clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.1, class_weight={1: 1, -1: 1})
X_all, y_all = d.X_y_uid_filtered(domain_uids, test_domain)
clf.fit(X_all, y_all)
print show_most_informative_features(s.vectorizer, clf)
def binary_hybrid_doc_prediction_test2(model=HybridDocModel(test_mode=True)):
print "Binary hybrid doc level prediction version 2 (maybe quicker!!)"
print "=" * 40
print
d = model
d.generate_data(binarize=True) # some variations use the quote data internally
# for sentence prediction (for additional features)
for test_domain in CORE_DOMAINS:
print ("*"*40) + "\n\n" + test_domain + "\n\n" + ("*" * 40)
domain_uids = d.domain_uids(test_domain)
no_studies = len(domain_uids)
kf = KFold(no_studies, n_folds=5, shuffle=False)
tuned_parameters = {"alpha": np.logspace(-4, -1, 10), "class_weight": [{1: i, -1: 1} for i in np.logspace(-1, 1, 10)]}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='f1')
metrics = []
s = SentenceModel(test_mode=True)
s.generate_data(uid_filter=domain_uids)
s.vectorize()
for fold_i, (train, test) in enumerate(kf):
sents_X, sents_y = s.X_y_uid_filtered(domain_uids[test], test_domain)
# sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
# sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='recall')
sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}]
sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2", class_weight={1:5, -1:1}), sent_tuned_parameters, scoring='recall')
sent_clf.fit(sents_X, sents_y)
d.set_sent_model(sent_clf, s.vectorizer)
d.vectorize(test_domain)
X_train, y_train = d.X_y_uid_filtered(domain_uids[train], test_domain)
X_test, y_test = d.X_y_uid_filtered(domain_uids[test], test_domain)
clf.fit(X_train, y_train)
# print show_most_informative_features(s.vectorizer, clf.best_estimator_)
print show_most_informative_features(s.vectorizer, clf)
# y_preds = clf.predict(X_test)
# fold_metric = np.array(sklearn.metrics.precision_recall_fscore_support(y_test, y_preds))[:,1]
# metrics.append(fold_metric) # get the scores for positive instances
# print "fold %d:\tprecision %.2f, recall %.2f, f-score %.2f" % (fold_i, fold_metric[0], fold_metric[1], fold_metric[2])
# metrics.append(fold_metric) # get the scores for positive instances
# summary_metrics = np.mean(metrics, axis=0)
# print "=" * 40
# print "mean score:\tprecision %.2f, recall %.2f, f-score %.2f" % (summary_metrics[0], summary_metrics[1], summary_metrics[2])
# # then train all for most informative features
# sents_X, sents_y = s.X_domain_all(domain=test_domain), s.y_domain_all(domain=test_domain)
# sent_tuned_parameters = [{"alpha": np.logspace(-4, -1, 5)}, {"class_weight": [{1: i, -1: 1} for i in np.logspace(0, 2, 10)]}]
# sent_clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="L2"), tuned_parameters, scoring='recall')
# sent_clf.fit(sents_X, sents_y)
# d.set_sent_model(sent_clf, s.vectorizer)
# d.vectorize(test_domain)
# clf = SGDClassifier(loss="hinge", penalty="L2", alpha=0.5, class_weight={1: 1, -1: 1})
# X_all, y_all = d.X_y_uid_filtered(domain_uids, test_domain)
# clf.fit(X_all, y_all)
def main():
# dummy_document_prediction()
stupid_sentence_prediction_test()
# true_hybrid_prediction_test(model=HybridModelProbablistic(test_mode=False))
# sentence_prediction_test(model=SentenceModel(test_mode=False))
# simple_hybrid_prediction_test(model=HybridModel(test_mode=False))
# binary_doc_prediction_test()
#print "Try weighting sentences better"
#binary_hybrid_doc_prediction_test2()
# binary_hybrid_doc_prediction_test()
# hybrid_doc_prediction_test(model=HybridDocModel2(test_mode=False))
# document_prediction_test(model=DocumentLevelModel(test_mode=False))
# multitask_document_prediction_test(model=MultiTaskDocumentModel(test_mode=False))
# multitask_hybrid_document_prediction_test(model=MultiTaskHybridDocumentModel(test_mode=False))
if __name__ == '__main__':
main()
| gpl-3.0 | 8,653,868,186,282,305,000 | 31.990558 | 169 | 0.590233 | false |
jakerockland/find-s | find-s.py | 1 | 3211 | # This program is an machine learning experiment with the FindS concept learning algorithm
# Based on an excercise from Machine Learning by Thomas Mitchell (1997)
# By: Jacob Rockland
#
# The attribute EnjoySport indicates whether or not Aldo enjoys his favorite
# water sport on this day
#
# For all possible days with the following attributes:
# Sky: Sunny/Rainy
# AirTemp: Warm/Cold
# Humidity: Normal/High
# Wind: Strong/Weak
# Water: Warm/Cool
# Forecast: Same/Change
#
# Let us represent the hypothesis with the vector:
# [Sky, AirTemp, Humidity, Wind, Water, Forecast]
#
# Where each constraint may be '?' to represent that any value is acceptable,
# '0' to represent that no value is acceptable, or a specific value (from above)
#
# A training example for the hypothesis is True if it correctly predicts that
# Aldo will enjoy his water sport on this day, and False otherwise
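#
# Worked example (illustrative): FindS starts from the most specific hypothesis
# ['0','0','0','0','0','0']; a first positive example
# ['Sunny','Warm','Normal','Strong','Warm','Same'] generalizes it to exactly that
# vector, and a second positive example ['Sunny','Warm','High','Strong','Warm','Same']
# relaxes only the attribute on which they differ, giving
# ['Sunny','Warm','?','Strong','Warm','Same']. Negative examples are ignored.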
import random
attributes = [['Sunny','Rainy'],
['Warm','Cold'],
['Normal','High'],
['Strong','Weak'],
['Warm','Cool'],
['Same','Change']]
num_attributes = len(attributes)
def getRandomTrainingExample(target_concept = ['?'] * num_attributes):
training_example = []
classification = True
for i in range(num_attributes):
training_example.append(attributes[i][random.randint(0,1)])
if target_concept[i] != '?' and target_concept[i] != training_example[i]:
classification = False
return training_example, classification
def findS(training_examples = []):
hypothesis = ['0'] * num_attributes
for example in training_examples:
if example[1]:
for i in range(num_attributes):
example_attribute = example[0][i]
hypothesis_attribute = hypothesis[i]
if example_attribute == attributes[i][0]:
if hypothesis_attribute == '0':
hypothesis_attribute = attributes[i][0]
elif hypothesis_attribute == attributes[i][1]:
hypothesis_attribute = '?'
elif example_attribute == attributes[i][1]:
if hypothesis_attribute == '0':
hypothesis_attribute = attributes[i][1]
elif hypothesis_attribute == attributes[i][0]:
hypothesis_attribute = '?'
hypothesis[i] = hypothesis_attribute
return hypothesis
def experiment(target_concept = ['?'] * num_attributes):
training_examples = []
while findS(training_examples) != target_concept:
training_examples.append(getRandomTrainingExample(target_concept))
return len(training_examples)
def main():
target_concept = ['Sunny','Warm','?','?','?','?']
num_experiments = 1000
experiment_results = []
for i in range(num_experiments):
experiment_results.append(experiment(target_concept))
average_result = sum(experiment_results) / num_experiments
print(str(len(experiment_results)) + ' Experiments Ran')
print('Average # Examples Required: ' + str(average_result))
print('Target Concept:' + str(target_concept))
if __name__ == "__main__":
main()
| mit | 5,064,658,467,091,919,000 | 37.22619 | 90 | 0.629088 | false |
souzabrizolara/py-home-shell | src/dao/appliancedao.py | 1 | 1109 | __author__ = 'alisonbento'
import basedao
from src.entities.hsappliance import HomeShellAppliance
import datetime
import configs
class ApplianceDAO(basedao.BaseDAO):
def __init__(self, connection):
basedao.BaseDAO.__init__(self, connection, 'hs_appliances', 'appliance_id')
def convert_row_to_object(self, entity_row):
appliance = HomeShellAppliance()
appliance.id = entity_row['appliance_id']
appliance.package = entity_row['package']
appliance.type = entity_row['type']
appliance.name = entity_row['type']
appliance.key = None
appliance.address = entity_row['address']
appliance.hash = entity_row['appliance_hash']
appliance.modified = entity_row['modified']
appliance.modified_datetime = datetime.datetime.strptime(appliance.modified, configs.DATABASE_DATE_FORMAT)
return appliance
def update(self, entity):
cursor = self.connection.cursor()
sql = "UPDATE " + self.table + " SET modified = ? WHERE appliance_id = ?"
cursor.execute(sql, (entity.modified, entity.id))
| apache-2.0 | 3,405,373,118,082,334,000 | 33.65625 | 114 | 0.66817 | false |
Flutras/techstitution | app/mod_main/views.py | 1 | 15521 | from flask import Blueprint, render_template, request, redirect, url_for, Response, jsonify, flash
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, DateTimeField, TextField, SubmitField, TextAreaField, RadioField
from wtforms import validators, ValidationError
from wtforms.validators import InputRequired
from bson import ObjectId
from app import mongo
from bson import json_util
import json
mod_main = Blueprint('main', __name__)
@mod_main.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] == 'admin' and request.form['password'] == 'admin':
return redirect(url_for('main.index'))
else:
error = 'Invalid Credentials. Please try again.'
return render_template('mod_main/login.html', error=error)
else:
return render_template('mod_main/login.html', error=error)
class AddPeopleForm(FlaskForm):
firstname = StringField('Firstname', validators=[InputRequired("Please fill out firstname")])
lastname = StringField('Lastname', validators=[InputRequired()])
# submit = SubmitField("Submit")
@mod_main.route('/index', methods=['GET', 'POST'])
def indexpage():
form=AddPeopleForm();
if request.method == 'GET':
reports = mongo.db.reports.find()
return render_template('mod_main/index.html', reports=reports, form=form)
@mod_main.route('/', methods=['GET', 'POST'])
def index():
form = AddPeopleForm()
if request.method == 'GET':
reports = mongo.db.reports.find()
audits = mongo.db.audits.find()
return render_template('mod_main/dashboard.html', reports=reports, audits=audits, form=form)
elif request.method == 'POST' and form.validate_on_submit():
mongo.db.reports.insert({
"firstname": request.form['firstname'],
"lastname": request.form['lastname']
})
return redirect(url_for('main.index'))
@mod_main.route('/audit_list', methods=['GET', 'POST'])
def audit_list():
audits = mongo.db.audits.find()
return render_template('mod_main/audit_list.html', audits=audits)
# New Audit Form
class AddAuditForm(FlaskForm):
audit_ref_num = IntegerField('Reference number', validators=[InputRequired("Please enter audit reference number!")])
audit_title = StringField('Title', validators=[InputRequired("Please enter audit title!")])
audit_type = StringField('Audit type', validators=[InputRequired("Please enter audit type!")])
audit_organization = StringField('Organization', validators=[InputRequired("Please enter organization!")])
audit_start_date = DateTimeField('Audit Start Date', validators=[InputRequired("Please enter start date!")])
audit_end_date = DateTimeField('Audit End Date', validators=[InputRequired("Please enter end date!")])
audit_auditee = StringField('Auditee', validators=[InputRequired("Please enter auditee!")])
audit_place = StringField('Place', validators=[InputRequired("Please enter place!")])
audit_frefnum = IntegerField('Follow-up reference number', validators=[InputRequired("Please enter follow-up reference number!")])
audit_chrefnum = IntegerField('Change reference number', validators=[InputRequired("Please enter changed reference number!")])
audit_tl = StringField('Audit Team Leader', validators=[InputRequired("Please enter team leader name!")])
audit_tm = StringField('Audit Team Members', validators=[InputRequired("Please enter team members!")])
audit_ap = StringField('Auditee Participants', validators=[InputRequired("Please enter auditee participants!")])
submit = SubmitField("Submit")
@mod_main.route('/add_audit_form', methods=['GET', 'POST'])
def add_audit_form():
form = AddAuditForm()
if request.method == 'GET':
audits = mongo.db.audits.find()
return render_template('mod_main/add_audit_form.html', audits=audits, form=form)
elif request.method == 'POST':
data = request.form
new_inputs = ({})
counter = 1
while counter < 5:
if 'input'+str(counter) in data:
new_inputs.update({
'input_'+str(counter): data['input'+str(counter)]
})
counter += 1
print new_inputs
mongo.db.audits.insert({
"new_inputs": new_inputs,
"audit_ref_num": request.form['audit_ref_num'],
"audit_title": request.form['audit_title'],
"audit_type": request.form['audit_type'],
"audit_organization": request.form['audit_organization'],
"audit_start_date": request.form['audit_start_date'],
"audit_end_date": request.form['audit_end_date'],
"audit_auditee": request.form['audit_auditee'],
"audit_place": request.form['audit_place'],
"audit_frefnum": request.form['audit_frefnum'],
"audit_chrefnum": request.form['audit_chrefnum'],
"audit_tl": request.form['audit_tl'],
"audit_tm": request.form['audit_tm'],
"audit_ap": request.form['audit_ap']
})
return redirect(url_for('main.index'))
# New NC Form
class AddNCForm(FlaskForm):
nc_title = StringField('Title', validators=[InputRequired("Please enter NC title!")])
nc_operator_auditee = StringField('Operator Auditee', validators=[InputRequired("Please enter operator auditee!")])
nc_number = IntegerField('Number', validators=[InputRequired("Please enter number!")])
nc_date = DateTimeField('Date', validators=[InputRequired("Please enter date!")])
nc_status = StringField('Status', validators=[InputRequired("Please enter status!")])
nc_agreed_date_for_CAP = DateTimeField('Agreed date for CAP', validators=[InputRequired("Please enter agreed date for CAP!")])
nc_level = StringField('Level', validators=[InputRequired("Please enter level!")])
nc_due_date = DateTimeField('Due Date', validators=[InputRequired("Please enter due date!")])
nc_closure_date = DateTimeField('Closure Date', validators=[InputRequired("Please enter closure date!")])
nc_requirement_references = StringField('Requirement References', validators=[InputRequired("Please enter requirement refeences!")])
nc_further_references = StringField('Further References', validators=[InputRequired("Please enter further references!")])
nc_auditor_ofcaa = StringField('Auditor of CAA', validators=[InputRequired("Please enter auditor of CAA!")])
nc_auditee_rfCAP = StringField('Auditee responsible for CAP', validators=[InputRequired("Please enter auditee!")])
requirement_references = TextAreaField('', validators=[InputRequired("Please enter requirement references!")])
nc_details = TextAreaField('Non Conformity Details', validators=[InputRequired("Please enter details!")])
submit = SubmitField("Submit")
@mod_main.route('/<string:audit_id>/add_nc_form', methods=['GET', 'POST'])
def add_nc_form(audit_id):
form = AddNCForm()
if request.method == 'GET':
audit = mongo.db.audits.find({"_id": ObjectId(audit_id)})
return render_template('mod_main/add_nc_form.html', audit=audit, form=form)
elif request.method == 'POST':
# print "post request"
mongo.db.audits.update({"_id": ObjectId(audit_id)}, {"$set": {nonconformities: {
"nc_title": request.form['nc_title'],
"nc_operator_auditee": request.form['nc_operator_auditee'],
"nc_number": request.form['nc_number'],
"nc_date": request.form['nc_date'],
"nc_status": request.form['nc_status'],
"nc_agreed_date_for_CAP": request.form['nc_agreed_date_for_CAP'],
"nc_level": request.form['nc_level'],
"nc_due_date": request.form['nc_due_date'],
"nc_closure_date": request.form['nc_closure_date'],
"nc_requirement_references": request.form['nc_requirement_references'],
"nc_further_references": request.form['nc_further_references'],
"nc_auditor_ofcaa": request.form['nc_auditor_ofcaa'],
"nc_auditee_rfCAP": request.form['nc_auditee_rfCAP'],
"requirement_references": request.form['requirement_references'],
"nc_details": request.form['nc_details']
}}})
return redirect(url_for('main.show_audit', audit_id=audit_id))
# New NC Form
class AddCAForm(FlaskForm):
ca_description = StringField('Corrective Action Description', validators=[InputRequired("Please enter description!")])
ca_date_of_capapproval = DateTimeField('Date of CAP approval', validators=[InputRequired("Please enter date!")])
ca_due_date = DateTimeField('Due Date', validators=[InputRequired("Please enter due date!")])
ca_contact_person = StringField('Contact Person', validators=[InputRequired("Please enter contact!")])
ca_closure_date = DateTimeField('Closure Date', validators=[InputRequired("Please enter due date!")])
ca_due_date_history = TextAreaField('Due Date History', validators=[InputRequired("Please enter due date!")])
submit = SubmitField("Submit")
@mod_main.route('/<string:audit_id>/add_ca_form', methods=['GET', 'POST'])
def add_ca_form(audit_id):
form = AddCAForm()
if request.method == 'GET':
audit = mongo.db.audits.find({"_id": ObjectId(audit_id)})
return render_template('mod_main/add_ca_form.html', audit=audit, form=form)
elif request.method == 'POST':
# print "post request"
mongo.db.correctiveactions.update({"_id": ObjectId(audit_id)}, {"$set": {
"ca_description": request.form['ca_description'],
"ca_date_of_capapproval": request.form['ca_date_of_capapproval'],
"ca_due_date": request.form['ca_due_date'],
"ca_contact_person": request.form['ca_contact_person'],
"ca_closure_date": request.form['ca_closure_date'],
"ca_due_date_history": request.form['ca_due_date_history']
}})
return redirect(url_for('main.show_nc', audit_id=audit_id))
@mod_main.route('/add_people_form', methods=['GET', 'POST'])
def add_people_form():
form = AddPeopleForm()
if request.method == 'GET':
reports = mongo.db.reports.find()
return render_template('mod_main/add_people_form.html', reports=reports, form=form)
elif request.method == 'POST' and form.validate_on_submit():
# print "post request"
mongo.db.reports.insert({
"firstname": request.form['firstname'],
"lastname": request.form['lastname']
})
# return "Form successfully submitted!"
return redirect(url_for('main.indexpage'))
# behet post request ne kete url
@mod_main.route('/remove/audit', methods=['POST'])
def remove_audit():
if request.method == 'POST':
audit_id = request.form['id']
mongo.db.audits.remove({"_id": ObjectId(audit_id)})
return Response(json.dumps({"removed": True}), mimetype='application/json')
@mod_main.route('/remove/report', methods=['POST'])
def remove_report():
if request.method == 'POST':
report_id = request.form['id']
mongo.db.reports.remove({"_id": ObjectId(report_id)})
return Response(json.dumps({"removed": True}), mimetype='application/json')
@mod_main.route('/show_audit/<string:audit_id>', methods=['GET', 'POST'])
def show_audit(audit_id):
form = AddAuditForm()
if request.method == 'GET':
audit = mongo.db.audits.find_one({"_id": ObjectId(audit_id)})
return render_template('mod_main/audit_details.html', audit=audit, form=form)
@mod_main.route('/edit/<string:audit_id>', methods=['GET', 'POST'])
def edit_audit(audit_id):
form = AddAuditForm()
if request.method == 'GET':
audit = mongo.db.audits.find_one({"_id": ObjectId(audit_id)})
return render_template('mod_main/audit_edit.html', audit=audit, form=form)
elif request.method == 'POST':
audit = mongo.db.audits.find_one({"_id": ObjectId(audit_id)})
mongo.db.audits.update({"_id": ObjectId(audit_id)}, {"$set": {
"audit_ref_num": request.form['audit_ref_num'],
"audit_title": request.form['audit_title'],
"audit_type": request.form['audit_type'],
"audit_organization": request.form['audit_organization'],
"audit_start_date": request.form['audit_start_date'],
"audit_end_date": request.form['audit_end_date'],
"audit_auditee": request.form['audit_auditee'],
"audit_place": request.form['audit_place'],
"audit_frefnum": request.form['audit_frefnum'],
"audit_chrefnum": request.form['audit_chrefnum'],
"audit_tl": request.form['audit_tl'],
"audit_tm": request.form['audit_tm'],
"audit_ap": request.form['audit_ap']
}})
return redirect(url_for('main.show_audit', audit_id= audit_id))
# return 'Showing result ' + str(result)
@mod_main.route('/show_report/<string:report_id>', methods=['GET'])
def show_report(report_id):
result = mongo.db.reports.find_one({"_id": ObjectId(report_id)})
return 'Showing result ' + str(result)
@mod_main.route('/add-people', methods=['GET', 'POST'])
def add_people():
# TODO: Implement POST REQUEST
# if success:
form = AddPeopleForm()
reports = mongo.db.reports.find();
if request.method == 'GET':
return render_template('mod_main/index.html', form=form, reports=reports)
elif request.method == 'POST':
# Get form
form = AddPeopleForm()
# Get form data
data = form.data
# Add document to the database
added_report_id = mongo.db.reports.insert(data)
# Get the added document
report_doc = mongo.db.reports.find_one({"_id": ObjectId(added_report_id)})
# Return a json response
return Response(json_util.dumps(report_doc),mimetype="application/json")
else:
return Response(json_util.dumps({"error":"Something went wrong!"}),mimetype="application/json")
@mod_main.route('/add-audit', methods=['GET', 'POST'])
def add_audit():
# TODO: Implement POST REQUEST
# if success:
form = AddAuditForm()
if request.method == 'POST':
if form.validate() == False:
# flash('All fields are required!')
audits = mongo.db.audits.find()
return render_template('mod_main/add_audit.html', audits=audits, form=form)
else:
mongo.db.audits.insert({
"audit_title": request.form['audit_title'],
"audit_ref_num": request.form['audit_ref_num'],
"audit_start_date": request.form['audit_start_date']
})
return redirect(url_for('main.audit_list'))
elif request.method == 'GET':
return render_template('mod_main/add_audit.html', form=form)
# views for new bootstrap admin dashboard theme template
@mod_main.route('/corrective_actions', methods=['GET', 'POST'])
def corrective_actions():
# audits = mongo.db.audits.find()
return render_template('mod_main/corrective_actions.html')
@mod_main.route('/forms', methods=['GET', 'POST'])
def forms():
# audits = mongo.db.audits.find()
return render_template('mod_main/forms.html')
@mod_main.route('/blank-page', methods=['GET', 'POST'])
def blank_page():
# audits = mongo.db.audits.find()
return render_template('mod_main/blank-page.html')
| cc0-1.0 | -1,095,109,134,127,927,400 | 42.844633 | 136 | 0.644611 | false |
theDarkForce/websearch | webseach_book.py | 1 | 2428 | # -*- coding: UTF-8 -*-
# webseach
# create at 2015/10/30
# autor: qianqians
import sys
reload(sys)
sys.setdefaultencoding('utf8')
sys.path.append('../')
from webget import gethtml
import pymongo
from doclex import doclex
import time
collection_key = None
def seach(urllist):
def process_keyurl(keyurl):
if keyurl is not None:
for key, urllist in keyurl.iteritems():
for url in urllist:
urlinfo = gethtml.process_url(url)
if urlinfo is None:
continue
list, keyurl1 = urlinfo
if list is not None:
gethtml.collection.insert({'key':key, 'url':url, 'timetmp':time.time()})
if keyurl1 is not None:
process_keyurl(keyurl1)
def process_urllist(url_list):
for url in url_list:
#print url,"sub url"
urlinfo = gethtml.process_url(url)
if urlinfo is None:
continue
list, keyurl = urlinfo
if list is not None:
process_urllist(list)
if keyurl is not None:
process_keyurl(keyurl)
time.sleep(0.1)
suburl = []
subkeyurl = {}
for url in urllist:
print url, "root url"
urlinfo = gethtml.process_url(url)
if urlinfo is None:
continue
list, keyurl = urlinfo
suburl.extend(list)
subkeyurl.update(keyurl)
try:
process_urllist(suburl)
process_keyurl(subkeyurl)
except:
import traceback
traceback.print_exc()
urllist = ["http://www.qidian.com/Default.aspx",
"http://www.zongheng.com/",
"http://chuangshi.qq.com/"
]
def refkeywords():
c = collection_key.find()
keywords = []
for it in c:
keywords.append(it["key"])
doclex.keykorks = keywords
if __name__ == '__main__':
conn = pymongo.Connection('localhost',27017)
db = conn.webseach
gethtml.collection = db.webpage
gethtml.collection_url_profile = db.urlprofile
gethtml.collection_url_title = db.urltitle
collection_key = db.keys
t = 0
while True:
timetmp = time.time()-t
if timetmp > 86400:
refkeywords()
t = time.time()
#urllist = seach(urllist)
seach(urllist) | bsd-2-clause | 5,170,723,839,560,860,000 | 21.700935 | 96 | 0.543657 | false |
mmw125/MuDimA | server/database_reader.py | 1 | 7747 | """Functions for reading from the database."""
import constants
import database_utils
import models
def get_urls():
"""Get all of the urls in articles in the database."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT link FROM article;")
urls = set(item[0] for item in cursor.fetchall())
cursor.execute("SELECT link FROM bad_article;")
return urls.union(item[0] for item in cursor.fetchall())
def get_number_topics(category=None):
"""Get just the number of topics from the database."""
with database_utils.DatabaseConnection() as (connection, cursor):
if category is None:
cursor.execute("SELECT 1 FROM article, topic WHERE article.topic_id = topic.id AND "
"article.topic_id IS NOT NULL GROUP BY topic.id ORDER BY count(*) DESC;")
else:
cursor.execute("SELECT 1 FROM article, topic WHERE article.topic_id = topic.id AND article.category = ? AND"
" article.topic_id IS NOT NULL GROUP BY topic.id ORDER BY count(*) DESC;", (category,))
return len(cursor.fetchall())
def get_topics(category=None, page_number=0, articles_per_page=constants.ARTICLES_PER_PAGE):
"""Get the topics for the given page."""
with database_utils.DatabaseConnection() as (connection, cursor):
start = page_number * articles_per_page
end = (page_number + 1) * articles_per_page
total_items = get_number_topics()
if category is None:
cursor.execute("SELECT topic.name, topic.id, topic.image_url, topic.category, count(*) FROM article, topic "
"WHERE article.topic_id = topic.id AND article.topic_id IS NOT NULL "
"GROUP BY topic.id ORDER BY count(*) DESC;")
else:
cursor.execute("SELECT topic.name, topic.id, topic.image_url, topic.category, count(*) FROM article, topic "
"WHERE article.topic_id = topic.id AND topic.category = ? AND article.topic_id IS NOT NULL "
"GROUP BY topic.id ORDER BY count(*) DESC;", (category,))
return sorted([{"total_items": total_items, "title": item[0], "id": item[1],
"image": item[2], "category": item[3], "count": item[4]}
for item in cursor.fetchall()[start:end]], key=lambda x: -x["count"])
def get_sources():
"""Get all of the stories for the topic with the given topic id. Returns empty dict if topic not in database."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT source, count(1) FROM article GROUP BY source")
return cursor.fetchall()
def get_stories_for_topic(topic_id):
"""Get all of the stories for the topic with the given topic id. Returns empty dict if topic not in database."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT name FROM topic WHERE id=?", (topic_id,))
db_item = cursor.fetchone()
if db_item is not None:
title = db_item[0]
cursor.execute("SELECT name, link, image_url, group_fit_x, group_fit_y, popularity, source, favicon "
"FROM article WHERE topic_id=?",
(topic_id,))
items = cursor.fetchall()
else:
title, items = None, []
return {"title": title, "articles": [{"name": item[0], "link": item[1], "image": item[2], "x": item[3],
"y": item[4], "popularity": item[5], "source": item[6], "favicon": item[7]
} for item in items]}
def get_ungrouped_articles():
"""Get the items in the database and puts them into Article and Grouping objects."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT name, link, article_text FROM article "
"WHERE article_text != '' AND topic_id IS NULL;")
articles = []
for item in cursor.fetchall():
name, url, article_text = item
articles.append(models.Article(url=url, title=name, text=article_text, in_database=True,
keywords=_get_article_keywords(url, cursor)))
return articles
def get_top_keywords(num=constants.DEFAULT_NUM_KEYWORDS):
"""Get the top keywords used in the database."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT keyword, COUNT(1) AS c FROM keyword GROUP BY keyword ORDER BY c DESC LIMIT ?;", (num,))
return [item[0] for item in cursor.fetchall()]
def get_groups_with_unfit_articles():
"""Get the ids of the groups in the database that have articles that are not fit."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT topic_id FROM article WHERE group_fit_x IS NULL AND topic_id IS NOT NULL "
"GROUP BY topic_id;")
return [i[0] for i in cursor.fetchall()]
def get_number_articles_without_overall_fit():
"""Get the number of articles in the database without an overall fit."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT topic_id FROM article WHERE group_fit_x IS NULL AND topic_id IS NOT NULL;")
return len(cursor.fetchall())
def _get_article_keywords(article_url, cursor):
"""Get the keywords for the given article."""
cursor.execute("SELECT keyword FROM keyword WHERE article_link = ?;", (article_url,))
return set(item[0] for item in cursor.fetchall())
def get_grouped_articles():
"""Get the items in the database and puts them into Article and Grouping objects."""
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT name, topic_id, link, article_text, image_url FROM article "
"WHERE article_text != '' AND topic_id IS NOT NULL;")
groups = {}
for item in cursor.fetchall():
name, id, url, article_text, image_url = item
article = models.Article(url=url, title=name, text=article_text, urlToImage=image_url, in_database=True)
article.set_keywords(_get_article_keywords(url, cursor))
if id in groups:
groups.get(id).add_article(article, new_article=False)
else:
groups[id] = models.Grouping(article, uuid=id, in_database=True, has_new_articles=False)
return list(groups.values())
def get_articles(keyword, page=0, limit=10, order_by=None, descending=True):
"""Get the items in the database and puts them into Article and Grouping objects."""
order_by = "date" if order_by is None else order_by
with database_utils.DatabaseConnection() as (connection, cursor):
cursor.execute("SELECT name, link, image_url, fit_x, fit_y, popularity, source, favicon "
"FROM keyword JOIN article ON keyword.article_link = article.link "
"WHERE keyword = ? OR ? GROUP BY article_link ORDER BY ? DESC;",
(keyword, keyword is None, order_by))
items = [item for item in cursor.fetchall()]
num_items = len(items)
if not descending:
items.reverse()
start = limit * page
items = items[start:start + limit]
return {"num": num_items, "articles": [{
"name": item[0], "link": item[1], "image": item[2], "x": item[3], "y": item[4],
"popularity": item[5], "source": item[6], "favicon": item[7]} for item in items]}
| gpl-3.0 | 4,023,521,665,624,149,500 | 51.70068 | 120 | 0.61469 | false |
squirrelo/qiita | qiita_pet/handlers/study_handlers/tests/test_prep_template.py | 1 | 2221 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from json import loads
from qiita_pet.test.tornado_test_base import TestHandlerBase
class TestNewPrepTemplateAjax(TestHandlerBase):
def test_get(self):
response = self.get('/study/new_prep_template/', {'study_id': '1'})
self.assertEqual(response.code, 200)
class TestPrepTemplateGraphAJAX(TestHandlerBase):
def test_get(self):
response = self.get('/prep/graph/', {'prep_id': 1})
self.assertEqual(response.code, 200)
exp = {"status": "success",
"node_labels": [[1, "Raw data 1 - FASTQ"],
[3, "Demultiplexed 2 - Demultiplexed"],
[2, "Demultiplexed 1 - Demultiplexed"],
[4, "BIOM - BIOM"],
[5, "BIOM - BIOM"],
[6, "BIOM - BIOM"]],
"message": "",
"edge_list": [[1, 3], [1, 2], [2, 4], [2, 5], [2, 6]]}
obs = loads(response.body)
self.assertEqual(obs['status'], exp['status'])
self.assertEqual(obs['message'], exp['message'])
self.assertItemsEqual(obs['node_labels'], exp['node_labels'])
self.assertItemsEqual(obs['edge_list'], exp['edge_list'])
class TestPrepTemplateAJAXReadOnly(TestHandlerBase):
def test_get(self):
response = self.get('/study/description/prep_template/',
{'prep_id': 1, 'study_id': 1})
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
class TestPrepFilesHandler(TestHandlerBase):
def test_get_files_not_allowed(self):
response = self.post(
'/study/prep_files/',
{'type': 'BIOM', 'prep_file': 'uploaded_file.txt', 'study_id': 1})
self.assertEqual(response.code, 405)
if __name__ == "__main__":
main()
| bsd-3-clause | -1,560,902,962,253,008,600 | 38.660714 | 79 | 0.52724 | false |
andrew-rogers/DSP | GPS/file_reader.py | 1 | 1929 | #!/usr/bin/env python3
"""Global Position System (GPS) file reader for captured IQ signal
The Standard Positioning Service (SPS) spec can be found at
https://www.navcen.uscg.gov/pubs/gps/sigspec/gpssps1.pdf
"""
# Copyright (c) 2021 Andrew Rogers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
class FileReader :
# Data file available from https://sourceforge.net/projects/gnss-sdr/files/data/
def __init__( self, filename='2013_04_04_GNSS_SIGNAL_at_CTTC_SPAIN/2013_04_04_GNSS_SIGNAL_at_CTTC_SPAIN.dat') :
self.offset = 0
self.filename = filename
def read( self, num_samples ) :
data=np.fromfile(self.filename, dtype=np.int16, offset=self.offset, count=num_samples*2)
self.offset = self.offset + 2 * len(data)
# Convert values to complex
data=data.reshape(num_samples,2)
data=np.matmul(data,[1,1j])
return data
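

# Minimal usage sketch (illustrative, not part of the original module; it assumes
# the CTTC capture file referenced above has been downloaded alongside this script):
if __name__ == '__main__':
    reader = FileReader()
    iq = reader.read(4096)        # 4096 complex baseband samples (I + jQ)
    print(len(iq), iq.dtype)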
| gpl-3.0 | -1,833,778,985,490,122,800 | 39.1875 | 115 | 0.733022 | false |
renzon/pypratico | setup.py | 1 | 4933 | import codecs
import os
import sys
from distutils.util import convert_path
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ["*.py", "*.pyc", "*$py.class", "*~", ".*", "*.bak"]
standard_exclude_directories = [
".*", "CVS", "_darcs", "./build", "./dist", "EGG-INFO", "*.egg-info"
]
# (c) 2005 Ian Bicking and contributors; written for Paste (
# http://pythonpaste.org)
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
where=".",
package="",
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{"package": [files]}
Where ``files`` is a list of all the files in that package that
don"t match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won"t be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren"t
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
"""
out = {}
stack = [(convert_path(where), "", package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
                            sys.stderr.write(
                                "Directory %s ignored by pattern %s\n"
                                % (fn, pattern))
break
if bad_name:
continue
if (os.path.isfile(os.path.join(fn, "__init__.py"))
and not prefix):
if not package:
new_package = name
else:
new_package = package + "." + name
stack.append((fn, "", new_package, False))
else:
stack.append(
(fn, prefix + name + "/", package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
                            sys.stderr.write(
                                "File %s ignored by pattern %s\n"
                                % (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out
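# Shape of the value find_package_data() returns (file names below are
# hypothetical):
#     {"pypraticot6": ["templates/base.html", "static/style.css"]}
# i.e. package names mapped to the non-Python files that survive the exclude
# patterns, ready to be passed as ``package_data`` in the setup() call below.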
PACKAGE = "pypraticot6"
DESCRIPTION = "Pacote de exemplo do curso pypratico"
NAME = PACKAGE
AUTHOR = "Renzo Nuccitelli"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/renzon/pypratico"
VERSION = __import__(PACKAGE).__version__
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="AGPL",
url=URL,
packages=find_packages(exclude=["tests.*", "tests"]),
package_data=find_package_data(PACKAGE, only_in_packages=False),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Affero General Public License v3 or "
"later (AGPLv3+)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Paste",
],
zip_safe=False,
install_requires=[
'requests>=2.13.0'
]
)
| agpl-3.0 | 6,054,291,991,308,348,000 | 33.739437 | 77 | 0.550578 | false |
syndbg/ubuntu-make | tests/medium/test_web.py | 1 | 1847 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for web category"""
from . import ContainerTests
import os
from ..large import test_web
class FirefoxDevContainer(ContainerTests, test_web.FirefoxDevTests):
"""This will test the Firefox dev integration inside a container"""
TIMEOUT_START = 20
TIMEOUT_STOP = 10
def setUp(self):
self.hostname = "download.mozilla.org"
self.port = "443"
super().setUp()
# override with container path
self.installed_path = os.path.expanduser("/home/{}/tools/web/firefox-dev".format(self.DOCKER_USER))
class VisualStudioCodeContainer(ContainerTests, test_web.VisualStudioCodeTest):
"""This will test the Visual Studio Code integration inside a container"""
TIMEOUT_START = 20
TIMEOUT_STOP = 10
def setUp(self):
self.hostname = "code.visualstudio.com"
self.port = "443"
self.apt_repo_override_path = os.path.join(self.APT_FAKE_REPO_PATH, 'vscode')
super().setUp()
# override with container path
self.installed_path = os.path.expanduser("/home/{}/tools/web/visual-studio-code".format(self.DOCKER_USER))
| gpl-3.0 | 7,249,825,882,999,387,000 | 33.849057 | 114 | 0.707093 | false |
gurneyalex/odoo | addons/auth_signup/models/res_partner.py | 4 | 7625 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import random
import werkzeug.urls
from collections import defaultdict
from datetime import datetime, timedelta
from odoo import api, exceptions, fields, models, _
class SignupError(Exception):
pass
def random_token():
# the token has an entropy of about 120 bits (6 bits/char * 20 chars)
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.SystemRandom().choice(chars) for _ in range(20))
def now(**kwargs):
return datetime.now() + timedelta(**kwargs)
class ResPartner(models.Model):
_inherit = 'res.partner'
signup_token = fields.Char(copy=False, groups="base.group_erp_manager")
signup_type = fields.Char(string='Signup Token Type', copy=False, groups="base.group_erp_manager")
signup_expiration = fields.Datetime(copy=False, groups="base.group_erp_manager")
signup_valid = fields.Boolean(compute='_compute_signup_valid', string='Signup Token is Valid')
signup_url = fields.Char(compute='_compute_signup_url', string='Signup URL')
@api.depends('signup_token', 'signup_expiration')
def _compute_signup_valid(self):
dt = now()
for partner, partner_sudo in zip(self, self.sudo()):
partner.signup_valid = bool(partner_sudo.signup_token) and \
(not partner_sudo.signup_expiration or dt <= partner_sudo.signup_expiration)
def _compute_signup_url(self):
""" proxy for function field towards actual implementation """
result = self.sudo()._get_signup_url_for_action()
for partner in self:
if any(u.has_group('base.group_user') for u in partner.user_ids if u != self.env.user):
self.env['res.users'].check_access_rights('write')
partner.signup_url = result.get(partner.id, False)
def _get_signup_url_for_action(self, url=None, action=None, view_type=None, menu_id=None, res_id=None, model=None):
""" generate a signup url for the given partner ids and action, possibly overriding
the url state components (menu_id, id, view_type) """
res = dict.fromkeys(self.ids, False)
for partner in self:
base_url = partner.get_base_url()
# when required, make sure the partner has a valid signup token
if self.env.context.get('signup_valid') and not partner.user_ids:
partner.sudo().signup_prepare()
route = 'login'
# the parameters to encode for the query
query = dict(db=self.env.cr.dbname)
signup_type = self.env.context.get('signup_force_type_in_url', partner.sudo().signup_type or '')
if signup_type:
route = 'reset_password' if signup_type == 'reset' else signup_type
if partner.sudo().signup_token and signup_type:
query['token'] = partner.sudo().signup_token
elif partner.user_ids:
query['login'] = partner.user_ids[0].login
else:
continue # no signup token, no user, thus no signup url!
if url:
query['redirect'] = url
else:
fragment = dict()
base = '/web#'
if action == '/mail/view':
base = '/mail/view?'
elif action:
fragment['action'] = action
if view_type:
fragment['view_type'] = view_type
if menu_id:
fragment['menu_id'] = menu_id
if model:
fragment['model'] = model
if res_id:
fragment['res_id'] = res_id
if fragment:
query['redirect'] = base + werkzeug.urls.url_encode(fragment)
url = "/web/%s?%s" % (route, werkzeug.urls.url_encode(query))
if not self.env.context.get('relative_url'):
url = werkzeug.urls.url_join(base_url, url)
res[partner.id] = url
return res
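    # Illustrative result (host, database and token below are made up): for a
    # partner holding a pending 'signup' token and no explicit url/action, the
    # returned entry looks like
    #     https://example.odoo.test/web/signup?db=mydb&token=AbC123xyz
    # while a 'reset' token is routed to /web/reset_password instead.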
def action_signup_prepare(self):
return self.signup_prepare()
def signup_get_auth_param(self):
""" Get a signup token related to the partner if signup is enabled.
If the partner already has a user, get the login parameter.
"""
if not self.env.user.has_group('base.group_user') and not self.env.is_admin():
raise exceptions.AccessDenied()
res = defaultdict(dict)
allow_signup = self.env['res.users']._get_signup_invitation_scope() == 'b2c'
for partner in self:
partner = partner.sudo()
if allow_signup and not partner.user_ids:
partner.signup_prepare()
res[partner.id]['auth_signup_token'] = partner.signup_token
elif partner.user_ids:
res[partner.id]['auth_login'] = partner.user_ids[0].login
return res
def signup_cancel(self):
return self.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
def signup_prepare(self, signup_type="signup", expiration=False):
""" generate a new token for the partners with the given validity, if necessary
:param expiration: the expiration datetime of the token (string, optional)
"""
for partner in self:
if expiration or not partner.signup_valid:
token = random_token()
while self._signup_retrieve_partner(token):
token = random_token()
partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
return True
@api.model
def _signup_retrieve_partner(self, token, check_validity=False, raise_exception=False):
""" find the partner corresponding to a token, and possibly check its validity
:param token: the token to resolve
:param check_validity: if True, also check validity
:param raise_exception: if True, raise exception instead of returning False
:return: partner (browse record) or False (if raise_exception is False)
"""
partner = self.search([('signup_token', '=', token)], limit=1)
if not partner:
if raise_exception:
raise exceptions.UserError(_("Signup token '%s' is not valid") % token)
return False
if check_validity and not partner.signup_valid:
if raise_exception:
raise exceptions.UserError(_("Signup token '%s' is no longer valid") % token)
return False
return partner
@api.model
def signup_retrieve_info(self, token):
""" retrieve the user info about the token
:return: a dictionary with the user information:
- 'db': the name of the database
- 'token': the token, if token is valid
- 'name': the name of the partner, if token is valid
- 'login': the user login, if the user already exists
- 'email': the partner email, if the user does not exist
"""
partner = self._signup_retrieve_partner(token, raise_exception=True)
res = {'db': self.env.cr.dbname}
if partner.signup_valid:
res['token'] = token
res['name'] = partner.name
if partner.user_ids:
res['login'] = partner.user_ids[0].login
else:
res['email'] = res['login'] = partner.email or ''
return res
| agpl-3.0 | 5,222,862,158,435,340,000 | 42.323864 | 119 | 0.590557 | false |
importre/kotlin-unwrap | utils/gen.py | 1 | 1264 | #! /usr/bin/env python3
import os
impl = '''
class Unwrap(private var valid: Boolean) {
infix fun <R> nah(f: () -> R) {
if (!valid) f()
}
}
'''
template = '''
inline fun <{0}, R> unwrap(
{1},
block: ({0}) -> R): Unwrap {{
val valid = null !in arrayOf{4}({2})
if (valid) block({3})
return Unwrap(valid = valid)
}}
'''
if __name__ == '__main__':
max = 5
root = os.path.join('src', 'main', 'kotlin', '')
path = [i[0] for i in os.walk(root)
if i[0].endswith(os.sep + 'unwrap')][0].replace(root, '')
codes = ['package {}\n'.format(path.replace(os.sep, '.')), impl]
for iter in range(1, max + 1):
types = ', '.join(['T{}'.format(i + 1) for i in range(iter)])
params = ', '.join(['t{0}: T{0}?'.format(i + 1) for i in range(iter)])
args1 = ', '.join(['t{}'.format(i + 1) for i in range(iter)])
args2 = ', '.join(['t{}!!'.format(i + 1) for i in range(iter)])
arrayType = '<Any?>' if (iter == 1) else ''
code = template.format(types, params, args1, args2, arrayType)
codes.append(code)
filename = os.path.join(root, path, 'Unwrap.kt')
with open(filename, 'w') as fout:
fout.write(''.join(codes).strip() + '\n')
pass
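# For reference, the generated Unwrap.kt holds one overload per arity; the
# iter == 2 case rendered from the template above comes out as:
#
#     inline fun <T1, T2, R> unwrap(
#             t1: T1?, t2: T2?,
#             block: (T1, T2) -> R): Unwrap {
#         val valid = null !in arrayOf(t1, t2)
#         if (valid) block(t1!!, t2!!)
#         return Unwrap(valid = valid)
#     }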
| apache-2.0 | 3,446,911,921,353,584,000 | 29.095238 | 78 | 0.508703 | false |
arenadata/ambari | ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/mahout.py | 1 | 2241 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
from resource_management.core.resources import Directory
from resource_management.core.resources import File
from resource_management.libraries.functions import format
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions import lzo_utils
from resource_management.libraries.resources import XmlConfig
def mahout():
import params
# ensure that matching LZO libraries are installed for Mahout
lzo_utils.install_lzo_if_needed()
Directory( params.mahout_conf_dir,
create_parents = True,
owner = params.mahout_user,
group = params.user_group
)
XmlConfig("yarn-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['yarn-site'],
configuration_attributes=params.config['configuration_attributes']['yarn-site'],
owner=params.yarn_user,
group=params.user_group,
mode=0644
)
if not is_empty(params.log4j_props):
File(format("{params.mahout_conf_dir}/log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.mahout_user,
content=params.log4j_props
)
elif (os.path.exists(format("{params.mahout_conf_dir}/log4j.properties"))):
File(format("{params.mahout_conf_dir}/log4j.properties"),
mode=0644,
group=params.user_group,
owner=params.mahout_user
)
| apache-2.0 | -7,583,274,711,952,001,000 | 34.015625 | 92 | 0.722445 | false |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/tests/unit/gapic/googleads.v6/services/test_mobile_app_category_constant_service.py | 1 | 31876 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v6.resources.types import mobile_app_category_constant
from google.ads.googleads.v6.services.services.mobile_app_category_constant_service import MobileAppCategoryConstantServiceClient
from google.ads.googleads.v6.services.services.mobile_app_category_constant_service import transports
from google.ads.googleads.v6.services.types import mobile_app_category_constant_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert MobileAppCategoryConstantServiceClient._get_default_mtls_endpoint(None) is None
assert MobileAppCategoryConstantServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert MobileAppCategoryConstantServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert MobileAppCategoryConstantServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert MobileAppCategoryConstantServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert MobileAppCategoryConstantServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_mobile_app_category_constant_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = MobileAppCategoryConstantServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_mobile_app_category_constant_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = MobileAppCategoryConstantServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = MobileAppCategoryConstantServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_mobile_app_category_constant_service_client_get_transport_class():
transport = MobileAppCategoryConstantServiceClient.get_transport_class()
assert transport == transports.MobileAppCategoryConstantServiceGrpcTransport
transport = MobileAppCategoryConstantServiceClient.get_transport_class("grpc")
assert transport == transports.MobileAppCategoryConstantServiceGrpcTransport
@mock.patch.object(MobileAppCategoryConstantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MobileAppCategoryConstantServiceClient))
def test_mobile_app_category_constant_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.MobileAppCategoryConstantServiceClient.get_transport_class') as gtc:
transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = MobileAppCategoryConstantServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.MobileAppCategoryConstantServiceClient.get_transport_class') as gtc:
client = MobileAppCategoryConstantServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = MobileAppCategoryConstantServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = MobileAppCategoryConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = MobileAppCategoryConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = MobileAppCategoryConstantServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = MobileAppCategoryConstantServiceClient()
@mock.patch.object(MobileAppCategoryConstantServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MobileAppCategoryConstantServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_mobile_app_category_constant_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = MobileAppCategoryConstantServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = MobileAppCategoryConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = MobileAppCategoryConstantServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_mobile_app_category_constant_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = MobileAppCategoryConstantServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_mobile_app_category_constant(transport: str = 'grpc', request_type=mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest):
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_mobile_app_category_constant),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = mobile_app_category_constant.MobileAppCategoryConstant(
resource_name='resource_name_value',
id=205,
name='name_value',
)
response = client.get_mobile_app_category_constant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, mobile_app_category_constant.MobileAppCategoryConstant)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.name == 'name_value'
def test_get_mobile_app_category_constant_from_dict():
test_get_mobile_app_category_constant(request_type=dict)
def test_get_mobile_app_category_constant_field_headers():
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_mobile_app_category_constant),
'__call__') as call:
call.return_value = mobile_app_category_constant.MobileAppCategoryConstant()
client.get_mobile_app_category_constant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_mobile_app_category_constant_flattened():
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_mobile_app_category_constant),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = mobile_app_category_constant.MobileAppCategoryConstant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_mobile_app_category_constant(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_mobile_app_category_constant_flattened_error():
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_mobile_app_category_constant(
mobile_app_category_constant_service.GetMobileAppCategoryConstantRequest(),
resource_name='resource_name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = MobileAppCategoryConstantServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.MobileAppCategoryConstantServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.MobileAppCategoryConstantServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_mobile_app_category_constant_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.MobileAppCategoryConstantServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_mobile_app_category_constant',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_mobile_app_category_constant_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v6.services.services.mobile_app_category_constant_service.transports.MobileAppCategoryConstantServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.MobileAppCategoryConstantServiceTransport()
adc.assert_called_once()
def test_mobile_app_category_constant_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
MobileAppCategoryConstantServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_mobile_app_category_constant_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.MobileAppCategoryConstantServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_mobile_app_category_constant_service_host_no_port():
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_mobile_app_category_constant_service_host_with_port():
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_mobile_app_category_constant_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.MobileAppCategoryConstantServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize("transport_class", [transports.MobileAppCategoryConstantServiceGrpcTransport])
def test_mobile_app_category_constant_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.MobileAppCategoryConstantServiceGrpcTransport,])
def test_mobile_app_category_constant_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_mobile_app_category_constant_path():
mobile_app_category_id = "squid"
expected = "mobileAppCategoryConstants/{mobile_app_category_id}".format(mobile_app_category_id=mobile_app_category_id, )
actual = MobileAppCategoryConstantServiceClient.mobile_app_category_constant_path(mobile_app_category_id)
assert expected == actual
def test_parse_mobile_app_category_constant_path():
expected = {
"mobile_app_category_id": "clam",
}
path = MobileAppCategoryConstantServiceClient.mobile_app_category_constant_path(**expected)
# Check that the path construction is reversible.
actual = MobileAppCategoryConstantServiceClient.parse_mobile_app_category_constant_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = MobileAppCategoryConstantServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = MobileAppCategoryConstantServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = MobileAppCategoryConstantServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder, )
actual = MobileAppCategoryConstantServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = MobileAppCategoryConstantServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = MobileAppCategoryConstantServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization, )
actual = MobileAppCategoryConstantServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = MobileAppCategoryConstantServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = MobileAppCategoryConstantServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project, )
actual = MobileAppCategoryConstantServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = MobileAppCategoryConstantServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = MobileAppCategoryConstantServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = MobileAppCategoryConstantServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = MobileAppCategoryConstantServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = MobileAppCategoryConstantServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.MobileAppCategoryConstantServiceTransport, '_prep_wrapped_messages') as prep:
client = MobileAppCategoryConstantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.MobileAppCategoryConstantServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = MobileAppCategoryConstantServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 | -7,187,149,030,105,611,000 | 45.331395 | 241 | 0.693719 | false |
obspy/TauPy | taupy/tests/test_TauP_Time.py | 1 | 5761 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file tests the TauP_Time utility against output from the original Java
TauP, using both the high-level tau interface of TauPy and the Java-like old
script-based interface.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import *
import inspect
import os
import unittest
import sys
import subprocess
from taupy.tau import TauPyModel
from taupy.TauP_Time import TauP_Time
# Most generic way to get the data folder path.
DATA = os.path.join(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe()))), "data", "TauP_test_data")
def parse_taup_time_output(filename):
with open(filename, "rt") as fh:
data_started = False
arrivals = []
for line in fh:
line = line.strip()
if not line:
continue
if line.startswith("-------"):
data_started = True
continue
if data_started is False:
continue
l = [_i.strip() for _i in line.split() if _i != "="]
arrivals.append({
"distance": float(l[0]),
"depth": float(l[1]),
"phase_name": str(l[2]),
"time": float(l[3]),
"ray_param": float(l[4]),
"takeoff": float(l[5]),
"incident": float(l[6]),
"purist_distance": float(l[7]),
"purist_name": str(l[8]),
})
return arrivals
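# A taup_time output file parsed above looks roughly like this (numbers are
# illustrative); everything before the dashed line is skipped, and the lone "="
# in front of the purist name is filtered out by the split:
#
#     Distance   Depth   Phase   Travel    Ray Param  Takeoff  Incident  Purist    Purist
#       (deg)     (km)   Name    Time (s)  p (s/deg)   (deg)    (deg)   Distance   Name
#     -------------------------------------------------------------------------------------
#        35.00    10.0   P        412.43     8.613     26.83    26.74     35.00  = P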
def compare_arrivals_with_taup_time_output(arrivals, filename):
filename = os.path.join(DATA, filename)
expected_arrivals = parse_taup_time_output(filename)
arrivals = [
{
"distance": _i.getModuloDistDeg(),
"depth": _i.sourceDepth,
"phase_name": _i.phase.name,
"time": _i.time,
"ray_param": _i.rayParam_sec_degree,
"takeoff": _i.takeoffAngle,
"incident": _i.incidentAngle,
"purist_distance": _i.getDistDeg(),
"purist_name": _i.puristName
} for _i in arrivals]
# Sort both by time.
expected_arrivals = sorted(expected_arrivals, key=lambda x: x["time"])
arrivals = sorted(arrivals, key=lambda x: x["time"])
assert len(expected_arrivals) == len(arrivals)
for e_arr, arr in zip(expected_arrivals, arrivals):
assert sorted(e_arr.keys()) == sorted(arr.keys())
for key, value in e_arr.items():
if isinstance(value, float):
# Estimate the precision in the taup output.
v = str(value)
prec = len(v) - v.find(".") - 1
assert value == round(arr[key], prec)
else:
assert value == arr[key]
def test_all_phases_iasp91_35_deg_distance():
"""
Tests tauptime at 35 degree distance, phases ttall.
"""
model = TauPyModel("iasp91")
tts = model.get_travel_times(source_depth_in_km=10.0,
distance_in_degree=35.0)
compare_arrivals_with_taup_time_output(
tts, "taup_time_-h_10_-ph_ttall_-deg_35")
class TestTauPTime(unittest.TestCase):
# For some reason this test throws nosetests off if not in the unittest
    # framework like the test above...?
def test_all_phases_ak135_35_deg_distance(self):
"""
Tests tauptime at 35 degree distance for the ak135 model, phases ttall.
"""
model = TauPyModel("ak135")
tts = model.get_travel_times(source_depth_in_km=10.0,
distance_in_degree=35.0)
compare_arrivals_with_taup_time_output(
tts, "taup_time_-h_10_-ph_ttall_-deg_35_-mod_ak135")
def test_range(self):
"""
Check taup_time output for a range of inputs against the Java output.
"""
if not os.path.isfile("data/java_tauptime_testoutput"):
subprocess.call("./generate_tauptime_output.sh", shell=True)
stdout = sys.stdout
with open('data/taup_time_test_output', 'wt') as sys.stdout:
for degree in [0, 45, 90, 180, 360, 560]:
for depth in [0, 100, 1000, 2889]:
tauptime = TauP_Time(degrees=degree, depth=depth,
modelName="iasp91",
phaseList=["ttall"])
tauptime.run(printOutput=True)
sys.stdout = stdout
        # Using ttall we need to sort, otherwise lines with the same arrival
        # times come out in a different order. With explicit names for all the
        # phases this might not be a problem.
subprocess.check_call("./compare_tauptime_outputs.sh", shell=True)
# Use this if lines are in same order:
#subprocess.check_call("diff -wB data/java_tauptime_testoutput "
# "taup_time_test_output", shell=True)
os.remove("data/taup_time_test_output")
def test_degree_distance_from_coords(self):
"""
Test the calculation of spherical distance from given coordinates.
"""
tt = TauP_Time(depth=143.2, phaseList=["ttall"],
coordinate_list=[13, 14, 50, 200])
tt.run()
self.assertEqual(tt.degrees, 116.77958601543997)
def test_MCM_model(self):
"""
Test Taup_Time for the MCM_MPS05_XPYQ_C1D2L_S09-M2.tvel model.
"""
mcm = TauPyModel("MCM_MPS05_XPYQ_C1D2L_S09-M2.tvel")
times = mcm.get_travel_times(300, 180)
compare_arrivals_with_taup_time_output(times,
"taup_time_MCM_testfile")
if __name__ == '__main__':
unittest.main(buffer=True)
| gpl-3.0 | -677,136,912,718,723,700 | 36.167742 | 79 | 0.559104 | false |
josiah-wolf-oberholtzer/supriya | supriya/ugens/dynamics.py | 1 | 3847 | import collections
from supriya import CalculationRate
from supriya.synthdefs import PseudoUGen, UGen
from .delay import DelayN
class Amplitude(UGen):
"""
An amplitude follower.
::
>>> source = supriya.ugens.In.ar(0)
>>> amplitude = supriya.ugens.Amplitude.kr(
... attack_time=0.01, release_time=0.01, source=source,
... )
>>> amplitude
Amplitude.kr()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("attack_time", 0.01), ("release_time", 0.01)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
class Compander(UGen):
"""
A general purpose hard-knee dynamics processor.
"""
_ordered_input_names = collections.OrderedDict(
[
("source", None),
("control", 0.0),
("threshold", 0.5),
("slope_below", 1.0),
("slope_above", 1.0),
("clamp_time", 0.01),
("relax_time", 0.1),
]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class CompanderD(PseudoUGen):
"""
A convenience constructor for Compander.
"""
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
source=None,
threshold=0.5,
clamp_time=0.01,
relax_time=0.1,
slope_above=1.0,
slope_below=1.0,
):
"""
Constructs an audio-rate dynamics processor.
.. container:: example
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> compander_d = supriya.ugens.CompanderD.ar(source=source,)
>>> supriya.graph(compander_d) # doctest: +SKIP
::
>>> print(compander_d)
synthdef:
name: d4e7b88df56af5070a88f09b0f8c633e
ugens:
- In.ar:
bus: 0.0
- DelayN.ar:
delay_time: 0.01
maximum_delay_time: 0.01
source: In.ar[0]
- Compander.ar:
clamp_time: 0.01
control: DelayN.ar[0]
relax_time: 0.1
slope_above: 1.0
slope_below: 1.0
source: In.ar[0]
threshold: 0.5
Returns ugen graph.
"""
control = DelayN.ar(
source=source, maximum_delay_time=clamp_time, delay_time=clamp_time
)
return Compander._new_expanded(
clamp_time=clamp_time,
calculation_rate=CalculationRate.AUDIO,
relax_time=relax_time,
slope_above=slope_above,
slope_below=slope_below,
source=source,
control=control,
threshold=threshold,
)
class Limiter(UGen):
"""
A peak limiter.
::
>>> source = supriya.ugens.In.ar(0)
>>> limiter = supriya.ugens.Limiter.ar(duration=0.01, level=1, source=source,)
>>> limiter
Limiter.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("level", 1), ("duration", 0.01)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
class Normalizer(UGen):
"""
A dynamics flattener.
::
>>> source = supriya.ugens.In.ar(0)
>>> normalizer = supriya.ugens.Normalizer.ar(duration=0.01, level=1, source=source,)
>>> normalizer
Normalizer.ar()
"""
_ordered_input_names = collections.OrderedDict(
[("source", None), ("level", 1), ("duration", 0.01)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
| mit | 7,030,170,808,679,921,000 | 24.476821 | 92 | 0.494671 | false |
CDE-UNIBE/qcat | apps/search/search.py | 1 | 8779 | from functools import lru_cache
from django.conf import settings
from elasticsearch import TransportError
from questionnaire.models import Questionnaire
from .index import get_elasticsearch
from .utils import get_alias, ElasticsearchAlias
es = get_elasticsearch()
def get_es_query(
filter_params: list=None, query_string: str='',
match_all: bool=True) -> dict:
"""
Kwargs:
``filter_params`` (list): A list of filter parameters. Each
parameter is a tuple consisting of the following elements:
[0]: questiongroup
[1]: key
[2]: values (list)
[3]: operator
[4]: type (eg. checkbox / text)
``query_string`` (str): A query string for the full text search.
``match_all`` (bool): Whether the query MUST match all filters or not.
If not all filters must be matched, the results are ordered by relevance
        to show hits matching more filters at the top. Defaults to True.
Returns:
``dict``. A dictionary containing the query to be passed to ES.
"""
if filter_params is None:
filter_params = []
es_queries = []
def _get_terms(qg, k, v):
return {
'terms': {
f'filter_data.{qg}__{k}': [v.lower()]
}
}
# Filter parameters: Nested subqueries to access the correct
# questiongroup.
for filter_param in list(filter_params):
if filter_param.type in [
'checkbox', 'image_checkbox', 'select_type', 'select_model',
'radio', 'bool']:
            # So far, range operators only work with one filter value. Does it
# even make sense to have multiple of these joined by OR with the
# same operator?
if filter_param.operator in ['gt', 'gte', 'lt', 'lte']:
raise NotImplementedError(
'Filtering by range is not yet implemented.')
else:
if len(filter_param.values) > 1:
matches = [
_get_terms(filter_param.questiongroup,
filter_param.key, v) for v in
filter_param.values]
query = {
'bool': {
'should': matches
}
}
else:
query = _get_terms(
filter_param.questiongroup, filter_param.key,
filter_param.values[0])
es_queries.append(query)
elif filter_param.type in ['text', 'char']:
raise NotImplementedError(
'Filtering by text or char is not yet implemented/supported.')
elif filter_param.type in ['_date']:
raise NotImplementedError('Not yet implemented.')
elif filter_param.type in ['_flag']:
raise NotImplementedError('Not yet implemented.')
elif filter_param.type in ['_lang']:
es_queries.append({
'terms': {
'translations': [filter_param.values]
}
})
elif filter_param.type == '_edition':
es_queries.append({
'terms': {
'serializer_edition': [filter_param.values]
}
})
if query_string:
es_queries.append({
'multi_match': {
'query': get_escaped_string(query_string),
'fields': [
'list_data.name.*^4',
'list_data.definition.*',
'list_data.country'
],
'type': 'cross_fields',
'operator': 'and',
}
})
es_bool = 'must' if match_all is True else 'should'
if query_string == '':
# Default sort: By country, then by score.
sort = [
{
'list_data.country.keyword': {
'order': 'asc'
}
},
'_score',
]
else:
# If a phrase search is done, then only use the score to sort.
sort = ['_score']
return {
'query': {
'bool': {
es_bool: es_queries
}
},
'sort': sort,
}
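# Illustrative query built above (questiongroup/key/value names are made up):
# a single checkbox filter with match_all=True and an empty query string gives
#
#     {
#         'query': {'bool': {'must': [
#             {'terms': {'filter_data.qg_11__key_14': ['value_14_1']}},
#         ]}},
#         'sort': [{'list_data.country.keyword': {'order': 'asc'}}, '_score'],
#     }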
def advanced_search(
filter_params: list=None, query_string: str='',
configuration_codes: list=None, limit: int=10,
offset: int=0, match_all: bool=True) -> dict:
"""
Kwargs:
``filter_params`` (list): A list of filter parameters. Each
parameter is a tuple consisting of the following elements:
[0]: questiongroup
[1]: key
[2]: values (list)
[3]: operator
[4]: type (eg. checkbox / text)
``query_string`` (str): A query string for the full text search.
``configuration_codes`` (list): An optional list of
configuration codes to limit the search to certain indices.
``limit`` (int): A limit of query results to return.
``offset`` (int): The number of query results to skip.
``match_all`` (bool): Whether the query MUST match all filters or not.
If not all filters must be matched, the results are ordered by relevance
        to show hits matching more filters at the top. Defaults to True.
Returns:
``dict``. The search results as returned by
``elasticsearch.Elasticsearch.search``.
"""
query = get_es_query(
filter_params=filter_params, query_string=query_string,
match_all=match_all)
if configuration_codes is None:
configuration_codes = []
alias = get_alias(*ElasticsearchAlias.from_code_list(*configuration_codes))
return es.search(index=alias, body=query, size=limit, from_=offset)
def get_aggregated_values(
questiongroup, key, filter_type, filter_params: list=None,
query_string: str='', configuration_codes: list=None,
match_all: bool=True) -> dict:
if filter_params is None:
filter_params = []
# Remove the filter_param with the current questiongroup and key from the
# list of filter_params
relevant_filter_params = [
f for f in filter_params if
f.questiongroup != questiongroup and f.key != key]
query = get_es_query(
filter_params=relevant_filter_params, query_string=query_string,
match_all=match_all)
# For text values, use the keyword. This does not work for integer values
# (the way boolean values are stored).
# https://www.elastic.co/guide/en/elasticsearch/reference/current/fielddata.html
if filter_type == 'bool':
field = f'filter_data.{questiongroup}__{key}'
else:
field = f'filter_data.{questiongroup}__{key}.keyword'
query.update({
'aggs': {
'values': {
'terms': {
'field': field,
# Limit needs to be high enough to include all values.
'size': 1000,
}
}
},
'size': 0, # Do not include the actual hits
})
alias = get_alias(*ElasticsearchAlias.from_code_list(*configuration_codes))
es_query = es.search(index=alias, body=query)
buckets = es_query.get('aggregations', {}).get('values', {}).get('buckets', [])
return {b.get('key'): b.get('doc_count') for b in buckets}
def get_element(questionnaire: Questionnaire) -> dict:
"""
Get a single element from elasticsearch.
"""
alias = get_alias(
ElasticsearchAlias.from_configuration(configuration=questionnaire.configuration_object)
)
try:
return es.get_source(index=alias, id=questionnaire.pk, doc_type='questionnaire')
except TransportError:
return {}
def get_escaped_string(query_string: str) -> str:
"""
Replace all reserved characters when searching the ES index.
"""
for char in settings.ES_QUERY_RESERVED_CHARS:
query_string = query_string.replace(char, '\\{}'.format(char))
return query_string
@lru_cache(maxsize=1)
def get_indices_alias() -> list:
"""
Return a list of all elasticsearch index aliases. Only ES indices which
    start with the QCAT prefix are respected. Editions are stripped away; only
    the 'type' of the index / configuration is relevant.
"""
indices = []
for aliases in es.indices.get_alias('*').values():
for alias in aliases.get('aliases', {}).keys():
if settings.ES_INDEX_PREFIX not in alias:
continue
indices.append(alias.replace(settings.ES_INDEX_PREFIX, '').rsplit('_', 1)[0])
return indices
| apache-2.0 | 638,632,226,290,109,800 | 30.579137 | 96 | 0.552569 | false |
Lindy21/CSE498-LRS | oauth_provider/views.py | 1 | 8387 | from oauth.oauth import OAuthError
from django.conf import settings
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseRedirect, HttpResponseForbidden)
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import get_callable
from django.template import RequestContext
from utils import initialize_server_request, send_oauth_error
from decorators import oauth_required
from stores import check_valid_callback
from consts import OUT_OF_BAND
from django.utils.decorators import decorator_from_middleware
from django.shortcuts import render_to_response
from lrs.forms import AuthClientForm
from lrs.models import Token
OAUTH_AUTHORIZE_VIEW = 'OAUTH_AUTHORIZE_VIEW'
OAUTH_CALLBACK_VIEW = 'OAUTH_CALLBACK_VIEW'
INVALID_PARAMS_RESPONSE = send_oauth_error(OAuthError(
_('Invalid request parameters.')))
def oauth_home(request):
rsp = """
<html><head></head><body><h1>Oauth Authorize</h1></body></html>"""
return HttpResponse(rsp)
def request_token(request):
"""
The Consumer obtains an unauthorized Request Token by asking the Service
Provider to issue a Token. The Request Token's sole purpose is to receive
User approval and can only be used to obtain an Access Token.
"""
# If oauth is not enabled, don't initiate the handshake
if settings.OAUTH_ENABLED:
oauth_server, oauth_request = initialize_server_request(request)
if oauth_server is None:
return INVALID_PARAMS_RESPONSE
try:
# create a request token
token = oauth_server.fetch_request_token(oauth_request)
# return the token
response = HttpResponse(token.to_string(), mimetype="text/plain")
except OAuthError, err:
response = send_oauth_error(err)
return response
else:
return HttpResponseBadRequest("OAuth is not enabled. To enable, set the OAUTH_ENABLED flag to true in settings")
# tom c added login_url
@login_required(login_url="/XAPI/accounts/login")
def user_authorization(request):
"""
The Consumer cannot use the Request Token until it has been authorized by
the User.
"""
oauth_server, oauth_request = initialize_server_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
try:
# get the request token
token = oauth_server.fetch_request_token(oauth_request)
# tom c .. we know user.. save it
token.user = request.user
token.save()
except OAuthError, err:
return send_oauth_error(err)
try:
# get the request callback, though there might not be one
callback = oauth_server.get_callback(oauth_request)
# OAuth 1.0a: this parameter should not be present on this version
if token.callback_confirmed:
return HttpResponseBadRequest("Cannot specify oauth_callback at authorization step for 1.0a protocol")
if not check_valid_callback(callback):
return HttpResponseBadRequest("Invalid callback URL")
except OAuthError:
callback = None
# OAuth 1.0a: use the token's callback if confirmed
if token.callback_confirmed:
callback = token.callback
if callback == OUT_OF_BAND:
callback = None
# entry point for the user
if request.method == 'GET':
# try to get custom authorize view
authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
'oauth_provider.views.fake_authorize_view')
try:
authorize_view = get_callable(authorize_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % authorize_view_str
params = oauth_request.get_normalized_parameters()
# set the oauth flag
request.session['oauth'] = token.key
return authorize_view(request, token, callback, params)
# user grant access to the service
if request.method == 'POST':
# verify the oauth flag set in previous GET
if request.session.get('oauth', '') == token.key:
request.session['oauth'] = ''
try:
form = AuthClientForm(request.POST)
if form.is_valid():
if int(form.cleaned_data.get('authorize_access', 0)):
# authorize the token
token = oauth_server.authorize_token(token, request.user)
# return the token key
s = form.cleaned_data.get('scopes', '')
if isinstance(s, (list, tuple)):
s = ",".join([v.strip() for v in s])
# changed scope, gotta save
if s:
token.scope = s
token.save()
args = { 'token': token }
else:
args = { 'error': _('Access not granted by user.') }
else:
# try to get custom authorize view
authorize_view_str = getattr(settings, OAUTH_AUTHORIZE_VIEW,
'oauth_provider.views.fake_authorize_view')
try:
authorize_view = get_callable(authorize_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % authorize_view_str
params = oauth_request.get_normalized_parameters()
# set the oauth flag
request.session['oauth'] = token.key
return authorize_view(request, token, callback, params, form)
except OAuthError, err:
response = send_oauth_error(err)
if callback:
if "?" in callback:
url_delimiter = "&"
else:
url_delimiter = "?"
if 'token' in args:
query_args = args['token'].to_string(only_key=True)
else: # access is not authorized i.e. error
query_args = 'error=%s' % args['error']
response = HttpResponseRedirect('%s%s%s' % (callback, url_delimiter, query_args))
else:
# try to get custom callback view
callback_view_str = getattr(settings, OAUTH_CALLBACK_VIEW,
'oauth_provider.views.fake_callback_view')
try:
callback_view = get_callable(callback_view_str)
except AttributeError:
raise Exception, "%s view doesn't exist." % callback_view_str
response = callback_view(request, **args)
else:
response = send_oauth_error(OAuthError(_('Action not allowed.')))
return response
def access_token(request):
"""
The Consumer exchanges the Request Token for an Access Token capable of
accessing the Protected Resources.
"""
oauth_server, oauth_request = initialize_server_request(request)
if oauth_request is None:
return INVALID_PARAMS_RESPONSE
try:
# get the request token
token = oauth_server.fetch_access_token(oauth_request)
# return the token
response = HttpResponse(token.to_string(), mimetype="text/plain")
except OAuthError, err:
response = send_oauth_error(err)
return response
def authorize_client(request, token=None, callback=None, params=None, form=None):
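    """
    Render the page on which the user grants or denies access to the consumer
    (client) identified by the request token.
    """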
if not form:
form = AuthClientForm(initial={'scopes': token.scope_to_list(),
'obj_id': token.pk})
d = {}
d['form'] = form
d['name'] = token.consumer.name
d['description'] = token.consumer.description
d['params'] = params
return render_to_response('oauth_authorize_client.html', d, context_instance=RequestContext(request))
def callback_view(request, **args):
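    """
    Render the verifier (PIN) page shown after the authorization decision,
    or the error passed in ``args``.
    """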
d = {}
if 'error' in args:
d['error'] = args['error']
d['verifier'] = args['token'].verifier
return render_to_response('oauth_verifier_pin.html', args, context_instance=RequestContext(request))
| apache-2.0 | 868,409,682,363,586,200 | 41.573604 | 120 | 0.595088 | false |
joshuaunderwood7/HaskeLinGeom | pysrc/LG/Board.py | 1 | 2993 | def indexToLocation(x):
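    """Map a 0-based linear square index to a 1-based (x, y) pair, assuming an
    8x8 board: x counts down from 8 across a row, y counts up from 1 per row."""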
return ( (8-(x%8)) , (int(x/8)+1) )
class Location:
def __init__(self, x=1, y=1, z=1):
self.x = x
self.y = y
self.z = z
def parseStr(self, inStr):
inStr = inStr[1:-1]
inStr = inStr.split(',')
self.x = int(inStr[0])
self.y = int(inStr[1])
self.z = int(inStr[2])
def arrayShift(self):
self.x-=1
self.y-=1
self.z-=1
return self
def shiftBack(self):
self.x+=1
self.y+=1
self.z+=1
return self
def __repr__(self):
return '(' + str(self.x) + ', ' + \
str(self.y) + ', ' + \
str(self.z) + ')'
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(str(self))
class Board:
def __init__(self, minx=1, maxx=1, miny=1, maxy=1, minz=1, maxz=1):
"""Default board is 1x1x1 and filled with #'s"""
self.minX = minx
self.maxX = maxx
self.minY = miny
self.maxY = maxy
self.minZ = minz
self.maxZ = maxz
self.locations = set()
for loc in [ (x,y,z) for z in range(minz, maxz+1) for y in range(miny, maxy+1) for x in range(minx, maxx+1)]:
self.locations.add(loc)
def fill(self, locations):
"""give a Location to assign to each square"""
        self.locations.update(locations)  # union() returns a new set; update() modifies in place
return self
def canAccess(self, location):
return (location in self.locations)
def get(self, location):
if self.canAccess(location):
return '#'
return ''
def set(self, location):
self.locations.add(location)
return self
def rangeOfX(self):
"""Return an eager list of X values"""
return range(self.minX, self.maxX+1)
def rangeOfY(self):
"""Return an eager list of Y values"""
return range(self.minY, self.maxY+1)
def rangeOfZ(self):
"""Return an eager list of Z values"""
return range(self.minZ, self.maxZ+1)
def __repr__(self):
returnString = "loacations = set("
for loc in self.locations:
returnString += srt(loc) + ", "
returnString += ")"
return returnString
def __eq__(self, other):
return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
return not self.__eq__(other)
def getDistanceboard(self):
return Board(maxx=((self.maxX*2)-1), maxy=((self.maxY*2)-1), maxz=((self.maxZ*2)-1))
def middle(self):
"""Only returns approximate middle of the distance Board"""
return Location(self.maxX, self.maxY, self.maxZ)
chessboard = Board(maxx= 8, maxy=8)
distanceboard = chessboard.getDistanceboard()
chessboard3D = Board(maxx= 8, maxy=8, maxz=8)
| gpl-3.0 | -2,676,868,010,432,471,000 | 26.712963 | 117 | 0.539926 | false |
sole/high-fidelity | test/marionette/test_app.py | 1 | 2990 | from unittest import skip
from gaiatest import GaiaTestCase
class TestApp(GaiaTestCase):
"""Test standard app functionality like menu bar and tab-switching."""
popular_tab = ('css selector', '#popular-tab-container')
popular_tab_link = ('css selector', '#popular-tab a')
search_input = ('id', 'podcast-search')
search_tab = ('css selector', '#search-tab-container')
search_tab_link = ('css selector', '#search-tab a')
def setUp(self):
"""Run the standard Gaia setUp and open Podcasts for every test."""
GaiaTestCase.setUp(self)
# Launch the app!
self.app = self.apps.launch('Podcasts')
# Popular podcasts are on hold while we look for a new API.
@skip('Feature disabled until "popular" API/service is found.')
def test_popular_tab_exists(self):
"""Test the "Top Podcasts" tab.
Make sure activating the popular tab works and that the appropriate
DOM elements are in place.
"""
# Make sure the popular podcasts tab exists.
self.wait_for_element_displayed(*self.popular_tab_link)
popular_tab_link_element = self.marionette.find_element(
*self.popular_tab_link)
self.assertEqual(popular_tab_link_element.text, 'Popular',
'Popular tab link should exist')
# Clicking on the popular tab link should open the popular tab.
self.marionette.tap(popular_tab_link_element)
self.wait_for_element_displayed(*self.popular_tab)
self.assertTrue(self.marionette.find_element(*self.popular_tab)
.is_displayed(),
'Popular podcasts tab should appear when link is '
'tapped')
def test_search_tab_exists(self):
"""Test the Podcast search tab.
Make sure activating the search tab works and that the appropriate
DOM elements are in place.
"""
# Make sure the search tab exists.
self.wait_for_element_displayed(*self.search_tab_link)
search_tab_link_element = self.marionette.find_element(
*self.search_tab_link)
self.assertEqual(search_tab_link_element.text, 'Search',
'Search tab link should exist')
# Clicking on the search tab link should open the search tab.
self.marionette.tap(search_tab_link_element)
self.wait_for_element_displayed(*self.search_tab)
self.assertTrue(self.marionette.find_element(*self.search_tab)
.is_displayed(),
'Search tab should appear when link is tapped')
# Search field should have a placeholder value.
self.wait_for_element_displayed(*self.search_input)
self.assertTrue(self.marionette.find_element(*self.search_input)
.get_attribute('placeholder'),
'Search field should have a placeholder')
| mit | -3,328,228,603,148,573,000 | 42.333333 | 75 | 0.616722 | false |
i19870503/i19870503 | Python/eggnog2go_anno.py | 1 | 2591 | import os
import re
import pandas as pd
import string
import itertools
import numpy as np
import sys
import argparse
from collections import OrderedDict
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create GO annotation and enrichment file')
parser.add_argument('-i',type=str,dest='infile',required=True,help="Input file")
parser.add_argument('-o',type=str,dest='out',required=True,help="Ouput file")
parser.add_argument('-db',type=str,dest='db',required=True,help="GO Database file")
args = parser.parse_args()
print (args)
def sort_uniq(sequence):
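    """Return a generator over the sorted, de-duplicated items of sequence."""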
return (x[0] for x in itertools.groupby(sorted(sequence)))
path = "/home/zluna/Work/GO"
fout = open(args.out+"_anno.xls", 'w')
print("Gene_id", "GO_annotation", sep = '\t', file = fout)
go_db = pd.read_table(os.path.join(path, args.db), header = None)
eggout = pd.read_table(os.path.join(path, args.infile), header = None)
#pd.DataFrame.head(eggout)
#eggout.head(100)
dict = OrderedDict()
first_flag = 1
a = list(go_db[0])
for i in range(len(eggout)):
gene_id = eggout[0][i]
go_id = eggout[5][i]
if pd.isnull(eggout[5][i]):
go_id = ''
#print(gene_id, kegg_id, type(kegg_id), sep ='\t')
go_id = go_id.split(',')
if len(go_id) == 0:
continue
go_term = '; '.join(list(go_db[go_db[2].isin(go_id)][0]))
#print(gene_id, go_id, go_term, sep ='\t')
go_sum = []
sel_go_table = go_db[go_db[2].isin(go_id)]
for j in range(len(sel_go_table)):
go_sum.append(''.join(( list(sel_go_table[2])[j], "~", list(sel_go_table[0])[j])))
print(gene_id, str(go_sum).strip('[]').replace(']','').replace("'","").replace(", ","; "), sep = '\t', file = fout)
a = list(go_db[2])
### Use dictionary
for k in range(len(a)):
if str(go_sum).find(a[k]) != -1 :
if a[k] not in dict.keys():
                    ### The value must be a list; if 'gene_id' were assigned directly as the value of the key, 'append' could not be used to add further gene_ids to the existing key.
dict[a[k]] = []
dict[a[k]].append(gene_id)
else:
dict[a[k]].append(gene_id)
#dict[a[j]] = [dict[a[j]], gene_id]
fout.close()
fout2 = open(args.out+"_enrich.xls", 'w')
print('GOID', 'Term', 'Genes', 'Gene_count', sep = '\t', file = fout2)
for key,values in dict.items():
print(key, list(go_db[go_db[2] == key][0]), str(values).strip('[]').replace(']','').replace("'",""), len(values), sep ='\t', file = fout2)
    fout2.close()
| gpl-2.0 | -6,269,192,895,930,068,000 | 37.102941 | 157 | 0.580085 | false |
OCA/program | program_multi_menu_budget/program_result_region.py | 1 | 1213 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Savoir-faire Linux (<www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class program_result_region(orm.Model):
_inherit = 'program.result.region'
_columns = {
'top_level_menu_id': fields.many2one('ir.ui.menu', 'Top Level Menu'),
}
| agpl-3.0 | 8,760,379,846,450,138,000 | 38.129032 | 78 | 0.612531 | false |
avanzosc/avanzosc6.1 | avanzosc_tree_grid_ext/__openerp__.py | 1 | 1850 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2013 AvanzOSC S.L. All Rights Reserved
# Date: 01/07/2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "AvanzOSC - tree_grid extension",
"version": "1.0",
"depends": ["tree_grid","sale","purchase","stock","account","avanzosc_calculate_coeficient_udv_automatically"],
"author": "AvanzOSC S.L.",
"category": "Generic",
"description": """
    This module adds editable sale unit and sale quantity fields to the tree views
    of purchase order lines, sale order lines, invoice lines, and picking (delivery note) lines.
""",
"init_xml": [],
'update_xml': ['sale_order_view_ext.xml',
'purchase_order_view_ext.xml',
'stock_picking_view_ext.xml',
'account_invoice_view_ext.xml',
'product_product_view_ext.xml'
],
'demo_xml': [],
'installable': True,
'active': False,
# 'certificate': 'certificate',
} | agpl-3.0 | -7,973,430,295,569,346,000 | 40.954545 | 115 | 0.583198 | false |
rcwoolley/device-cloud-python | device_cloud/osal.py | 1 | 3073 | '''
Copyright (c) 2016-2017 Wind River Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
OR CONDITIONS OF ANY KIND, either express or implied.
'''
"""
Operating System Abstraction Layer (OSAL). This module provides abstractions of
functions that are different on different operating systems.
"""
import os
import platform
import subprocess
import sys
# Constants
NOT_SUPPORTED = -20
EXECUTION_FAILURE = -21
BAD_PARAMETER = -22
# Setup platform info statics
WIN32 = sys.platform.startswith('win32')
LINUX = sys.platform.startswith('linux')
MACOS = sys.platform.startswith('darwin')
POSIX = LINUX or MACOS
OTHER = not POSIX and not WIN32
# Define Functions
def execl(*args):
"""
Replaces the current process with a new instance of the specified
    executable. This function only returns if there is an issue starting the
    new instance, in which case it returns an error code. Otherwise, it does
    not return.
"""
retval = EXECUTION_FAILURE
if POSIX:
os.execvp(args[0], args)
elif WIN32:
os.execvp(sys.executable, args)
else:
retval = NOT_SUPPORTED
return retval
def os_kernel():
"""
Get the operating system's kernel version
"""
ker = "Unknown"
if LINUX:
ker = platform.release()
elif WIN32 or MACOS:
ker = platform.version()
return ker
def os_name():
"""
Get the operating system name
"""
name = "Unknown"
if LINUX:
distro = platform.linux_distribution()
plat = subprocess.check_output(["uname", "-o"])[:-1].decode()
name = "{} ({})".format(distro[0], plat)
elif WIN32:
name = platform.system()
elif MACOS:
name = "macOS"
return name
def os_version():
"""
Get the operating system version
"""
ver = "Unknown"
if LINUX:
distro = platform.linux_distribution()
ver = "{}-{}".format(distro[1], distro[2])
elif WIN32:
ver = platform.release()
elif MACOS:
ver = platform.mac_ver()[0]
return ver
def system_reboot(delay=0, force=True):
"""
Reboot the system.
"""
return system_shutdown(delay=delay, reboot=True, force=force)
def system_shutdown(delay=0, reboot=False, force=True):
"""
Run the system shutdown command. Can be used to reboot the system.
"""
command = "shutdown "
if POSIX:
command += "-r " if reboot else "-h "
command += "now " if delay == 0 else "+{} ".format(delay)
elif WIN32:
command += "/r " if reboot else "/s "
command += "/t {} ".format(delay*60)
command += "/f" if force else ""
else:
return NOT_SUPPORTED
return os.system(command)
| apache-2.0 | 6,464,276,360,981,115,000 | 25.491379 | 84 | 0.633257 | false |
dnarvaez/virtualenv-bootstrap | bootstrap.py | 1 | 4429 | #!/usr/bin/env python3
# Copyright 2013 Daniel Narvaez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is from https://github.com/dnarvaez/virtualenv-bootstrap
import hashlib
import json
import os
import shutil
import subprocess
import sys
import tarfile
import urllib.request
base_dir = os.path.dirname(os.path.abspath(__file__))
environ_namespace = "TEST"
start_message = "Installing virtualenv"
end_message = "\n"
packages = ["osourcer"]
submodules = []
virtualenv_version = "1.8.4"
virtualenv_dir = "sandbox"
cache_dir = "cache"
run_module = "osourcer.tool"
etag = "1"
def get_cache_dir():
return os.path.join(base_dir, cache_dir)
def get_virtualenv_dir():
return os.path.join(base_dir, virtualenv_dir)
def get_stamp_path():
return get_virtualenv_dir() + ".stamp"
def get_bin_path(name):
return os.path.join(get_virtualenv_dir(), "bin", name)
def create_virtualenv():
source_dir = os.path.join(get_cache_dir(),
"virtualenv-%s" % virtualenv_version)
if not os.path.exists(source_dir):
url = "https://pypi.python.org/packages/source/v/" \
"virtualenv/virtualenv-%s.tar.gz" % virtualenv_version
f = urllib.request.urlopen(url)
with tarfile.open(fileobj=f, mode="r:gz") as tar:
tar.extractall(get_cache_dir())
subprocess.check_call(["python3",
os.path.join(source_dir, "virtualenv.py"),
"-q", get_virtualenv_dir()])
def get_submodule_dirs():
return [os.path.join(base_dir, submodule) for submodule in submodules]
def install_packages():
args = [get_bin_path("pip"), "-q", "install"]
args.extend(packages)
args.extend(get_submodule_dirs())
subprocess.check_call(args)
def upgrade_submodules():
args = [get_bin_path("pip"), "-q", "install", "--no-deps", "--upgrade"]
args.extend(get_submodule_dirs())
subprocess.check_call(args)
def compute_submodules_hash():
data = ""
for submodule in submodules:
for root, dirs, files in os.walk(os.path.join(base_dir, submodule)):
for name in files:
path = os.path.join(root, name)
mtime = os.lstat(path).st_mtime
data = "%s%s %s\n" % (data, mtime, path)
return hashlib.sha256(data.encode("utf-8")).hexdigest()
def check_stamp():
try:
with open(get_stamp_path()) as f:
stamp = json.load(f)
except (IOError, ValueError):
return True, True
return (stamp["etag"] != etag,
stamp["submodules_hash"] != compute_submodules_hash())
def write_stamp():
stamp = {"etag": etag,
"submodules_hash": compute_submodules_hash()}
with open(get_stamp_path(), "w") as f:
json.dump(stamp, f)
def update_submodules():
update = os.environ.get(environ_namespace + "_UPDATE_SUBMODULES", "yes")
if update != "yes":
return
os.chdir(base_dir)
for module in submodules:
subprocess.check_call(["git", "submodule", "update", "--init",
module])
def main():
os.environ["PIP_DOWNLOAD_CACHE"] = get_cache_dir()
os.environ[environ_namespace + "_BASE_DIR"] = base_dir
os.environ[environ_namespace + "_VIRTUALENV"] = get_virtualenv_dir()
etag_changed, submodules_changed = check_stamp()
if etag_changed:
print(start_message)
update_submodules()
try:
shutil.rmtree(get_virtualenv_dir())
except OSError:
pass
create_virtualenv()
install_packages()
write_stamp()
print(end_message)
elif submodules_changed:
upgrade_submodules()
write_stamp()
args = [get_bin_path("python3"), "-m", run_module]
if len(sys.argv) > 1:
args.extend(sys.argv[1:])
os.execl(args[0], *args)
if __name__ == "__main__":
main()
| apache-2.0 | 4,880,690,577,747,265,000 | 24.601156 | 76 | 0.621585 | false |
CCI-Tools/cate-core | cate/ops/index.py | 1 | 8641 |
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
Index calculation operations
Functions
=========
"""
import xarray as xr
import pandas as pd
from cate.core.op import op, op_input
from cate.ops.select import select_var
from cate.ops.subset import subset_spatial
from cate.ops.anomaly import anomaly_external
from cate.core.types import PolygonLike, VarName, ValidationError
from cate.util.monitor import Monitor
_ALL_FILE_FILTER = dict(name='All Files', extensions=['*'])
@op(tags=['index'])
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('var', value_set_source='ds', data_type=VarName)
def enso_nino34(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate nino34 index, which is defined as a five month running mean of
anomalies of monthly means of SST data in Nino3.4 region:: lon_min=-170
lat_min=-5 lon_max=-120 lat_max=5.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable (geophysial quantity) to use for index
calculation.
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset according to the given
threshold. Where anomaly larger than the positive value of the threshold
indicates El Nino and anomaly smaller than the negative of the given
threshold indicates La Nina.
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries.
"""
n34 = '-170, -5, -120, 5'
name = 'ENSO N3.4 Index'
return _generic_index_calculation(ds, var, n34, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
@op_input('region', value_set=['N1+2', 'N3', 'N34', 'N4', 'custom'])
@op_input('custom_region', data_type=PolygonLike)
def enso(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
region: str = 'n34',
custom_region: PolygonLike.TYPE = None,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate ENSO index, which is defined as a five month running mean of
anomalies of monthly means of SST data in the given region.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable to use for index calculation
:param region: Region for index calculation, the default is Nino3.4
:param custom_region: If 'custom' is chosen as the 'region', this parameter
has to be provided to set the desired region.
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset, according to the given
    threshold. Where an anomaly larger than the positive value of the threshold
indicates El Nino and anomaly smaller than the negative of the given
threshold indicates La Nina.
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries.
"""
regions = {'N1+2': '-90, -10, -80, 0',
'N3': '-150, -5, -90, 5',
'N3.4': '-170, -5, -120, 5',
'N4': '160, -5, -150, 5',
'custom': custom_region}
converted_region = PolygonLike.convert(regions[region])
if not converted_region:
raise ValidationError('No region has been provided to ENSO index calculation')
name = 'ENSO ' + region + ' Index'
if 'custom' == region:
name = 'ENSO Index over ' + PolygonLike.format(converted_region)
return _generic_index_calculation(ds, var, converted_region, 5, file, name, threshold, monitor)
@op(tags=['index'])
@op_input('var', value_set_source='ds', data_type=VarName)
@op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER])
def oni(ds: xr.Dataset,
var: VarName.TYPE,
file: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
Calculate ONI index, which is defined as a three month running mean of
anomalies of monthly means of SST data in the Nino3.4 region.
:param ds: A monthly SST dataset
:param file: Path to the reference data file e.g. a climatology. A suitable reference dataset
can be generated using the long_term_average operation
:param var: Dataset variable to use for index calculation
:param threshold: If given, boolean El Nino/La Nina timeseries will be
calculated and added to the output dataset, according to the given
    threshold. Where an anomaly larger than the positive value of the threshold
indicates El Nino and anomaly smaller than the negative of the given
threshold indicates La Nina.
:param monitor: a progress monitor.
    :return: A dataset that contains the index timeseries
"""
n34 = '-170, -5, -120, 5'
name = 'ONI Index'
return _generic_index_calculation(ds, var, n34, 3, file, name, threshold, monitor)
def _generic_index_calculation(ds: xr.Dataset,
var: VarName.TYPE,
region: PolygonLike.TYPE,
window: int,
file: str,
name: str,
threshold: float = None,
monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
"""
    A generic index calculation, where an index is defined as the moving
    average (with the given window size) of the anomaly, against the given
    reference, over the given region of the given variable of the given dataset.
:param ds: Dataset from which to calculate the index
:param var: Variable from which to calculate index
:param region: Spatial subset from which to calculate the index
:param window: Window size for the moving average
:param file: Path to the reference file
:param threshold: Absolute threshold that indicates an ENSO event
:param name: Name of the index
:param monitor: a progress monitor.
:return: A dataset that contains the index timeseries
"""
var = VarName.convert(var)
region = PolygonLike.convert(region)
with monitor.starting("Calculate the index", total_work=2):
ds = select_var(ds, var)
ds_subset = subset_spatial(ds, region)
anom = anomaly_external(ds_subset, file, monitor=monitor.child(1))
with monitor.child(1).observing("Calculate mean"):
ts = anom.mean(dim=['lat', 'lon'])
df = pd.DataFrame(data=ts[var].values, columns=[name], index=ts.time.values)
retval = df.rolling(window=window, center=True).mean().dropna()
if threshold is None:
return retval
retval['El Nino'] = pd.Series((retval[name] > threshold),
index=retval.index)
retval['La Nina'] = pd.Series((retval[name] < -threshold),
index=retval.index)
return retval
| mit | 3,199,853,828,415,307,300 | 43.312821 | 110 | 0.671566 | false |
los-cocos/etc_code | cocos#248--RectMapCollider, player sometimes stuck/start.py | 1 | 6979 | """
A script to demo a defect in RectMapCollider, initial report by Netanel at
https://groups.google.com/forum/#!topic/cocos-discuss/a494vcH-u3I
The defect is that the player gets stuck at some positions, and it was confirmed
for cocos master Aug 1, 2015 (292ae676) and cocos-0.6.3-release, see cocos #248
The package 'blinker' (available from PyPI) is needed to run this script
Further investigation shows that this happens when both of these concur
    1. the player actively pushes against a blocking surface
    2. the player rect aligns with the grid tile.
changes from the OP bugdemo code:
lines irrelevant to the bug removed
changed player controls
added a view to show the potentially colliding cells that RectMapCollider
    will consider (shown as red rectangles overlapping the player)
player pic edited to make visible the actual player boundary
Controlling the player:
use left-right for horizontal move, must keep pressing to move
    use up-down to move vertically; a press adds to or subtracts from the y-velocity
Demoing the bug:
1. move to touch the left wall.
2. release 'left' key
3. move up and down, this works
    4. keep the 'left' key pressed, and try to move down: the player gets stuck
       at some alignments
scene
background
scroller=ScrollingManager
tilemap <- load(...)['map0']
layer=Game (a ScrollableLayer)
sprite
particles
potential collisions view, ShowCollision
"""
from __future__ import division, print_function
from cocos.particle_systems import *
from cocos.particle import Color
from cocos.text import Label
from cocos.tiles import load, RectMapLayer
from cocos.mapcolliders import RectMapWithPropsCollider
from cocos.layer import Layer, ColorLayer, ScrollingManager, ScrollableLayer
from cocos.sprite import Sprite
from cocos.actions import *
from cocos.scene import Scene
from cocos.director import director
from pyglet.window import key
from pyglet.window.key import symbol_string, KeyStateHandler
from menu import GameMenu
import blinker
director.init(width=1920, height=480, autoscale = True, resizable = True)
Map = load("mapmaking.tmx")
scroller = ScrollingManager()
tilemap = Map['map0']
assert tilemap.origin_x == 0
assert tilemap.origin_y == 0
class Background(ColorLayer):
def __init__(self):
super(Background, self).__init__(65,120,255,255)
class ShowCollision(ScrollableLayer):
"""
A layer to show the cells a RectMapCollider considers potentially
colliding with the 'new' rect.
    Use with SpyCollider so the event of interest is published
"""
def __init__(self):
super(ShowCollision, self).__init__()
self.collision_view = []
for i in range(10):
self.collision_view.append(ColorLayer(255, 0, 0, 255, width=64, height=64))
for e in self.collision_view:
self.add(e)
signal = blinker.signal("collider cells")
signal.connect(self.on_collision_changed)
def on_collision_changed(self, sender, payload=None):
for cell, view in zip(payload, self.collision_view):
view.position = (cell.i * 64, cell.j * 64)
view.opacity = 140
for i in range(len(payload), len(self.collision_view)):
self.collision_view[i].opacity = 0
class Game(ScrollableLayer):
is_event_handler = True
def __init__(self):
super(Game, self).__init__()
self.score = 0
# Add player
self.sprite = Sprite('magic.png')
self.sprite.position = 320, 240
self.sprite.direction = "right"
self.sprite.dx = 0
self.sprite.dy = 0
self.add(self.sprite, z=1)
        # A set of balls
self.balls = set()
# Teleportation counter
self.teleportation = 0
self.sprite.jump = 0
def on_key_press(self, inp, modifers):
if symbol_string(inp) == "LEFT":
self.sprite.dx -= 3
print("press left, dx:", self.sprite.dx)
if symbol_string(inp) == "RIGHT":
self.sprite.dx += 3
print("press right, dx:", self.sprite.dx)
if symbol_string(inp) == "UP":
self.sprite.dy += 3
if self.sprite.dy > 6:
self.sprite.dy = 6
print("press up, dy:", self.sprite.dy)
if symbol_string(inp) == "DOWN":
self.sprite.dy -= 3
if self.sprite.dy < -6:
self.sprite.dy = -6
print("press down, dy:", self.sprite.dy)
def on_key_release(self, inp, modifers):
if symbol_string(inp) == "LEFT":
self.sprite.dx = 0
print("release left, dx:", self.sprite.dx)
if symbol_string(inp) == "RIGHT":
self.sprite.dx = 0
print("release right, dx:", self.sprite.dx)
class SpyCollider(RectMapWithPropsCollider):
"""
Same as RectMapWithPropsCollider, except it publishes which cells will be considered
for collision.
Usage:
# istantiate
a = SpyCollider()
# set the behavior for velocity change on collision with
# a.on_bump_handler = a.on_bump_slide
# add the signal we want to emit
a.signal = blinker.signal("collider cells")
# use as stock RectMapCollider
# catch the signal with something like ShowCollision
"""
def collide_map(self, maplayer, last, new, vx, vy):
"""collide_map en dos pasadas; """
objects = maplayer.get_in_region(*(new.bottomleft + new.topright))
self.signal.send(payload=objects)
return super(SpyCollider, self).collide_map(maplayer, last, new, vx, vy)
layer = Game()
collider = SpyCollider()
collider.on_bump_handler = collider.on_bump_slide
collider.signal = blinker.signal("collider cells")
#collider = RectMapCollider()
# WARN: this was hacked for bugdemo purposes only; don't use in real code:
# lots of globals
# position delta must use dt, else unpredictable view velocity
def update(dt):
""" Update game"""
last = layer.sprite.get_rect()
new = last.copy()
new.x += layer.sprite.dx
new.y += layer.sprite.dy
# dont care about velocity, pass 0, 0
collider.collide_map(tilemap, last, new, 0.0, 0.0)
layer.sprite.position = new.center
scroller.set_focus(*new.center)
# Schedule Updates
layer.schedule(update)
# Add map to scroller
scroller.add(tilemap)
#Create Scene
scene = Scene()
# Create and add background
background = Background()
scene.add(background)
#Add main layer to scroller
scroller.add(layer)
scroller.add(ShowCollision())
# Add scroller to scene
scene.add(scroller)
# Game menu configuration
menu = GameMenu(scene)
menuScene = Scene()
menuScene.add(menu)
director.run(menuScene)
| mit | 2,921,396,511,402,230,300 | 29.609649 | 91 | 0.640923 | false |
mozman/ezdxf | tests/test_01_dxf_entities/test_131_field_list.py | 1 | 2369 | # Copyright (c) 2019 Manfred Moitzi
# License: MIT License
from typing import cast
import pytest
import ezdxf
from ezdxf.entities.idbuffer import FieldList
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
FIELDLIST = """0
FIELDLIST
5
0
102
{ACAD_REACTORS
330
0
102
}
330
0
100
AcDbIdSet
90
12
100
AcDbFieldList
"""
@pytest.fixture
def entity():
return FieldList.from_text(FIELDLIST)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert 'FIELDLIST' in ENTITY_CLASSES
def test_default_init():
entity = FieldList()
assert entity.dxftype() == 'FIELDLIST'
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = FieldList.new(handle='ABBA', owner='0', dxfattribs={
})
assert entity.dxf.flags == 0
assert len(entity.handles) == 0
def test_load_from_text(entity):
assert entity.dxf.flags == 12
assert len(entity.handles) == 0
def test_write_dxf():
entity = FieldList.from_text(FIELDLIST)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(FIELDLIST)
assert result == expected
@pytest.fixture(scope='module')
def doc():
return ezdxf.new('R2007')
def test_generic_field_list(doc):
field_list = doc.objects.new_entity('FIELDLIST', {})
assert field_list.dxftype() == 'FIELDLIST'
assert len(field_list.handles) == 0
def test_set_get_field_list(doc):
field_list = doc.objects.new_entity('FIELDLIST', {})
assert field_list.dxftype() == 'FIELDLIST'
field_list.handles = ['FF', 'EE', 'DD']
handles = field_list.handles
assert len(handles) == 3
assert handles == ['FF', 'EE', 'DD']
handles.append('FFFF')
assert handles[-1] == 'FFFF'
def test_dxf_tags(doc):
buffer = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
buffer.handles = ['FF', 'EE', 'DD', 'CC']
tags = TagCollector.dxftags(buffer)[-4:]
assert len(tags) == 4
assert tags[0] == (330, 'FF')
assert tags[-1] == (330, 'CC')
def test_clone(doc):
buffer = cast(FieldList, doc.objects.new_entity('FIELDLIST', {}))
buffer.handles = ['FF', 'EE', 'DD', 'CC']
buffer2 = cast(FieldList, buffer.copy())
buffer2.handles[-1] = 'ABCD'
assert buffer.handles[:-1] == buffer2.handles[:-1]
assert buffer.handles[-1] != buffer2.handles[-1]
| mit | -7,176,212,482,512,180,000 | 21.561905 | 69 | 0.662727 | false |
krautradio/PyRfK | lib/rfk/database/base.py | 1 | 14479 | import time
import hashlib
from datetime import timedelta
from passlib.hash import bcrypt
from sqlalchemy import *
from sqlalchemy.orm import relationship, backref, exc
from sqlalchemy.dialects.mysql import INTEGER as Integer
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.sql.expression import case
import re
import os
from flask.ext.login import AnonymousUserMixin
import rfk.database
from rfk.types import SET, ENUM
from rfk import exc as rexc
from rfk import CONFIG
from rfk.database import Base, UTCDateTime
from rfk.database.show import UserShow, Show
from rfk.helper import now, get_path
class Anonymous(AnonymousUserMixin):
def __init__(self):
AnonymousUserMixin.__init__(self)
self.locale = 'de'
self.timezone = 'Europe/Berlin'
def get_locale(self):
return self.locale
def get_timezone(self):
return self.timezone
def has_permission(self, code=None, permission=None):
return False
class User(Base):
__tablename__ = 'users'
user = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
username = Column(String(50), unique=True)
password = Column(String(64))
mail = Column(String(255))
country = Column(String(3))
register_date = Column(UTCDateTime, default=now)
last_login = Column(UTCDateTime, default=None)
def get_id(self):
return unicode(self.user)
def is_anonymous(self):
return False
def is_active(self):
return True
def is_authenticated(self):
return True
def get_locale(self):
return self.get_setting(code='locale')
def get_timezone(self):
return self.get_setting(code='timezone')
@staticmethod
def authenticate(username, password):
"""shorthand function for authentication a user
returns the user object
Keyword arguments:
username -- username
password -- unencrypted password
"""
user = User.get_user(username=username)
if user.check_password(password):
return user
else:
raise rexc.base.InvalidPasswordException()
@staticmethod
def get_user(id=None, username=None):
assert id or username
try:
if username is None:
return User.query.filter(User.user == id).one()
else:
return User.query.filter(User.username == username).one()
except exc.NoResultFound:
raise rexc.base.UserNotFoundException
@staticmethod
def check_username(username):
if re.match('^[0-9a-zA-Z_-]{3,}$', username) is None:
return False
else:
return True
@staticmethod
def make_password(password):
return bcrypt.encrypt(password)
@staticmethod
def add_user(username, password):
if not User.check_username(username):
raise rexc.base.InvalidUsernameException
try:
User.query.filter(User.username == username).one()
raise rexc.base.UserNameTakenException()
except exc.NoResultFound:
user = User(username=username, password=User.make_password(password))
rfk.database.session.add(user)
rfk.database.session.flush()
return user
def check_password(self, password):
try:
return bcrypt.verify(password, self.password)
except ValueError:
if hashlib.sha1(password).hexdigest() == self.password:
self.password = User.make_password(password)
return True
else:
return False
def add_permission(self, code=None, permission=None):
assert code or permission
if permission is None:
permission = Permission.get_permission(code)
try:
UserPermission.query.filter(UserPermission.user == self,
UserPermission.permission == permission) \
.one()
return False
except exc.NoResultFound:
self.permissions.append(UserPermission(permission))
return True
def has_permission(self, code=None, permission=None):
assert code or permission
if permission is None:
permission = Permission.get_permission(code)
try:
UserPermission.query.filter(UserPermission.user == self,
UserPermission.permission == permission) \
.one()
return True
except exc.NoResultFound:
return False
def get_setting(self, setting=None, code=None):
assert setting or code
if setting is None:
setting = Setting.get_setting(code)
try:
us = UserSetting.query.filter(UserSetting.user == self,
UserSetting.setting == setting).one()
return us.get_value()
except exc.NoResultFound:
return None
def set_setting(self, value, setting=None, code=None):
assert setting or code
if setting is None:
setting = Setting.get_setting(code)
UserSetting.set_value(self, setting, value)
rfk.database.session.flush()
def get_total_streamtime(self):
"""Returns a timedelta Object with the users total time streamed"""
try:
return timedelta(seconds= float(rfk.database.session
.query( func.sum( func.time_to_sec( func.timediff(Show.end,Show.begin) ) ) ) \
.join(UserShow).filter(UserShow.status == UserShow.STATUS.STREAMED,
UserShow.user == self).first()[0]))
except TypeError:
return timedelta(seconds=0)
def __repr__(self):
return "<USER username={0}>".format(self.username)
class Setting(Base):
__tablename__ = 'settings'
setting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
code = Column(String(25), unique=True)
name = Column(String(50))
val_type = Column(Integer(unsigned=True))
TYPES = ENUM(['INT', 'STR'])
@staticmethod
def get_setting(code):
return Setting.query.filter(Setting.code == code).one()
@staticmethod
def add_setting(code, name, val_type):
try:
return Setting.query.filter(Setting.code == code).one()
except exc.NoResultFound:
return Setting(code=code, name=name, val_type=val_type)
class UserSetting(Base):
__tablename__ = 'user_settings'
userSetting = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('settings'))
setting_id = Column("setting", Integer(unsigned=True),
ForeignKey('settings.setting',
onupdate="CASCADE",
ondelete="RESTRICT"))
setting = relationship("Setting")
val_int = Column(Integer)
val_str = Column(String(255))
def get_value(self):
if self.setting.val_type == Setting.TYPES.INT:
return self.val_int
elif self.setting.val_type == Setting.TYPES.STR:
return self.val_str
@staticmethod
def set_value(user, setting, value):
if value == True:
value = 1
elif value == False:
value = 0
try:
us = UserSetting.query.filter(UserSetting.user == user,
UserSetting.setting == setting).one()
except exc.NoResultFound:
us = UserSetting(user=user, setting=setting)
if us.setting.val_type == Setting.TYPES.INT:
us.val_int = value
elif us.setting.val_type == Setting.TYPES.STR:
us.val_str = value
class Permission(Base):
__tablename__ = 'permissions'
permission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
code = Column(String(25), unique=True)
name = Column(String(50))
@staticmethod
def get_permission(code):
return Permission.query.filter(Permission.code == code).one()
@staticmethod
def add_permission(code, name):
try:
return Permission.query.filter(Permission.code == code).one()
except exc.NoResultFound:
return Permission(code=code, name=name)
class UserPermission(Base):
__tablename__ = 'user_permissions'
userPermission = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('permissions', cascade="all, delete-orphan"))
permission_id = Column("permission", Integer(unsigned=True),
ForeignKey('permissions.permission',
onupdate="CASCADE",
ondelete="RESTRICT"))
permission = relationship("Permission", backref=backref('users', cascade="all, delete-orphan"))
def __init__(self, permission):
self.permission = permission
class Ban(Base):
__tablename__ = 'bans'
ban = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref=backref('bans'))
range = Column(String(50))
expiration = Column(UTCDateTime)
class News(Base):
__tablename__ = 'news'
news = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
time = Column(UTCDateTime, default=now())
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User")
title = Column(String(255))
content = Column(Text)
class ApiKey(Base):
__tablename__ = 'apikeys'
apikey = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
user_id = Column("user", Integer(unsigned=True), ForeignKey('users.user',
onupdate="CASCADE",
ondelete="RESTRICT"))
user = relationship("User", backref="apikeys")
key = Column(String(128))
counter = Column(Integer(unsigned=True), default=0)
access = Column(UTCDateTime, default=now())
application = Column(String(128))
description = Column(String(255))
flag = Column(Integer(unsigned=True), default=0)
FLAGS = SET(['DISABLED', 'FASTQUERY', 'KICK', 'BAN', 'AUTH'])
def gen_key(self):
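        """Generate a SHA-1 based key for this entry that is unique among all
        existing ApiKeys."""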
c = 0
while True:
key = hashlib.sha1("%s%s%d%d" % (self.application, self.description, time.time(), c)).hexdigest()
if ApiKey.query.filter(ApiKey.key == key).first() == None:
break
self.key = key
@staticmethod
def check_key(key):
try:
apikey = ApiKey.query.filter(ApiKey.key == key).one()
except (exc.NoResultFound, exc.MultipleResultsFound):
raise rexc.api.KeyInvalidException()
if apikey.flag & ApiKey.FLAGS.DISABLED:
raise rexc.api.KeyDisabledException()
elif not apikey.flag & ApiKey.FLAGS.FASTQUERY:
if now() - apikey.access <= timedelta(seconds=1):
raise rexc.api.FastQueryException(last_access=apikey.access)
apikey.counter += 1
apikey.access = now()
return apikey
class Log(Base):
__tablename__ = 'log'
log = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
timestamp = Column(UTCDateTime, default=now)
severity = Column(Integer(unsigned=True))
module = Column(String(50))
message = Column(Text)
class Loop(Base):
__tablename__ = 'loops'
loop = Column(Integer(unsigned=True), primary_key=True, autoincrement=True)
begin = Column(Integer(unsigned=True), default=0)
end = Column(Integer(unsigned=True), default=1440)
filename = Column(String(50))
@hybrid_property
def length(self):
if (self.end >= self.begin):
return abs(self.end - self.begin)
else:
return abs((self.end + 2400) - self.begin)
@length.expression
def length(cls):
return func.abs(cast(case([(cls.begin <= cls.end, cls.end),
(cls.begin >= cls.end, cls.end + 2400)]), Integer) - cast(cls.begin, Integer))
@hybrid_method
def contains(self, point):
return case([(self.begin <= self.end, (self.begin <= point) & (self.end >= point)),
(self.begin >= self.end, (self.begin <= point) | (self.end >= point))])
@hybrid_property
def file_exists(self):
if self.filename is None:
return False
return os.path.exists(os.path.join(get_path(CONFIG.get('liquidsoap', 'looppath')), self.filename))
@staticmethod
def get_current_loop():
"""
returns the current loop to be scheduled
@todo maybe broken ;_;
"""
n = now()
#try to find a loop that should be running
loops = Loop.query.filter(Loop.contains(int(n.hour * 100 + (n.minute / 60.) * 100))).order_by(
Loop.length.asc()).all()
for loop in loops:
if loop.file_exists:
            return loop
# we found no loops
# just try to find the longest one
loops = Loop.query.order_by(Loop.length.asc()).all()
for loop in loops:
if loop.file_exists:
            return loop
        # okay, now we have a problem, just return None
return None
| bsd-3-clause | 1,369,849,253,642,008,600 | 34.662562 | 113 | 0.58243 | false |
albertoferna/compmech | setup.py | 1 | 1198 | from glob import glob
import sys
import os
from subprocess import Popen
import numpy
#params = 'build_ext -inplace -IC:\clones\cubature\cubature ' + ' '.join(sys.argv[1:])
params = 'build_ext --inplace -I%s ' % numpy.get_include() + ' '.join(sys.argv[1:]) + ' clean'
cwd = os.getcwd()
if os.name == 'nt':
use_sdk = 'DISTUTILS_USE_SDK'
if not use_sdk in os.environ.keys():
os.environ[use_sdk] = '1'
print('####################')
print('Compiling modules...')
print('####################')
print('')
basedirs = [
os.path.join('compmech', 'conecyl', 'clpt'),
os.path.join('compmech', 'conecyl', 'fsdt'),
os.path.join('compmech', 'integrate'),
os.path.join('compmech', 'conecyl', 'imperfections'),
os.path.join('compmech', 'aero', 'pistonplate', 'clpt'),
os.path.join('compmech', 'aero', 'pistonstiffpanel', 'clpt'),
]
for basedir in basedirs:
print('Compiling setup.py in %s' % basedir)
basedir = os.path.sep.join([cwd, basedir])
os.chdir(basedir)
for fname in glob('setup*.py'):
p = Popen(('python {} '.format(fname) + params), shell=True)
p.wait()
os.chdir(cwd)
| bsd-3-clause | 8,871,118,216,028,705,000 | 29.717949 | 93 | 0.569282 | false |
MicBrain/Scheme-Interpreter | scheme.py | 1 | 21214 | """This module implements the core Scheme interpreter functions, including the
eval/apply mutual recurrence, environment model, and read-eval-print loop.
"""
from scheme_primitives import *
from scheme_reader import *
from ucb import main, trace
##############
# Eval/Apply #
##############
def scheme_eval(expr, env):
"""Evaluate Scheme expression EXPR in environment ENV. If ENV is None,
simply returns EXPR as its value without further evaluation.
>>> expr = read_line("(+ 2 2)")
>>> expr
Pair('+', Pair(2, Pair(2, nil)))
>>> scheme_eval(expr, create_global_frame())
scnum(4)
"""
while env is not None:
# Note: until extra-credit problem 22 is complete, env will
# always be None on the second iteration of the loop, so that
# the value of EXPR is returned at that point.
if expr is None:
raise SchemeError("Cannot evaluate an undefined expression.")
# Evaluate Atoms
if scheme_symbolp(expr):
expr, env = env.lookup(expr).get_actual_value(), None
elif scheme_atomp(expr):
env = None
# All non-atomic expressions are lists.
elif not scheme_listp(expr):
raise SchemeError("malformed list: {0}".format(str(expr)))
else:
first, rest = scheme_car(expr), scheme_cdr(expr)
# Evaluate Combinations
if (scheme_symbolp(first) # first might be unhashable
and first in SPECIAL_FORMS):
if proper_tail_recursion:
expr, env = SPECIAL_FORMS[first](rest, env)
else:
expr, env = SPECIAL_FORMS[first](rest, env)
expr, env = scheme_eval(expr, env), None
else:
procedure = scheme_eval(first, env)
args = procedure.evaluate_arguments(rest, env)
if proper_tail_recursion:
expr, env = procedure.apply(args, env)
else:
# UPDATED 4/14/2014 @ 19:08
expr, env = scheme_apply(procedure, args, env), None
return expr
proper_tail_recursion = True
################################################################
# Uncomment the following line to apply tail call optimization #
################################################################
# proper_tail_recursion = True
def scheme_apply(procedure, args, env):
"""Apply PROCEDURE (type Procedure) to argument values ARGS
in environment ENV. Returns the resulting Scheme value."""
# UPDATED 4/14/2014 @ 19:08
# Since .apply is allowed to do a partial evaluation, we finish up
# with a call to scheme_eval to complete the evaluation. scheme_eval
# will simply return expr if its env argument is None.
expr, env = procedure.apply(args, env)
return scheme_eval(expr, env)
################
# Environments #
################
class Frame:
"""An environment frame binds Scheme symbols to Scheme values."""
def __init__(self, parent):
"""An empty frame with a PARENT frame (that may be None)."""
self.bindings = {}
self.parent = parent
def __repr__(self):
if self.parent is None:
return "<Global Frame>"
else:
s = sorted('{0}: {1}'.format(k,v) for k,v in self.bindings.items())
return "<{{{0}}} -> {1}>".format(', '.join(s), repr(self.parent))
def __eq__(self, other):
return isinstance(other, Frame) and \
self.parent == other.parent
def lookup(self, symbol):
"""Return the value bound to SYMBOL. Errors if SYMBOL is not found.
As a convenience, also accepts Python strings, which it turns into
symbols."""
if type(symbol) is str:
symbol = intern(symbol)
if symbol in self.bindings:
return self.bindings[symbol]
if self.parent is not None:
return self.parent.lookup(symbol)
raise SchemeError("unknown identifier: {0}".format(str(symbol)))
def global_frame(self):
"""The global environment at the root of the parent chain."""
e = self
while e.parent is not None:
e = e.parent
return e
def make_call_frame(self, formals, vals):
"""Return a new local frame whose parent is SELF, in which the symbols
in the Scheme formal parameter list FORMALS are bound to the Scheme
values in the Scheme value list VALS. Raise an error if too many or too
few arguments are given.
>>> env = create_global_frame()
>>> formals, vals = read_line("(a b c)"), read_line("(1 2 3)")
>>> env.make_call_frame(formals, vals)
<{a: 1, b: 2, c: 3} -> <Global Frame>>
"""
frame = Frame(self)
if len(formals) != len(vals):
raise SchemeError
for expression in range(len(formals)):
frame.define(formals[expression], vals[expression])
return frame
def define(self, sym, val):
"""Define Scheme symbol SYM to have value VAL in SELF. As a
convenience, SYM may be Python string, which is converted first
to a Scheme symbol. VAL must be a SchemeValue."""
assert isinstance(val, SchemeValue), "values must be SchemeValues"
if type(sym) is str:
sym = intern(sym)
self.bindings[sym] = val
#####################
# Procedures #
#####################
class Procedure(SchemeValue):
"""The superclass of all kinds of procedure in Scheme."""
# Arcane Technical Note: The odd placement of the import from scheme in
# evaluate_arguments is necessary because it introduces mutually recursive
# imports between this file and scheme.py. The effect of putting it
# here is that we delay attempting to access scheme.scheme_eval until
# after the scheme module's initialization is finished.
def evaluate_arguments(self, arg_list, env):
"""Evaluate the expressions in ARG_LIST in ENV to produce
arguments for this procedure. Default definition for procedures."""
from scheme import scheme_eval
return arg_list.map(lambda operand: scheme_eval(operand, env))
class PrimitiveProcedure(Procedure):
"""A Scheme procedure defined as a Python function."""
def __init__(self, fn, use_env=False):
self.fn = fn
self.use_env = use_env
def __str__(self):
return '#[primitive]'
def __repr__(self):
return "PrimitiveProcedure({})".format(str(self))
def apply(self, args, env):
"""Apply a primitive procedure to ARGS in ENV. Returns
a pair (val, None), where val is the resulting value.
>>> twos = Pair(SchemeInt(2), Pair(SchemeInt(2), nil))
>>> plus = PrimitiveProcedure(scheme_add, False)
>>> plus.apply(twos, None)
(scnum(4), None)
"""
try:
converted_list = []
while args != nil:
converted_list.append(args.first)
args = args.second
if self.use_env:
converted_list.append(env)
val = self.fn(*converted_list)
return val, None
except TypeError:
raise SchemeError
class LambdaProcedure(Procedure):
"""A procedure defined by a lambda expression or the complex define form."""
def __init__(self, formals, body, env = None):
"""A procedure whose formal parameter list is FORMALS (a Scheme list),
whose body is the single Scheme expression BODY, and whose parent
environment is the Frame ENV. A lambda expression containing multiple
expressions, such as (lambda (x) (display x) (+ x 1)) can be handled by
using (begin (display x) (+ x 1)) as the body."""
self.formals = formals
self.body = body
self.env = env
def _symbol(self):
return 'lambda'
def __str__(self):
# UPDATED 4/16/2014 @ 13:20
return "({0} {1} {2})".format(self._symbol(),
str(self.formals), str(self.body))
def __repr__(self):
args = (self.formals, self.body, self.env)
return "{0}Procedure({1}, {2}, {3})".format(self._symbol().capitalize(),
*(repr(a) for a in args))
def __eq__(self, other):
return type(other) is type(self) and \
self.formals == other.formals and \
self.body == other.body and \
self.env == other.env
    def apply(self, args, env):
        call_frame = self.env.make_call_frame(self.formals, args)
        if proper_tail_recursion:
            return self.body, call_frame
        else:
            return scheme_eval(self.body, call_frame), None
class MuProcedure(LambdaProcedure):
"""A procedure defined by a mu expression, which has dynamic scope.
"""
def _symbol(self):
return 'mu'
def apply(self, args, env):
if proper_tail_recursion:
return self.body, env.make_call_frame(self.formals, args)
else:
return scheme_eval(self.body, env.make_call_frame(self.formals, args)), None
# Call-by-name (nu) extension.
class NuProcedure(LambdaProcedure):
"""A procedure whose parameters are to be passed by name."""
def _symbol(self):
return 'nu'
def evaluate_arguments(self, arg_list, env):
"""Evaluate the expressions in ARG_LIST in ENV to produce
arguments for this procedure. Default definition for procedures."""
return arg_list.map(lambda operand: Thunk(nil, operand, env))
class Thunk(LambdaProcedure):
"""A by-name value that is to be called as a parameterless function when
its value is fetched to be used."""
def get_actual_value(self):
return scheme_eval(self.body, self.env)
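    # Illustrative sketch (not part of the original module): with a nu
    # procedure an argument expression such as (/ 1 0) is wrapped in a Thunk
    # instead of being evaluated at the call site, so it only raises an error
    # if (and when) the corresponding parameter's value is actually fetched
    # via get_actual_value().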
#################
# Special forms #
#################
# All of the 'do_..._form' methods return a value and an environment,
# as for the 'apply' method on Procedures. That is, they either return
# (V, None), indicating that the value of the special form is V, or they
# return (Expr, Env), indicating that the value of the special form is what
# you would get by evaluating Expr in the environment Env.
def do_lambda_form(vals, env, function_type=LambdaProcedure):
"""Evaluate a lambda form with formals VALS[0] and body VALS.second
in environment ENV, create_global_frame eating a procedure of type FUNCTION_TYPE
(a subtype of Procedure)."""
check_form(vals, 2)
operands = vals.first
check_formals(operands)
body = vals.second
    if len(body) != 1:
return function_type(operands, Pair("begin", body), env), None
return function_type(operands, body.first, env), None
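# Illustrative sketch (not part of the original module): a multi-expression
# body such as (lambda (x) (display x) (+ x 1)) is rebuilt with the single
# body expression (begin (display x) (+ x 1)), so Procedure.apply always has
# exactly one body expression to evaluate.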
def do_mu_form(vals, env):
"""Evaluate a mu (dynamically scoped lambda) form with formals VALS[0]
and body VALS.second in environment ENV."""
return do_lambda_form(vals, env, function_type=MuProcedure)
def do_nu_form(vals, env):
"""Evaluate a mu (call-by-name scoped lambda) form with formals VALS[0]
and body VALS.second in environment ENV."""
return do_lambda_form(vals, env, function_type=NuProcedure)
def do_define_form(vals, env):
"""Evaluate a define form with parameters VALS in environment ENV."""
check_form(vals, 2)
target = vals[0]
if scheme_symbolp(target):
check_form(vals, 2, 2)
env.define(target, scheme_eval(vals[1], env))
return (target, None)
elif scheme_pairp(target):
func_name = target.first
if isinstance(func_name, SchemeNumber) or isinstance(func_name, SchemeFloat):
raise SchemeError("bad argument to define")
lambda_vals = Pair(target.second, vals.second)
lambda_func = do_lambda_form(lambda_vals, env)[0]
env.define(func_name, lambda_func)
return func_name, None
else:
raise SchemeError("bad argument to define")
def do_quote_form(vals, env):
"""Evaluate a quote form with parameters VALS. ENV is ignored."""
check_form(vals, 1, 1)
return vals[0], None
def do_let_form(vals, env):
"""Evaluate a let form with parameters VALS in environment ENV."""
check_form(vals, 2)
bindings = vals[0]
exprs = vals.second
if not scheme_listp(bindings):
raise SchemeError("bad bindings list in let form")
# Add a frame containing bindings
names, values = nil, nil
for item in bindings:
values = Pair(scheme_eval(item.second.first, env), values)
names = Pair(item.first, names)
new_env = env.make_call_frame(names, values)
# Evaluate all but the last expression after bindings, and return the last
last = len(exprs)-1
for i in range(0, last):
scheme_eval(exprs[i], new_env)
return exprs[last], new_env
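# Illustrative sketch (not part of the original module): for
# (let ((x 1) (y 2)) (+ x y)) the binding expressions are evaluated in the
# outer ENV, a child frame binding x and y is created, and the last body
# expression is returned unevaluated together with that frame so scheme_eval
# can finish it (preserving tail-call optimization). x and y are no longer
# visible once the let returns.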
#########################
# Logical Special Forms #
#########################
def do_if_form(vals, env):
"""Evaluate if form with parameters VALS in environment ENV."""
check_form(vals, 2, 3)
if (scheme_eval(vals.first, env)):
return vals.second.first, env
elif len(vals) == 2:
return okay, None
return vals.second.second.first, env
def do_and_form(vals, env):
"""Evaluate short-circuited and with parameters VALS in environment ENV."""
if len(vals):
for i in range(len(vals) - 1):
if not(scheme_eval(vals[i], env)):
return scheme_false, None
return vals[len(vals) - 1], env
return scheme_true, None
def quote(value):
"""Return a Scheme expression quoting the Scheme VALUE.
>>> s = quote('hello')
>>> print(s)
(quote hello)
>>> scheme_eval(s, Frame(None)) # "hello" is undefined in this frame.
intern('hello')
"""
return Pair("quote", Pair(value, nil))
def do_or_form(vals, env):
"""Evaluate short-circuited or with parameters VALS in environment ENV."""
for value in vals:
eval_expression = scheme_eval(value, env)
if eval_expression:
return eval_expression, None
return scheme_false, None
def do_cond_form(vals, env):
"""Evaluate cond form with parameters VALS in environment ENV."""
num_clauses = len(vals)
for i, clause in enumerate(vals):
check_form(clause, 1)
if clause.first is else_sym:
if i < num_clauses-1:
raise SchemeError("else must be last")
test = scheme_true
if clause.second is nil:
raise SchemeError("badly formed else clause")
else:
test = scheme_eval(clause.first, env)
if test:
if len(clause.second) == 0:
return test, None
if len(clause.second) >= 2:
return Pair('begin', clause.second), env
return clause.second.first, env
return okay, None
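# Illustrative sketch (not part of the original module): in
# (cond ((= x 1) 'one) ((= x 2) 'two) (else 'many)) the tests are evaluated
# in order; a clause with no result expressions yields the test value itself,
# a clause with several result expressions is rewritten as (begin ...), and a
# cond with no true clause evaluates to okay.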
def do_begin_form(vals, env):
"""Evaluate begin form with parameters VALS in environment ENV."""
check_form(vals, 0)
if scheme_nullp(vals):
return okay, None
for i in range(len(vals) - 1):
scheme_eval(vals[i], env)
return vals[len(vals) - 1], env
# Collected symbols with significance to the interpreter
and_sym = intern("and")
begin_sym = intern("begin")
cond_sym = intern("cond")
define_macro_sym = intern("define-macro")
define_sym = intern("define")
else_sym = intern("else")
if_sym = intern("if")
lambda_sym = intern("lambda")
let_sym = intern("let")
mu_sym = intern("mu")
nu_sym = intern("nu")
or_sym = intern("or")
quasiquote_sym = intern("quasiquote")
quote_sym = intern("quote")
set_bang_sym = intern("set!")
unquote_splicing_sym = intern("unquote-splicing")
unquote_sym = intern("unquote")
# Collected special forms
SPECIAL_FORMS = {
and_sym: do_and_form,
begin_sym: do_begin_form,
cond_sym: do_cond_form,
define_sym: do_define_form,
if_sym: do_if_form,
lambda_sym: do_lambda_form,
let_sym: do_let_form,
mu_sym: do_mu_form,
nu_sym: do_nu_form,
or_sym: do_or_form,
quote_sym: do_quote_form,
}
# Utility methods for checking the structure of Scheme programs
def check_form(expr, min, max=None):
    """Check that EXPR is a proper list whose length is at least MIN and
    no more than MAX (default: no maximum). Raise a SchemeError if this
    is not the case."""
if not scheme_listp(expr):
raise SchemeError("badly formed expression: " + str(expr))
length = len(expr)
if length < min:
raise SchemeError("too few operands in form")
elif max is not None and length > max:
raise SchemeError("too many operands in form")
def check_formals(formals):
"""Check that FORMALS is a valid parameter list, a Scheme list of symbols
in which each symbol is distinct. Raise a SchemeError if the list of formals
is not a well-formed list of symbols or if any symbol is repeated.
>>> check_formals(read_line("(a b c)"))
"""
seen_symbols = []
while len(formals):
if not(scheme_symbolp(formals.first)) or formals.first in seen_symbols:
raise SchemeError
seen_symbols.append(formals.first)
formals = formals.second
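# Illustrative sketch (not part of the original module):
# check_formals(read_line("(a b a)")) and check_formals(read_line("(a 1)"))
# both raise SchemeError (repeated symbol / non-symbol formal), while a
# well-formed list such as (a b c) passes silently.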
################
# Input/Output #
################
def read_eval_print_loop(next_line, env, quiet=False, startup=False,
interactive=False, load_files=()):
"""Read and evaluate input until an end of file or keyboard interrupt."""
if startup:
for filename in load_files:
scheme_load(scstr(filename), True, env)
while True:
try:
src = next_line()
while src.more_on_line:
expression = scheme_read(src)
result = scheme_eval(expression, env)
if not quiet and result is not None:
scheme_print(result)
except (SchemeError, SyntaxError, ValueError, RuntimeError) as err:
if (isinstance(err, RuntimeError) and
'maximum recursion depth exceeded' not in err.args[0]):
raise
print("Error:", err)
except KeyboardInterrupt: # <Control>-C
if not startup:
raise
print("\nKeyboardInterrupt")
if not interactive:
return
except EOFError: # <Control>-D, etc.
return
def scheme_load(*args):
"""Load a Scheme source file. ARGS should be of the form (SYM, ENV) or (SYM,
QUIET, ENV). The file named SYM is loaded in environment ENV, with verbosity
determined by QUIET (default true)."""
if not (2 <= len(args) <= 3):
vals = args[:-1]
raise SchemeError("wrong number of arguments to load: {0}".format(vals))
sym = args[0]
quiet = args[1] if len(args) > 2 else True
env = args[-1]
if (scheme_stringp(sym)):
sym = intern(str(sym))
check_type(sym, scheme_symbolp, 0, "load")
with scheme_open(str(sym)) as infile:
lines = infile.readlines()
args = (lines, None) if quiet else (lines,)
def next_line():
return buffer_lines(*args)
read_eval_print_loop(next_line, env.global_frame(), quiet=quiet)
return okay
def scheme_open(filename):
"""If either FILENAME or FILENAME.scm is the name of a valid file,
return a Python file opened to it. Otherwise, raise an error."""
try:
return open(filename)
except IOError as exc:
if filename.endswith('.scm'):
raise SchemeError(str(exc))
try:
return open(filename + '.scm')
except IOError as exc:
raise SchemeError(str(exc))
def create_global_frame():
"""Initialize and return a single-frame environment with built-in names."""
env = Frame(None)
env.define("eval", PrimitiveProcedure(scheme_eval, True))
env.define("apply", PrimitiveProcedure(scheme_apply, True))
env.define("load", PrimitiveProcedure(scheme_load, True))
for names, fn in get_primitive_bindings():
for name in names:
proc = PrimitiveProcedure(fn)
env.define(name, proc)
return env
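# Illustrative sketch (not part of the original module): eval, apply and load
# are registered with use_env=True because they need the calling environment;
# ordinary primitives from get_primitive_bindings() only receive their Scheme
# arguments. Assuming "+" is among the primitive bindings:
#
#     >>> env = create_global_frame()
#     >>> scheme_eval(read_line("(+ 1 2)"), env)
#     scnum(3)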
@main
def run(*argv):
next_line = buffer_input
interactive = True
load_files = ()
if argv:
try:
filename = argv[0]
if filename == '-load':
load_files = argv[1:]
else:
input_file = open(argv[0])
lines = input_file.readlines()
def next_line():
return buffer_lines(lines)
interactive = False
except IOError as err:
print(err)
sys.exit(1)
read_eval_print_loop(next_line, create_global_frame(), startup=True,
interactive=interactive, load_files=load_files)
tscheme_exitonclick()
| apache-2.0 | 1,368,474,208,198,917,400 | 35.139693 | 93 | 0.593335 | false |
beefoo/still-i-rise | collect_sound_data.py | 1 | 2076 | # -*- coding: utf-8 -*-
# Description: generate audio clips for lines, words, and syllables
import argparse
import json
import os
from pprint import pprint
import re
import subprocess
import sys
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="still_i_rise.wav", help="Path to input audio file")
parser.add_argument('-pitch', dest="OUTPUT_PITCH_FILE", default="data/still_i_rise.Pitch", help="Path to output pitch data file")
parser.add_argument('-pulse', dest="OUTPUT_PULSE_FILE", default="data/still_i_rise.PointProcess", help="Path to output pulse data file")
parser.add_argument('-ts', dest="TIME_STEP", default="0.01", help="Time step in seconds")
parser.add_argument('-p0', dest="PITCH_FLOOR", default="70", help="Pitch floor in Hz")
parser.add_argument('-mc', dest="MAX_CANDIDATES", default="4", help="Maximum candidates per frame")
parser.add_argument('-va', dest="VERY_ACCURATE", default="on", help="Very accurate, on/off")
parser.add_argument('-st', dest="SILENCE_THRESHOLD", default="0.01", help="Silence threshold")
parser.add_argument('-vt', dest="VOICING_THRESHOLD", default="0.3", help="Voicing threshold")
parser.add_argument('-oc', dest="OCTAVE_COST", default="0.001", help="Octave cost")
parser.add_argument('-ojc', dest="OCTAVE_JUMP_COST", default="0.3", help="Octave jump cost")
parser.add_argument('-vc', dest="VOICED_COST", default="0.2", help="Voiced cost")
parser.add_argument('-p1', dest="PITCH_CEILING", default="400", help="Pitch ceiling in Hz")
# init input
args = parser.parse_args()
# cut the clip
command = ['Praat', '--run', 'collect_sound_data.praat', args.INPUT_FILE, args.OUTPUT_PITCH_FILE, args.OUTPUT_PULSE_FILE, args.TIME_STEP, args.PITCH_FLOOR, args.MAX_CANDIDATES, args.VERY_ACCURATE, args.SILENCE_THRESHOLD, args.VOICING_THRESHOLD, args.OCTAVE_COST, args.OCTAVE_JUMP_COST, args.VOICED_COST, args.PITCH_CEILING]
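# Illustrative sketch (not part of the original script): with the default
# arguments the subprocess call below is equivalent to running
#   Praat --run collect_sound_data.praat still_i_rise.wav \
#       data/still_i_rise.Pitch data/still_i_rise.PointProcess \
#       0.01 70 4 on 0.01 0.3 0.001 0.3 0.2 400
# from a shell (assuming Praat is on the PATH).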
print "Running %s" % " ".join(command)
finished = subprocess.check_call(command)
print "Wrote data to %s and %s" % (args.OUTPUT_PITCH_FILE, args.OUTPUT_PULSE_FILE)
| mit | 2,398,684,887,728,343,000 | 55.108108 | 323 | 0.725915 | false |
googleapis/googleapis-gen | google/cloud/talent/v4beta1/talent-v4beta1-py/google/cloud/talent_v4beta1/services/job_service/transports/grpc.py | 1 | 23359 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.talent_v4beta1.types import job
from google.cloud.talent_v4beta1.types import job as gct_job
from google.cloud.talent_v4beta1.types import job_service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
class JobServiceGrpcTransport(JobServiceTransport):
"""gRPC backend transport for JobService.
A service handles job management, including job CRUD,
enumeration and search.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'jobs.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
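    # Illustrative sketch (not part of the generated file): the transport is
    # normally created indirectly by the JobService client, but it can also
    # be built explicitly, e.g.:
    #
    #     channel = JobServiceGrpcTransport.create_channel("jobs.googleapis.com")
    #     transport = JobServiceGrpcTransport(channel=channel)
    #
    # in which case the explicit channel takes precedence over any credential
    # arguments passed to __init__.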
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_job(self) -> Callable[
[job_service.CreateJobRequest],
gct_job.Job]:
r"""Return a callable for the create job method over gRPC.
Creates a new job.
Typically, the job becomes searchable within 10 seconds,
but it may take up to 5 minutes.
Returns:
Callable[[~.CreateJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_job' not in self._stubs:
self._stubs['create_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/CreateJob',
request_serializer=job_service.CreateJobRequest.serialize,
response_deserializer=gct_job.Job.deserialize,
)
return self._stubs['create_job']
@property
def batch_create_jobs(self) -> Callable[
[job_service.BatchCreateJobsRequest],
operations_pb2.Operation]:
r"""Return a callable for the batch create jobs method over gRPC.
Begins executing a batch create jobs operation.
Returns:
Callable[[~.BatchCreateJobsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_create_jobs' not in self._stubs:
self._stubs['batch_create_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchCreateJobs',
request_serializer=job_service.BatchCreateJobsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_create_jobs']
@property
def get_job(self) -> Callable[
[job_service.GetJobRequest],
job.Job]:
r"""Return a callable for the get job method over gRPC.
Retrieves the specified job, whose status is OPEN or
recently EXPIRED within the last 90 days.
Returns:
Callable[[~.GetJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_job' not in self._stubs:
self._stubs['get_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/GetJob',
request_serializer=job_service.GetJobRequest.serialize,
response_deserializer=job.Job.deserialize,
)
return self._stubs['get_job']
@property
def update_job(self) -> Callable[
[job_service.UpdateJobRequest],
gct_job.Job]:
r"""Return a callable for the update job method over gRPC.
Updates specified job.
Typically, updated contents become visible in search
results within 10 seconds, but it may take up to 5
minutes.
Returns:
Callable[[~.UpdateJobRequest],
~.Job]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_job' not in self._stubs:
self._stubs['update_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/UpdateJob',
request_serializer=job_service.UpdateJobRequest.serialize,
response_deserializer=gct_job.Job.deserialize,
)
return self._stubs['update_job']
@property
def batch_update_jobs(self) -> Callable[
[job_service.BatchUpdateJobsRequest],
operations_pb2.Operation]:
r"""Return a callable for the batch update jobs method over gRPC.
Begins executing a batch update jobs operation.
Returns:
Callable[[~.BatchUpdateJobsRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_update_jobs' not in self._stubs:
self._stubs['batch_update_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchUpdateJobs',
request_serializer=job_service.BatchUpdateJobsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['batch_update_jobs']
@property
def delete_job(self) -> Callable[
[job_service.DeleteJobRequest],
empty_pb2.Empty]:
r"""Return a callable for the delete job method over gRPC.
Deletes the specified job.
Typically, the job becomes unsearchable within 10
seconds, but it may take up to 5 minutes.
Returns:
Callable[[~.DeleteJobRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_job' not in self._stubs:
self._stubs['delete_job'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/DeleteJob',
request_serializer=job_service.DeleteJobRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_job']
@property
def batch_delete_jobs(self) -> Callable[
[job_service.BatchDeleteJobsRequest],
empty_pb2.Empty]:
r"""Return a callable for the batch delete jobs method over gRPC.
Deletes a list of [Job][google.cloud.talent.v4beta1.Job]s by
filter.
Returns:
Callable[[~.BatchDeleteJobsRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'batch_delete_jobs' not in self._stubs:
self._stubs['batch_delete_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/BatchDeleteJobs',
request_serializer=job_service.BatchDeleteJobsRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['batch_delete_jobs']
@property
def list_jobs(self) -> Callable[
[job_service.ListJobsRequest],
job_service.ListJobsResponse]:
r"""Return a callable for the list jobs method over gRPC.
Lists jobs by filter.
Returns:
Callable[[~.ListJobsRequest],
~.ListJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_jobs' not in self._stubs:
self._stubs['list_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/ListJobs',
request_serializer=job_service.ListJobsRequest.serialize,
response_deserializer=job_service.ListJobsResponse.deserialize,
)
return self._stubs['list_jobs']
@property
def search_jobs(self) -> Callable[
[job_service.SearchJobsRequest],
job_service.SearchJobsResponse]:
r"""Return a callable for the search jobs method over gRPC.
Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest].
This call constrains the
[visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs
present in the database, and only returns jobs that the caller
has permission to search against.
Returns:
Callable[[~.SearchJobsRequest],
~.SearchJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_jobs' not in self._stubs:
self._stubs['search_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/SearchJobs',
request_serializer=job_service.SearchJobsRequest.serialize,
response_deserializer=job_service.SearchJobsResponse.deserialize,
)
return self._stubs['search_jobs']
@property
def search_jobs_for_alert(self) -> Callable[
[job_service.SearchJobsRequest],
job_service.SearchJobsResponse]:
r"""Return a callable for the search jobs for alert method over gRPC.
Searches for jobs using the provided
[SearchJobsRequest][google.cloud.talent.v4beta1.SearchJobsRequest].
This API call is intended for the use case of targeting passive
job seekers (for example, job seekers who have signed up to
receive email alerts about potential job opportunities), and has
different algorithmic adjustments that are targeted to passive
job seekers.
This call constrains the
[visibility][google.cloud.talent.v4beta1.Job.visibility] of jobs
present in the database, and only returns jobs the caller has
permission to search against.
Returns:
Callable[[~.SearchJobsRequest],
~.SearchJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'search_jobs_for_alert' not in self._stubs:
self._stubs['search_jobs_for_alert'] = self.grpc_channel.unary_unary(
'/google.cloud.talent.v4beta1.JobService/SearchJobsForAlert',
request_serializer=job_service.SearchJobsRequest.serialize,
response_deserializer=job_service.SearchJobsResponse.deserialize,
)
return self._stubs['search_jobs_for_alert']
__all__ = (
'JobServiceGrpcTransport',
)
| apache-2.0 | 5,724,087,368,380,103,000 | 42.580224 | 87 | 0.609615 | false |
chungjjang80/FRETBursts | fretbursts/burstlib.py | 1 | 133746 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2013-2016 The Regents of the University of California,
# Antonino Ingargiola <[email protected]>
#
"""
This module contains all the main FRETBursts analysis functions.
`burstlib.py` defines the fundamental object `Data()` that contains both the
experimental data (attributes) and the high-level analysis routines (methods).
Furthermore it loads all the remaining **FRETBursts** modules (except for
`loaders.py`).
For usage example see the IPython Notebooks in sub-folder "notebooks".
"""
from __future__ import print_function, absolute_import, division
from future.utils import raise_from
from builtins import range, zip
import os
import hashlib
import numpy as np
import copy
from numpy import zeros, size, r_
import scipy.stats as SS
from .utils.misc import pprint, clk_to_s, deprecate
from .poisson_threshold import find_optimal_T_bga
from . import fret_fit
from . import bg_cache
from .ph_sel import Ph_sel
from .fretmath import gamma_correct_E, gamma_uncorrect_E
from .phtools import burstsearch as bslib
from .phtools.burstsearch import (
# Burst search function
bsearch,
# Photon counting function,
mch_count_ph_in_bursts
)
from .phtools import phrates
from . import background as bg
from . import select_bursts
from . import fit
from .fit.gaussian_fitting import (gaussian_fit_hist,
gaussian_fit_cdf,
two_gaussian_fit_hist,
two_gaussian_fit_hist_min,
two_gaussian_fit_hist_min_ab,
two_gaussian_fit_EM,
two_gauss_mix_pdf,
two_gauss_mix_ab,)
# Redefine some old functions that have been renamed so old scripts will not
# break but will print a warning
bg_calc_exp = deprecate(bg.exp_fit, 'bg_calc_exp', 'bg.exp_fit')
bg_calc_exp_cdf = deprecate(bg.exp_cdf_fit, 'bg_calc_exp_cdf', 'bg.exp_cdf_fit')
def _get_bsearch_func(pure_python=False):
if pure_python:
# return the python version
return bslib.bsearch_py
else:
# or what is available
return bsearch
def _get_mch_count_ph_in_bursts_func(pure_python=False):
if pure_python:
# return the python version
return bslib.mch_count_ph_in_bursts_py
else:
# or what is available
return mch_count_ph_in_bursts
def isarray(obj):
"""Test if the object support the array interface.
Returns True for numpy arrays and pandas sequences.
"""
return hasattr(obj, '__array__')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# BURST SELECTION FUNCTIONS
#
def Sel(d_orig, filter_fun, negate=False, nofret=False, **kwargs):
"""Uses `filter_fun` to select a sub-set of bursts from `d_orig`.
This function is deprecated. Use :meth:`Data.select_bursts` instead.
"""
d_sel = d_orig.select_bursts(filter_fun, negate=negate,
computefret=not nofret,
**kwargs)
return d_sel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Bursts and Timestamps utilities
#
def get_alex_fraction(on_range, alex_period):
"""Get the fraction of period beween two numbers indicating a range.
"""
assert len(on_range) == 2
if on_range[0] < on_range[1]:
fraction = (on_range[1] - on_range[0]) / alex_period
else:
fraction = (alex_period + on_range[1] - on_range[0]) / alex_period
return fraction
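# Illustrative sketch (not part of the original module): with
# alex_period = 4000, the wrapping range (2850, 580) gives
# (4000 + 580 - 2850) / 4000 = 0.4325, while the non-wrapping range
# (100, 1900) gives (1900 - 100) / 4000 = 0.45.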
def top_tail(nx, a=0.1):
"""Return for each ch the mean size of the top `a` fraction.
nx is one of nd, na, nt from Data() (list of burst size in each ch).
"""
assert a > 0 and a < 1
return np.r_[[n[n > n.max() * (1 - a)].mean() for n in nx]]
##
# Per-burst quantities from ph-data arrays (timestamps, lifetime, etc.)
#
def _excitation_width(excitation_range, alex_period):
"""Returns duration of alternation period outside selected excitation.
"""
if excitation_range[1] > excitation_range[0]:
return alex_period - excitation_range[1] + excitation_range[0]
elif excitation_range[1] < excitation_range[0]:
return excitation_range[0] - excitation_range[1]
def _ph_times_compact(ph_times_sel, alex_period, excitation_width):
"""Compact ph_times inplace by removing gaps between alternation periods.
Arguments:
ph_times_sel (array): array of timestamps from one alternation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
Returns nothing, ph_times is modified in-place.
"""
# The formula is
#
# gaps = (ph_times_sel // alex_period)*excitation_width
# ph_times_sel = ph_times_sel - gaps
#
# As a memory optimization the `-gaps` array is reused inplace
times_minusgaps = (ph_times_sel // alex_period) * (-1 * excitation_width)
# The formula is ph_times_sel = ph_times_sel - "gaps"
times_minusgaps += ph_times_sel
return times_minusgaps
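# Illustrative sketch (not part of the original module): with
# alex_period = 4000 and excitation_width = 2000, a timestamp t = 9100 falls
# in the third alternation period (9100 // 4000 == 2), so two gaps are
# removed and the compacted timestamp becomes 9100 - 2 * 2000 = 5100.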
def iter_bursts_start_stop(bursts):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
arr_istart = bursts.istart
arr_istop = bursts.istop + 1
for istart, istop in zip(arr_istart, arr_istop):
yield istart, istop
def iter_bursts_ph(ph_data, bursts, mask=None, compact=False,
alex_period=None, excitation_width=None):
"""Iterator over arrays of photon-data for each burst.
Arguments:
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
mask (boolean mask or None): if not None, is a boolean mask
to select photons in `ph_data` (for example Donor-ch photons).
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
alex_period (scalar): period of alternation in timestamp units.
excitation_width (float): fraction of `alex_period` covered by
current photon selection.
Yields an array with a selection of "photons" for each burst.
"""
if isinstance(mask, slice) and mask == slice(None):
mask = None
if compact:
assert alex_period is not None
assert excitation_width is not None
assert mask is not None
for start, stop in iter_bursts_start_stop(bursts):
ph = ph_data[start:stop]
if mask is not None:
ph = ph[mask[start:stop]]
if compact:
ph = _ph_times_compact(ph, alex_period, excitation_width)
yield ph
def bursts_ph_list(ph_data, bursts, mask=None):
"""Returna list of ph-data for each burst.
ph_data can be either the timestamp array on which the burst search
has been performed or any other array with same size (boolean array,
nanotimes, etc...)
"""
return [ph for ph in iter_bursts_ph(ph_data, bursts, mask=mask)]
def burst_ph_stats(ph_data, bursts, func=np.mean, func_kw=None, **kwargs):
"""Reduce burst photons (timestamps, nanotimes) to a scalar using `func`.
Arguments
ph_data (1D array): array of photon-data (timestamps, nanotimes).
bursts (Bursts object): bursts computed from `ph`.
func (callable): function that takes the burst photon timestamps
as first argument and returns a scalar.
        func_kw (dict): additional keyword arguments passed to `func`
            beyond the photon-data array.
**kwargs: additional arguments passed to :func:`iter_bursts_ph`.
Return
Array one element per burst.
"""
if func_kw is None:
func_kw = {}
burst_stats = []
for burst_ph in iter_bursts_ph(ph_data, bursts, **kwargs):
burst_stats.append(func(burst_ph, **func_kw))
return np.asfarray(burst_stats) # NOTE: asfarray converts None to nan
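# Illustrative sketch (not part of the original module): typical usage is to
# reduce the timestamps of each burst to one number per burst, e.g.
#
#     mean_times = burst_ph_stats(ph, bursts, func=np.mean)
#     widths_clk = burst_ph_stats(ph, bursts, func=np.ptp)
#
# where `ph` and `bursts` are the timestamps and the Bursts object of one
# channel.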
def ph_in_bursts_mask(ph_data_size, bursts):
"""Return bool mask to select all "ph-data" inside any burst."""
mask = zeros(ph_data_size, dtype=bool)
for start, stop in iter_bursts_start_stop(bursts):
mask[start:stop] = True
return mask
def fuse_bursts_direct(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-seconds).
This function is a direct implementation using a single loop.
For a faster implementation see :func:`fuse_bursts_iter`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
fused_bursts_list = []
fused_burst = None
for burst1, burst2 in zip(bursts[:-1], bursts[1:]):
if fused_burst is not None:
burst1c = fused_burst
else:
burst1c = bslib.BurstGap.from_burst(burst1)
separation = burst2.start - burst1c.stop
if separation <= max_delay_clk:
gap = burst2.start - burst1c.stop
gap_counts = burst2.istart - burst1c.istop - 1
if burst1c.istop >= burst2.istart:
gap = 0
gap_counts = 0
fused_burst = bslib.BurstGap(
start = burst1c.start,
istart = burst1c.istart,
stop = burst2.stop,
istop = burst2.istop,
gap = burst1c.gap + gap,
gap_counts = burst1c.gap_counts + gap_counts)
else:
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
fused_burst = None
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst1c))
# Append the last bursts (either a fused or an isolated one)
if fused_burst is not None:
fused_bursts_list.append(fused_burst)
else:
fused_bursts_list.append(bslib.BurstGap.from_burst(burst2))
fused_bursts = bslib.BurstsGap.from_list(fused_bursts_list)
init_num_bursts = bursts.num_bursts
delta_b = init_num_bursts - fused_bursts.num_bursts
pprint(" --> END Fused %d bursts (%.1f%%)\n\n" %
(delta_b, 100 * delta_b / init_num_bursts), mute=not verbose)
return fused_bursts
def fuse_bursts_iter(bursts, ms=0, clk_p=12.5e-9, verbose=True):
"""Fuse bursts separated by less than `ms` (milli-secs).
This function calls iteratively :func:`b_fuse` until there are no more
bursts to fuse. For a slower but more readable version see
:func:`fuse_bursts_direct`.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
verbose (bool): if True print a summary of fused bursts.
Returns:
A BurstsGap object containing the new fused bursts.
"""
init_nburst = bursts.num_bursts
bursts = bslib.BurstsGap(bursts.data)
z = 0
new_nburst, nburst = 0, 1 # starting condition
while new_nburst < nburst:
z += 1
nburst = bursts.num_bursts
bursts = b_fuse(bursts, ms=ms, clk_p=clk_p)
new_nburst = bursts.num_bursts
delta_b = init_nburst - nburst
pprint(" --> END Fused %d bursts (%.1f%%, %d iter)\n\n" %
(delta_b, 100 * delta_b / init_nburst, z), mute=not verbose)
return bursts
def b_fuse(bursts, ms=0, clk_p=12.5e-9):
"""Fuse bursts separated by less than `ms` (milli-secs).
This is a low-level function which fuses pairs of consecutive
bursts separated by less than `ms` millisec.
If there are 3 or more consecutive bursts separated by less than `ms`
only the first 2 are fused.
See :func:`fuse_bursts_iter` or :func:`fuse_bursts_direct` for
higher level functions.
Parameters:
bursts (BurstsGap object): bursts to be fused.
See `phtools.burstsearch` for details.
ms (float): minimum waiting time between bursts (in millisec).
Bursts closer than that will be fused in a single burst.
clk_p (float): clock period or timestamp units in seconds.
Returns:
A BurstsGap object containing the new fused bursts.
"""
max_delay_clk = (ms * 1e-3) / clk_p
# Nearby bursts masks
delays_below_th = (bursts.separation <= max_delay_clk)
if not np.any(delays_below_th):
return bursts
buffer_mask = np.hstack([(False,), delays_below_th, (False,)])
first_bursts = buffer_mask[1:]
second_bursts = buffer_mask[:-1]
# Keep only the first pair in case of more than 2 consecutive bursts
first_bursts ^= (second_bursts * first_bursts)
# note that previous in-place operation also modifies `second_bursts`
both_bursts = first_bursts + second_bursts
# istart is from the first burst, istop is from the second burst
fused_bursts1 = bursts[first_bursts]
fused_bursts2 = bursts[second_bursts]
# Compute gap and gap_counts
gap = fused_bursts2.start - fused_bursts1.stop
gap_counts = fused_bursts2.istart - fused_bursts1.istop - 1 # yes it's -1
overlaping = fused_bursts1.istop >= fused_bursts2.istart
gap[overlaping] = 0
gap_counts[overlaping] = 0
# Assign the new burst data
    # fused_bursts1 has already the right start and istart
fused_bursts1.istop = fused_bursts2.istop
fused_bursts1.stop = fused_bursts2.stop
fused_bursts1.gap += gap
fused_bursts1.gap_counts += gap_counts
# Join fused bursts with the remaining bursts
new_burst = fused_bursts1.join(bursts[~both_bursts], sort=True)
return new_burst
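# Illustrative sketch (not part of the original module): if bursts 0, 1 and 2
# are all closer than `ms`, then after the in-place XOR (which also updates
# `second_bursts`, since both masks are views of `buffer_mask`) only the
# (0, 1) pair is selected in this pass (first_bursts = [T, F, F],
# second_bursts = [F, T, F]); burst 2 is fused in a later iteration driven by
# fuse_bursts_iter.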
def mch_fuse_bursts(MBurst, ms=0, clk_p=12.5e-9, verbose=True):
"""Multi-ch version of `fuse_bursts`. `MBurst` is a list of Bursts objects.
"""
mburst = [b.copy() for b in MBurst] # safety copy
new_mburst = []
ch = 0
for mb in mburst:
ch += 1
pprint(" - - - - - CHANNEL %2d - - - - \n" % ch, not verbose)
if mb.num_bursts == 0:
new_bursts = bslib.Bursts.empty()
else:
new_bursts = fuse_bursts_iter(mb, ms=ms, clk_p=clk_p,
verbose=verbose)
new_mburst.append(new_bursts)
return new_mburst
def burst_stats(mburst, clk_p):
"""Compute average duration, size and burst-delay for bursts in mburst.
"""
nans = [np.nan, np.nan]
width_stats = np.array([[b.width.mean(), b.width.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
height_stats = np.array([[b.counts.mean(), b.counts.std()]
if b.num_bursts > 0 else nans for b in mburst]).T
mean_burst_delay = np.array([b.separation.mean() if b.num_bursts > 0
else np.nan for b in mburst])
return (clk_to_s(width_stats, clk_p) * 1e3, height_stats,
clk_to_s(mean_burst_delay, clk_p))
def print_burst_stats(d):
"""Print some bursts statistics."""
nch = len(d.mburst)
width_ms, height, delays = burst_stats(d.mburst, d.clk_p)
s = "\nNUMBER OF BURSTS: m = %d, L = %d" % (d.m, d.L)
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\n#: "+"%7d "*nch % tuple([b.num_bursts for b in d.mburst])
s += "\nT (us) [BS par] "+"%7d "*nch % tuple(np.array(d.T)*1e6)
s += "\nBG Rat T (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel('all')])
s += "\nBG Rat D (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Dem')])
s += "\nBG Rat A (cps): "+"%7d "*nch % tuple(d.bg_mean[Ph_sel(Dex='Aem')])
s += "\n\nBURST WIDTH STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (ms): "+"%7.3f "*nch % tuple(width_ms[0, :])
s += "\nStd.dev (ms): "+"%7.3f "*nch % tuple(width_ms[1, :])
s += "\n\nBURST SIZE STATS"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nMean (# ph): "+"%7.2f "*nch % tuple(height[0, :])
s += "\nStd.dev (# ph): "+"%7.2f "*nch % tuple(height[1, :])
s += "\n\nBURST MEAN DELAY"
s += "\nPixel: "+"%7d "*nch % tuple(range(1, nch+1))
s += "\nDelay (s): "+"%7.3f "*nch % tuple(delays)
return s
def ES_histog(E, S, bin_step=0.05, E_bins=None, S_bins=None):
"""Returns 2D (ALEX) histogram and bins of bursts (E,S).
"""
if E_bins is None:
E_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
if S_bins is None:
S_bins = np.arange(-0.6, 1.6+1e-4, bin_step)
H, E_bins, S_bins = np.histogram2d(E, S, bins=[E_bins, S_bins])
return H, E_bins, S_bins
def delta(x):
"""Return x.max() - x.min()"""
return x.max() - x.min()
def mask_empty(mask):
"""Returns True if `mask` is empty, otherwise False.
`mask` can be a boolean array or a slice object.
"""
if isinstance(mask, slice):
is_slice_empty = (mask.stop == 0)
return is_slice_empty
else:
        # Boolean array
return not mask.any()
class DataContainer(dict):
"""
Generic class for storing data.
    It is a dictionary in which each key is also exposed as an attribute,
    so d['nt'] and d.nt refer to the same data.
"""
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
for k in self:
dict.__setattr__(self, k, self[k])
def add(self, **kwargs):
"""Adds or updates elements (attributes and/or dict entries). """
self.update(**kwargs)
for k, v in kwargs.items():
setattr(self, k, v)
def delete(self, *args, **kwargs):
"""Delete an element (attribute and/or dict entry). """
warning = kwargs.get('warning', True)
for name in args:
try:
self.pop(name)
except KeyError:
if warning:
print(' WARNING: Name %s not found (dict).' % name)
try:
delattr(self, name)
except AttributeError:
if warning:
print(' WARNING: Name %s not found (attr).' % name)
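# Illustrative sketch (not part of the original module):
#
#     d = DataContainer(nt=[10, 20])
#     d.add(E=[0.3, 0.7])    # now both d['E'] and d.E work
#     d.delete('E')          # removes both the dict entry and the attribute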
class Data(DataContainer):
"""
Container for all the information (timestamps, bursts) of a dataset.
Data() contains all the information of a dataset (name, timestamps, bursts,
correction factors) and provides several methods to perform analysis
(background estimation, burst search, FRET fitting, etc...).
When loading a measurement file a Data() object is created by one
of the loader functions in `loaders.py`. Data() objects can be also
created with :meth:`Data.copy`, :meth:`Data.fuse_bursts()` or
:meth:`Data.select_bursts`.
To add or delete data-attributes use `.add()` or `.delete()` methods.
All the standard data-attributes are listed below.
Note:
Attributes of type "*list*" contain one element per channel.
Each element, in turn, can be an array. For example `.ph_times_m[i]`
is the array of timestamps for channel `i`; or `.nd[i]` is the array
of donor counts in each burst for channel `i`.
**Measurement attributes**
Attributes:
fname (string): measurements file name
nch (int): number of channels
clk_p (float): clock period in seconds for timestamps in `ph_times_m`
ph_times_m (list): list of timestamp arrays (int64). Each array
contains all the timestamps (donor+acceptor) in one channel.
A_em (list): list of boolean arrays marking acceptor timestamps. Each
array is a boolean mask for the corresponding ph_times_m array.
leakage (float or array of floats): leakage (or bleed-through) fraction.
May be scalar or same size as nch.
gamma (float or array of floats): gamma factor.
May be scalar or same size as nch.
D_em (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` for donor emission
D_ex, A_ex (list of boolean arrays): **[ALEX-only]**
boolean mask for `.ph_times_m[i]` during donor or acceptor
excitation
D_ON, A_ON (2-element tuples of int ): **[ALEX-only]**
start-end values for donor and acceptor excitation selection.
alex_period (int): **[ALEX-only]**
duration of the alternation period in clock cycles.
**Background Attributes**
The background is computed with :meth:`Data.calc_bg`
and is estimated in chunks of equal duration called *background periods*.
Estimations are performed in each spot and photon stream.
The following attributes contain the estimated background rate.
Attributes:
bg (dict): background rates for the different photon streams,
channels and background periods. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period) of background rates.
bg_mean (dict): mean background rates across the entire measurement
for the different photon streams and channels. Keys are `Ph_sel`
objects and values are lists (one element per channel) of
background rates.
nperiods (int): number of periods in which timestamps are split for
background calculation
bg_fun (function): function used to compute the background rates
Lim (list): each element of this list is a list of index pairs for
`.ph_times_m[i]` for **first** and **last** photon in each period.
Ph_p (list): each element in this list is a list of timestamps pairs
for **first** and **last** photon of each period.
bg_ph_sel (Ph_sel object): photon selection used by Lim and Ph_p.
See :mod:`fretbursts.ph_sel` for details.
Th_us (dict): thresholds in us used to select the tail of the
interphoton delay distribution. Keys are `Ph_sel` objects
and values are lists (one element per channel) of arrays (one
element per background period).
    Additionally, there are a few deprecated attributes (`bg_dd`, `bg_ad`,
`bg_da`, `bg_aa`, `rate_dd`, `rate_ad`, `rate_da`, `rate_aa` and `rate_m`)
which will be removed in a future version.
Please use :attr:`Data.bg` and :attr:`Data.bg_mean` instead.
**Burst search parameters (user input)**
These are the parameters used to perform the burst search
(see :meth:`burst_search`).
Attributes:
ph_sel (Ph_sel object): photon selection used for burst search.
See :mod:`fretbursts.ph_sel` for details.
m (int): number of consecutive timestamps used to compute the
local rate during burst search
L (int): min. number of photons for a burst to be identified and saved
P (float, probability): valid values [0..1].
Probability that a burst-start is due to a Poisson background.
The employed Poisson rate is the one computed by `.calc_bg()`.
F (float): `(F * background_rate)` is the minimum rate for burst-start
**Burst search data (available after burst search)**
When not specified, parameters marked as (list of arrays) contains arrays
with one element per bursts. `mburst` arrays contain one "row" per burst.
`TT` arrays contain one element per `period` (see above: background
attributes).
Attributes:
mburst (list of Bursts objects): list Bursts() one element per channel.
See :class:`fretbursts.phtools.burstsearch.Bursts`.
TT (list of arrays): list of arrays of *T* values (in sec.). A *T*
value is the maximum delay between `m` photons to have a
burst-start. Each channels has an array of *T* values, one for
each background "period" (see above).
T (array): per-channel mean of `TT`
nd, na (list of arrays): number of donor or acceptor photons during
donor excitation in each burst
nt (list of arrays): total number photons (nd+na+naa)
naa (list of arrays): number of acceptor photons in each burst
during acceptor excitation **[ALEX only]**
nar (list of arrays): number of acceptor photons in each burst
during donor excitation, not corrected for D-leakage and
A-direct-excitation. **[PAX only]**
bp (list of arrays): time period for each burst. Same shape as `nd`.
This is needed to identify the background rate for each burst.
bg_bs (list): background rates used for threshold computation in burst
search (is a reference to `bg`, `bg_dd` or `bg_ad`).
fuse (None or float): if not None, the burst separation in ms below
which bursts have been fused (see `.fuse_bursts()`).
E (list): FRET efficiency value for each burst:
E = na/(na + gamma*nd).
S (list): stoichiometry value for each burst:
S = (gamma*nd + na) /(gamma*nd + na + naa)
"""
# Attribute names containing per-photon data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per photon).
ph_fields = ['ph_times_m', 'nanotimes', 'particles',
'A_em', 'D_em', 'A_ex', 'D_ex']
# Attribute names containing background data.
# The attribute `bg` is a dict with photon-selections as keys and
# list of arrays as values. Each list contains one element per channel and
# each array one element per background period.
# The attributes `.Lim` and `.Ph_p` are lists with one element per channel.
# Each element is a lists-of-tuples (one tuple per background period).
# These attributes do not exist before computing the background.
bg_fields = ['bg', 'Lim', 'Ph_p']
# Attribute names containing per-burst data.
# Each attribute is a list (1 element per ch) of arrays (1 element
# per burst).
# They do not necessarly exist. For example 'naa' exists only for ALEX
# data. Also none of them exist before performing a burst search.
burst_fields = ['E', 'S', 'mburst', 'nd', 'na', 'nt', 'bp', 'nda', 'naa',
'max_rate', 'sbr', 'nar']
# Quantities (scalars or arrays) defining the current set of bursts
burst_metadata = ['m', 'L', 'T', 'TT', 'F', 'FF', 'P', 'PP', 'rate_th',
'bg_bs', 'ph_sel', 'bg_corrected', 'leakage_corrected',
'dir_ex_corrected', 'dithering', 'fuse', 'lsb']
# List of photon selections on which the background is computed
_ph_streams = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'),
Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')]
@property
def ph_streams(self):
if self.alternated:
return self._ph_streams
else:
return [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
def __init__(self, leakage=0., gamma=1., dir_ex=0., **kwargs):
# Default values
init_kw = dict(ALEX=False, _leakage=float(leakage), _gamma=float(gamma),
_dir_ex=float(dir_ex), _beta=1., _chi_ch=1., s=[])
# Override with user data
init_kw.update(**kwargs)
DataContainer.__init__(self, **init_kw)
# def __getattr__(self, name):
# """Single-channel shortcuts for per-channel fields.
#
# Appending a '_' to a per-channel field avoids specifying the channel.
#    For example use d.nd_ instead of d.nd[0].
# """
# msg_missing_attr = "'%s' object has no attribute '%s'" %\
# (self.__class__.__name__, name)
# if name.startswith('_') or not name.endswith('_'):
# raise AttributeError(msg_missing_attr)
#
# field = name[:-1]
# try:
# value = self.__getitem__(field)
# except KeyError:
# raise AttributeError(msg_missing_attr)
# else:
# # Support lists, tuples and object with array interface
# if isinstance(value, (list, tuple)) or isarray(value):
# if len(value) == self.nch:
# return value[0]
# raise ValueError('Name "%s" is not a per-channel field.' % field)
def copy(self, mute=False):
"""Copy data in a new object. All arrays copied except for ph_times_m
"""
pprint('Deep copy executed.\n', mute)
new_d = Data(**self)  # this makes a shallow copy (like a pointer)
# Deep copy (not just reference) of array data
for field in self.burst_fields + self.bg_fields:
# Making sure the field is defined
if field in self:
# Make a deepcopy of the per-channel lists
new_d[field] = copy.deepcopy(self[field])
# Set the attribute: new_d.field = new_d[field]
setattr(new_d, field, new_d[field])
return new_d
##
# Methods for photon timestamps (ph_times_m) access
#
def ph_times_hash(self, hash_name='md5', hexdigest=True):
"""Return an hash for the timestamps arrays.
"""
m = hashlib.new(hash_name)
for ph in self.iter_ph_times():
if isinstance(ph, np.ndarray):
m.update(ph.data)
else:
# TODO Handle ph_times in PyTables files
raise NotImplementedError
if hexdigest:
return m.hexdigest()
else:
return m
@property
def ph_data_sizes(self):
"""Array of total number of photons (ph-data) for each channel.
"""
if not hasattr(self, '_ph_data_sizes'):
# This works both for numpy arrays and pytables arrays
self._ph_data_sizes = np.array([ph.shape[0] for ph in
self.ph_times_m])
return self._ph_data_sizes
def _fix_ph_sel(self, ph_sel):
"""For non-ALEX data fix Aex to allow stable comparison."""
msg = 'Photon selection must be of type `Ph_sel` (it was `%s` instead).'
assert isinstance(ph_sel, Ph_sel), (msg % type(ph_sel))
if self.alternated or ph_sel.Dex != 'DAem':
return ph_sel
else:
return Ph_sel(Dex=ph_sel.Dex, Aex='DAem')
def _is_allph(self, ph_sel):
"""Return whether a photon selection `ph_sel` covers all photon."""
if self.alternated:
return ph_sel == Ph_sel(Dex='DAem', Aex='DAem')
else:
return ph_sel.Dex == 'DAem'
def get_ph_mask(self, ich=0, ph_sel=Ph_sel('all')):
"""Returns a mask for `ph_sel` photons in channel `ich`.
The masks are either boolean arrays or slices (full or empty). In
both cases they can be used to index the timestamps of the
corresponding channel.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
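Example:
    Illustrative sketch (assumes ``d`` is a loaded :class:`Data`
    instance; the selected stream is only an example)::

        mask = d.get_ph_mask(ich=0, ph_sel=Ph_sel(Dex='Aem'))
        # Index the channel-0 timestamps with the returned mask
        ph_dex_aem = d.get_ph_times(0)[mask]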
"""
assert isinstance(ich, int)
if self._is_allph(ph_sel):
# Note that slice(None) is equivalent to [:].
# Also, numpy arrays are not copied when sliced.
# So getting all photons with this mask is efficient
# Note: the drawback is that the slice cannot be indexed
# (where a normal boolean array would)
return slice(None)
# Handle the case when A_em contains slice objects
if isinstance(self.A_em[ich], slice):
if self.A_em[ich] == slice(None):
if ph_sel.Dex == 'Dem':
return slice(0)
if ph_sel.Dex == 'Aem':
return slice(None)
elif self.A_em[ich] == slice(0):
if ph_sel.Dex == 'Dem':
return slice(None)
if ph_sel.Dex == 'Aem':
return slice(0)
else:
msg = 'When a slice, A_em can only be slice(None) or slice(0).'
raise NotImplementedError(msg)
# Base selections
elif ph_sel == Ph_sel(Dex='Dem'):
return self.get_D_em_D_ex(ich)
elif ph_sel == Ph_sel(Dex='Aem'):
return self.get_A_em_D_ex(ich)
elif ph_sel == Ph_sel(Aex='Dem'):
return self.get_D_em(ich) * self.get_A_ex(ich)
elif ph_sel == Ph_sel(Aex='Aem'):
return self.get_A_em(ich) * self.get_A_ex(ich)
# Selection of all photons in one emission ch
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
return self.get_D_em(ich)
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
return self.get_A_em(ich)
# Selection of all photons in one excitation period
elif ph_sel == Ph_sel(Dex='DAem'):
return self.get_D_ex(ich)
elif ph_sel == Ph_sel(Aex='DAem'):
return self.get_A_ex(ich)
# Selection of all photons except for Dem during Aex
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
return self.get_D_ex(ich) + self.get_A_em(ich) * self.get_A_ex(ich)
else:
raise ValueError('Photon selection not implemented.')
def iter_ph_masks(self, ph_sel=Ph_sel('all')):
"""Iterator returning masks for `ph_sel` photons.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
"""
for ich in range(self.nch):
yield self.get_ph_mask(ich, ph_sel=ph_sel)
def get_ph_times(self, ich=0, ph_sel=Ph_sel('all'), compact=False):
"""Returns the timestamps array for channel `ich`.
This method always returns in-memory arrays, even when ph_times_m
is a disk-backed list of arrays.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
"""
ph = self.ph_times_m[ich]
# If not an in-memory ndarray, it is an on-disk array that we need to load
if not isinstance(ph, np.ndarray):
if hasattr(self, '_ph_cache') and self._ph_cache_ich == ich:
ph = self._ph_cache
else:
ph = ph.read()
self._ph_cache = ph
self._ph_cache_ich = ich
ph = ph[self.get_ph_mask(ich, ph_sel=ph_sel)]
if compact:
ph = self._ph_times_compact(ph, ph_sel)
return ph
def iter_ph_times(self, ph_sel=Ph_sel('all'), compact=False):
"""Iterator that returns the arrays of timestamps in `.ph_times_m`.
Arguments:
Same arguments as :meth:`get_ph_mask` except for `ich`.
"""
for ich in range(self.nch):
yield self.get_ph_times(ich, ph_sel=ph_sel, compact=compact)
def _get_ph_mask_single(self, ich, mask_name, negate=False):
"""Get the bool array `mask_name` for channel `ich`.
If the internal "bool array" is a scalar return a slice (full or empty)
"""
mask = np.asarray(getattr(self, mask_name)[ich])
if negate:
mask = np.logical_not(mask)
if len(mask.shape) == 0:
# If mask is a boolean scalar, select all or nothing
mask = slice(None) if mask else slice(0)
return mask
def get_A_em(self, ich=0):
"""Returns a mask to select photons detected in the acceptor ch."""
return self._get_ph_mask_single(ich, 'A_em')
def get_D_em(self, ich=0):
"""Returns a mask to select photons detected in the donor ch."""
return self._get_ph_mask_single(ich, 'A_em', negate=True)
def get_A_ex(self, ich=0):
"""Returns a mask to select photons in acceptor-excitation periods."""
return self._get_ph_mask_single(ich, 'A_ex')
def get_D_ex(self, ich=0):
"""Returns a mask to select photons in donor-excitation periods."""
if self.alternated:
return self._get_ph_mask_single(ich, 'D_ex')
else:
return slice(None)
def get_D_em_D_ex(self, ich=0):
"""Returns a mask of donor photons during donor-excitation."""
if self.alternated:
return self.get_D_em(ich) * self.get_D_ex(ich)
else:
return self.get_D_em(ich)
def get_A_em_D_ex(self, ich=0):
"""Returns a mask of acceptor photons during donor-excitation."""
if self.alternated:
return self.get_A_em(ich) * self.get_D_ex(ich)
else:
return self.get_A_em(ich)
def iter_ph_times_period(self, ich=0, ph_sel=Ph_sel('all')):
"""Iterate through arrays of ph timestamps in each background period.
"""
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period in range(self.nperiods):
yield self.get_ph_times_period(period, ich=ich, mask=mask)
def get_ph_times_period(self, period, ich=0, ph_sel=Ph_sel('all'),
mask=None):
"""Return the array of ph_times in `period`, `ich` and `ph_sel`.
"""
istart, iend = self.Lim[ich][period]
period_slice = slice(istart, iend + 1)
ph_times = self.get_ph_times(ich=ich)
if mask is None:
mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
if isinstance(mask, slice) and mask == slice(None):
ph_times_period = ph_times[period_slice]
else:
ph_times_period = ph_times[period_slice][mask[period_slice]]
return ph_times_period
def _assert_compact(self, ph_sel):
msg = ('Option compact=True requires a photon selection \n'
'from a single excitation period (either Dex or Aex).')
if not self.alternated:
raise ValueError('Option compact=True requires ALEX data.')
if ph_sel.Dex is not None and ph_sel.Aex is not None:
raise ValueError(msg)
def _excitation_width(self, ph_sel, ich=0):
"""Returns duration of alternation period outside selected excitation.
"""
self._assert_compact(ph_sel)
if ph_sel.Aex is None:
excitation_range = self._D_ON_multich[ich]
elif ph_sel.Dex is None:
excitation_range = self._A_ON_multich[ich]
return _excitation_width(excitation_range, self.alex_period)
def _ph_times_compact(self, ph, ph_sel):
"""Return timestamps in one excitation period with "gaps" removed.
It takes timestamps in the specified alternation period and removes
gaps due to time intervals outside the alternation period selection.
This allows correcting the photon-rate distortion introduced by alternation.
Arguments:
ph (array): timestamps array from which gaps have to be removed.
This array **is modified in-place**.
ph_sel (Ph_sel object): photon selection to be compacted.
Note that only one excitation must be specified, but the
emission can be 'Dem', 'Aem' or 'DAem'.
See :mod:`fretbursts.ph_sel` for details.
Returns:
Array of timestamps in one excitation period with "gaps" removed.
"""
excitation_width = self._excitation_width(ph_sel)
return _ph_times_compact(ph, self.alex_period, excitation_width)
def _get_tuple_multich(self, name):
"""Get a n-element tuple field in multi-ch format (1 row per ch)."""
field = np.array(self[name])
if field.ndim == 1:
field = np.repeat([field], self.nch, axis=0)
return field
@property
def _D_ON_multich(self):
return self._get_tuple_multich('D_ON')
@property
def _A_ON_multich(self):
return self._get_tuple_multich('A_ON')
@property
def _det_donor_accept_multich(self):
return self._get_tuple_multich('det_donor_accept')
##
# Methods and properties for burst-data access
#
@property
def num_bursts(self):
"""Array of number of bursts in each channel."""
return np.array([bursts.num_bursts for bursts in self.mburst])
@property
def burst_widths(self):
"""List of arrays of burst duration in seconds. One array per channel.
"""
return [bursts.width * self.clk_p for bursts in self.mburst]
def burst_sizes_pax_ich(self, ich=0, gamma=1., add_aex=True,
beta=1., donor_ref=True, aex_corr=True):
r"""Return corrected burst sizes for channel `ich`. PAX-only.
When `donor_ref = False`, the formula for PAX-enhanced burst size is:
.. math::
\gamma(F_{D_{ex}D_{em}} + F_{DA_{ex}D_{em}}) +
\frac{1}{\alpha} F_{FRET}
where :math:`\alpha` is the Dex duty-cycle (0.5 if alternation
periods are equal) and :math:`F_{FRET}` is `na`, the AemAex
signal after leakage and direct-excitation corrections.
If `add_aex = True`, we add the term:
.. math::
\tilde{F}_{A_{ex}A_{em}} / (\alpha\beta)
where :math:`\tilde{F}_{A_{ex}A_{em}}` is the A emission due to
A excitation (and not due to FRET).
If `aex_corr = False`, then :math:`\alpha` is fixed to 1.
If `donor_ref = True`, the above burst size expression is divided by
:math:`\gamma`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
donor_ref (bool): True or False select different conventions
for burst size correction. For details see
:meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
add_aex (boolean): when True, the returned burst size also
includes photons detected during the DAex. Default is True.
aex_corr (bool): If True, and `add_aex == True`, then divide
the DAexAem term (naa) by the Dex duty cycle. For example,
if Dex and DAex alternation periods are equal, naa is
multiplied by 2. This correction makes the returned value
equal to the denominator of the stoichiometry ratio S_pax
(PAX-enhanced formula). If False, naa is not divided by
the Dex duty-cycle (gamma and beta corrections may still be
applied). If `add_aex == False`, `aex_corr` is ignored.
beta (float): beta correction factor used for the DAexAem term
(naa) of the burst size.
If `add_aex == False` this argument is ignored. Default 1.
Returns
Array of burst sizes for channel `ich`.
See also:
:meth:`Data.burst_sizes_ich`
"""
assert 'PAX' in self.meas_type
naa = self._get_naa_ich(ich) # nar-subtracted
aex_dex_ratio = self._aex_dex_ratio()
alpha = 1
if aex_corr:
alpha = 1 - self._aex_fraction() # Dex duty-cycle
burst_size_dex = self.nd[ich] * gamma + self.na[ich]
burst_size_aex = (self.nda[ich] * gamma +
self.na[ich] * aex_dex_ratio +
naa / (alpha * beta))
burst_size = burst_size_dex
if add_aex:
burst_size += burst_size_aex
if donor_ref:
burst_size /= gamma
return burst_size
def burst_sizes_ich(self, ich=0, gamma=1., add_naa=False,
beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for channel `ich`.
If `donor_ref == True` (default) the gamma corrected burst size is
computed according to::
1) nd + na / gamma
Otherwise, if `donor_ref == False`, the gamma corrected burst size is::
2) nd * gamma + na
With the definition (1) the corrected burst size is equal to the raw
burst size for zero-FRET or D-only bursts (hence the name `donor_ref`).
With the definition (2) the corrected burst size is equal to the raw
burst size for 100%-FRET bursts.
In an ALEX measurement, use `add_naa = True` to add counts from
AexAem stream to the returned burst size. The arguments `gamma` and
`beta` are used to correctly scale `naa` so that it becomes
commensurate with the Dex-corrected burst size. In particular,
when using definition (1) (i.e. `donor_ref = True`), the total
burst size is::
(nd + na/gamma) + naa / (beta * gamma)
Conversely, when using definition (2) (`donor_ref = False`), the
total burst size is::
(nd * gamma + na) + naa / beta
Arguments:
ich (int): the spot number, only relevant for multi-spot.
In single-spot data there is only one channel (`ich=0`)
so this argument may be omitted. Default 0.
add_naa (boolean): when True, add a term for AexAem photons when
computing burst size. Default False.
gamma (float): coefficient for gamma correction of burst
sizes. Default: 1. For more info see explanation above.
beta (float): beta correction factor used for the AexAem term
of the burst size. Default 1. If `add_naa = False` or
measurement is not ALEX this argument is ignored.
For more info see explanation above.
donor_ref (bool): select the convention for burst size correction.
See details above in the function description.
Returns
Array of burst sizes for channel `ich`.
See also :meth:`fretbursts.burstlib.Data.get_naa_corrected`.
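Example:
    Minimal sketch (illustrative values; assumes ``d`` is an ALEX
    :class:`Data` object after burst search)::

        # Definition (1), donor-referenced, including AexAem counts
        sizes = d.burst_sizes_ich(ich=0, gamma=0.7, add_naa=True)
        # Definition (2), acceptor-referenced scaling
        sizes2 = d.burst_sizes_ich(ich=0, gamma=0.7, donor_ref=False)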
"""
if donor_ref:
burst_size = self.nd[ich] + self.na[ich] / gamma
else:
burst_size = self.nd[ich] * gamma + self.na[ich]
if add_naa and self.alternated:
kws = dict(ich=ich, gamma=gamma, beta=beta, donor_ref=donor_ref)
burst_size += self.get_naa_corrected(**kws)
return burst_size
def get_naa_corrected(self, ich=0, gamma=1., beta=1., donor_ref=True):
"""Return corrected naa array for channel `ich`.
Arguments:
ich (int): the spot number, only relevant for multi-spot.
gamma (floats): gamma-factor to use in computing the corrected naa.
beta (float): beta-factor to use in computing the corrected naa.
donor_ref (bool): Select the convention for `naa` correction.
If True (default), uses `naa / (beta * gamma)`. Otherwise,
uses `naa / beta`. A consistent convention should be used
for the corrected Dex burst size in order to make it
commensurable with naa.
See also :meth:`fretbursts.burstlib.Data.burst_sizes_ich`.
"""
naa = self._get_naa_ich(ich)  # duty-cycle corrected in the PAX case
if donor_ref:
correction = (gamma * beta)
else:
correction = beta
return naa / correction
def _get_naa_ich(self, ich=0):
"""Return naa for `ich` both in ALEX and PAX measurements.
In case of PAX, returns naa using the duty-cycle correction::
naa = self.naa - aex_dex_ratio * self.nar
where `self.nar` is equal to `self.na` before leakage and direct
excitation correction, and `aex_dex_ratio` is the Aex/Dex duration ratio.
"""
naa = self.naa[ich]
if 'PAX' in self.meas_type:
# ATTENTION: do not modify naa inplace
naa = naa - self._aex_dex_ratio() * self.nar[ich]
return naa
def burst_sizes(self, gamma=1., add_naa=False, beta=1., donor_ref=True):
"""Return gamma corrected burst sizes for all the channel.
Compute burst sizes by calling, for each channel,
:meth:`burst_sizes_ich`.
See :meth:`burst_sizes_ich` for description of the arguments.
Returns
List of arrays of burst sizes, one array per channel.
"""
kwargs = dict(gamma=gamma, add_naa=add_naa, beta=beta,
donor_ref=donor_ref)
bsize_list = [self.burst_sizes_ich(ich, **kwargs) for ich in
range(self.nch)]
return np.array(bsize_list)
def iter_bursts_ph(self, ich=0):
"""Iterate over (start, stop) indexes to slice photons for each burst.
"""
for istart, istop in iter_bursts_start_stop(self.mburst[ich]):
yield istart, istop
def bursts_slice(self, N1=0, N2=-1):
"""Return new Data object with bursts between `N1` and `N2`
`N1` and `N2` can be scalars or lists (one per ch).
"""
if np.isscalar(N1): N1 = [N1] * self.nch
if np.isscalar(N2): N2 = [N2] * self.nch
assert len(N1) == len(N2) == self.nch
d = Data(**self)
d.add(mburst=[b[n1:n2].copy() for b, n1, n2 in zip(d.mburst, N1, N2)])
d.add(nt=[nt[n1:n2] for nt, n1, n2 in zip(d.nt, N1, N2)])
d.add(nd=[nd[n1:n2] for nd, n1, n2 in zip(d.nd, N1, N2)])
d.add(na=[na[n1:n2] for na, n1, n2 in zip(d.na, N1, N2)])
for name in ('naa', 'nda', 'nar'):
if name in d:
d.add(**{name:
[x[n1:n2] for x, n1, n2 in zip(d[name], N1, N2)]})
d.calc_fret(pax=self.pax) # recalculate fret efficiency
return d
def delete_burst_data(self):
"""Erase all the burst data"""
for name in self.burst_fields + self.burst_metadata:
if name in self:
self.delete(name)
for name in ('E_fitter', 'S_fitter'):
if hasattr(self, name):
delattr(self, name)
##
# Methods for high-level data transformation
#
def slice_ph(self, time_s1=0, time_s2=None, s='slice'):
"""Return a new Data object with ph in [`time_s1`,`time_s2`] (seconds)
If ALEX, this method must be called right after
:func:`fretbursts.loader.alex_apply_periods` (with `delete_ph_t=True`)
and before any background estimation or burst search.
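Example:
    Keep only the first 60 seconds of the measurement (illustrative;
    assumes ``d`` is a :class:`Data` object loaded as described above)::

        d_first_minute = d.slice_ph(time_s1=0, time_s2=60)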
"""
if time_s2 is None:
time_s2 = self.time_max
if time_s2 >= self.time_max and time_s1 <= 0:
return self.copy()
assert time_s1 < self.time_max
t1_clk, t2_clk = int(time_s1 / self.clk_p), int(time_s2 / self.clk_p)
masks = [(ph >= t1_clk) * (ph < t2_clk) for ph in self.iter_ph_times()]
new_d = Data(**self)
for name in self.ph_fields:
if name in self:
new_d[name] = [a[mask] for a, mask in zip(self[name], masks)]
setattr(new_d, name, new_d[name])
new_d.delete_burst_data()
# Shift timestamps to start from 0 to avoid problems with BG calc
for ich in range(self.nch):
ph_i = new_d.get_ph_times(ich)
ph_i -= t1_clk
new_d.s.append(s)
# Delete any cached properties
for attr in ['_time_min', '_time_max']:
if hasattr(new_d, attr):
delattr(new_d, attr)
return new_d
def collapse(self, update_gamma=True, skip_ch=None):
"""Returns an object with 1-spot data joining the multi-spot data.
Arguments:
skip_ch (tuple of ints): list of channels to skip.
If None, keep all channels.
update_gamma (bool): if True, recompute gamma as mean of the
per-channel gamma. If False, do not update gamma.
If True, gamma becomes a single value and the update has the
side effect of recomputing E and S values, discarding
previous per-channel corrections. If False, gamma is not
updated (it stays with multi-spot values) and E and S are
not recomputed.
Note:
When using `update_gamma=False`, burst selections on the
collapsed `Data` object should be done with
`computefret=False`, otherwise any attempt to use multi-spot
gamma for single-spot data will raise an error.
"""
dc = Data(**self)
mch_bursts = self.mburst
if skip_ch is not None:
mch_bursts = [bursts for i, bursts in enumerate(mch_bursts)
if i not in skip_ch]
bursts = bslib.Bursts.merge(mch_bursts, sort=False)
# Sort by start times, and when equal by stop times
indexsort = np.lexsort((bursts.stop, bursts.start))
dc.add(mburst=[bursts[indexsort]])
ich_burst = [i * np.ones(nb) for i, nb in enumerate(self.num_bursts)]
dc.add(ich_burst=np.hstack(ich_burst)[indexsort])
for name in self.burst_fields:
if name in self and name != 'mburst':
# Concatenate arrays along axis = 0
value = [np.concatenate(self[name])[indexsort]]
dc.add(**{name: value})
dc.add(nch=1)
dc.add(_chi_ch=1.)
# NOTE: Updating gamma has the side effect of recomputing E
# (and S if ALEX). We need to update gamma because, in general,
# gamma can be an array with a value for each ch.
# However, the per-channel gamma correction is lost once both
# gamma and chi_ch are made scalar.
if update_gamma:
dc._update_gamma(np.mean(self.get_gamma_array()))
return dc
##
# Utility methods
#
def get_params(self):
"""Returns a plain dict containing only parameters and no arrays.
This can be used as a summary of data analysis parameters.
Additional keys `name` and `Name` are added with values
from `.name` and `.Name()`.
"""
p_names = ['fname', 'clk_p', 'nch', 'ph_sel', 'L', 'm', 'F', 'P',
'_leakage', '_dir_ex', '_gamma', 'bg_time_s',
'T', 'rate_th',
'bg_corrected', 'leakage_corrected', 'dir_ex_corrected',
'dithering', '_chi_ch', 's', 'ALEX']
p_dict = dict(self)
for name in list(p_dict.keys()):  # list() allows popping while iterating
if name not in p_names:
p_dict.pop(name)
p_dict.update(name=self.name, Name=self.Name(), bg_mean=self.bg_mean,
nperiods=self.nperiods)
return p_dict
def expand(self, ich=0, alex_naa=False, width=False):
"""Return per-burst D and A sizes (nd, na) and their background counts.
This method returns for each bursts the corrected signal counts and
background counts in donor and acceptor channels. Optionally, the
burst width is also returned.
Arguments:
ich (int): channel for the bursts (can be not 0 only in multi-spot)
alex_naa (bool): if True and self.ALEX is True, also returns burst sizes
and background for acceptor photons during acceptor excitation
width (bool): whether return the burst duration (in seconds).
Returns:
List of arrays: nd, na, donor bg, acceptor bg.
If `alex_naa` is True returns: nd, na, naa, bg_d, bg_a, bg_aa.
If `width` is True, the burst durations (in seconds) are returned
as the last element.
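Example:
    Illustrative sketch (assumes bursts have been searched in ``d``)::

        nd, na, bg_d, bg_a, widths = d.expand(ich=0, width=True)
        # e.g., per-burst donor count rate (counts / burst duration)
        donor_rate = nd / widths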
"""
period = self.bp[ich]
w = self.mburst[ich].width * self.clk_p
bg_a = self.bg[Ph_sel(Dex='Aem')][ich][period] * w
bg_d = self.bg[Ph_sel(Dex='Dem')][ich][period] * w
res = [self.nd[ich], self.na[ich]]
if self.alternated and alex_naa:
bg_aa = self.bg[Ph_sel(Aex='Aem')][ich][period] * w
res.extend([self.naa[ich], bg_d, bg_a, bg_aa])
else:
res.extend([bg_d, bg_a])
if width:
res.append(w)
return res
def burst_data_ich(self, ich):
"""Return a dict of burst data for channel `ich`."""
bursts = {}
bursts['size_raw'] = self.mburst[ich].counts
bursts['t_start'] = self.mburst[ich].start * self.clk_p
bursts['t_stop'] = self.mburst[ich].stop * self.clk_p
bursts['i_start'] = self.mburst[ich].istart
bursts['i_stop'] = self.mburst[ich].istop
period = bursts['bg_period'] = self.bp[ich]
width = self.mburst[ich].width * self.clk_p
bursts['width_ms'] = width * 1e3
bursts['bg_ad'] = self.bg[Ph_sel(Dex='Aem')][ich][period] * width
bursts['bg_dd'] = self.bg[Ph_sel(Dex='Dem')][ich][period] * width
if self.alternated:
bursts['bg_aa'] = self.bg[Ph_sel(Aex='Aem')][ich][period] * width
bursts['bg_da'] = self.bg[Ph_sel(Aex='Dem')][ich][period] * width
burst_fields = self.burst_fields[:]
burst_fields.remove('mburst')
burst_fields.remove('bp')
for field in burst_fields:
if field in self:
bursts[field] = self[field][ich]
return bursts
@property
def time_max(self):
"""The last recorded time in seconds."""
if not hasattr(self, '_time_max'):
self._time_max = self._time_reduce(last=True, func=max)
return self._time_max
@property
def time_min(self):
"""The first recorded time in seconds."""
if not hasattr(self, '_time_min'):
self._time_min = self._time_reduce(last=False, func=min)
return self._time_min
def _time_reduce(self, last=True, func=max):
"""Return first or last timestamp per-ch, reduced with `func`.
"""
idx = -1 if last else 0
# Get either ph_times_m or ph_times_t
ph_times = None
for ph_times_name in ['ph_times_m', 'ph_times_t']:
try:
ph_times = self[ph_times_name]
except KeyError:
pass
else:
break
if ph_times is not None:
# This works with both numpy arrays and pytables arrays
time = func(t[idx] for t in ph_times if t.shape[0] > 0)
elif 'mburst' in self:
if last:
time = func(bursts[idx].stop for bursts in self.mburst)
else:
time = func(bursts[idx].start for bursts in self.mburst)
else:
raise ValueError("No timestamps or bursts found.")
return time * self.clk_p
def ph_in_bursts_mask_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return mask of all photons inside bursts for channel `ich`.
Returns
Boolean array for photons in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
bursts_mask = ph_in_bursts_mask(self.ph_data_sizes[ich],
self.mburst[ich])
if self._is_allph(ph_sel):
return bursts_mask
else:
ph_sel_mask = self.get_ph_mask(ich=ich, ph_sel=ph_sel)
return ph_sel_mask * bursts_mask
def ph_in_bursts_ich(self, ich=0, ph_sel=Ph_sel('all')):
"""Return timestamps of photons inside bursts for channel `ich`.
Returns
Array of photon timestamps in channel `ich` and photon
selection `ph_sel` that are inside any burst.
"""
ph_all = self.get_ph_times(ich=ich)
bursts_mask = self.ph_in_bursts_mask_ich(ich, ph_sel)
return ph_all[bursts_mask]
##
# Background analysis methods
#
def _obsolete_bg_attr(self, attrname, ph_sel):
print('The Data.%s attribute is deprecated. Please use '
'Data.bg(%s) instead.' % (attrname, repr(ph_sel)))
bg_attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa')
bg_mean_attrs = ('rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
assert attrname in bg_attrs or attrname in bg_mean_attrs
if attrname in bg_attrs:
bg_field = 'bg'
elif attrname in bg_mean_attrs:
bg_field = 'bg_mean'
try:
value = getattr(self, bg_field)[ph_sel]
except AttributeError as e:
# This only happens when trying to access 'bg' because
# 'bg_mean' raises RuntimeError when missing.
msg = 'No attribute `%s` found. Please compute background first.'
raise_from(RuntimeError(msg % bg_field), e)
return value
@property
def rate_m(self):
return self._obsolete_bg_attr('rate_m', Ph_sel('all'))
@property
def rate_dd(self):
return self._obsolete_bg_attr('rate_dd', Ph_sel(Dex='Dem'))
@property
def rate_ad(self):
return self._obsolete_bg_attr('rate_ad', Ph_sel(Dex='Aem'))
@property
def rate_da(self):
return self._obsolete_bg_attr('rate_da', Ph_sel(Aex='Dem'))
@property
def rate_aa(self):
return self._obsolete_bg_attr('rate_aa', Ph_sel(Aex='Aem'))
@property
def bg_dd(self):
return self._obsolete_bg_attr('bg_dd', Ph_sel(Dex='Dem'))
@property
def bg_ad(self):
return self._obsolete_bg_attr('bg_ad', Ph_sel(Dex='Aem'))
@property
def bg_da(self):
return self._obsolete_bg_attr('bg_da', Ph_sel(Aex='Dem'))
@property
def bg_aa(self):
return self._obsolete_bg_attr('bg_aa', Ph_sel(Aex='Aem'))
def calc_bg_cache(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True,
recompute=False):
"""Compute time-dependent background rates for all the channels.
This version is the cached version of :meth:`calc_bg`.
This method tries to load the background data from a cache file.
If saved background data is not found, it computes
the background and stores it to disk.
The arguments are the same as :meth:`calc_bg` with the only addition
of `recompute` (bool) to force a background recomputation even if
a cached version is found.
For more details on the other arguments see :meth:`calc_bg`.
"""
bg_cache.calc_bg_cache(self, fun, time_s=time_s,
tail_min_us=tail_min_us, F_bg=F_bg,
error_metrics=error_metrics, fit_allph=fit_allph,
recompute=recompute)
def _get_auto_bg_th_arrays(self, F_bg=2, tail_min_us0=250):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
Th_us = {}
for ph_sel in self.ph_streams:
th_us = np.zeros(self.nch)
for ich, ph in enumerate(self.iter_ph_times(ph_sel=ph_sel)):
if ph.size > 0:
bg_rate, _ = bg.exp_fit(ph, tail_min_us=tail_min_us0)
th_us[ich] = 1e6 * F_bg / bg_rate
Th_us[ph_sel] = th_us
# Save the input used to generate Th_us
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
return Th_us
def _get_bg_th_arrays(self, tail_min_us, nperiods):
"""Return a dict of threshold values for background estimation.
The keys are the ph selections in self.ph_streams and the values
are 1-D arrays of size nch.
"""
n_streams = len(self.ph_streams)
if np.size(tail_min_us) == 1:
tail_min_us = np.repeat(tail_min_us, n_streams)
elif np.size(tail_min_us) == n_streams:
tail_min_us = np.asarray(tail_min_us)
elif np.size(tail_min_us) != n_streams:
raise ValueError('Wrong tail_min_us length (%d).' %
len(tail_min_us))
th_us = {}
for i, key in enumerate(self.ph_streams):
th_us[key] = np.ones(nperiods) * tail_min_us[i]
# Save the input used to generate Th_us
self.add(bg_th_us_user=tail_min_us)
return th_us
def _clean_bg_data(self):
"""Remove background fields specific of only one fit type.
Computing background with manual or 'auto' threshold results in
different sets of attributes being saved. This method removes these
attributes and should be called before recomputing the background
to avoid having old stale attributes of a previous background fit.
"""
# Attributes specific of manual or 'auto' bg fit
field_list = ['bg_auto_th_us0', 'bg_auto_F_bg', 'bg_th_us_user']
for field in field_list:
if field in self:
self.delete(field)
if hasattr(self, '_bg_mean'):
delattr(self, '_bg_mean')
def _get_num_periods(self, time_s):
"""Return the number of periods using `time_s` as period duration.
"""
duration = self.time_max - self.time_min
# Take the ceil to have at least 1 period
nperiods = np.ceil(duration / time_s)
# Discard last period if negligibly small to avoid problems with
# background fit with very few photons.
if nperiods > 1:
last_period = self.time_max - time_s * (nperiods - 1)
# Discard last period if smaller than 3% of the bg period
if last_period < time_s * 0.03:
nperiods -= 1
return int(nperiods)
def calc_bg(self, fun, time_s=60, tail_min_us=500, F_bg=2,
error_metrics=None, fit_allph=True):
"""Compute time-dependent background rates for all the channels.
Compute background rates for donor, acceptor and both detectors.
The rates are computed every `time_s` seconds, making it possible
to track variations during the measurement.
Arguments:
fun (function): function for background estimation (example
`bg.exp_fit`)
time_s (float, seconds): compute background each time_s seconds
tail_min_us (float, tuple or string): min threshold in us for
photon waiting times to use in background estimation.
If a float, the same threshold is used for 'all', DD, AD and AA
photons and for all the channels.
If a 3- or 4-element tuple, each value is used for the 'all', DD, AD
(and AA) streams respectively; the same values are used for all channels.
If 'auto', the threshold is computed for each stream ('all',
DD, DA, AA) and for each channel as `bg_F * rate_ml0`.
`rate_ml0` is an initial estimation of the rate performed using
:func:`bg.exp_fit` and a fixed threshold (default 250us).
F_bg (float): when `tail_min_us` is 'auto', the factor by which
the initial background estimation is multiplied to compute the
threshold.
error_metrics (string): Specifies the error metric to use.
See :func:`fretbursts.background.exp_fit` for more details.
fit_allph (bool): if True (default), the background for the
all-photons selection is fitted. If False, it is computed as the
sum of the backgrounds of all the other streams.
The background estimation functions are defined in the module
`background` (conventionally imported as `bg`).
Example:
Compute background with `bg.exp_fit` (inter-photon delays MLE
tail fitting), every 20 s, with automatic tail-threshold::
d.calc_bg(bg.exp_fit, time_s=20, tail_min_us='auto')
Returns:
None, all the results are saved in the object itself.
"""
pprint(" - Calculating BG rates ... ")
self._clean_bg_data()
kwargs = dict(clk_p=self.clk_p, error_metrics=error_metrics)
nperiods = self._get_num_periods(time_s)
streams_noall = [s for s in self.ph_streams if s != Ph_sel('all')]
bg_auto_th = tail_min_us == 'auto'
if bg_auto_th:
tail_min_us0 = 250
self.add(bg_auto_th_us0=tail_min_us0, bg_auto_F_bg=F_bg)
auto_th_kwargs = dict(clk_p=self.clk_p, tail_min_us=tail_min_us0)
th_us = {}
for key in self.ph_streams:
th_us[key] = np.zeros(nperiods)
else:
th_us = self._get_bg_th_arrays(tail_min_us, nperiods)
Lim, Ph_p = [], []
BG, BG_err = [], []
Th_us = []
for ich, ph_ch in enumerate(self.iter_ph_times()):
masks = {sel: self.get_ph_mask(ich, ph_sel=sel)
for sel in self.ph_streams}
bins = ((np.arange(nperiods + 1) * time_s + self.time_min) /
self.clk_p)
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
bg = {sel: np.zeros(nperiods) for sel in self.ph_streams}
bg_err = {sel: np.zeros(nperiods) for sel in self.ph_streams}
i1 = 0
for ip in range(nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1 - 1]))
ph_i = ph_ch[i0:i1]
if fit_allph:
sel = Ph_sel('all')
if bg_auto_th:
_bg, _ = fun(ph_i, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i, tail_min_us=th_us[sel][ip], **kwargs)
for sel in streams_noall:
# This supports cases of D-only or A-only timestamps
# where self.A_em[ich] is a bool and not a bool-array
# In this case, the mask of either DexDem or DexAem is
# slice(None) (all-elements selection).
if isinstance(masks[sel], slice):
if masks[sel] == slice(None):
bg[sel][ip] = bg[Ph_sel('all')][ip]
bg_err[sel][ip] = bg_err[Ph_sel('all')][ip]
continue
else:
ph_i_sel = ph_i[masks[sel][i0:i1]]
if ph_i_sel.size > 0:
if bg_auto_th:
_bg, _ = fun(ph_i_sel, **auto_th_kwargs)
th_us[sel][ip] = 1e6 * F_bg / _bg
bg[sel][ip], bg_err[sel][ip] = \
fun(ph_i_sel, tail_min_us=th_us[sel][ip], **kwargs)
if not fit_allph:
bg[Ph_sel('all')] += sum(bg[s] for s in streams_noall)
bg_err[Ph_sel('all')] += sum(bg_err[s] for s in streams_noall)
Lim.append(lim)
Ph_p.append(ph_p)
BG.append(bg)
BG_err.append(bg_err)
Th_us.append(th_us)
# Make Dict Of Lists (DOL) from Lists of Dicts
BG_dol, BG_err_dol, Th_us_dol = {}, {}, {}
for sel in self.ph_streams:
BG_dol[sel] = [bg_ch[sel] for bg_ch in BG]
BG_err_dol[sel] = [err_ch[sel] for err_ch in BG_err]
Th_us_dol[sel] = [th_ch[sel] for th_ch in Th_us]
self.add(bg=BG_dol, bg_err=BG_err_dol, bg_th_us=Th_us_dol,
Lim=Lim, Ph_p=Ph_p,
bg_fun=fun, bg_fun_name=fun.__name__,
bg_time_s=time_s, bg_ph_sel=Ph_sel('all'),
bg_auto_th=bg_auto_th,  # bool, True if using the auto-threshold
)
pprint("[DONE]\n")
@property
def nperiods(self):
return len(self.bg[Ph_sel('all')][0])
@property
def bg_mean(self):
if 'bg' not in self:
raise RuntimeError('No background found, compute it first.')
if not hasattr(self, '_bg_mean'):
self._bg_mean = {k: [bg_ch.mean() for bg_ch in bg_ph_sel]
for k, bg_ph_sel in self.bg.items()}
return self._bg_mean
def recompute_bg_lim_ph_p(self, ph_sel, mute=False):
"""Recompute self.Lim and selp.Ph_p relative to ph selection `ph_sel`
`ph_sel` is a Ph_sel object selecting the timestamps in which self.Lim
and self.Ph_p are being computed.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if self.bg_ph_sel == ph_sel:
return
pprint(" - Recomputing background limits for %s ... " %
str(ph_sel), mute)
bg_time_clk = self.bg_time_s / self.clk_p
Lim, Ph_p = [], []
for ph_ch, lim in zip(self.iter_ph_times(ph_sel), self.Lim):
bins = np.arange(self.nperiods + 1) * bg_time_clk
# Note: histogram bins are half-open, e.g. [a, b)
counts, _ = np.histogram(ph_ch, bins=bins)
lim, ph_p = [], []
i1 = 0
for ip in range(self.nperiods):
i0 = i1
i1 += counts[ip]
lim.append((i0, i1 - 1))
ph_p.append((ph_ch[i0], ph_ch[i1-1]))
Lim.append(lim)
Ph_p.append(ph_p)
self.add(Lim=Lim, Ph_p=Ph_p, bg_ph_sel=ph_sel)
pprint("[DONE]\n", mute)
##
# Burst analysis methods
#
def _calc_burst_period(self):
"""Compute for each burst the "background period" `bp`.
Background periods are the time intervals on which the BG is computed.
"""
P = []
for b, lim in zip(self.mburst, self.Lim):
p = zeros(b.num_bursts, dtype=np.int16)
if b.num_bursts > 0:
istart = b.istart
for i, (l0, l1) in enumerate(lim):
p[(istart >= l0) * (istart <= l1)] = i
P.append(p)
self.add(bp=P)
def _param_as_mch_array(self, par):
"""Regardless of `par` size, return an arrays with size == nch.
if `par` is scalar the arrays repeats the calar multiple times
if `par is a list/array must be of length `nch`.
"""
assert size(par) == 1 or size(par) == self.nch
return np.repeat(par, self.nch) if size(par) == 1 else np.asarray(par)
def bg_from(self, ph_sel):
"""Return the background rates for the specified photon selection.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if ph_sel in self.ph_streams:
return self.bg[ph_sel]
elif ph_sel == Ph_sel(Dex='DAem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Aex='DAem'):
sel = Ph_sel(Aex='Dem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Dem', Aex='Dem'):
sel = Ph_sel(Dex='Dem'), Ph_sel(Aex='Dem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='Aem', Aex='Aem'):
sel = Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem')
bg = [b1 + b2 for b1, b2 in zip(self.bg[sel[0]], self.bg[sel[1]])]
elif ph_sel == Ph_sel(Dex='DAem', Aex='Aem'):
sel = (Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem'), Ph_sel(Aex='Aem'))
bg = [b1 + b2 + b3 for b1, b2, b3 in
zip(self.bg[sel[0]], self.bg[sel[1]], self.bg[sel[2]])]
else:
raise NotImplementedError('Photon selection %s not implemented.' %
str(ph_sel))
return bg
def _calc_T(self, m, P, F=1., ph_sel=Ph_sel('all'), c=-1):
"""If P is None use F, otherwise uses both P *and* F (F defaults to 1).
When P is None, compute the time lag T for burst search according to::
T = (m - 1 - c) / (F * bg_rate)
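For example (purely illustrative numbers), with m = 10, c = -1, F = 6
and a background rate of 2000 cps, T = 10 / 12000 s, i.e. about 0.83 ms.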
"""
# Regardless of F and P sizes, FF and PP are arrays with size == nch
FF = self._param_as_mch_array(F)
PP = self._param_as_mch_array(P)
if P is None:
# NOTE: the following lambda ignores Pi
find_T = lambda m, Fi, Pi, bg: (m - 1 - c) / (bg * Fi)
else:
if F != 1:
print("WARNING: BS prob. th. with modified BG rate (F=%.1f)"
% F)
find_T = lambda m, Fi, Pi, bg: find_optimal_T_bga(bg*Fi, m, 1-Pi)
TT, T, rate_th = [], [], []
bg_bs = self.bg_from(ph_sel)
for bg_ch, F_ch, P_ch in zip(bg_bs, FF, PP):
# All "T" are in seconds
Tch = find_T(m, F_ch, P_ch, bg_ch)
TT.append(Tch)
T.append(Tch.mean())
rate_th.append(np.mean(m / Tch))
self.add(TT=TT, T=T, bg_bs=bg_bs, FF=FF, PP=PP, F=F, P=P,
rate_th=rate_th)
def _burst_search_rate(self, m, L, min_rate_cps, c=-1, ph_sel=Ph_sel('all'),
compact=False, index_allph=True, verbose=True,
pure_python=False):
"""Compute burst search using a fixed minimum photon rate.
The burst starts when, for `m` consecutive photons::
(m - 1 - c) / (t[last] - t[first]) >= min_rate_cps
Arguments:
min_rate_cps (float or array): minimum photon rate for burst start.
If an array, it contains one value per channel.
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
Min_rate_cps = self._param_as_mch_array(min_rate_cps)
mburst = []
T_clk = (m - 1 - c) / Min_rate_cps / self.clk_p
for ich, t_clk in enumerate(T_clk):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
label = '%s CH%d' % (ph_sel, ich + 1) if verbose else None
burstarray = bsearch(ph_bs, L, m, t_clk, label=label, verbose=verbose)
if burstarray.size > 1:
bursts = bslib.Bursts(burstarray)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
mburst.append(bursts)
self.add(mburst=mburst, rate_th=Min_rate_cps, T=T_clk * self.clk_p)
if ph_sel != Ph_sel('all') and index_allph:
self._fix_mburst_from(ph_sel=ph_sel)
def _burst_search_TT(self, m, L, ph_sel=Ph_sel('all'), verbose=True,
compact=False, index_allph=True, pure_python=False,
mute=False):
"""Compute burst search with params `m`, `L` on ph selection `ph_sel`
Requires the list of arrays `self.TT` with the max time-thresholds in
the different burst periods for each channel (use `._calc_T()`).
"""
bsearch = _get_bsearch_func(pure_python=pure_python)
self.recompute_bg_lim_ph_p(ph_sel=ph_sel, mute=mute)
MBurst = []
label = ''
for ich, T in enumerate(self.TT):
ph_bs = ph = self.get_ph_times(ich=ich, ph_sel=ph_sel)
if compact:
ph_bs = self._ph_times_compact(ph, ph_sel)
burstarray_ch_list = []
Tck = T / self.clk_p
for ip, (l0, l1) in enumerate(self.Lim[ich]):
if verbose:
label = '%s CH%d-%d' % (ph_sel, ich + 1, ip)
burstarray = bsearch(ph_bs, L, m, Tck[ip], slice_=(l0, l1 + 1),
label=label, verbose=verbose)
if burstarray.size > 1:
burstarray_ch_list.append(burstarray)
if len(burstarray_ch_list) > 0:
data = np.vstack(burstarray_ch_list)
bursts = bslib.Bursts(data)
if compact:
bursts.recompute_times(ph, out=bursts)
else:
bursts = bslib.Bursts.empty()
MBurst.append(bursts)
self.add(mburst=MBurst)
if ph_sel != Ph_sel('all') and index_allph:
# Convert the burst data to be relative to ph_times_m.
# Convert both Lim/Ph_p and mburst, as they are both needed
# to compute `.bp`.
self.recompute_bg_lim_ph_p(ph_sel=Ph_sel('all'), mute=mute)
self._fix_mburst_from(ph_sel=ph_sel, mute=mute)
def _fix_mburst_from(self, ph_sel, mute=False):
"""Convert burst data from any ph_sel to 'all' timestamps selection.
"""
assert isinstance(ph_sel, Ph_sel) and not self._is_allph(ph_sel)
pprint(' - Fixing burst data to refer to ph_times_m ... ', mute)
for bursts, mask in zip(self.mburst,
self.iter_ph_masks(ph_sel=ph_sel)):
bursts.recompute_index_expand(mask, out=bursts)
pprint('[DONE]\n', mute)
def burst_search(self, L=None, m=10, F=6., P=None, min_rate_cps=None,
ph_sel=Ph_sel('all'), compact=False, index_allph=True,
c=-1, computefret=True, max_rate=False, dither=False,
pure_python=False, verbose=False, mute=False, pax=False):
"""Performs a burst search with specified parameters.
This method performs a sliding-window burst search without
binning the timestamps. The burst starts when the rate of `m`
photons is above a minimum rate, and stops when the rate falls below
the threshold. The result of the burst search is stored in the
`mburst` attribute (a list of Bursts objects, one per channel)
containing start/stop times and indexes. By default, after burst
search, this method computes donor and acceptor counts, applies
burst corrections (background, leakage, etc.) and computes
E (and S in case of ALEX). You can skip these steps by passing
`computefret=False`.
The minimum rate can be explicitly specified with the `min_rate_cps`
argument, or computed as a function of the background rate with the
`F` argument.
Parameters:
m (int): number of consecutive photons used to compute the
photon rate. Typical values 5-20. Default 10.
L (int or None): minimum number of photons in burst. If None
(default) L = m is used.
F (float): defines how many times higher than the background rate
is the minimum rate used for burst search
(`min rate = F * bg. rate`), assuming that `P = None` (default).
Typical values are 3-9. Default 6.
P (float): threshold for burst detection expressed as a
probability that a detected burst is not due to a Poisson
background. If not None, `P` overrides `F`. Note that the
background process is experimentally super-Poisson so this
probability is not physically very meaningful. Using this
argument is discouraged.
min_rate_cps (float or list/array): minimum rate in cps for burst
start. If not None, it has the precedence over `P` and `F`.
If non-scalar, contains one rate per each multispot channel.
Typical values range from 20e3 to 100e3.
ph_sel (Ph_sel object): defines the "photon selection" (or stream)
to be used for burst search. Default: all photons.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
index_allph (bool): if True (default), the indexes of burst start
and stop (`istart`, `istop`) are relative to the full
timestamp array. If False, the indexes are relative to
timestamps selected by the `ph_sel` argument.
c (float): correction factor used in the rate vs time-lags relation.
`c` affects the computation of the burst-search parameter `T`.
When `F` is not None, `T = (m - 1 - c) / (F * bg_rate)`.
When using `min_rate_cps`, `T = (m - 1 - c) / min_rate_cps`.
computefret (bool): if True (default) compute donor and acceptor
counts, apply corrections (background, leakage, direct
excitation) and compute E (and S). If False, skip all these
steps and stop just after the initial burst search.
max_rate (bool): if True compute the max photon rate inside each
burst using the same `m` used for burst search. If False
(default) skip this step.
dither (bool): if True applies dithering corrections to burst
counts. Default False. See :meth:`Data.dither`.
pure_python (bool): if True, uses the pure python functions even
when optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
Otherwise use the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Note:
when using `P` or `F` the background rates are needed, so
`.calc_bg()` must be called before the burst search.
Example:
d.burst_search(m=10, F=6)
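Alternatively, an explicit rate threshold can be used (the value
below is only illustrative)::
    d.burst_search(m=10, min_rate_cps=50e3)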
Returns:
None, all the results are saved in the `Data` object.
"""
ph_sel = self._fix_ph_sel(ph_sel)
if compact:
self._assert_compact(ph_sel)
pprint(" - Performing burst search (verbose=%s) ..." % verbose, mute)
# Erase any previous burst data
self.delete_burst_data()
if L is None:
L = m
if min_rate_cps is not None:
# Saves rate_th in self
self._burst_search_rate(m=m, L=L, min_rate_cps=min_rate_cps, c=c,
ph_sel=ph_sel, compact=compact,
index_allph=index_allph,
verbose=verbose, pure_python=pure_python)
else:
# Compute TT, saves P and F in self
self._calc_T(m=m, P=P, F=F, ph_sel=ph_sel, c=c)
# Use TT and compute mburst
self._burst_search_TT(L=L, m=m, ph_sel=ph_sel, compact=compact,
index_allph=index_allph, verbose=verbose,
pure_python=pure_python, mute=mute)
pprint("[DONE]\n", mute)
pprint(" - Calculating burst periods ...", mute)
self._calc_burst_period() # writes bp
pprint("[DONE]\n", mute)
# (P, F) or rate_th are saved in _calc_T() or _burst_search_rate()
self.add(m=m, L=L, ph_sel=ph_sel)
# The correction flags are both set here and in calc_ph_num() so that
# they are always consistent. Case 1: we perform only burst search
# (with no call to calc_ph_num). Case 2: we re-call calc_ph_num()
# without doing a new burst search
self.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
self._burst_search_postprocess(
computefret=computefret, max_rate=max_rate, dither=dither,
pure_python=pure_python, mute=mute, pax=pax)
def _burst_search_postprocess(self, computefret, max_rate, dither,
pure_python, mute, pax):
if computefret:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
self.calc_fret(count_ph=True, corrections=True, dither=dither,
mute=mute, pure_python=pure_python, pax=pax)
pprint(" [DONE Counting D/A]\n", mute)
if max_rate:
pprint(" - Computing max rates in burst ...", mute)
self.calc_max_rate(m=self.m)
pprint("[DONE]\n", mute)
def calc_ph_num(self, alex_all=False, pure_python=False):
"""Computes number of D, A (and AA) photons in each burst.
Arguments:
alex_all (bool): if True and self.ALEX is True, also computes the
donor-channel photons during acceptor excitation (`nda`).
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
Returns:
Saves `nd`, `na`, `nt` (and, when applicable, `naa`, `nda`) in self.
Returns None.
"""
mch_count_ph_in_bursts = _get_mch_count_ph_in_bursts_func(pure_python)
if not self.alternated:
nt = [b.counts.astype(float) if b.num_bursts > 0 else np.array([])
for b in self.mburst]
A_em = [self.get_A_em(ich) for ich in range(self.nch)]
if isinstance(A_em[0], slice):
# This is to support the case of A-only or D-only data
n0 = [np.zeros(mb.num_bursts) for mb in self.mburst]
if A_em[0] == slice(None):
nd, na = n0, nt # A-only case
elif A_em[0] == slice(0):
nd, na = nt, n0 # D-only case
else:
# This is the usual case with photons in both D and A channels
na = mch_count_ph_in_bursts(self.mburst, A_em)
nd = [t - a for t, a in zip(nt, na)]
assert (nt[0] == na[0] + nd[0]).all()
else:
# The "new style" would be:
#Mask = [m for m in self.iter_ph_masks(Ph_sel(Dex='Dem'))]
Mask = [d_em * d_ex for d_em, d_ex in zip(self.D_em, self.D_ex)]
nd = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * d_ex for a_em, d_ex in zip(self.A_em, self.D_ex)]
na = mch_count_ph_in_bursts(self.mburst, Mask)
Mask = [a_em * a_ex for a_em, a_ex in zip(self.A_em, self.A_ex)]
naa = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(naa=naa)
if alex_all or 'PAX' in self.meas_type:
Mask = [d_em * a_ex for d_em, a_ex in zip(self.D_em, self.A_ex)]
nda = mch_count_ph_in_bursts(self.mburst, Mask)
self.add(nda=nda)
if self.ALEX:
nt = [d + a + aa for d, a, aa in zip(nd, na, naa)]
assert (nt[0] == na[0] + nd[0] + naa[0]).all()
elif 'PAX' in self.meas_type:
nt = [d + a + da + aa for d, a, da, aa in zip(nd, na, nda, naa)]
assert (nt[0] == na[0] + nd[0] + nda[0] + naa[0]).all()
# This is a copy of na which will never be corrected
# (except for background). It is used to compute the
# equivalent of naa for PAX:
# naa~ = naa - nar
# where naa~ is the A emission due to direct excitation
# by A laser during D+A-excitation,
# nar is the uncorrected A-channel signal during D-excitation,
# and naa is the A-channel signal during D+A excitation.
nar = [a.copy() for a in na]
self.add(nar=nar)
self.add(nd=nd, na=na, nt=nt,
bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
def fuse_bursts(self, ms=0, process=True, mute=False):
"""Return a new :class:`Data` object with nearby bursts fused together.
Arguments:
ms (float): fuse all bursts separated by less than `ms` milliseconds.
If < 0 no burst is fused. Note that with ms = 0, overlapping
bursts are fused.
process (bool): if True (default), reprocess the burst data in
the new object applying corrections and computing FRET.
mute (bool): if True suppress any printed output.
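Example:
    Fuse overlapping bursts only, i.e. bursts separated by 0 ms or
    less (illustrative; assumes bursts have been searched in ``d``)::

        d_fused = d.fuse_bursts(ms=0)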
"""
if ms < 0:
return self
mburst = mch_fuse_bursts(self.mburst, ms=ms, clk_p=self.clk_p)
new_d = Data(**self)
for k in ['E', 'S', 'nd', 'na', 'naa', 'nda', 'nar', 'nt', 'lsb', 'bp']:
if k in new_d:
new_d.delete(k)
new_d.add(bg_corrected=False, leakage_corrected=False,
dir_ex_corrected=False, dithering=False)
new_d.add(mburst=mburst, fuse=ms)
if 'bg' in new_d:
new_d._calc_burst_period()
if process:
pprint(" - Counting D and A ph and calculating FRET ... \n", mute)
new_d.calc_fret(count_ph=True, corrections=True,
dither=self.dithering, mute=mute, pax=self.pax)
pprint(" [DONE Counting D/A and FRET]\n", mute)
return new_d
##
# Burst selection and filtering
#
def select_bursts(self, filter_fun, negate=False, computefret=True,
args=None, **kwargs):
"""Return an object with bursts filtered according to `filter_fun`.
This is the main method to select bursts according to different
criteria. The selection rule is defined by the selection function
`filter_fun`. FRETBursts provides a several predefined selection
functions see :ref:`burst_selection`. New selection
functions can be defined and passed to this method to implement
arbitrary selection rules.
Arguments:
filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
of the new Data() point to the same arrays as the original
Data(). Conversely, all the burst data (`mburst`, `nd`, `na`,
etc.) are new, distinct objects.
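Example:
    Sketch of a custom selection function and its use (illustrative;
    the function name and threshold are arbitrary). A selection
    function receives the Data object and a channel index and must
    return a boolean mask plus a short label string::

        def select_size(d, ich, th=30):
            mask = d.burst_sizes_ich(ich) >= th
            return mask, 'size>=%d' % th

        ds = d.select_bursts(select_size, th=30)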
"""
Masks, str_sel = self.select_bursts_mask(filter_fun, negate=negate,
return_str=True, args=args,
**kwargs)
d_sel = self.select_bursts_mask_apply(Masks, computefret=computefret,
str_sel=str_sel)
return d_sel
def select_bursts_mask(self, filter_fun, negate=False, return_str=False,
args=None, **kwargs):
"""Returns mask arrays to select bursts according to `filter_fun`.
The function `filter_fun` is called to compute the mask arrays for
each channel.
This method is useful when you want to apply a selection from one
object to a second object. Otherwise use :meth:`Data.select_bursts`.
Arguments:
filter_fun (function): function used for burst selection
negate (boolean): If True, negates (i.e. take the complementary)
of the selection returned by `filter_fun`. Default `False`.
return_str (bool): if True, also return a string describing the
selection, which can be appended to the measurement name.
If False, return only the list of bool arrays. Default False.
args (tuple or None): positional arguments for `filter_fun()`
kwargs:
Additional keyword arguments passed to `filter_fun()`.
Returns:
A list of boolean arrays (one per channel) that define the burst
selection. If `return_str` is True, returns a 2-tuple: the list of
boolean arrays and the selection string.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask_apply`
"""
# Create the list of bool masks for the bursts selection
if args is None:
args = tuple()
M = [filter_fun(self, i, *args, **kwargs) for i in range(self.nch)]
# Make sure the selection function has the right return signature
msg = 'The second argument returned by `%s` must be a string.'
assert np.all([isinstance(m[1], str) for m in M]), msg % filter_fun
# Make sure all boolean masks have the right size
msg = ("The size of boolean masks returned by `%s` needs to match "
"the number of bursts.")
assert np.all([m[0].size == n for m, n in zip(M, self.num_bursts)]), (
msg % filter_fun)
Masks = [~m[0] if negate else m[0] for m in M]
str_sel = M[0][1]
if return_str:
return Masks, str_sel
else:
return Masks
def select_bursts_mask_apply(self, masks, computefret=True, str_sel=''):
"""Returns a new Data object with bursts selected according to `masks`.
This method select bursts using a list of boolean arrays as input.
Since the user needs to create the boolean arrays first, this method
is useful when experimenting with new selection criteria that don't
have a dedicated selection function. Usually, however, it is easier
to select bursts through :meth:`Data.select_bursts` (using a
selection function).
Arguments:
masks (list of arrays): each element in this list is a boolean
array that selects bursts in a channel.
computefret (boolean): If True (default) recompute donor and
acceptor counts, corrections and FRET quantities (i.e. E, S)
in the new returned object.
Returns:
A new :class:`Data` object containing only the selected bursts.
Note:
In order to save RAM, the timestamp arrays (`ph_times_m`)
of the new Data() point to the same arrays as the original
Data(). Conversely, all the burst data (`mburst`, `nd`, `na`,
etc.) are new, distinct objects.
See also:
:meth:`Data.select_bursts`, :meth:`Data.select_bursts_mask`
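Example:
    Apply a selection computed on one object to a second object
    (illustrative; assumes `select_size` is a selection function as in
    :meth:`Data.select_bursts` and `d1`, `d2` are comparable
    :class:`Data` objects with the same number of bursts per channel)::

        masks = d1.select_bursts_mask(select_size, th=30)
        d2_sel = d2.select_bursts_mask_apply(masks)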
"""
# Attributes of ds point to the same objects of self
ds = Data(**self)
## Copy the per-burst fields that must be filtered
used_fields = [field for field in Data.burst_fields if field in self]
for name in used_fields:
# Recreate the current attribute as a new list to avoid modifying
# the old list that is also in the original object.
# The list is initialized with empty arrays because this is the
# valid value when a ch has no bursts.
empty = bslib.Bursts.empty() if name == 'mburst' else np.array([])
ds.add(**{name: [empty] * self.nch})
# Assign the new data
for ich, mask in enumerate(masks):
if self[name][ich].size == 0:
continue # -> no bursts in ch
# Note that boolean masking implies numpy array copy
# On the contrary slicing only makes a new view of the array
ds[name][ich] = self[name][ich][mask]
# Recompute E and S
if computefret:
ds.calc_fret(count_ph=False, pax=self.pax)
# Add the annotation about the filter function
ds.s = list(self.s + [str_sel]) # using append would modify also self
return ds
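    # Sketch of a cross-object selection (assuming `d` and `d2` are Data
    # objects with the same burst search, and `size_d_filter` is the
    # hypothetical filter shown above): masks computed on `d` are applied
    # to `d2`, e.g. to propagate a selection to differently-corrected data.
    #
    #     masks, label = d.select_bursts_mask(size_d_filter, th=30,
    #                                         return_str=True)
    #     d2_sel = d2.select_bursts_mask_apply(masks, str_sel=label)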
##
# Burst corrections
#
def background_correction(self, relax_nt=False, mute=False):
"""Apply background correction to burst sizes (nd, na,...)
"""
if self.bg_corrected:
return -1
pprint(" - Applying background correction.\n", mute)
self.add(bg_corrected=True)
for ich, bursts in enumerate(self.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
period = self.bp[ich]
nd, na, bg_d, bg_a, width = self.expand(ich, width=True)
nd -= bg_d
na -= bg_a
if 'nar' in self:
# Apply background correction to PAX field nar
self.nar[ich][:] = na
if relax_nt:
# This does not guarantee that nt = nd + na
self.nt[ich] -= self.bg_from(Ph_sel('all'))[ich][period] * width
else:
self.nt[ich] = nd + na
if self.alternated:
bg_aa = self.bg_from(Ph_sel(Aex='Aem'))
self.naa[ich] -= bg_aa[ich][period] * width
if 'nda' in self:
bg_da = self.bg_from(Ph_sel(Aex='Dem'))
self.nda[ich] -= bg_da[ich][period] * width
self.nt[ich] += self.naa[ich]
if 'PAX' in self.meas_type:
self.nt[ich] += self.nda[ich]
def leakage_correction(self, mute=False):
"""Apply leakage correction to burst sizes (nd, na,...)
"""
if self.leakage_corrected:
return -1
elif self.leakage != 0:
pprint(" - Applying leakage correction.\n", mute)
Lk = self.get_leakage_array()
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
self.na[i] -= self.nd[i] * Lk[i]
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(leakage_corrected=True)
def direct_excitation_correction(self, mute=False):
"""Apply direct excitation correction to bursts (ALEX-only).
The applied correction is: na -= naa*dir_ex
"""
if self.dir_ex_corrected:
return -1
elif self.dir_ex != 0:
pprint(" - Applying direct excitation correction.\n", mute)
for i, num_bursts in enumerate(self.num_bursts):
if num_bursts == 0:
continue # if no bursts skip this ch
naa = self.naa[i]
if 'PAX' in self.meas_type:
naa = naa - self.nar[i] # do not modify inplace
self.na[i] -= naa * self.dir_ex
self.nt[i] = self.nd[i] + self.na[i]
if self.ALEX:
self.nt[i] += self.naa[i]
elif 'PAX' in self.meas_type:
self.nt[i] += (self.nda[i] + self.naa[i])
self.add(dir_ex_corrected=True)
def dither(self, lsb=2, mute=False):
"""Add dithering (uniform random noise) to burst counts (nd, na,...).
The dithering amplitude is the range -0.5*lsb .. 0.5*lsb.
"""
if self.dithering:
return -1
pprint(" - Applying burst-size dithering.\n", mute)
self.add(dithering=True)
for nd, na in zip(self.nd, self.na):
nd += lsb * (np.random.rand(nd.size) - 0.5)
na += lsb * (np.random.rand(na.size) - 0.5)
if self.alternated:
for naa in self.naa:
naa += lsb * (np.random.rand(naa.size) - 0.5)
if 'nda' in self:
for nda in self.nda:
nda += lsb * (np.random.rand(nda.size) - 0.5)
self.add(lsb=lsb)
def calc_chi_ch(self, E):
"""Calculate the gamma correction prefactor factor `chi_ch` (array).
Computes `chi_ch`, a channel-dependent prefactor for gamma used
to correct dispersion of E across channels.
Returns:
array of `chi_ch` correction factors (one per spot).
To apply the correction assign the returned array to `Data.chi_ch`.
Upon assignment E values for all bursts will be corrected.
"""
chi_ch = (1 / E.mean() - 1) / (1 / E - 1)
return chi_ch
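    # Usage sketch (assumes a multispot Data object `d` whose per-channel FRET
    # peak was already fitted, e.g. with `fit_E_m`, so that `d.E_fit` exists):
    #
    #     chi_ch = d.calc_chi_ch(d.E_fit)   # one correction factor per spot
    #     d.chi_ch = chi_ch                 # assignment recomputes E (and S)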
def corrections(self, mute=False):
"""Apply corrections on burst-counts: nd, na, nda, naa.
The corrections are: background, leakage (or bleed-through) and
direct excitation (dir_ex).
"""
self.background_correction(mute=mute)
self.leakage_correction(mute=mute)
if self.alternated:
self.direct_excitation_correction(mute=mute)
def _update_corrections(self):
"""Recompute corrections whose flag is True.
Checks the flags .bg_corrected, .leakage_corrected, .dir_ex_corrected,
.dithering and recomputes the correction if the corresponding flag
is True (i.e. if the correction was already applied).
Note that this method is not used for gamma and beta corrections
because these do not affect the `nd`, `na` and `naa` quantities but
are only applied when computing E, S and corrected size.
        Unlike :meth:`corrections`, this method allows recomputing
        corrections that have already been applied.
"""
if 'mburst' not in self:
return # no burst search performed yet
old_bg_corrected = self.bg_corrected
old_leakage_corrected = self.leakage_corrected
old_dir_ex_corrected = self.dir_ex_corrected
old_dithering = self.dithering
self.calc_ph_num() # recompute uncorrected na, nd, nda, naa
if old_bg_corrected:
self.background_correction()
if old_leakage_corrected:
self.leakage_correction()
if old_dir_ex_corrected:
self.direct_excitation_correction()
if old_dithering:
self.dither(self.lsb)
# Recompute E and S with no corrections (because already applied)
self.calc_fret(count_ph=False, corrections=False, pax=self.pax)
@property
def leakage(self):
"""Spectral leakage (bleed-through) of D emission in the A channel.
"""
return self._leakage
@leakage.setter
def leakage(self, leakage):
self._update_leakage(leakage)
def _update_leakage(self, leakage):
"""Apply/update leakage (or bleed-through) correction.
"""
assert (np.size(leakage) == 1) or (np.size(leakage) == self.nch)
self.add(_leakage=np.asfarray(leakage), leakage_corrected=True)
self._update_corrections()
@property
def dir_ex(self):
"""Direct excitation correction factor."""
return self._dir_ex
@dir_ex.setter
def dir_ex(self, value):
self._update_dir_ex(value)
def _update_dir_ex(self, dir_ex):
"""Apply/update direct excitation correction with value `dir_ex`.
"""
assert np.size(dir_ex) == 1
self.add(_dir_ex=float(dir_ex), dir_ex_corrected=True)
self._update_corrections()
@property
def beta(self):
"""Beta factor used to correct S (compensates Dex and Aex unbalance).
"""
return self._beta
@beta.setter
def beta(self, value):
self._update_beta(value)
def _update_beta(self, beta):
"""Change the `beta` value and recompute E and S."""
assert np.size(beta) == 1
self.add(_beta=float(beta))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def chi_ch(self):
"""Per-channel relative gamma factor."""
return self._chi_ch
@chi_ch.setter
def chi_ch(self, value):
self._update_chi_ch(value)
def _update_chi_ch(self, chi_ch):
"""Change the `chi_ch` value and recompute E and S."""
msg = 'chi_ch is a per-channel correction and must have size == nch.'
assert np.size(chi_ch) == self.nch, ValueError(msg)
self.add(_chi_ch=np.asfarray(chi_ch))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
@property
def gamma(self):
"""Gamma correction factor (compensates DexDem and DexAem unbalance).
"""
return self._gamma
@gamma.setter
def gamma(self, value):
self._update_gamma(value)
def _update_gamma(self, gamma):
"""Change the `gamma` value and recompute E and S."""
assert (np.size(gamma) == 1) or (np.size(gamma) == self.nch)
self.add(_gamma=np.asfarray(gamma))
if 'mburst' in self:
# Recompute E and S and delete fitter objects
self.calc_fret(corrections=False, pax=self.pax)
def get_gamma_array(self):
"""Get the array of gamma factors, one per ch.
It always returns an array of gamma factors regardless of
whether `self.gamma` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
gamma = self.gamma
G = np.repeat(gamma, self.nch) if np.size(gamma) == 1 else gamma
        G = G * self.chi_ch  # avoid modifying self._gamma in place
return G
def get_leakage_array(self):
"""Get the array of leakage coefficients, one per ch.
It always returns an array of leakage coefficients regardless of
whether `self.leakage` is scalar or array.
Each element of the returned array is multiplied by `chi_ch`.
"""
leakage = self.leakage
Lk = np.r_[[leakage] * self.nch] if np.size(leakage) == 1 else leakage
        Lk = Lk * self.chi_ch  # avoid modifying self._leakage in place
return Lk
##
# Methods to compute burst quantities: FRET, S, SBR, max_rate, etc ...
#
def calc_sbr(self, ph_sel=Ph_sel('all'), gamma=1.):
"""Return Signal-to-Background Ratio (SBR) for each burst.
Arguments:
ph_sel (Ph_sel object): object defining the photon selection
for which to compute the sbr. Changes the photons used for
burst size and the corresponding background rate. Valid values
here are Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem').
See :mod:`fretbursts.ph_sel` for details.
gamma (float): gamma value used to compute corrected burst size
in the case `ph_sel` is Ph_sel('all'). Ignored otherwise.
Returns:
A list of arrays (one per channel) with one value per burst.
The list is also saved in `sbr` attribute.
"""
ph_sel = self._fix_ph_sel(ph_sel)
sbr = []
for ich, mb in enumerate(self.mburst):
if mb.num_bursts == 0:
sbr.append(np.array([]))
continue # if no bursts skip this ch
nd, na, bg_d, bg_a = self.expand(ich)
nt = self.burst_sizes_ich(ich=ich, gamma=gamma)
signal = {Ph_sel('all'): nt,
Ph_sel(Dex='Dem'): nd, Ph_sel(Dex='Aem'): na}
background = {Ph_sel('all'): bg_d + bg_a,
Ph_sel(Dex='Dem'): bg_d, Ph_sel(Dex='Aem'): bg_a}
sbr.append(signal[ph_sel] / background[ph_sel])
self.add(sbr=sbr)
return sbr
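    # Usage sketch (assumes burst search and background estimation were done
    # on `d`; the threshold value is arbitrary):
    #
    #     sbr_all = d.calc_sbr()                          # all photons
    #     sbr_dd = d.calc_sbr(ph_sel=Ph_sel(Dex='Dem'))   # DexDem photons only
    #     high_sbr = [s > 10 for s in d.sbr]              # one bool array per ch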
def calc_burst_ph_func(self, func, func_kw, ph_sel=Ph_sel('all'),
compact=False, ich=0):
"""Evaluate a scalar function from photons in each burst.
        This method allows calling an arbitrary function on the photon
        timestamps of each burst. For example if `func` is `np.mean` it
        computes the mean time in each burst.
Arguments:
func (callable): function that takes as first argument an array of
timestamps for one burst.
            func_kw (dict): additional keyword arguments passed to `func`.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
compact (bool): if True, a photon selection of only one excitation
period is required and the timestamps are "compacted" by
removing the "gaps" between each excitation period.
        Returns:
            A list of arrays (one element per channel). Each array has size
            equal to the number of bursts in the corresponding channel.
"""
if compact:
self._assert_compact(ph_sel)
kwargs = dict(func=func, func_kw=func_kw, compact=compact)
if self.alternated:
kwargs.update(alex_period=self.alex_period)
if compact:
kwargs.update(excitation_width=self._excitation_width(ph_sel))
results_mch = [burst_ph_stats(ph, bursts, mask=mask, **kwargs)
for ph, mask, bursts in
zip(self.iter_ph_times(),
self.iter_ph_masks(ph_sel=ph_sel),
self.mburst)]
return results_mch
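    # Sketch: mean timestamp of the photons in each burst (one array per
    # channel). `func_kw` is an empty dict here because `np.mean` needs no
    # extra keyword arguments.
    #
    #     mean_times = d.calc_burst_ph_func(func=np.mean, func_kw={})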
def calc_max_rate(self, m, ph_sel=Ph_sel('all'), compact=False,
c=phrates.default_c):
"""Compute the max m-photon rate reached in each burst.
Arguments:
m (int): number of timestamps to use to compute the rate.
As for burst search, typical values are 5-20.
ph_sel (Ph_sel object): object defining the photon selection.
See :mod:`fretbursts.ph_sel` for details.
            c (float): this parameter is used in the definition of the
                rate estimator which is `(m - 1 - c) / (t[last] - t[first])`.
For more details see :func:`.phtools.phrates.mtuple_rates`.
"""
ph_sel = self._fix_ph_sel(ph_sel)
Max_Rate = self.calc_burst_ph_func(func=phrates.mtuple_rates_max,
func_kw=dict(m=m, c=c),
ph_sel=ph_sel, compact=compact)
Max_Rate = [mr / self.clk_p - bg[bp] for bp, bg, mr in
zip(self.bp, self.bg_from(ph_sel), Max_Rate)]
params = dict(m=m, ph_sel=ph_sel, compact=compact)
self.add(max_rate=Max_Rate, max_rate_params=params)
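    # Usage sketch: peak 10-photon rate in each burst (background-subtracted
    # and converted to counts-per-second via `clk_p` above), saved in
    # `d.max_rate`:
    #
    #     d.calc_max_rate(m=10)
    #     d.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='Dem'))  # donor-emission only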
def calc_fret(self, count_ph=False, corrections=True, dither=False,
mute=False, pure_python=False, pax=False):
"""Compute FRET (and stoichiometry if ALEX) for each burst.
        This is a high-level function that can be run after burst search.
By default, it will count Donor and Acceptor photons, perform
corrections (background, leakage), and compute gamma-corrected
FRET efficiencies (and stoichiometry if ALEX).
Arguments:
            count_ph (bool): if True, calls :meth:`calc_ph_num` to count
                Donor and Acceptor photons in each burst (default False).
corrections (bool): if True (default), applies background and
bleed-through correction to burst data
dither (bool): whether to apply dithering to burst size.
Default False.
mute (bool): whether to mute all the printed output. Default False.
pure_python (bool): if True, uses the pure python functions even
when the optimized Cython functions are available.
pax (bool): this has effect only if measurement is PAX.
In this case, when True computes E using a PAX-enhanced
formula: ``(2 na) / (2 na + nd + nda)``.
                Otherwise uses the usual usALEX formula: ``na / (na + nd)``.
Quantities `nd`/`na` are D/A burst counts during D excitation
period, while `nda` is D emission during A excitation period.
Returns:
None, all the results are saved in the object.
"""
if count_ph:
self.calc_ph_num(pure_python=pure_python, alex_all=True)
if dither:
self.dither(mute=mute)
if corrections:
self.corrections(mute=mute)
self._calculate_fret_eff(pax=pax)
if self.alternated:
self._calculate_stoich(pax=pax)
#self._calc_alex_hist()
for attr in ('ES_binwidth', 'ES_hist', 'E_fitter', 'S_fitter'):
# E_fitter and S_fitter are only attributes
# so we cannot use the membership syntax (attr in self)
if hasattr(self, attr):
self.delete(attr, warning=False)
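    # Typical post-burst-search call (sketch): recount photons, apply
    # background/leakage (and direct-excitation, if alternated) corrections
    # and compute E (and S) in one step:
    #
    #     d.calc_fret(count_ph=True, corrections=True)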
def _aex_fraction(self):
"""Proportion of Aex period versus Dex + Aex."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return ((A_ON[1] - A_ON[0]) /
(A_ON[1] - A_ON[0] + D_ON[1] - D_ON[0]))
def _aex_dex_ratio(self):
"""Ratio of Aex and Dex period durations."""
assert self.alternated
D_ON, A_ON = self.D_ON, self.A_ON
return (A_ON[1] - A_ON[0]) / (D_ON[1] - D_ON[0])
def _calculate_fret_eff(self, pax=False):
"""Compute FRET efficiency (`E`) for each burst."""
G = self.get_gamma_array()
if not pax:
E = [na / (g * nd + na) for nd, na, g in zip(self.nd, self.na, G)]
else:
alpha = 1 - self._aex_fraction()
E = [(na / alpha) / (g * (nd + nda) + (na / alpha))
for nd, na, nda, g in zip(self.nd, self.na, self.nda, G)]
self.add(E=E, pax=pax)
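    # Quick numeric check of the non-PAX formula E = na / (gamma*nd + na):
    # with nd = 70, na = 30 and gamma = 1, E = 30 / 100 = 0.3;
    # with gamma = 2, E = 30 / 170 ≈ 0.176.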
def _calculate_stoich(self, pax=False):
"""Compute "stoichiometry" (the `S` parameter) for each burst."""
G = self.get_gamma_array()
naa = self.naa
if 'PAX' in self.meas_type:
naa = [self._get_naa_ich(i) for i in range(self.nch)]
if not pax:
S = [(g * d + a) / (g * d + a + aa / self.beta)
for d, a, aa, g in zip(self.nd, self.na, naa, G)]
else:
# This is a PAX-enhanced formula which uses information
# from both alternation periods in order to compute S
alpha = 1 - self._aex_fraction()
S = [(g * (d + da) + a / alpha) /
(g * (d + da) + a / alpha + aa / (alpha * self.beta))
for d, a, da, aa, g in
zip(self.nd, self.na, self.nda, naa, G)]
self.add(S=S)
def _calc_alex_hist(self, binwidth=0.05):
"""Compute the ALEX histogram with given bin width `bin_step`"""
if 'ES_binwidth' in self and self.ES_binwidth == binwidth:
return
ES_hist_tot = [ES_histog(E, S, binwidth) for E, S in
zip(self.E, self.S)]
E_bins, S_bins = ES_hist_tot[0][1], ES_hist_tot[0][2]
ES_hist = [h[0] for h in ES_hist_tot]
E_ax = E_bins[:-1] + 0.5 * binwidth
S_ax = S_bins[:-1] + 0.5 * binwidth
self.add(ES_hist=ES_hist, E_bins=E_bins, S_bins=S_bins,
E_ax=E_ax, S_ax=S_ax, ES_binwidth=binwidth)
##
# Methods for measurement info
#
def status(self, add="", noname=False):
"""Return a string with burst search, corrections and selection info.
"""
name = "" if noname else self.name
s = name
if 'L' in self: # burst search has been done
if 'rate_th' in self:
s += " BS_%s L%d m%d MR%d" % (self.ph_sel, self.L, self.m,
np.mean(self.rate_th) * 1e-3)
else:
P_str = '' if self.P is None else ' P%s' % self.P
s += " BS_%s L%d m%d F%.1f%s" % \
(self.ph_sel, self.L, self.m, np.mean(self.F), P_str)
s += " G%.3f" % np.mean(self.gamma)
if 'bg_fun' in self: s += " BG%s" % self.bg_fun.__name__[:-4]
if 'bg_time_s' in self: s += "-%ds" % self.bg_time_s
if 'fuse' in self: s += " Fuse%.1fms" % self.fuse
if 'bg_corrected' in self and self.bg_corrected:
s += " bg"
if 'leakage_corrected' in self and self.leakage_corrected:
s += " Lk%.3f" % np.mean(self.leakage*100)
if 'dir_ex_corrected' in self and self.dir_ex_corrected:
s += " dir%.1f" % (self.dir_ex*100)
if 'dithering' in self and self.dithering:
s += " Dith%d" % self.lsb
if 's' in self: s += ' '.join(self.s)
return s + add
@property
def name(self):
"""Measurement name: last subfolder + file name with no extension."""
if not hasattr(self, '_name'):
basename = str(os.path.splitext(os.path.basename(self.fname))[0])
name = basename
last_dir = str(os.path.basename(os.path.dirname(self.fname)))
if len(last_dir) > 0:
name = '_'.join([last_dir, basename])
self.add(_name=name)
return self._name
@name.setter
def name(self, value):
self.add(_name=value)
def Name(self, add=""):
"""Return short filename + status information."""
n = self.status(add=add)
return n
def __repr__(self):
return self.status()
def stats(self, string=False):
"""Print common statistics (BG rates, #bursts, mean size, ...)"""
s = print_burst_stats(self)
if string:
return s
else:
print(s)
##
# FRET fitting methods
#
def fit_E_m(self, E1=-1, E2=2, weights='size', gamma=1.):
"""Fit E in each channel with the mean using bursts in [E1,E2] range.
Note:
            These two fits are equivalent (but the first is much faster)::
fit_E_m(weights='size')
fit_E_minimize(kind='E_size', weights='sqrt')
However `fit_E_minimize()` does not provide a model curve.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, 2)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
# Compute weighted mean
fit_res[ich, 0] = np.dot(w, E[mask])/w.sum()
# Compute weighted variance
fit_res[ich, 1] = np.sqrt(
np.dot(w, (E[mask] - fit_res[ich, 0])**2)/w.sum())
fit_model_F[ich] = mask.sum()/mask.size
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
self.add(fit_E_res=fit_res, fit_E_name='Moments',
E_fit=fit_res[:, 0], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F)
self.fit_E_calc_variance()
return self.E_fit
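    # Usage sketch: weighted-mean fit of the FRET peak in each channel using
    # only bursts with E in the [0, 0.5] range (range values are arbitrary):
    #
    #     E_fit = d.fit_E_m(E1=0, E2=0.5, weights='size')
    #     # per-channel (mean, std) is stored in d.fit_E_res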
def fit_E_ML_poiss(self, E1=-1, E2=2, method=1, **kwargs):
"""ML fit for E modeling size ~ Poisson, using bursts in [E1,E2] range.
"""
assert method in [1, 2, 3]
fit_fun = {1: fret_fit.fit_E_poisson_na, 2: fret_fit.fit_E_poisson_nt,
3: fret_fit.fit_E_poisson_nd}
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = zeros(self.nch)
for ich, mask in zip(range(self.nch), Mask):
nd, na, bg_d, bg_a = self.expand(ich)
bg_x = bg_d if method == 3 else bg_a
fit_res[ich] = fit_fun[method](nd[mask], na[mask],
bg_x[mask], **kwargs)
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Poisson',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_ML_binom(self, E1=-1, E2=2, **kwargs):
"""ML fit for E modeling na ~ Binomial, using bursts in [E1,E2] range.
"""
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fret_fit.fit_E_binom(_d[mask], _a[mask], **kwargs)
for _d, _a, mask in zip(self.nd, self.na, Mask)])
self.add(fit_E_res=fit_res, fit_E_name='MLE: na ~ Binomial',
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_minimize(self, kind='slope', E1=-1, E2=2, **kwargs):
"""Fit E using method `kind` ('slope' or 'E_size') and bursts in [E1,E2]
If `kind` is 'slope' the fit function is fret_fit.fit_E_slope()
If `kind` is 'E_size' the fit function is fret_fit.fit_E_E_size()
Additional arguments in `kwargs` are passed to the fit function.
"""
assert kind in ['slope', 'E_size']
# Build a dictionary fun_d so we'll call the function fun_d[kind]
fun_d = dict(slope=fret_fit.fit_E_slope,
E_size=fret_fit.fit_E_E_size)
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res = np.array([fun_d[kind](nd[mask], na[mask], **kwargs)
for nd, na, mask in
zip(self.nd, self.na, Mask)])
fit_name = dict(slope='Linear slope fit', E_size='E_size fit')
self.add(fit_E_res=fit_res, fit_E_name=fit_name[kind],
E_fit=fit_res, fit_E_curve=False, fit_E_E1=E1, fit_E_E2=E2)
self.fit_E_calc_variance()
return self.E_fit
def fit_E_two_gauss_EM(self, fit_func=two_gaussian_fit_EM,
weights='size', gamma=1., **kwargs):
"""Fit the E population to a Gaussian mixture model using EM method.
Additional arguments in `kwargs` are passed to the fit_func().
"""
fit_res = zeros((self.nch, 5))
for ich, (nd, na, E) in enumerate(zip(self.nd, self.na, self.E)):
w = fret_fit.get_weights(nd, na, weights=weights, gamma=gamma)
fit_res[ich, :] = fit_func(E, weights=w, **kwargs)
self.add(fit_E_res=fit_res, fit_E_name=fit_func.__name__,
E_fit=fit_res[:, 2], fit_E_curve=True,
fit_E_model=two_gauss_mix_pdf,
fit_E_model_F=np.repeat(1, self.nch))
return self.E_fit
def fit_E_generic(self, E1=-1, E2=2, fit_fun=two_gaussian_fit_hist,
weights=None, gamma=1., **fit_kwargs):
"""Fit E in each channel with `fit_fun` using burst in [E1,E2] range.
All the fitting functions are defined in
:mod:`fretbursts.fit.gaussian_fitting`.
Parameters:
            weights (string or None): specifies the type of weights.
                If not None, `weights` will be passed to
                `fret_fit.get_weights()`. `weights` can be not-None only when
                using fit functions that accept weights (the ones ending in
                `_hist` or `_EM`).
gamma (float): passed to `fret_fit.get_weights()` to compute
weights
All the additional arguments are passed to `fit_fun`. For example `p0`
or `mu_fix` can be passed (see `fit.gaussian_fitting` for details).
Note:
Use this method for CDF/PDF or hist fitting.
For EM fitting use :meth:`fit_E_two_gauss_EM()`.
"""
if fit_fun.__name__.startswith("gaussian_fit"):
fit_model = lambda x, p: SS.norm.pdf(x, p[0], p[1])
if 'mu0' not in fit_kwargs: fit_kwargs.update(mu0=0.5)
if 'sigma0' not in fit_kwargs: fit_kwargs.update(sigma0=0.3)
iE, nparam = 0, 2
elif fit_fun.__name__ == "two_gaussian_fit_hist_min_ab":
fit_model = two_gauss_mix_ab
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.5, 0.6, 0.1, 0.5])
iE, nparam = 3, 6
elif fit_fun.__name__.startswith("two_gaussian_fit"):
fit_model = two_gauss_mix_pdf
if 'p0' not in fit_kwargs:
fit_kwargs.update(p0=[0, .05, 0.6, 0.1, 0.5])
iE, nparam = 2, 5
else:
raise ValueError("Fitting function not recognized.")
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
fit_res, fit_model_F = zeros((self.nch, nparam)), zeros(self.nch)
for ich, (nd, na, E, mask) in enumerate(zip(
self.nd, self.na, self.E, Mask)):
if '_hist' in fit_fun.__name__ or '_EM' in fit_fun.__name__:
if weights is None:
w = None
else:
w = fret_fit.get_weights(nd[mask], na[mask],
weights=weights, gamma=gamma)
fit_res[ich, :] = fit_fun(E[mask], weights=w, **fit_kwargs)
else:
# Non-histogram fits (PDF/CDF) do not support weights
fit_res[ich, :] = fit_fun(E[mask], **fit_kwargs)
fit_model_F[ich] = mask.sum()/mask.size
# Save enough info to generate a fit plot (see hist_fret in burst_plot)
self.add(fit_E_res=fit_res, fit_E_name=fit_fun.__name__,
E_fit=fit_res[:, iE], fit_E_curve=True, fit_E_E1=E1,
fit_E_E2=E2, fit_E_model=fit_model,
fit_E_model_F=fit_model_F, fit_E_weights=weights,
fit_E_gamma=gamma, fit_E_kwargs=fit_kwargs)
return self.E_fit
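    # Usage sketch: histogram fit with a two-Gaussian model and an explicit
    # (purely illustrative) initial guess `p0`; `weights` is allowed here
    # because the fit function name ends in `_hist`:
    #
    #     E_fit = d.fit_E_generic(E1=-0.1, E2=1.1,
    #                             fit_fun=two_gaussian_fit_hist,
    #                             p0=[0.1, 0.05, 0.6, 0.1, 0.5],
    #                             weights='size')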
def fit_from(self, D):
"""Copy fit results from another Data() variable.
        Now that the fit methods accept E1, E2 parameters this is probably
        useless.
"""
# NOTE Are 'fit_guess' and 'fit_fix' still used ?
fit_data = ['fit_E_res', 'fit_E_name', 'E_fit', 'fit_E_curve',
                    'fit_E_E1', 'fit_E_E2', 'fit_E_model',
'fit_E_model_F', 'fit_guess', 'fit_fix']
for name in fit_data:
if name in D:
self[name] = D[name]
setattr(self, name, self[name])
# Deal with the normalization to the number of bursts
self.add(fit_model_F=r_[[old_E.size/new_E.size \
for old_E, new_E in zip(D.E, self.E)]])
def fit_E_calc_variance(self, weights='sqrt', dist='DeltaE',
E_fit=None, E1=-1, E2=2):
"""Compute several versions of WEIGHTED std.dev. of the E estimator.
        `weights` are multiplied *BEFORE* squaring the distance/error.
        `dist` can be 'DeltaE' or 'SlopeEuclid'.
Note:
This method is still experimental
"""
assert dist in ['DeltaE', 'SlopeEuclid']
if E_fit is None:
E_fit = self.E_fit
E1 = self.fit_E_E1 if 'fit_E_E1' in self else -1
E2 = self.fit_E_E2 if 'fit_E_E2' in self else 2
else:
# If E_fit is not None the specified E1,E2 range is used
if E1 < 0 and E2 > 1:
pprint('WARN: E1 < 0 and E2 > 1 (wide range of E eff.)\n')
if size(E_fit) == 1 and self.nch > 0:
E_fit = np.repeat(E_fit, self.nch)
assert size(E_fit) == self.nch
E_sel = [Ei[(Ei > E1)*(Ei < E2)] for Ei in self.E]
Mask = self.select_bursts_mask(select_bursts.E, E1=E1, E2=E2)
E_var, E_var_bu, E_var_ph = \
zeros(self.nch), zeros(self.nch), zeros(self.nch)
for i, (Ech, nt, mask) in enumerate(zip(E_sel, self.nt, Mask)):
nt_s = nt[mask]
nd_s, na_s = self.nd[i][mask], self.na[i][mask]
w = fret_fit.get_weights(nd_s, na_s, weights=weights)
info_ph = nt_s.sum()
info_bu = nt_s.size
if dist == 'DeltaE':
distances = (Ech - E_fit[i])
elif dist == 'SlopeEuclid':
distances = fret_fit.get_dist_euclid(nd_s, na_s, E_fit[i])
residuals = distances * w
var = np.mean(residuals**2)
var_bu = np.mean(residuals**2)/info_bu
var_ph = np.mean(residuals**2)/info_ph
#lvar = np.mean(log(residuals**2))
#lvar_bu = np.mean(log(residuals**2)) - log(info_bu)
#lvar_ph = np.mean(log(residuals**2)) - log(info_ph)
E_var[i], E_var_bu[i], E_var_ph[i] = var, var_bu, var_ph
            assert (~np.isnan(E_var[i])).all()  # check there is NO NaN
self.add(E_var=E_var, E_var_bu=E_var_bu, E_var_ph=E_var_ph)
return E_var
| gpl-2.0 | -7,185,521,470,680,014,000 | 41.513032 | 82 | 0.562813 | false |
sl2017/campos | campos_jobber_final/models/campos_jobber_accom_group.py | 1 | 1182 | # -*- coding: utf-8 -*-
# Copyright 2017 Stein & Gabelgaard ApS
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import api, fields, models, _
class CamposJobberAccomGroup(models.Model):
_name = 'campos.jobber.accom.group'
_description = 'Campos Jobber Accom Group' # TODO
name = fields.Char(required=True)
code = fields.Char(required=True)
owner_id = fields.Many2one('campos.event.participant', 'Owner')
accom_participant_ids = fields.One2many('campos.jobber.accomodation', 'accom_group_id', string='Participants')
number_participants = fields.Integer('# participants', compute='_compute_number_participants')
subcamp_id = fields.Many2one('campos.subcamp', 'Sub Camp')
_sql_constraints = [
('code_uniq', 'unique(code)', 'Code already in use. Choose another'),
('name_uniq', 'unique(name)', 'Name already in use. Choose another'),
]
@api.depends('accom_participant_ids')
@api.multi
def _compute_number_participants(self):
for cjag in self:
cjag.number_participants = len(cjag.accom_participant_ids) | agpl-3.0 | 1,359,394,841,940,174,300 | 38.433333 | 114 | 0.64467 | false |
OpenTouch/python-facette | src/facette/v1/groupentry.py | 1 | 1278 | # Copyright (c) 2014 Alcatel-Lucent Enterprise
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from facette.utils import *
import json
GROUP_ENTRY_ORIGIN = "origin"
GROUP_ENTRY_PATTERN = "pattern"
class GroupEntry:
def __init__(self, js=""):
self.entry = {}
self.origin = facette_to_json(GROUP_ENTRY_ORIGIN, js, self.entry)
self.pattern = facette_to_json(GROUP_ENTRY_PATTERN, js, self.entry)
    def set(self, origin=None, pattern=None):
        self.origin = facette_set(origin, GROUP_ENTRY_ORIGIN, self.entry)
        self.pattern = facette_set(pattern, GROUP_ENTRY_PATTERN, self.entry)
def __str__(self):
return json.dumps(self.entry)
def __repr__(self):
return str(self)
| apache-2.0 | -6,053,528,661,753,423,000 | 34.5 | 78 | 0.682316 | false |
homann/stand-browser | test/test_stand_browser_dockwidget.py | 1 | 1123 | # coding=utf-8
"""DockWidget test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__date__ = '2017-02-18'
__copyright__ = 'Copyright 2017, Magnus Homann'
import unittest
from PyQt4.QtGui import QDockWidget
from stand_browser_dockwidget import StandBrowserDockWidget
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class StandBrowserDockWidgetTest(unittest.TestCase):
"""Test dockwidget works."""
def setUp(self):
"""Runs before each test."""
self.dockwidget = StandBrowserDockWidget(None)
def tearDown(self):
"""Runs after each test."""
self.dockwidget = None
def test_dockwidget_ok(self):
"""Test we can click OK."""
pass
if __name__ == "__main__":
    suite = unittest.makeSuite(StandBrowserDockWidgetTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| gpl-2.0 | -1,944,341,467,552,885,500 | 23.955556 | 78 | 0.682102 | false |
macarthur-lab/xbrowse | xbrowse_server/api/views.py | 1 | 67273 | import datetime
import csv
import json
import logging
import sys
import traceback
from collections import defaultdict
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from settings import LOGIN_URL
from seqr.utils.gene_utils import get_queried_genes
from xbrowse.analysis_modules.combine_mendelian_families import get_variants_by_family_for_gene
from xbrowse_server.analysis.diagnostic_search import get_gene_diangostic_info
from xbrowse_server.base.model_utils import update_xbrowse_model, get_or_create_xbrowse_model, delete_xbrowse_model, \
create_xbrowse_model
from xbrowse_server.base.models import Project, Family, FamilySearchFlag, VariantNote, ProjectTag, VariantTag, GeneNote, \
AnalysedBy, VariantFunctionalData
from seqr.models import Individual as SeqrIndividual, MatchmakerResult
from xbrowse_server.api.utils import get_project_and_family_for_user, get_project_and_cohort_for_user, \
add_extra_info_to_variants_project, add_notes_to_genes, get_variant_notes, get_variant_tags, get_variant_functional_data
from xbrowse.variant_search.family import get_variants_with_inheritance_mode
from xbrowse_server.api import utils as api_utils
from xbrowse_server.api import forms as api_forms
from xbrowse_server.mall import get_reference, get_datastore, get_mall
from xbrowse_server.search_cache import utils as cache_utils
from xbrowse_server.decorators import log_request
from xbrowse_server.server_utils import JSONResponse
import utils
from xbrowse.variant_search import cohort as cohort_search
from xbrowse import Variant
from xbrowse.analysis_modules.mendelian_variant_search import MendelianVariantSearchSpec
from xbrowse.core import displays as xbrowse_displays
from xbrowse_server import server_utils
from . import basicauth
from xbrowse_server import user_controls
from django.utils import timezone
from xbrowse_server.phenotips.reporting_utilities import phenotype_entry_metric_for_individual
from xbrowse_server.base.models import ANALYSIS_STATUS_CHOICES
from xbrowse_server.matchmaker.utilities import get_all_clinical_data_for_family
from xbrowse_server.matchmaker.utilities import is_a_valid_patient_structure
from xbrowse_server.matchmaker.utilities import generate_slack_notification_for_seqr_match
from xbrowse_server.matchmaker.utilities import gather_all_annotated_genes_in_seqr
from xbrowse_server.matchmaker.utilities import find_projects_with_families_in_matchbox
from xbrowse_server.matchmaker.utilities import find_families_of_this_project_in_matchbox
from xbrowse_server.matchmaker.utilities import extract_hpo_id_list_from_mme_patient_struct
import requests
from django.contrib.admin.views.decorators import staff_member_required
logger = logging.getLogger()
@csrf_exempt
@basicauth.logged_in_or_basicauth()
@log_request('projects_api')
def projects(request):
"""
List the projects that this user has access to
"""
user_projects = user_controls.get_projects_for_user(request.user)
project_ids = [p.project_id for p in user_projects]
response_format = request.GET.get('format', 'json')
if response_format == 'json':
return JSONResponse({'projects': project_ids})
elif response_format == 'tsv':
return HttpResponse('\n'.join(project_ids))
else:
raise Exception("Invalid format")
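# Usage sketch (hypothetical host, credentials and URL path): the view accepts
# HTTP basic auth and an optional `format` parameter ('json', the default, or
# 'tsv'):
#
#     import requests
#     r = requests.get('https://seqr.example.org/api/projects',
#                      auth=('user', 'password'), params={'format': 'json'})
#     project_ids = r.json()['projects']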
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_api')
def mendelian_variant_search(request):
# TODO: how about we move project getter into the form, and just test for authX here?
# esp because error should be described in json, not just 404
request_dict = request.GET or request.POST
project, family = get_project_and_family_for_user(request.user, request_dict)
form = api_forms.MendelianVariantSearchForm(request_dict)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = family.family_id
try:
variants = api_utils.calculate_mendelian_variant_search(search_spec, family, user=request.user)
except Exception as e:
traceback.print_exc()
return JSONResponse({
'is_error': True,
'error': str(e.args[0]) if e.args else str(e)
})
hashable_search_params = search_spec.toJSON()
hashable_search_params['family_id'] = family.family_id
list_of_variants = [v.toJSON(encode_indiv_id=True) for v in variants]
search_hash = cache_utils.save_results_for_spec(project.project_id, hashable_search_params, list_of_variants)
add_extra_info_to_variants_project(get_reference(), project, variants, add_family_tags=True, add_populations=True)
return_type = request_dict.get('return_type', 'json')
if return_type == 'json':
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_hash': search_hash,
})
elif return_type == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
indiv_ids = family.indiv_ids_with_variant_data()
headers = xbrowse_displays.get_variant_display_headers(get_mall(project), project, indiv_ids)
writer.writerow(headers)
for variant in variants:
fields = xbrowse_displays.get_display_fields_for_variant(get_mall(project), project, variant, indiv_ids, genes_to_return=search_spec.variant_filter.genes)
writer.writerow(fields)
return response
else:
return HttpResponse("Return type not implemented")
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_spec_api')
def mendelian_variant_search_spec(request):
project, family = get_project_and_family_for_user(request.user, request.GET)
search_hash = request.GET.get('search_hash')
search_spec_dict, variants = cache_utils.get_cached_results(project.project_id, search_hash)
search_spec = MendelianVariantSearchSpec.fromJSON(search_spec_dict)
if variants is None:
variants = api_utils.calculate_mendelian_variant_search(search_spec, family, user=request.user)
else:
variants = [Variant.fromJSON(v) for v in variants]
for variant in variants:
variant.set_extra('family_id', family.family_id)
add_extra_info_to_variants_project(get_reference(), project, variants, add_family_tags=True, add_populations=True)
return_type = request.GET.get('return_type')
if return_type == 'json' or not return_type:
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_spec': search_spec_dict,
})
elif request.GET.get('return_type') == 'csv':
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
indiv_ids = family.indiv_ids_with_variant_data()
headers = xbrowse_displays.get_variant_display_headers(get_mall(project), project, indiv_ids)
writer.writerow(headers)
for variant in variants:
fields = xbrowse_displays.get_display_fields_for_variant(get_mall(project), project, variant, indiv_ids)
writer.writerow(fields)
return response
@csrf_exempt
@login_required
@log_request('get_cohort_variants')
def cohort_variant_search(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CohortVariantSearchForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = cohort.cohort_id
variants = api_utils.calculate_mendelian_variant_search(search_spec, cohort, user=request.user)
list_of_variants = [v.toJSON(encode_indiv_id=True) for v in variants]
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), list_of_variants)
api_utils.add_extra_info_to_variants_cohort(get_reference(), cohort, variants)
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('cohort_variant_search_spec_api')
def cohort_variant_search_spec(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
# TODO: use form
search_spec_dict, variants = cache_utils.get_cached_results(project.project_id, request.GET.get('search_hash'))
search_spec = MendelianVariantSearchSpec.fromJSON(search_spec_dict)
if variants is None:
variants = api_utils.calculate_mendelian_variant_search(search_spec, cohort, user=request.user)
else:
variants = [Variant.fromJSON(v) for v in variants]
api_utils.add_extra_info_to_variants_cohort(get_reference(), cohort, variants)
return JSONResponse({
'is_error': False,
'variants': [v.toJSON() for v in variants],
'search_spec': search_spec.toJSON(),
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search')
def cohort_gene_search(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
sys.stderr.write("cohort_gene_search %s %s: starting ... \n" % (project.project_id, cohort.cohort_id))
form = api_forms.CohortGeneSearchForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.cohort_id = cohort.cohort_id
sys.stderr.write("cohort_gene_search %s %s: search spec: %s \n" % (project.project_id, cohort.cohort_id, str(search_spec.toJSON())))
genes = api_utils.calculate_cohort_gene_search(cohort, search_spec)
sys.stderr.write("cohort_gene_search %s %s: get %s genes \n" % (project.project_id, cohort.cohort_id, len(genes)))
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), genes)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
sys.stderr.write("cohort_gene_search %s %s: done adding extra info \n" % (project.project_id, cohort.cohort_id))
return JSONResponse({
'is_error': False,
'genes': genes,
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search_spec')
def cohort_gene_search_spec(request):
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
search_spec, genes = cache_utils.get_cached_results(project.project_id, request.GET.get('search_hash'))
if genes is None:
genes = api_utils.calculate_cohort_gene_search(cohort, search_spec)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
return JSONResponse({
'is_error': False,
'genes': genes,
'search_spec': search_spec,
})
@csrf_exempt
@login_required
@log_request('cohort_gene_search_variants')
def cohort_gene_search_variants(request):
error = None
project, cohort = get_project_and_cohort_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CohortGeneSearchVariantsForm(request.GET)
if form.is_valid():
gene_id = form.cleaned_data['gene_id']
inheritance_mode = form.cleaned_data['inheritance_mode']
variant_filter = form.cleaned_data['variant_filter']
quality_filter = form.cleaned_data['quality_filter']
else:
error = server_utils.form_error_string(form)
if not error:
indivs_with_inheritance, gene_variation = cohort_search.get_individuals_with_inheritance_in_gene(
get_datastore(project),
get_reference(),
cohort.xcohort(),
inheritance_mode,
gene_id,
variant_filter=variant_filter,
quality_filter=quality_filter
)
relevant_variants = gene_variation.get_relevant_variants_for_indiv_ids(cohort.indiv_id_list())
api_utils.add_extra_info_to_variants_project(get_reference(), project, relevant_variants, add_family_tags=True,
add_populations=True)
ret = {
'is_error': False,
'variants': [v.toJSON() for v in relevant_variants],
'gene_info': get_reference().get_gene(gene_id),
}
return JSONResponse(ret)
else:
ret = {
'is_error': True,
'error': error
}
return JSONResponse(ret)
@login_required
@log_request('gene_info')
def gene_info(request, gene_id):
gene = get_reference().get_gene(gene_id)
gene['expression'] = get_reference().get_tissue_expression_display_values(gene_id)
add_notes_to_genes([gene], request.user)
ret = {
'gene': gene,
'is_error': False,
'found_gene': gene is not None,
}
return JSONResponse(ret)
@login_required
@log_request('family_variant_annotation')
def family_variant_annotation(request):
# TODO: this view not like the others - refactor to forms
error = None
for key in ['project_id', 'family_id', 'xpos', 'ref', 'alt']:
if request.GET.get(key) is None:
error = "%s is requred", key
if not error:
project = get_object_or_404(Project, project_id=request.GET.get('project_id'))
family = get_object_or_404(Family, project=project, family_id=request.GET.get('family_id'))
if not project.can_view(request.user):
raise PermissionDenied
if not error:
variant = get_datastore(project).get_single_variant(
family.project.project_id,
family.family_id,
int(request.GET['xpos']),
request.GET['ref'],
request.GET['alt']
)
if not variant:
error = "Variant does not exist"
if not error:
ret = {
'variant': variant.toJSON(),
'is_error': False,
}
else:
ret = {
'is_error': True,
'error': error,
}
return JSONResponse(ret)
@login_required
@log_request('add_flag')
def add_family_search_flag(request):
error = None
for key in ['project_id', 'family_id', 'xpos', 'ref', 'alt', 'note', 'flag_type', 'flag_inheritance_mode']:
        if request.GET.get(key, None) is None:
            error = "%s is required" % key
if not error:
project = get_object_or_404(Project, project_id=request.GET.get('project_id'))
family = get_object_or_404(Family, project=project, family_id=request.GET.get('family_id'))
if not project.can_edit(request.user):
raise PermissionDenied
if not error:
xpos = int(request.GET['xpos'])
ref=request.GET.get('ref')
alt=request.GET['alt']
note=request.GET.get('note')
flag_type=request.GET.get('flag_type')
flag_inheritance_mode=request.GET.get('flag_inheritance_mode')
# todo: more validation - is variant valid?
flag = FamilySearchFlag(user=request.user,
family=family,
xpos=int(request.GET['xpos']),
ref=ref,
alt=alt,
note=note,
flag_type=flag_type,
suggested_inheritance=flag_inheritance_mode,
date_saved=timezone.now(),
)
if not error:
flag.save()
variant = get_datastore(project).get_single_variant(family.project.project_id, family.family_id,
xpos, ref, alt )
api_utils.add_extra_info_to_variants_project(get_reference(), project, [variant], add_family_tags=True,
add_populations=True)
ret = {
'is_error': False,
'variant': variant.toJSON(),
}
else:
ret = {
'is_error': True,
'error': error,
}
return JSONResponse(ret)
@login_required
# @csrf_exempt
@log_request('add_analysed_by')
def add_family_analysed_by(request, data=None):
if not data:
data = request.GET
family_id = data.get('family_id')
project_id = data.get('project_id')
if not (family_id and project_id):
        return HttpResponseBadRequest('family_id and project_id are required')
try:
family = Family.objects.get(project__project_id=project_id, family_id=family_id)
except ObjectDoesNotExist:
raise Http404('No family matches the given query')
if not family.project.can_edit(request.user):
raise PermissionDenied
analysed_by = create_xbrowse_model(AnalysedBy, user=request.user, family=family, date_saved=timezone.now())
return JSONResponse({
'is_error': False,
'analysed_by': analysed_by.toJSON(),
})
@login_required
@log_request('delete_variant_note')
def delete_variant_note(request, note_id):
ret = {
'is_error': False,
}
notes = VariantNote.objects.filter(id=note_id)
if not notes:
ret['is_error'] = True
ret['error'] = 'note id %s not found' % note_id
else:
note = list(notes)[0]
if not note.project.can_edit(request.user):
raise PermissionDenied
delete_xbrowse_model(note)
return JSONResponse(ret)
@login_required
@log_request('add_or_edit_variant_note')
def add_or_edit_variant_note(request):
"""Add a variant note"""
family = None
if 'family_id' in request.GET:
project, family = get_project_and_family_for_user(request.user, request.GET)
else:
project = utils.get_project_for_user(request.user, request.GET)
form = api_forms.VariantNoteForm(project, request.GET)
if not form.is_valid():
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
if 'note_id' in form.cleaned_data and form.cleaned_data['note_id']:
event_type = "edit_variant_note"
notes = VariantNote.objects.filter(
id=form.cleaned_data['note_id'],
project=project,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
if not notes:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % form.cleaned_data['note_id']
})
note = notes[0]
update_xbrowse_model(
note,
user=request.user,
note=form.cleaned_data['note_text'],
submit_to_clinvar=form.cleaned_data['submit_to_clinvar'],
date_saved=timezone.now(),
family=family)
else:
event_type = "add_variant_note"
create_xbrowse_model(
VariantNote,
user=request.user,
project=project,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
note=form.cleaned_data['note_text'],
submit_to_clinvar = form.cleaned_data['submit_to_clinvar'],
date_saved=timezone.now(),
family=family)
notes = get_variant_notes(project=project, family_id=request.GET.get('family_id'), **form.cleaned_data)
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'note': form.cleaned_data['note_text'],
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
})
except Exception as e:
logging.error("Error while logging %s event: %s" % (event_type, e))
return JSONResponse({
'is_error': False,
'notes': notes,
})
@login_required
@log_request('add_or_edit_variant_tags')
def add_or_edit_variant_tags(request):
family = None
if 'family_id' in request.GET:
project, family = get_project_and_family_for_user(request.user, request.GET)
else:
project = utils.get_project_for_user(request.user, request.GET)
form = api_forms.VariantTagsForm(project, request.GET)
if not form.is_valid():
ret = {
'is_error': True,
'error': server_utils.form_error_string(form)
}
return JSONResponse(ret)
variant_tags_to_delete = {
variant_tag.id: variant_tag for variant_tag in VariantTag.objects.filter(
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'])
}
project_tag_events = {}
for project_tag in form.cleaned_data['project_tags']:
# retrieve tags
tag, created = get_or_create_xbrowse_model(
VariantTag,
project_tag=project_tag,
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
if not created:
# this tag already exists so just keep it (eg. remove it from the set of tags that will be deleted)
del variant_tags_to_delete[tag.id]
continue
# this a new tag, so update who saved it and when
project_tag_events[project_tag] = "add_variant_tag"
update_xbrowse_model(
tag,
user=request.user,
date_saved=timezone.now(),
search_url=form.cleaned_data['search_url'])
# delete the tags that are no longer checked.
for variant_tag in variant_tags_to_delete.values():
project_tag_events[variant_tag.project_tag] = "delete_variant_tag"
delete_xbrowse_model(variant_tag)
# Get tags after updating the tag info in the database, so that the new tag info is added to the variant JSON
tags = get_variant_tags(project=project, family_id=request.GET.get('family_id'), **form.cleaned_data)
# log tag creation
for project_tag, event_type in project_tag_events.items():
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'tag': project_tag.tag,
'title': project_tag.title,
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
'search_url': form.cleaned_data.get('search_url'),
})
except Exception as e:
logging.error("Error while logging add_variant_tag event: %s" % e)
return JSONResponse({
'is_error': False,
'tags': tags,
})
@login_required
@csrf_exempt
@log_request('add_or_edit_functional_data')
def add_or_edit_functional_data(request):
request_data = json.loads(request.body)
project, family = get_project_and_family_for_user(request.user, request_data)
form = api_forms.VariantFunctionalDataForm(request_data)
if not form.is_valid():
ret = {
'is_error': True,
'error': server_utils.form_error_string(form)
}
return JSONResponse(ret)
project_tag_events = {}
tag_ids = set()
for tag_data in form.cleaned_data['tags']:
# retrieve tags
tag, created = get_or_create_xbrowse_model(
VariantFunctionalData,
functional_data_tag=tag_data['tag'],
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
)
tag_ids.add(tag.id)
if created:
project_tag_events[tag_data['tag']] = "add_variant_functional_data"
elif tag.metadata != tag_data.get('metadata'):
project_tag_events[tag_data['tag']] = "edit_variant_functional_data"
else:
continue
# this a new/changed tag, so update who saved it and when
update_xbrowse_model(
tag,
metadata=tag_data.get('metadata'),
user=request.user,
date_saved=timezone.now(),
search_url=form.cleaned_data['search_url'])
# delete the tags that are no longer checked.
variant_tags_to_delete = VariantFunctionalData.objects.filter(
family=family,
xpos=form.cleaned_data['xpos'],
ref=form.cleaned_data['ref'],
alt=form.cleaned_data['alt'],
).exclude(id__in=tag_ids)
for variant_tag in variant_tags_to_delete:
project_tag_events[variant_tag.functional_data_tag] = "delete_variant_functional_data"
delete_xbrowse_model(variant_tag)
# get the tags after updating the tag info in the database, so that the new tag info is added to the variant JSON
functional_data = get_variant_functional_data(project=project, family_id=request_data.get('family_id'), **form.cleaned_data)
# log tag creation
for project_tag, event_type in project_tag_events.items():
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'project_id': ''.join(project.project_id),
'family_id': family.family_id,
'tag': project_tag,
'xpos':form.cleaned_data['xpos'],
'ref':form.cleaned_data['ref'],
'alt':form.cleaned_data['alt'],
'username': request.user.username,
'email': request.user.email,
'search_url': form.cleaned_data.get('search_url'),
})
except Exception as e:
logging.error("Error while logging add_variant_tag event: %s" % e)
return JSONResponse({
'is_error': False,
'functional_data': functional_data,
})
@login_required
@log_request('delete_gene_note')
def delete_gene_note(request, note_id):
try:
note = GeneNote.objects.get(id=note_id)
except ObjectDoesNotExist:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % note_id
})
if not note.can_edit(request.user):
raise PermissionDenied
delete_xbrowse_model(note)
return JSONResponse({
'is_error': False,
})
@login_required
@log_request('add_or_edit_gene_note')
def add_or_edit_gene_note(request):
"""Add a gene note"""
form = api_forms.GeneNoteForm(request.GET)
if not form.is_valid():
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
if form.cleaned_data.get('note_id'):
event_type = "edit_gene_note"
try:
note = GeneNote.objects.get(id=form.cleaned_data['note_id'])
except ObjectDoesNotExist:
return JSONResponse({
'is_error': True,
'error': 'note id %s not found' % form.cleaned_data['note_id']
})
if not note.can_edit(request.user):
raise PermissionDenied
update_xbrowse_model(
note,
note=form.cleaned_data['note_text'],
user=request.user,
date_saved=timezone.now(),
)
else:
event_type = "add_variant_note"
note = create_xbrowse_model(
GeneNote,
user=request.user,
gene_id=form.cleaned_data['gene_id'],
note=form.cleaned_data['note_text'],
date_saved=timezone.now(),
)
try:
if not settings.DEBUG: settings.EVENTS_COLLECTION.insert({
'event_type': event_type,
'date': timezone.now(),
'note': form.cleaned_data['note_text'],
'gene_id':form.cleaned_data['gene_id'],
'username': request.user.username,
'email': request.user.email,
})
except Exception as e:
logging.error("Error while logging %s event: %s" % (event_type, e))
return JSONResponse({
'is_error': False,
'note': note.toJSON(request.user),
})
def gene_autocomplete(request):
query = request.GET.get('q', '')
gene_items = get_queried_genes(query, 20)
genes = [{
'value': item['gene_id'],
'label': item['gene_symbol'],
} for item in gene_items]
return JSONResponse(genes)
@login_required
@log_request('variant_info')
def variant_info(request):
pass
@csrf_exempt
@login_required
@log_request('combine_mendelian_families_api')
def combine_mendelian_families(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.CombineMendelianFamiliesForm(request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_group_id = family_group.slug
genes = api_utils.calculate_combine_mendelian_families(family_group, search_spec, user=request.user)
search_hash = cache_utils.save_results_for_spec(project.project_id, search_spec.toJSON(), genes)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
return JSONResponse({
'is_error': False,
'genes': genes,
'search_hash': search_hash,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('mendelian_variant_search_spec_api')
def combine_mendelian_families_spec(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
search_hash = request.GET.get('search_hash')
search_spec, genes = cache_utils.get_cached_results(project.project_id, search_hash)
search_spec_obj = MendelianVariantSearchSpec.fromJSON(search_spec)
if request.GET.get('return_type') != 'csv' or not request.GET.get('group_by_variants'):
if genes is None:
genes = api_utils.calculate_combine_mendelian_families(family_group, search_spec, user=request.user)
api_utils.add_extra_info_to_genes(project, get_reference(), genes)
if request.GET.get('return_type') != 'csv':
return JSONResponse({
'is_error': False,
'genes': genes,
'search_spec': search_spec,
})
else:
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="family_group_results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
writer.writerow(["gene", "# families", "family list", "chrom", "start", "end"])
for gene in genes:
family_id_list = [family_id for (project_id, family_id) in gene["family_id_list"]]
writer.writerow(map(str, [gene["gene_name"], len(family_id_list), " ".join(family_id_list), gene["chr"], gene["start"], gene["end"], ""]))
return response
else:
# download results grouped by variant
indiv_id_list = []
for family in family_group.get_families():
indiv_id_list.extend(family.indiv_ids_with_variant_data())
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results_{}.csv"'.format(search_hash)
writer = csv.writer(response)
        headers = ['genes', 'chr', 'pos', 'ref', 'alt', 'worst_annotation']
        headers.extend(project.get_reference_population_slugs())
        headers.extend(['polyphen', 'sift', 'muttaster', 'fathmm'])
for indiv_id in indiv_id_list:
headers.append(indiv_id)
headers.append(indiv_id+'_gq')
headers.append(indiv_id+'_dp')
writer.writerow(headers)
mall = get_mall(project)
variant_key_to_individual_id_to_variant = defaultdict(dict)
variant_key_to_variant = {}
for family in family_group.get_families():
for variant in get_variants_with_inheritance_mode(
mall,
family.xfamily(),
search_spec_obj.inheritance_mode,
search_spec_obj.variant_filter,
search_spec_obj.quality_filter,
user=request.user):
if len(variant.coding_gene_ids) == 0:
continue
variant_key = (variant.xpos, variant.ref, variant.alt)
variant_key_to_variant[variant_key] = variant
for indiv_id in family.indiv_ids_with_variant_data():
variant_key_to_individual_id_to_variant[variant_key][indiv_id] = variant
for variant_key in sorted(variant_key_to_individual_id_to_variant.keys()):
variant = variant_key_to_variant[variant_key]
individual_id_to_variant = variant_key_to_individual_id_to_variant[variant_key]
genes = [mall.reference.get_gene_symbol(gene_id) for gene_id in variant.coding_gene_ids]
fields = []
fields.append(','.join(genes))
fields.extend([
variant.chr,
str(variant.pos),
variant.ref,
variant.alt,
variant.annotation.get('vep_group', '.'),
])
for ref_population_slug in project.get_reference_population_slugs():
fields.append(variant.annotation['freqs'][ref_population_slug])
for field_key in ['polyphen', 'sift', 'muttaster', 'fathmm']:
fields.append(variant.annotation.get(field_key, ""))
for indiv_id in indiv_id_list:
variant = individual_id_to_variant.get(indiv_id)
genotype = None
if variant is not None:
genotype = variant.get_genotype(indiv_id)
if genotype is None:
fields.extend(['.', '.', '.'])
else:
fields.append("/".join(genotype.alleles) if genotype.alleles else "./.")
#fields[-1] += " %s (%s)" % (indiv_id, genotype.num_alt)
fields.append(str(genotype.gq) if genotype.gq is not None else '.')
fields.append(genotype.extras['dp'] if genotype.extras.get('dp') is not None else '.')
writer.writerow(fields)
return response
@csrf_exempt
@login_required
@log_request('combine_mendelian_families_variants_api')
def combine_mendelian_families_variants(request):
project, family_group = utils.get_project_and_family_group_for_user(request.user, request.GET)
form = api_forms.CombineMendelianFamiliesVariantsForm(request.GET)
if form.is_valid():
variants_grouped = get_variants_by_family_for_gene(
get_mall(project),
[f.xfamily() for f in form.cleaned_data['families']],
form.cleaned_data['inheritance_mode'],
form.cleaned_data['gene_id'],
variant_filter=form.cleaned_data['variant_filter'],
quality_filter=form.cleaned_data['quality_filter'],
user=request.user,
)
variants_by_family = []
for family in form.cleaned_data['families']:
variants = variants_grouped[(family.project.project_id, family.family_id)]
add_extra_info_to_variants_project(get_reference(), family.project, variants, add_family_tags=True, add_populations=True)
variants_by_family.append({
'project_id': family.project.project_id,
'family_id': family.family_id,
'family_name': str(family),
'variants': [v.toJSON() for v in variants],
})
return JSONResponse({
'is_error': False,
'variants_by_family': variants_by_family,
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
@csrf_exempt
@login_required
@log_request('diagnostic_search')
def diagnostic_search(request):
project, family = utils.get_project_and_family_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
form = api_forms.DiagnosticSearchForm(family, request.GET)
if form.is_valid():
search_spec = form.cleaned_data['search_spec']
search_spec.family_id = family.family_id
gene_list = form.cleaned_data['gene_list']
diagnostic_info_list = []
for gene_id in gene_list.gene_id_list():
diagnostic_info = get_gene_diangostic_info(family, gene_id, search_spec.variant_filter)
add_extra_info_to_variants_project(get_reference(), project, diagnostic_info._variants, add_family_tags=True, add_populations=True)
diagnostic_info_list.append(diagnostic_info)
return JSONResponse({
'is_error': False,
'gene_diagnostic_info_list': [d.toJSON() for d in diagnostic_info_list],
'gene_list_info': gene_list.toJSON(details=True),
'data_summary': family.get_data_summary(),
})
else:
return JSONResponse({
'is_error': True,
'error': server_utils.form_error_string(form)
})
def family_gene_lookup(request):
project, family = utils.get_project_and_family_for_user(request.user, request.GET)
if not project.can_view(request.user):
raise PermissionDenied
gene_id = request.GET.get('gene_id')
if not get_reference().is_valid_gene_id(gene_id):
return JSONResponse({
'is_error': True,
'error': 'Invalid gene',
})
family_gene_data = get_gene_diangostic_info(family, gene_id)
add_extra_info_to_variants_project(get_reference(), project, family_gene_data._variants, add_family_tags=True,
add_populations=True)
return JSONResponse({
'is_error': False,
'family_gene_data': family_gene_data.toJSON(),
'data_summary': family.get_data_summary(),
'gene': get_reference().get_gene(gene_id),
})
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_individuals_phenotypes(request,project_id):
"""
    Export all HPO terms entered for this project's individuals. A direct proxy
    of the PhenoTips API.
Args:
project_id
Returns:
A JSON string of HPO terms entered
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
result={}
for individual in project.get_individuals():
ui_display_name = individual.indiv_id
ext_id=individual.phenotips_id
result[ui_display_name] = phenotype_entry_metric_for_individual(project_id, ext_id)['raw']
return JSONResponse(result)
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_family_statuses(request,project_id):
"""
Exports the status of all families in this project
Args:
Project ID
Returns:
All statuses of families
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
status_description_map = {}
for abbrev, details in ANALYSIS_STATUS_CHOICES:
status_description_map[abbrev] = details[0]
result={}
for family in project.get_families():
fam_details =family.toJSON()
result[fam_details['family_id']] = status_description_map.get(family.analysis_status, 'unknown')
return JSONResponse(result)
@csrf_exempt
@login_required
@log_request('API_project_phenotypes')
def export_project_variants(request,project_id):
"""
Export all variants associated to this project
Args:
Project id
Returns:
A JSON object of variant information
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
status_description_map = {}
for abbrev, details in ANALYSIS_STATUS_CHOICES:
status_description_map[abbrev] = details[0]
variants=[]
project_tags = ProjectTag.objects.filter(project__project_id=project_id)
for project_tag in project_tags:
variant_tags = VariantTag.objects.filter(project_tag=project_tag)
for variant_tag in variant_tags:
variant = get_datastore(project).get_single_variant(
project.project_id,
variant_tag.family.family_id if variant_tag.family else '',
variant_tag.xpos,
variant_tag.ref,
variant_tag.alt,
)
variant_json = variant.toJSON() if variant is not None else {'xpos': variant_tag.xpos, 'ref': variant_tag.ref, 'alt': variant_tag.alt}
family_status = ''
if variant_tag.family:
family_status = status_description_map.get(variant_tag.family.analysis_status, 'unknown')
variants.append({"variant":variant_json,
"tag":project_tag.tag,
"description":project_tag.title,
"family":variant_tag.family.toJSON(),
"family_status":family_status})
return JSONResponse(variants)
@login_required
@log_request('matchmaker_individual_add')
def get_submission_candidates(request,project_id,family_id,indiv_id):
"""
Gathers submission candidate individuals from this family
Args:
individual_id: an individual ID
project_id: project this individual belongs to
Returns:
Status code
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
else:
id_map,affected_patient = get_all_clinical_data_for_family(project_id,family_id,indiv_id)
return JSONResponse({
"submission_candidate":affected_patient,
"id_map":id_map
})
@csrf_exempt
@login_required
@log_request('matchmaker_individual_add')
def add_individual(request):
"""
Adds given individual to the local database
Args:
submission information of a single patient is expected in the POST data
Returns:
Submission status information
"""
affected_patient = json.loads(request.POST.get("patient_data", "wasn't able to parse patient_data in POST!"))
seqr_id = request.POST.get("localId", "wasn't able to parse Id (as seqr knows it) in POST!")
project_id = request.POST.get("projectId", "wasn't able to parse project Id in POST!")
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=seqr_id, family__project=project.seqr_project)
submission = json.dumps({'patient':affected_patient})
validity_check=is_a_valid_patient_structure(affected_patient)
if not validity_check['status']:
return JSONResponse({
'http_result':{"message":validity_check['reason'] + ", the patient was not submitted to matchmaker"},
'status_code':400,
})
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
result = requests.post(url=settings.MME_ADD_INDIVIDUAL_URL,
headers=headers,
data=submission)
#if successfully submitted to MME, persist info
if result.status_code==200 or result.status_code==409:
individual.mme_submitted_data = {'patient':affected_patient}
individual.mme_submitted_date = datetime.datetime.now()
individual.mme_deleted_date = None
individual.mme_deleted_by = None
individual.save()
#update the contact information store if any updates were made
updated_contact_name = affected_patient['contact']['name']
updated_contact_href = affected_patient['contact']['href']
try:
project = Project.objects.get(project_id=project_id)
update_xbrowse_model(
project,
mme_primary_data_owner=updated_contact_name,
mme_contact_url=updated_contact_href,
)
except ObjectDoesNotExist:
logger.error("ERROR: couldn't update the contact name and href of MME submission: ", updated_contact_name, updated_contact_href)
#seqr_project.save()
if result.status_code==401:
return JSONResponse({
'http_result':{"message":"sorry, authorization failed, I wasn't able to insert that individual"},
'status_code':result.status_code,
})
return JSONResponse({
'http_result':result.json(),
'status_code':result.status_code,
})
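
# Illustrative only: a minimal sketch of the POST body `add_individual` above expects.
# The field names mirror what the view reads (patient_data, localId, projectId) and what
# other views in this module access inside the patient structure (contact name/href, id,
# features, genomicFeatures); the identifiers and coordinates themselves are made up and
# this is not the authoritative MME schema.
EXAMPLE_MME_ADD_INDIVIDUAL_POST = {
    'patient_data': json.dumps({
        'id': 'example-submission-id',
        'contact': {'name': 'Jane Doe', 'href': 'mailto:[email protected]'},
        'features': [{'id': 'HP:0001263', 'label': 'Global developmental delay'}],
        'genomicFeatures': [{'gene': {'id': 'ENSG00000012048'},
                             'variant': {'start': 43045629, 'end': 43045630}}],
    }),
    'localId': 'example-seqr-individual-id',
    'projectId': 'example-project-id',
}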
@csrf_exempt
@login_required
@log_request('matchmaker_individual_delete')
def delete_individual(request,project_id, indiv_id):
"""
Deletes a given individual from the local database
Args:
Project ID of project
Individual ID of a single patient to delete
Returns:
Delete confirmation
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
#find the latest ID that was used in submission which might defer from seqr ID
matchbox_id=indiv_id
if individual.mme_submitted_date:
if individual.mme_deleted_date:
return JSONResponse({"status_code":402,"message":"that individual has already been deleted"})
else:
matchbox_id = individual.mme_submitted_data['patient']['id']
logger.info("using matchbox ID: %s" % (matchbox_id))
payload = {"id":matchbox_id}
result = requests.delete(url=settings.MME_DELETE_INDIVIDUAL_URL,
headers=headers,
data=json.dumps(payload))
#if successfully deleted from matchbox/MME, persist that detail
if result.status_code == 200:
deleted_date = datetime.datetime.now()
individual.mme_deleted_date = deleted_date
individual.mme_deleted_by = request.user
individual.save()
return JSONResponse({"status_code":result.status_code,"message":result.text, 'deletion_date':str(deleted_date)})
else:
return JSONResponse({"status_code":404,"message":result.text})
return JSONResponse({"status_code":result.status_code,"message":result.text})
@login_required
@log_request('matchmaker_family_submissions')
def get_family_submissions(request,project_id,family_id):
"""
Gets the last 4 submissions for this family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
else:
family = get_object_or_404(Family, project=project, family_id=family_id)
family_submissions=[]
family_members_submitted=[]
for individual in family.individual_set.filter(seqr_individual__mme_submitted_date__isnull=False):
family_submissions.append({'submitted_data': individual.seqr_individual.mme_submitted_data,
'hpo_details': extract_hpo_id_list_from_mme_patient_struct(individual.seqr_individual.mme_submitted_data),
'seqr_id': individual.indiv_id,
'family_id': family_id,
'project_id': project_id,
'insertion_date': individual.seqr_individual.mme_submitted_date.strftime("%b %d %Y %H:%M:%S"),
'deletion': individual.seqr_individual.mme_deleted_date,
})
family_members_submitted.append(individual.indiv_id)
#TODO: figure out when more than 1 indi for a family. For now returning a list. Eventually
#this must be the latest submission for every indiv in a family
return JSONResponse({
"family_submissions":family_submissions,
"family_members_submitted":family_members_submitted
})
@login_required
@csrf_exempt
@log_request('match_internally_and_externally')
def match_internally_and_externally(request,project_id,indiv_id):
"""
Looks for matches for the given individual. Expects a single patient (MME spec) in the POST
data field under key "patient_data"
Args:
project_id,indiv_id and POST all data in POST under key "patient_data"
Returns:
Status code and results
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
patient_data = request.POST.get("patient_data")
if patient_data is None:
r = HttpResponse("wasn't able to parse patient data field in POST!",status=400)
return r
#find details on HPO terms and start aggregating in a map to send back with reply
hpo_map={}
extract_hpo_id_list_from_mme_patient_struct(json.loads(patient_data),hpo_map)
headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
results={}
#first look in the local MME database
internal_result = requests.post(url=settings.MME_LOCAL_MATCH_URL,
headers=headers,
data=patient_data
)
ids={}
for internal_res in internal_result.json().get('results',[]):
ids[internal_res['patient']['id']] = internal_res
extract_hpo_id_list_from_mme_patient_struct(internal_res,hpo_map)
results['local_results']={"result":internal_result.json(),
"status_code":internal_result.status_code
}
#then externally (unless turned off)
if settings.SEARCH_IN_EXTERNAL_MME_NODES:
extnl_result = requests.post(url=settings.MME_EXTERNAL_MATCH_URL,
headers=headers,
data=patient_data
)
results['external_results']={"result":extnl_result.json(),
"status_code":str(extnl_result.status_code)
}
for ext_res in extnl_result.json().get('results',[]):
extract_hpo_id_list_from_mme_patient_struct(ext_res,hpo_map)
ids[ext_res['patient']['id']] = ext_res
saved_results = {
result.result_data['patient']['id']: result for result in MatchmakerResult.objects.filter(individual=individual)
}
result_analysis_state={}
for id in ids.keys():
persisted_result_det = saved_results.get(id)
if not persisted_result_det:
persisted_result_det = MatchmakerResult.objects.create(
individual=individual,
result_data=ids[id],
last_modified_by=request.user,
)
result_analysis_state[id] = {
"id_of_indiv_searched_with":indiv_id,
"content_of_indiv_searched_with":json.loads(patient_data),
"content_of_result":ids[id],
"result_id":id,
"we_contacted_host":persisted_result_det.we_contacted,
"host_contacted_us":persisted_result_det.host_contacted,
"seen_on":str(persisted_result_det.created_date),
"deemed_irrelevant":persisted_result_det.deemed_irrelevant,
"comments":persisted_result_det.comments or '',
"seqr_project_id":project_id,
"flag_for_analysis":persisted_result_det.flag_for_analysis,
"username_of_last_event_initiator":persisted_result_det.last_modified_by.username,
}
#post to slack
if settings.SLACK_TOKEN is not None:
generate_slack_notification_for_seqr_match(results,project_id,indiv_id)
return JSONResponse({
"match_results":results,
"result_analysis_state":result_analysis_state,
"hpo_map":hpo_map
})
@login_required
@csrf_exempt
@log_request('match_internally_and_externally')
def match_in_open_mme_sources(request,project_id,indiv_id):
"""
    Match in other MME data sources that are open and not token protected (ex: Monarch)
Args:
project_id,indiv_id and POST all data in POST under key "patient_data"
Returns:
Status code and results
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
patient_data = request.POST.get("patient_data")
if patient_data is None:
r = HttpResponse("wasn't able to parse patient data field in POST!",status=400)
return r
#find details on HPO terms and start aggregating in a map to send back with reply
hpo_map={}
extract_hpo_id_list_from_mme_patient_struct(json.loads(patient_data),hpo_map)
#these open sites require no token
headers={
'X-Auth-Token': '',
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
results={}
open_sites = {'Monarch Initiative':'https://mme.monarchinitiative.org/match'} #todo: put into settings
for open_site_name, open_site_url in open_sites.iteritems():
results_back = requests.post(url=open_site_url,
headers=headers,
data=patient_data)
ids={}
for res in results_back.json().get('results',[]):
ids[res['patient']['id']] = res
extract_hpo_id_list_from_mme_patient_struct(res,hpo_map)
results[open_site_name]={"result":results_back.json(),
"status_code":results_back.status_code
}
return JSONResponse({
"match_results":results,
"hpo_map":hpo_map
})
@login_required
@csrf_exempt
@log_request('get_project_individuals')
def get_project_individuals(request,project_id):
"""
Get a list of individuals with their family IDs of this project
Args:
project_id
Returns:
map of individuals and their family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
indivs=[]
for indiv in project.get_individuals():
strct={'guid':indiv.id}
for k,v in indiv.to_dict().iteritems():
if k not in ['phenotypes']:
strct[k] = v
indivs.append(strct)
return JSONResponse({
"individuals":indivs
})
@login_required
@csrf_exempt
@log_request('get_family_individuals')
def get_family_individuals(request,project_id,family_id):
"""
    Get a list of individuals belonging to this family
Args:
project_id
family_id
Returns:
map of individuals in this family
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
indivs=[]
for indiv in project.get_individuals():
if indiv.to_dict()['family_id'] == family_id:
strct={'guid':indiv.id}
for k,v in indiv.to_dict().iteritems():
if k not in ['phenotypes']:
strct[k] = v
indivs.append(strct)
return JSONResponse({
"individuals":indivs
})
@staff_member_required(login_url=LOGIN_URL)
@log_request('matchmaker_get_matchbox_id_details')
def get_matchbox_id_details(request,matchbox_id):
"""
Gets information of this matchbox_id
"""
match_individuals = SeqrIndividual.objects.filter(mme_submitted_data__patient__id=matchbox_id)
records = []
for individual in match_individuals:
record = {
'seqr_id':individual.individual_id,
'family_id':individual.family.family_id,
'project_id':individual.family.project.deprecated_project_id,
'insertion_date':str(individual.mme_submitted_date)}
        genomicFeatures = []
        for g_feature in individual.mme_submitted_data['patient']['genomicFeatures']:
            genomicFeatures.append({'gene_id': g_feature['gene']['id'],
                                    'variant_start': g_feature['variant']['start'],
                                    'variant_end': g_feature['variant']['end']})
        record['submitted_genomic_features'] = genomicFeatures
        features = []
        for feature in individual.mme_submitted_data['patient']['features']:
            features.append({'id': feature['id'],
                             'label': feature.get('label', '')})
        record['submitted_features'] = features
records.append(record)
return JSONResponse({
'submission_records':records
})
@staff_member_required(login_url=LOGIN_URL)
@log_request('matchmaker_get_matchbox_metrics')
def get_matchbox_metrics(request):
"""
Gets matchbox metrics
"""
mme_headers={
'X-Auth-Token': settings.MME_NODE_ADMIN_TOKEN,
'Accept': settings.MME_NODE_ACCEPT_HEADER,
'Content-Type': settings.MME_CONTENT_TYPE_HEADER
}
r = requests.get(url=settings.MME_MATCHBOX_METRICS_URL,
headers=mme_headers)
if r.status_code==200:
matchbox_metrics = r.json()['metrics']
genes_in_matchbox=matchbox_metrics['geneCounts'].keys()
        seqr_gene_info = gather_all_annotated_genes_in_seqr()
        seqr_metrics={"genes_in_seqr":len(seqr_gene_info),
                      "genes_found_in_matchbox":0}
        unique_genes=[]
        for gene_ids,proj in seqr_gene_info.iteritems():
            if gene_ids[0] in genes_in_matchbox:
                unique_genes.append(gene_ids[0])
        seqr_metrics['genes_found_in_matchbox'] = len(set(unique_genes))
seqr_metrics["submission_info"]=find_projects_with_families_in_matchbox()
return JSONResponse({"from_matchbox":r.json(),
"from_seqr":seqr_metrics})
else:
resp = HttpResponse('{"message":"error contacting matchbox to gain metrics", "status":' + r.status_code + '}',status=r.status_code)
resp.status_code=r.status_code
return resp
@login_required
@log_request('matchmaker_get_matchbox_metrics')
def get_matchbox_metrics_for_project(request,project_id):
"""
Gets matchbox submission metrics for project (accessible to non-staff)
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
try:
return JSONResponse({"families":find_families_of_this_project_in_matchbox(project_id)})
except:
raise
@login_required
@csrf_exempt
@log_request('update_match_comment')
def update_match_comment(request,project_id,match_id,indiv_id):
"""
Update a comment made about a match
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
parse_json_error_mesg="wasn't able to parse POST!"
comment = request.POST.get("comment",parse_json_error_mesg)
if comment == parse_json_error_mesg:
return HttpResponse('{"message":"' + parse_json_error_mesg +'"}',status=500)
persisted_result_dets = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id)
if persisted_result_dets.count()>0:
for persisted_result_det in persisted_result_dets:
persisted_result_det.comments=comment.strip()
persisted_result_det.last_modified_by=request.user
persisted_result_det.save()
resp = HttpResponse('{"message":"OK"}',status=200)
return resp
    else:
        return HttpResponse('{"message":"error updating database"}',status=500)
@staff_member_required(login_url=LOGIN_URL)
@csrf_exempt
@log_request('get_current_match_state_of_all_results')
def get_current_match_state_of_all_results(request):
"""
gets the current state of all matches in this project
"""
return HttpResponse('{"message":"error unimplemented MME endpoint"}',status=500)
@login_required
@csrf_exempt
@log_request('get_current_match_state')
def get_current_match_state(request,project_id,match_id,indiv_id):
"""
gets the current state of this matched pair
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
try:
result_model = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id).first()
persisted_result_det = {
"id_of_indiv_searched_with":indiv_id,
"content_of_result":result_model.result_data,
"result_id":result_model.result_data['patient']['id'],
"we_contacted_host":result_model.we_contacted,
"host_contacted_us":result_model.host_contacted,
"seen_on":str(result_model.created_date),
"deemed_irrelevant":result_model.deemed_irrelevant,
"comments":result_model.comments or '',
"seqr_project_id":project_id,
"flag_for_analysis":result_model.flag_for_analysis,
"username_of_last_event_initiator":result_model.last_modified_by.username,
}
except Exception as e:
print e
return HttpResponse('{"message":"error talking to database"}',status=500)
return JSONResponse(persisted_result_det)
@login_required
@csrf_exempt
@log_request('match_state_update')
def match_state_update(request,project_id,match_id,indiv_id):
"""
Update a state change made about a match
"""
project = get_object_or_404(Project, project_id=project_id)
if not project.can_view(request.user):
raise PermissionDenied
individual = get_object_or_404(SeqrIndividual, individual_id=indiv_id, family__project=project.seqr_project)
state_type = request.POST.get('state_type', None)
state = request.POST.get('state',None)
if state_type is None or state is None:
return HttpResponse('{"message":"error parsing POST"}',status=500)
persisted_result_det = MatchmakerResult.objects.filter(individual=individual, result_data__patient__id=match_id).first()
try:
if state_type == 'flag_for_analysis':
persisted_result_det.flag_for_analysis=False
if state == "true":
persisted_result_det.flag_for_analysis=True
if state_type == 'deemed_irrelevant':
persisted_result_det.deemed_irrelevant=False
if state == "true":
persisted_result_det.deemed_irrelevant=True
if state_type == 'we_contacted_host':
persisted_result_det.we_contacted=False
if state == "true":
persisted_result_det.we_contacted=True
if state_type == 'host_contacted_us':
persisted_result_det.host_contacted=False
if state == "true":
persisted_result_det.host_contacted=True
persisted_result_det.last_modified_by=request.user
persisted_result_det.save()
    except Exception:
        return HttpResponse('{"message":"error updating database"}',status=500)
    return HttpResponse('{"message":"successfully updated database"}',status=200)
| agpl-3.0 | -8,759,921,877,234,000,000 | 36.645775 | 170 | 0.615225 | false |
dvklopfenstein/PrincetonAlgorithms | py/AlgsSedgewickWayne/Topological.py | 1 | 2002 | """Compute topological ordering(w DFS) of a DAG or edge-weighted DAG. Runs in O(E + V) time."""
# TBD Finish Python port
from AlgsSedgewickWayne.DirectedCycle import DirectedCycle
from AlgsSedgewickWayne.DepthFirstOrder import DepthFirstOrder
from AlgsSedgewickWayne.EdgeWeightedDigraph import EdgeWeightedDigraph
from AlgsSedgewickWayne.EdgeWeightedDirectedCycle import EdgeWeightedDirectedCycle
class Topological(object):
  """Determines if digraph G has a topological order and, if so, finds topological order."""

  def __init__(self, G): # G is a Digraph or EdgeWeightedDigraph; O(V+E) worst case
    self._order = None # topological order, or None if G has a directed cycle
    self._rank = {}    # rank[v] = position of vertex v in topological order
    # Pick the cycle finder that matches the type of digraph we were given.
    if isinstance(G, EdgeWeightedDigraph):
      finder = EdgeWeightedDirectedCycle(G)
    else:
      finder = DirectedCycle(G)
    if not finder.hasCycle():
      dfs = DepthFirstOrder(G)
      self._order = list(dfs.reversePost()) # topological order
      for i, v in enumerate(self._order):
        self._rank[v] = i

  # Returns a topological order if the digraph has a topological order, None otherwise
  def order(self): return self._order # O(V)

  # Does the digraph have a topological order?
  def hasOrder(self): return self._order is not None # O(k)

  def rank(self, v): # O(k)
    """The rank of vertex v in the topological order; -1 if the digraph is not a DAG."""
    if not self.hasOrder():
      return -1
    self._validateVertex(v)
    return self._rank[v]

  def _validateVertex(self, v):
    """Raise an exception unless 0 <= v < V."""
    V = len(self._rank)
    if v < 0 or v >= V:
      raise Exception("vertex {} is not between 0 and {}".format(v, V-1))
# Copyright 2002-2016, Robert Sedgewick and Kevin Wayne.
# Copyright 2002-2019, DV Klopfenstein, Python port
| gpl-2.0 | -1,320,169,917,173,574,100 | 39.04 | 95 | 0.695305 | false |
WizeCommerce/medusa | setup.py | 1 | 1292 | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "thrift_medusa",
version = "0.0.1",
author = "Samir Faci",
author_email = "",
description = ("Language agnostic tool for packaging of thrift based services and artifacts"),
license = "Apache Software License",
url = "https://github.com/WizeCommerce/medusa",
packages=['thrift_medusa', 'tests'],
#packages = find_packages(exclude="test"),
package_data = {'': ['*.yaml']},
long_description=read('README.md'),
install_requires=['lxml','paramiko','argparse','pyyaml','jinja2'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
],
#entry_points = { 'console_scripts': ['medusa = thrift_medusa:main', 'samir = thrift_medusa.thrift_medusa:main'] },
#scripts = ['./publishClients.py'],
test_suite='tests',
zip_safe = True
)
| apache-2.0 | -8,982,372,392,790,578,000 | 37 | 119 | 0.647059 | false |
kleinfeld/medpy | medpy/features/__init__.py | 1 | 1324 | """
@package medpy.features
Functionality to extract features from images and present/manipulate them.
Packages:
- histogram: Functions to create and manipulate (fuzzy) histograms.
- intensity: Functions to extracts voxel-wise intensity based features from (medical) images.
- texture: Run-time optimised features extraction on images. (experimental)
- utilities: Utilities for feature handling. Currently only for features from the @see medpy.features.intensity package.
"""
# determines the modules that should be imported when "from metric import *" is used
__all__ = []
# if __all__ is not set, only the following, explicit import statements are executed
from histogram import fuzzy_histogram, triangular_membership, trapezoid_membership, \
gaussian_membership, sigmoidal_difference_membership
from intensity import centerdistance, centerdistance_xdminus1, guassian_gradient_magnitude, \
hemispheric_difference, indices, intensities, local_histogram, local_mean_gauss, \
median
from utilities import append, join, normalize, normalize_with_model
#!experimental, therefore not directly included
#from texture import coarseness, contrast, directionality, efficient_local_avg, efficient_local_avg3d, running_total, running_total3d, tamura | gpl-3.0 | 7,675,755,194,362,172,000 | 56.608696 | 141 | 0.755287 | false |
open-austin/influence-texas | src/influencetx/legislators/migrations/0001_initial.py | 1 | 1680 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-17 17:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Legislator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openstates_leg_id', models.CharField(db_index=True, max_length=48)),
('name', models.CharField(max_length=45)),
('first_name', models.CharField(blank=True, max_length=20)),
('last_name', models.CharField(blank=True, max_length=20)),
('party', models.CharField(choices=[('D', 'Democratic'), ('I', 'Independent'), ('R', 'Republican'), ('U', 'Unknown')], max_length=1)),
('chamber', models.CharField(choices=[('House', 'House'), ('Senate', 'Senate')], max_length=6)),
('district', models.IntegerField()),
('openstates_updated_at', models.DateTimeField()),
('url', models.URLField(blank=True)),
('photo_url', models.URLField(blank=True)),
],
),
migrations.CreateModel(
name='LegislatorIdMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('openstates_leg_id', models.CharField(db_index=True, max_length=48)),
('tpj_filer_id', models.IntegerField(db_index=True)),
],
),
]
| gpl-2.0 | -3,630,584,201,326,298,000 | 41 | 150 | 0.554167 | false |
raphaelrpl/portal | backend/appengine/routes/questions/rest.py | 1 | 4013 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from time import sleep
from gaebusiness.business import CommandExecutionException
from permission_app.permission_facade import main_user_form
from tekton.gae.middleware.json_middleware import JsonResponse
from question_app import question_facade
from gaepermission.decorator import login_required
from gaecookie.decorator import no_csrf
from question_app.question_model import CategoryQuestion, Question
from category_app.category_model import Category
@login_required
@no_csrf
def index():
cmd = question_facade.list_questions_cmd()
question_list = cmd()
question_form = question_facade.question_form()
def localize_user(model):
dct = question_form.fill_with_model(model)
user = main_user_form().fill_with_model(model.user.get())
dct['user'] = user
return dct
question_dcts = [localize_user(m) for m in question_list]
return JsonResponse(question_dcts)
@login_required
def new(_resp, _logged_user, **question_properties):
if _logged_user is None:
_resp.status_code = 400
return JsonResponse({"name": "Login required!"})
quest = question_properties.get('question', {})
if not quest:
_resp.status_code = 400
return JsonResponse({"name": "Required Field"})
question = Question(**quest)
question.user = _logged_user.key
try:
question.put()
except CommandExecutionException:
_resp.status_code = 400
if not question.name:
return JsonResponse({"name": "Required field"})
return JsonResponse({"name": "Put a valid post"})
for c in question_properties.get("categorys", {}):
cat = Category.query(Category.name == c).fetch()
if cat:
category = CategoryQuestion(origin=cat[0], destination=question)
category.put()
question_form = question_facade.question_form()
data = question_form.fill_with_model(question)
data['user'] = _logged_user.name
sleep(0.5)
return JsonResponse(data)
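
# Illustrative only: a sketch of the payload the `new` view above accepts. Only the
# Question `name` field and the `categorys` key are taken from the code in this module;
# any other Question fields would be assumptions and are omitted here.
EXAMPLE_NEW_QUESTION_PAYLOAD = {
    'question': {'name': 'How do I filter a query by category?'},
    'categorys': ['python', 'gae'],
}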
@login_required
def edit(_resp, _logged_user, **question_properties):
question_id = question_properties.get('id')
# key = ndb.Key('Question', int(question_id))
question = Question.get_by_id(int(question_id))
    if int(_logged_user.key.id()) != int(question_properties.get('user', {}).get('id', 0)) and question_id is not None:
        _resp.status_code = 400
        return JsonResponse({"name": "This post doesn't belong to you!"})
if question is None:
_resp.status_code = 400
return JsonResponse({"name": "Invalid post"})
question.name = question_properties.get('name')
try:
question.put()
except:
_resp.status_code = 400
return JsonResponse({"name": "Put a valid question"})
user_form = main_user_form()
form = question_facade.question_form()
question_dct = form.fill_with_model(question)
question_dct['user'] = user_form.fill_with_model(question.user.get())
return JsonResponse(question_dct)
# cmd = question_facade.update_question_cmd(question_id, **question_properties)
# return _save_or_update_json_response(_logged_user, cmd, _resp)
@login_required
def delete(_resp, id):
cmd = question_facade.delete_question_cmd(id)
try:
question = cmd()
# DeleteCategoryQuestion(destination=question).execute()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
question_dct = question_facade.question_form().fill_with_model(question)
return JsonResponse(question_dct)
def _save_or_update_json_response(_logged_user, cmd, _resp):
try:
question = cmd()
except CommandExecutionException:
_resp.status_code = 500
return JsonResponse(cmd.errors)
question_form = question_facade.question_form()
data = question_form.fill_with_model(question)
data['user'] = _logged_user.name
return JsonResponse(data)
| mit | -3,836,463,681,952,738,300 | 33.299145 | 115 | 0.673561 | false |
mozillazg/bustard | tests/httpbin/core.py | 1 | 21325 | # -*- coding: utf-8 -*-
"""
httpbin.core
~~~~~~~~~~~~
This module provides the core HttpBin experience.
"""
import base64
import json
import os
import random
import time
import uuid
from bustard.app import Bustard
from bustard.http import (
Response, Headers, jsonify as bustard_jsonify, redirect
)
from bustard.utils import json_dumps_default
from werkzeug.datastructures import WWWAuthenticate
from werkzeug.http import http_date
from werkzeug.serving import run_simple
from six.moves import range as xrange
from . import filters
from .helpers import (
get_headers, status_code, get_dict, get_request_range,
check_basic_auth, check_digest_auth, secure_cookie,
H, ROBOT_TXT, ANGRY_ASCII
)
from .utils import weighted_choice
from .structures import CaseInsensitiveDict
ENV_COOKIES = (
'_gauges_unique',
'_gauges_unique_year',
'_gauges_unique_month',
'_gauges_unique_day',
'_gauges_unique_hour',
'__utmz',
'__utma',
'__utmb'
)
def jsonify(*args, **kwargs):
response = bustard_jsonify(*args, **kwargs)
if not response.data.endswith(b'\n'):
response.data += b'\n'
return response
# Prevent WSGI from correcting the casing of the Location header
# BaseResponse.autocorrect_location_header = False
# Find the correct template folder when running from a different location
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'templates')
app = Bustard(__name__, template_dir=tmpl_dir)
render_template = app.render_template
url_for = app.url_for
# -----------
# Middlewares
# -----------
@app.after_request
def set_cors_headers(request, response):
response.headers['Access-Control-Allow-Origin'] = (
request.headers.get('Origin', '*')
)
response.headers['Access-Control-Allow-Credentials'] = 'true'
if request.method == 'OPTIONS':
# Both of these headers are only used for the "preflight request"
# http://www.w3.org/TR/cors/#access-control-allow-methods-response-header
response.headers['Access-Control-Allow-Methods'] = (
'GET, POST, PUT, DELETE, PATCH, OPTIONS'
)
response.headers['Access-Control-Max-Age'] = '3600' # 1 hour cache
if request.headers.get('Access-Control-Request-Headers') is not None:
response.headers['Access-Control-Allow-Headers'] = (
request.headers['Access-Control-Request-Headers']
)
return response
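
# Illustrative only: a preflight request against a locally running instance (the
# host/port come from the run_simple() call at the bottom of this module; the
# Origin and requested header are made up):
#
#     import requests
#     r = requests.options('http://127.0.0.1:5000/post',
#                          headers={'Origin': 'http://example.com',
#                                   'Access-Control-Request-Headers': 'X-Custom'})
#     r.headers['Access-Control-Allow-Methods']  # 'GET, POST, PUT, DELETE, PATCH, OPTIONS'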
# ------
# Routes
# ------
@app.route('/')
def view_landing_page(request):
"""Generates Landing Page."""
tracking_enabled = 'HTTPBIN_TRACKING' in os.environ
return render_template('index.html', request=request,
tracking_enabled=tracking_enabled)
@app.route('/html')
def view_html_page(request):
"""Simple Html Page"""
return render_template('moby.html')
@app.route('/robots.txt')
def view_robots_page(request):
"""Simple Html Page"""
response = Response()
response.content = ROBOT_TXT
response.content_type = 'text/plain'
return response
@app.route('/deny')
def view_deny_page(request):
"""Simple Html Page"""
response = Response()
response.content = ANGRY_ASCII
response.content_type = 'text/plain'
return response
# return "YOU SHOULDN'T BE HERE"
@app.route('/ip')
def view_origin(request):
"""Returns Origin IP."""
return jsonify(origin=request.headers.get('X-Forwarded-For',
request.remote_addr))
@app.route('/headers')
def view_headers(request):
"""Returns HTTP HEADERS."""
return jsonify(get_dict(request, 'headers'))
@app.route('/user-agent')
def view_user_agent(request):
"""Returns User-Agent."""
headers = get_headers(request)
return jsonify({'user-agent': headers['user-agent']})
@app.route('/get', methods=('GET', 'OPTIONS'))
def view_get(request):
"""Returns GET Data."""
return jsonify(get_dict(request, 'url', 'args', 'headers', 'origin'))
@app.route('/post', methods=('POST',))
def view_post(request):
"""Returns POST Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/put', methods=('PUT',))
def view_put(request):
"""Returns PUT Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/patch', methods=('PATCH',))
def view_patch(request):
"""Returns PATCH Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/delete', methods=('DELETE',))
def view_delete(request):
"""Returns DELETE Data."""
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files', 'json'))
@app.route('/gzip')
@filters.gzip
def view_gzip_encoded_content(request):
"""Returns GZip-Encoded Data."""
return jsonify(get_dict(request, 'origin', 'headers',
method=request.method, gzipped=True))
@app.route('/deflate')
@filters.deflate
def view_deflate_encoded_content(request):
"""Returns Deflate-Encoded Data."""
return jsonify(get_dict(request, 'origin', 'headers',
method=request.method, deflated=True))
@app.route('/redirect/<int:n>')
def redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
absolute = request.args.get('absolute', 'false').lower() == 'true'
if n == 1:
return redirect(app.url_for('view_get', _request=request,
_external=absolute))
if absolute:
return _redirect(request, 'absolute', n, True)
else:
return _redirect(request, 'relative', n, False)
def _redirect(request, kind, n, external):
return redirect(url_for('{0}_redirect_n_times'.format(kind),
n=n - 1, _external=external, _request=request))
@app.route('/redirect-to')
def redirect_to(request):
"""302 Redirects to the given URL."""
args = CaseInsensitiveDict(request.args.items())
# We need to build the response manually and convert to UTF-8 to prevent
# werkzeug from "fixing" the URL. This endpoint should set the Location
# header to the exact string supplied.
response = Response('')
response.status_code = 302
response.headers['Location'] = args['url'].encode('utf-8')
return response
@app.route('/relative-redirect/<int:n>')
def relative_redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
response = Response('')
response.status_code = 302
if n == 1:
response.headers['Location'] = url_for('view_get')
return response
response.headers['Location'] = app.url_for(
'relative_redirect_n_times', n=n - 1
)
return response
@app.route('/absolute-redirect/<int:n>')
def absolute_redirect_n_times(request, n):
"""302 Redirects n times."""
n = int(n)
assert n > 0
if n == 1:
return redirect(app.url_for('view_get', _request=request,
_external=True))
return _redirect(request, 'absolute', n, True)
@app.route('/stream/<int:n>')
def stream_n_messages(request, n):
"""Stream n JSON messages"""
n = int(n)
response = get_dict(request, 'url', 'args', 'headers', 'origin')
n = min(n, 100)
def generate_stream():
for i in range(n):
response['id'] = i
yield json.dumps(response, default=json_dumps_default) + '\n'
return Response(generate_stream(), headers={
'Content-Type': 'application/json',
})
@app.route('/status/<codes>',
methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'])
def view_status_code(request, codes):
"""Return status code or random status code if more than one are given"""
if ',' not in codes:
code = int(codes)
return status_code(code)
choices = []
for choice in codes.split(','):
if ':' not in choice:
code = choice
weight = 1
else:
code, weight = choice.split(':')
choices.append((int(code), float(weight)))
code = weighted_choice(choices)
return status_code(code)
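
# A minimal sketch (illustrative only) of how the imported ``weighted_choice`` helper
# could work; the real implementation lives in .utils and may differ. It is defined
# under a private name so it does not shadow the import above, and no route uses it.
def _weighted_choice_sketch(choices):
    """Pick a value from ``(value, weight)`` pairs with probability proportional to weight."""
    total = sum(weight for _, weight in choices)
    threshold = random.uniform(0, total)
    running = 0.0
    for value, weight in choices:
        running += weight
        if running >= threshold:
            return value
    return choices[-1][0]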
@app.route('/response-headers')
def response_headers(request):
"""Returns a set of response headers from the query string """
headers = Headers(request.args.to_dict())
response = jsonify(headers)
    # Re-serialize until the Content-Length value shown inside the JSON body matches
    # the actual Content-Length header of the final response.
    while True:
content_len_shown = response.headers['Content-Length']
d = {}
for key in response.headers.keys():
value = response.headers.get_all(key)
if len(value) == 1:
value = value[0]
d[key] = value
response = jsonify(d)
for key, value in headers.to_list():
response.headers.add(key, value)
if response.headers['Content-Length'] == content_len_shown:
break
return response
@app.route('/cookies')
def view_cookies(request, hide_env=True):
"""Returns cookie data."""
cookies = dict(request.cookies.items())
if hide_env and ('show_env' not in request.args):
for key in ENV_COOKIES:
try:
del cookies[key]
except KeyError:
pass
return jsonify(cookies=cookies)
@app.route('/forms/post')
def view_forms_post(request):
"""Simple HTML form."""
return render_template('forms-post.html')
@app.route('/cookies/set/<name>/<value>')
def set_cookie(request, name, value):
"""Sets a cookie and redirects to cookie list."""
r = app.make_response(redirect(url_for('view_cookies')))
r.set_cookie(key=name, value=value, secure=secure_cookie(request))
return r
@app.route('/cookies/set')
def set_cookies(request):
"""Sets cookie(s) as provided by the query string
and redirects to cookie list.
"""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.set_cookie(key=key, value=value, secure=secure_cookie(request))
return r
@app.route('/cookies/delete')
def delete_cookies(request):
"""Deletes cookie(s) as provided by the query string
and redirects to cookie list.
"""
cookies = dict(request.args.items())
r = app.make_response(redirect(url_for('view_cookies')))
for key, value in cookies.items():
r.delete_cookie(key=key)
return r
@app.route('/basic-auth/<user>/<passwd>')
def basic_auth(request, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(request, user, passwd):
return status_code(401)
return jsonify(authenticated=True, user=user)
@app.route('/hidden-basic-auth/<user>/<passwd>')
def hidden_basic_auth(request, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Basic Auth."""
if not check_basic_auth(request, user, passwd):
return status_code(404)
return jsonify(authenticated=True, user=user)
@app.route('/digest-auth/<qop>/<user>/<passwd>')
def digest_auth(request, qop=None, user='user', passwd='passwd'):
"""Prompts the user for authorization using HTTP Digest auth"""
if qop not in ('auth', 'auth-int'):
qop = None
if 'Authorization' not in request.headers or \
not check_digest_auth(user, passwd) or \
'Cookie' not in request.headers:
response = app.make_response('')
response.status_code = 401
# RFC2616 Section4.2: HTTP headers are ASCII. That means
# request.remote_addr was originally ASCII, so I should be able to
# encode it back to ascii. Also, RFC2617 says about nonces: "The
# contents of the nonce are implementation dependent"
nonce = H(b''.join([
getattr(request, 'remote_addr', u'').encode('ascii'),
b':',
str(time.time()).encode('ascii'),
b':',
os.urandom(10)
]))
opaque = H(os.urandom(10))
auth = WWWAuthenticate('digest')
auth.set_digest('[email protected]', nonce, opaque=opaque,
qop=('auth', 'auth-int') if qop is None else (qop, ))
response.headers['WWW-Authenticate'] = auth.to_header()
response.headers['Set-Cookie'] = 'fake=fake_value'
return response
return jsonify(authenticated=True, user=user)
@app.route('/delay/<delay>')
def delay_response(request, delay):
"""Returns a delayed response"""
delay = min(float(delay), 10)
time.sleep(delay)
return jsonify(get_dict(request, 'url', 'args', 'form', 'data',
'origin', 'headers', 'files'))
@app.route('/drip')
def drip(request):
"""Drips data over a duration after an optional initial delay."""
args = CaseInsensitiveDict(request.args.items())
duration = float(args.get('duration', 2))
numbytes = int(args.get('numbytes', 10))
code = int(args.get('code', 200))
pause = duration / numbytes
delay = float(args.get('delay', 0))
if delay > 0:
time.sleep(delay)
def generate_bytes():
for i in xrange(numbytes):
yield u'*'.encode('utf-8')
time.sleep(pause)
response = Response(generate_bytes(), headers={
'Content-Type': 'application/octet-stream',
'Content-Length': str(numbytes),
})
response.status_code = code
return response
@app.route('/base64/<value>')
def decode_base64(request, value):
"""Decodes base64url-encoded string"""
encoded = value.encode('utf-8') # base64 expects binary string as input
return base64.urlsafe_b64decode(encoded).decode('utf-8')
@app.route('/cache', methods=('GET',))
def cache(request):
"""Returns a 304 if an If-Modified-Since header or
If-None-Match is present. Returns the same as a GET otherwise.
"""
is_conditional = (
request.headers.get('If-Modified-Since') or
request.headers.get('If-None-Match')
)
if is_conditional is None:
response = view_get(request)
response.headers['Last-Modified'] = http_date()
response.headers['ETag'] = uuid.uuid4().hex
return response
else:
return status_code(304)
@app.route('/cache/<int:value>')
def cache_control(request, value):
"""Sets a Cache-Control header."""
value = int(value)
response = view_get(request)
response.headers['Cache-Control'] = 'public, max-age={0}'.format(value)
return response
@app.route('/encoding/utf8')
def encoding(request):
return render_template('UTF-8-demo.txt')
@app.route('/bytes/<int:n>')
def random_bytes(request, n):
"""Returns n random bytes generated with given seed."""
n = int(n)
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
response = Response()
# Note: can't just use os.urandom here because it ignores the seed
response.data = bytearray(random.randint(0, 255) for i in range(n))
response.content_type = 'application/octet-stream'
return response
@app.route('/stream-bytes/<int:n>')
def stream_random_bytes(request, n):
"""Streams n random bytes generated with given seed,
at given chunk size per packet.
"""
n = int(n)
n = min(n, 100 * 1024) # set 100KB limit
params = CaseInsensitiveDict(request.args.items())
if 'seed' in params:
random.seed(int(params['seed']))
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
def generate_bytes():
chunks = bytearray()
for i in xrange(n):
chunks.append(random.randint(0, 255))
if len(chunks) == chunk_size:
yield(bytes(chunks))
chunks = bytearray()
if chunks:
yield(bytes(chunks))
headers = {'Content-Type': 'application/octet-stream'}
return Response(generate_bytes(), headers=headers)
@app.route('/range/<int:numbytes>')
def range_request(request, numbytes):
"""Streams n random bytes generated with given seed,
at given chunk size per packet.
"""
numbytes = int(numbytes)
if numbytes <= 0 or numbytes > (100 * 1024):
response = Response(headers={
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes'
})
response.status_code = 404
response.content = 'number of bytes must be in the range (0, 10240]'
return response
params = CaseInsensitiveDict(request.args.items())
if 'chunk_size' in params:
chunk_size = max(1, int(params['chunk_size']))
else:
chunk_size = 10 * 1024
duration = float(params.get('duration', 0))
pause_per_byte = duration / numbytes
request_headers = get_headers(request)
first_byte_pos, last_byte_pos = get_request_range(request_headers,
numbytes)
if (
first_byte_pos > last_byte_pos or
first_byte_pos not in xrange(0, numbytes) or
last_byte_pos not in xrange(0, numbytes)
):
response = Response(headers={
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes',
'Content-Range': 'bytes */%d' % numbytes
})
response.status_code = 416
return response
def generate_bytes():
chunks = bytearray()
for i in xrange(first_byte_pos, last_byte_pos + 1):
# We don't want the resource to change across requests, so we need
# to use a predictable data generation function
chunks.append(ord('a') + (i % 26))
if len(chunks) == chunk_size:
yield(bytes(chunks))
time.sleep(pause_per_byte * chunk_size)
chunks = bytearray()
if chunks:
time.sleep(pause_per_byte * len(chunks))
yield(bytes(chunks))
content_range = 'bytes %d-%d/%d' % (first_byte_pos, last_byte_pos,
numbytes)
response_headers = {
'Content-Type': 'application/octet-stream',
'ETag': 'range%d' % numbytes,
'Accept-Ranges': 'bytes',
'Content-Range': content_range}
response = Response(generate_bytes(), headers=response_headers)
if (first_byte_pos == 0) and (last_byte_pos == (numbytes - 1)):
response.status_code = 200
else:
response.status_code = 206
return response
@app.route('/links/<int:n>/<int:offset>')
def link_page(request, n, offset):
"""Generate a page containing n links to other pages which do the same."""
n = int(n)
offset = int(offset)
n = min(max(1, n), 200) # limit to between 1 and 200 links
link = "<a href='{0}'>{1}</a> "
html = ['<html><head><title>Links</title></head><body>']
for i in xrange(n):
if i == offset:
html.append('{0} '.format(i))
else:
html.append(link.format(url_for('link_page', n=n, offset=i), i))
html.append('</body></html>')
return ''.join(html)
@app.route('/links/<int:n>')
def links(request, n):
"""Redirect to first links page."""
n = int(n)
return redirect(url_for('link_page', n=n, offset=0))
@app.route('/image')
def image(request):
"""Returns a simple image of the type suggest by the Accept header."""
headers = get_headers(request)
if 'accept' not in headers:
return image_png(request) # Default media type to png
accept = headers['accept'].lower()
if 'image/webp' in accept:
return image_webp(request)
elif 'image/svg+xml' in accept:
return image_svg(request)
elif 'image/jpeg' in accept:
return image_jpeg(request)
elif 'image/png' in accept or 'image/*' in accept:
return image_png(request)
else:
return status_code(406) # Unsupported media type
@app.route('/image/png')
def image_png(request):
data = resource('images/pig_icon.png')
return Response(data, headers={'Content-Type': 'image/png'})
@app.route('/image/jpeg')
def image_jpeg(request):
data = resource('images/jackal.jpg')
return Response(data, headers={'Content-Type': 'image/jpeg'})
@app.route('/image/webp')
def image_webp(request):
data = resource('images/wolf_1.webp')
return Response(data, headers={'Content-Type': 'image/webp'})
@app.route('/image/svg')
def image_svg(request):
data = resource('images/svg_logo.svg')
return Response(data, headers={'Content-Type': 'image/svg+xml'})
def resource(filename):
    path = os.path.join(
        tmpl_dir,
        filename)
    with open(path, 'rb') as fd:
        return fd.read()
@app.route('/xml')
def xml(request):
response = Response(render_template('sample.xml'))
response.headers['Content-Type'] = 'application/xml'
return response
if __name__ == '__main__':
run_simple('0.0.0.0', 5000, app, use_reloader=True, use_debugger=True)
| mit | 1,247,459,229,769,562,400 | 27.245033 | 81 | 0.608488 | false |
SurfasJones/icecream-info | icecream/lib/python2.7/site-packages/sphinx/search/__init__.py | 1 | 11415 | # -*- coding: utf-8 -*-
"""
sphinx.search
~~~~~~~~~~~~~
Create a full-text search index for offline search.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import with_statement
import re
import cPickle as pickle
from docutils.nodes import raw, comment, title, Text, NodeVisitor, SkipNode
from sphinx.util import jsdump, rpartition
class SearchLanguage(object):
"""
This class is the base class for search natural language preprocessors. If
you want to add support for a new language, you should override the methods
of this class.
You should override `lang` class property too (e.g. 'en', 'fr' and so on).
.. attribute:: stopwords
This is a set of stop words of the target language. Default `stopwords`
is empty. This word is used for building index and embedded in JS.
.. attribute:: js_stemmer_code
Return stemmer class of JavaScript version. This class' name should be
``Stemmer`` and this class must have ``stemWord`` method. This string is
embedded as-is in searchtools.js.
This class is used to preprocess search word which Sphinx HTML readers
type, before searching index. Default implementation does nothing.
"""
lang = None
stopwords = set()
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
*/
var Stemmer = function() {
this.stemWord = function(w) {
return w;
}
}
"""
_word_re = re.compile(r'\w+(?u)')
def __init__(self, options):
self.options = options
self.init(options)
def init(self, options):
"""
Initialize the class with the options the user has given.
"""
def split(self, input):
"""
This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
languages.
"""
return self._word_re.findall(input)
def stem(self, word):
"""
This method implements stemming algorithm of the Python version.
Default implementation does nothing. You should implement this if the
language has any stemming rules.
This class is used to preprocess search words before registering them in
the search index. The stemming of the Python version and the JS version
(given in the js_stemmer_code attribute) must be compatible.
"""
return word
def word_filter(self, word):
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
"""
return not (((len(word) < 3) and (12353 < ord(word[0]) < 12436)) or
(ord(word[0]) < 256 and (len(word) < 3 or word in self.stopwords or
word.isdigit())))
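# Illustrative sketch (not part of the original module): a new language would
# subclass SearchLanguage roughly as below and be registered in the `languages`
# dict that follows; the French names and stopwords here are made up, and a
# real implementation would also provide a matching js_stemmer_code.
#
#     class SearchFrench(SearchLanguage):
#         lang = 'fr'
#         stopwords = set(['le', 'la', 'les', 'de', 'un', 'une'])
#
#         def stem(self, word):
#             return word.lower()   # real code would plug in a French stemmer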
from sphinx.search import en, ja
languages = {
'en': en.SearchEnglish,
'ja': ja.SearchJapanese,
}
class _JavaScriptIndex(object):
"""
The search index as javascript file that calls a function
on the documentation search object to register the index.
"""
PREFIX = 'Search.setIndex('
SUFFIX = ')'
def dumps(self, data):
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
data = s[len(self.PREFIX):-len(self.SUFFIX)]
if not data or not s.startswith(self.PREFIX) or not \
s.endswith(self.SUFFIX):
raise ValueError('invalid data')
return jsdump.loads(data)
def dump(self, data, f):
f.write(self.dumps(data))
def load(self, f):
return self.loads(f.read())
js_index = _JavaScriptIndex()
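# Rough usage sketch (not original code): dumps() wraps the jsdump payload so
# the generated searchindex.js registers itself when loaded by the browser,
# and loads() reverses the wrapping.
#
#     data = {'docnames': ['index'], 'terms': {}}
#     js = js_index.dumps(data)          # -> "Search.setIndex(...)"
#     assert js_index.loads(js) == data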
class WordCollector(NodeVisitor):
"""
A special visitor that collects words for the `IndexBuilder`.
"""
def __init__(self, document, lang):
NodeVisitor.__init__(self, document)
self.found_words = []
self.found_title_words = []
self.lang = lang
def dispatch_visit(self, node):
if node.__class__ is comment:
raise SkipNode
if node.__class__ is raw:
# Some people might put content in raw HTML that should be searched,
# so we just amateurishly strip HTML tags and index the remaining
# content
nodetext = re.sub(r'(?is)<style.*?</style>', '', node.astext())
nodetext = re.sub(r'(?is)<script.*?</script>', '', nodetext)
nodetext = re.sub(r'<[^<]+?>', '', nodetext)
self.found_words.extend(self.lang.split(nodetext))
raise SkipNode
if node.__class__ is Text:
self.found_words.extend(self.lang.split(node.astext()))
elif node.__class__ is title:
self.found_title_words.extend(self.lang.split(node.astext()))
class IndexBuilder(object):
"""
Helper class that creates a searchindex based on the doctrees
passed to the `feed` method.
"""
formats = {
'jsdump': jsdump,
'pickle': pickle
}
def __init__(self, env, lang, options, scoring):
self.env = env
# filename -> title
self._titles = {}
# stemmed word -> set(filenames)
self._mapping = {}
# stemmed words in titles -> set(filenames)
self._title_mapping = {}
# word -> stemmed word
self._stem_cache = {}
# objtype -> index
self._objtypes = {}
# objtype index -> (domain, type, objname (localized))
self._objnames = {}
# add language-specific SearchLanguage instance
self.lang = languages[lang](options)
if scoring:
with open(scoring, 'rb') as fp:
self.js_scorer_code = fp.read().decode('utf-8')
else:
self.js_scorer_code = u''
def load(self, stream, format):
"""Reconstruct from frozen data."""
if isinstance(format, basestring):
format = self.formats[format]
frozen = format.load(stream)
# if an old index is present, we treat it as not existing.
if not isinstance(frozen, dict) or \
frozen.get('envversion') != self.env.version:
raise ValueError('old format')
index2fn = frozen['filenames']
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
rv = {}
for k, v in mapping.iteritems():
if isinstance(v, int):
rv[k] = set([index2fn[v]])
else:
rv[k] = set(index2fn[i] for i in v)
return rv
self._mapping = load_terms(frozen['terms'])
self._title_mapping = load_terms(frozen['titleterms'])
# no need to load keywords/objtypes
def dump(self, stream, format):
"""Dump the frozen index to a stream."""
if isinstance(format, basestring):
format = self.formats[format]
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
rv = {}
otypes = self._objtypes
onames = self._objnames
for domainname, domain in self.env.domains.iteritems():
for fullname, dispname, type, docname, anchor, prio in \
domain.get_objects():
# XXX use dispname?
if docname not in fn2index:
continue
if prio < 0:
continue
prefix, name = rpartition(fullname, '.')
pdict = rv.setdefault(prefix, {})
try:
typeindex = otypes[domainname, type]
except KeyError:
typeindex = len(otypes)
otypes[domainname, type] = typeindex
otype = domain.object_types.get(type)
if otype:
# use unicode() to fire translation proxies
onames[typeindex] = (domainname, type,
unicode(domain.get_type_name(otype)))
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
shortanchor = ''
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
shortanchor = anchor
pdict[name] = (fn2index[docname], typeindex, prio, shortanchor)
return rv
def get_terms(self, fn2index):
rvs = {}, {}
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
for k, v in mapping.iteritems():
if len(v) == 1:
fn, = v
if fn in fn2index:
rv[k] = fn2index[fn]
else:
rv[k] = [fn2index[fn] for fn in v if fn in fn2index]
return rvs
def freeze(self):
"""Create a usable data structure for serializing."""
filenames = self._titles.keys()
titles = self._titles.values()
fn2index = dict((f, i) for (i, f) in enumerate(filenames))
terms, title_terms = self.get_terms(fn2index)
objects = self.get_objects(fn2index) # populates _objtypes
objtypes = dict((v, k[0] + ':' + k[1])
for (k, v) in self._objtypes.iteritems())
objnames = self._objnames
return dict(filenames=filenames, titles=titles, terms=terms,
objects=objects, objtypes=objtypes, objnames=objnames,
titleterms=title_terms, envversion=self.env.version)
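    # Shape sketch of the frozen index returned above (illustrative values,
    # not original code); `terms` maps a stemmed word to a filename index or a
    # list of indices, mirroring load_terms()/get_terms():
    #
    #     {
    #         'filenames': ['intro', 'api'],
    #         'titles':    ['Introduction', 'API'],
    #         'terms':     {'word': 0, 'shared': [0, 1]},
    #         'titleterms': {...},
    #         'objects': {...}, 'objtypes': {...}, 'objnames': {...},
    #         'envversion': ...,
    #     }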
def prune(self, filenames):
"""Remove data for all filenames not in the list."""
new_titles = {}
for filename in filenames:
if filename in self._titles:
new_titles[filename] = self._titles[filename]
self._titles = new_titles
for wordnames in self._mapping.itervalues():
wordnames.intersection_update(filenames)
for wordnames in self._title_mapping.itervalues():
wordnames.intersection_update(filenames)
def feed(self, filename, title, doctree):
"""Feed a doctree to the index."""
self._titles[filename] = title
visitor = WordCollector(doctree, self.lang)
doctree.walk(visitor)
# memoize self.lang.stem
def stem(word):
try:
return self._stem_cache[word]
except KeyError:
self._stem_cache[word] = self.lang.stem(word)
return self._stem_cache[word]
_filter = self.lang.word_filter
for word in visitor.found_title_words:
word = stem(word)
if _filter(word):
self._title_mapping.setdefault(word, set()).add(filename)
for word in visitor.found_words:
word = stem(word)
if word not in self._title_mapping and _filter(word):
self._mapping.setdefault(word, set()).add(filename)
def context_for_searchtool(self):
return dict(
search_language_stemming_code = self.lang.js_stemmer_code,
search_language_stop_words =
jsdump.dumps(sorted(self.lang.stopwords)),
search_scorer_tool = self.js_scorer_code,
)
| mit | -8,059,802,016,210,315,000 | 33.279279 | 80 | 0.565484 | false |
omarkohl/pytest | _pytest/python.py | 1 | 89408 | """ Python test discovery, setup and run of test functions. """
import fnmatch
import functools
import inspect
import re
import types
import sys
import py
import pytest
from _pytest._code.code import TerminalRepr
from _pytest.mark import MarkDecorator, MarkerError
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import _pytest._pluggy as pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
if hasattr(inspect, 'signature'):
def _format_args(func):
return str(inspect.signature(func))
else:
def _format_args(func):
return inspect.formatargspec(*inspect.getargspec(func))
if sys.version_info[:2] == (2, 6):
def isclass(object):
""" Return true if the object is a class. Overrides inspect.isclass for
python 2.6 because it will return True for objects which always return
something on __getattr__ calls (see #1035).
Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
"""
return isinstance(object, (type, types.ClassType))
def _has_positional_arg(func):
return func.__code__.co_argcount
def filter_traceback(entry):
# entry.path might sometimes return a str object when the entry
# points to dynamically generated code
# see https://bitbucket.org/pytest-dev/py/issues/71
raw_filename = entry.frame.code.raw.co_filename
is_generated = '<' in raw_filename and '>' in raw_filename
if is_generated:
return False
    # entry.path might point to a nonexistent file, in which case it will
    # also return a str object. see #1133
p = py.path.local(entry.path)
return p != cutdir1 and not p.relto(cutdir2)
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
    This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
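# Illustrative usage sketch for the decorator documented above (not part of
# this module; the fixture and test names are made up):
#
#     @pytest.fixture(scope="module", params=[1, 2], ids=["one", "two"])
#     def number(request):
#         return request.param
#
#     def test_number(number):
#         assert number in (1, 2)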
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
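# Sketch of a yield-fixture (illustrative only): code before the yield runs as
# setup, code after it as teardown; the resource here is hypothetical.
#
#     @pytest.yield_fixture
#     def resource():
#         handle = open("data.txt")
#         yield handle
#         handle.close()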
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
group.addoption("--import-mode", default="prepend",
choices=["prepend", "append"], dest="importmode",
help="prepend/append to sys.path when importing test modules, "
"default is to prepend.")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
# We need to try and unwrap the function if it's a functools.partial
        # or a functools.wrapped.
        # We mustn't if it's been wrapped with mock.patch (python 2 only)
if not (isfunction(obj) or isfunction(get_real_func(obj))):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
elif getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return _pytest._code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
if isinstance(compat_co_firstlineno, int):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
# We explicitly check for "is True" here to not mistakenly treat
# classes with a custom __getattr__ returning something truthy (like a
# function) as test classes.
return safe_getattr(obj, '__test__', False) is True
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def istestfunction(self, obj, name):
return (
(self.funcnamefilter(name) or self.isnosetest(obj)) and
safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
)
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in list(dic.items()):
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
if not _marked(funcobj, mark):
mark(funcobj)
else:
if not _marked(funcobj, pytestmark):
pytestmark(funcobj)
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
importmode = self.config.getoption("--import-mode")
try:
mod = self.fspath.pyimport(ensuresyspath=importmode)
except SyntaxError:
raise self.CollectError(
_pytest._code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if _has_positional_arg(setup_module):
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if _has_positional_arg(fin):
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = _pytest._code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return py._builtin._totext(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
# invoke setup/teardown on popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
        # explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
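# Illustrative example (not original code) of a yield-based test collected by
# the Generator class above; this style is deprecated in favour of parametrize.
#
#     def check_even(x):
#         assert x % 2 == 0
#
#     def test_numbers():
#         yield check_even, 2            # collected as item "[0]"
#         yield "four", check_even, 4    # collected as item "['four']"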
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
valtype_for_arg = valtypes[arg]
getattr(self, valtype_for_arg)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to set up expensive resources,
        consider setting ``indirect`` so the setup happens at test setup time
        rather than at collection time.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: The list of argnames or boolean. A list of arguments'
names (subset of argnames). If True the list contains all names from
the argnames. Each argvalue corresponding to an argname in this list will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:arg ids: list of string ids, or a callable.
If strings, each is corresponding to the argvalues so that they are
part of the test id.
If callable, it should take one argument (a single argvalue) and return
a string or return None. If None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
It will also override any fixture-function defined scope, allowing
to set a dynamic scope using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
valtypes = {}
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(self.function, arg))
if indirect is True:
valtypes = dict.fromkeys(argnames, "params")
elif indirect is False:
valtypes = dict.fromkeys(argnames, "funcargs")
elif isinstance(indirect, (tuple, list)):
valtypes = dict.fromkeys(argnames, "funcargs")
for arg in indirect:
if arg not in argnames:
raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
self.function, arg))
valtypes[arg] = "params"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
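    # Usage sketch (illustrative, not part of the module): parametrize is most
    # often driven from a pytest_generate_tests hook; the db_backend name is an
    # assumption and indirect=True presumes a fixture of that name exists.
    #
    #     def pytest_generate_tests(metafunc):
    #         if "db_backend" in metafunc.fixturenames:
    #             metafunc.parametrize("db_backend", ["sqlite", "postgres"],
    #                                  indirect=True, scope="session")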
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
        request.addcall() is called during the test collection phase prior to,
        and independently of, actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
if _PY3:
import codecs
def _escape_bytes(val):
"""
If val is pure ascii, returns it as a str(), otherwise escapes
into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in the string, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if val:
# source: http://goo.gl/bGsnwC
encoded_bytes, _ = codecs.escape_encode(val)
return encoded_bytes.decode('ascii')
else:
# empty bytes crashes codecs.escape_encode (#1087)
return ''
else:
def _escape_bytes(val):
"""
In py2 bytes and str are the same type, so return it unchanged if it
is a full ascii string, otherwise escape it into its binary form.
"""
try:
return val.decode('ascii')
except UnicodeDecodeError:
return val.encode('string-escape')
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, bytes):
return _escape_bytes(val)
elif isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif isclass(val) and hasattr(val, '__name__'):
return val.__name__
elif _PY2 and isinstance(val, unicode):
# special case for python 2: if a unicode string is
# convertible to ascii, return it as an str() object instead
try:
return str(val)
except UnicodeError:
# fallthrough
pass
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
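# Behaviour sketch of idmaker (illustrative, not original code): representable
# values are joined with "-", others fall back to "<argname><index>".
#
#     idmaker(("a", "b"), [(1, "x"), (2, "y")])    # -> ["1-x", "2-y"]
#     idmaker(("n",), [(object(),), (object(),)])  # -> ["n0", "n1"]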
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
""" assert that a code block/function call raises ``expected_exception``
and raise a failure exception otherwise.
This helper produces a ``ExceptionInfo()`` object (see below).
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager will
not be executed. For example::
>>> with raises(OSError) as exc_info:
assert 1 == 1 # this will execute as expected
                raise OSError(errno.EEXIST, 'directory exists')
                assert exc_info.value.errno == errno.EEXIST  # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(OSError) as exc_info:
assert 1 == 1 # this will execute as expected
                raise OSError(errno.EEXIST, 'directory exists')
                assert exc_info.value.errno == errno.EEXIST  # this will now execute
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
.. autoclass:: _pytest._code.ExceptionInfo
:members:
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if expected_exception is AssertionError:
        # we want to catch an AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as expected_exception
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
if not args:
return RaisesContext(expected_exception)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = _pytest._code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return _pytest._code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return _pytest._code.ExceptionInfo()
pytest.fail("DID NOT RAISE {0}".format(expected_exception))
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
        # check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
# getfuncargvalue(argname) usage which was naturally
# not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
fixture values by stating it as an input argument in the fixture
function. If you only can decide about using another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
        # the following lines are dedented out of the except block so that
        # the python3 exception context does not leak into the fixture call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
        # clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            # the last fixture raised an error, so let's present
            # it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
pytest fixtures definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set. As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
        # mapping so that the caller can reuse it and does not have
        # to re-discover fixturedefs for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = _pytest._code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
fixturefunc = self.func
if self.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = self.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
# algorithm for sorting on a per-parametrized resource setup basis.
# It is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes so as to minimize the number of "high scope"
# setups and teardowns
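# Hedged illustration (hypothetical test ids): given items parametrized on a
# session-scoped fixture with params "a" and "b", e.g.
#   [test_x[a], test_y[b], test_z[a]]
# the reordering groups the "a" setups together ([test_x[a], test_z[a],
# test_y[b]]) so the "a" resource is set up and torn down only once.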
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache,scopenum+1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
    # items list into a list of items_before, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
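    # Hedged example of the yielded keys (hypothetical values): for a
    # module-scoped parametrized argname "db" with param_index 0, the key
    # would be ("db", 0, <fspath of the test module>); session scope drops
    # the path, class scope additionally includes item.cls.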
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() is in random order of argnames, but
        # then again different functions (items) can change the order of
        # arguments so it doesn't matter much probably
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
| mit | 5,183,993,823,527,229,000 | 37.83927 | 108 | 0.602608 | false |
ilya-epifanov/ansible | lib/ansible/plugins/strategies/linear.py | 1 | 14293 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook.block import Block
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.plugins import action_loader
from ansible.plugins.strategies import StrategyBase
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class StrategyModule(StrategyBase):
def _get_next_task_lockstep(self, hosts, iterator):
'''
Returns a list of (host, task) tuples, where the task may
be a noop task to keep the iterator in lock step across
all hosts.
'''
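        # Hedged illustration (hypothetical hosts/tasks): if host1 is still in
        # ITERATING_TASKS on task T2 while host2 already moved to its rescue
        # block, this returns [(host1, T2), (host2, noop_task)] so both hosts
        # stay on the same block/state until the slower one catches up.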
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
host_tasks = {}
display.debug("building list of next tasks for hosts")
for host in hosts:
host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
display.debug("done building task lists")
num_setups = 0
num_tasks = 0
num_rescue = 0
num_always = 0
lowest_cur_block = len(iterator._blocks)
display.debug("counting tasks in each state of execution")
for (k, v) in host_tasks.iteritems():
if v is None:
continue
(s, t) = v
if t is None:
continue
if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
lowest_cur_block = s.cur_block
if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
elif s.run_state == PlayIterator.ITERATING_TASKS:
num_tasks += 1
elif s.run_state == PlayIterator.ITERATING_RESCUE:
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
display.debug("done counting tasks in each state of execution")
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
This helper returns the task for all hosts in the requested
state, otherwise they get a noop dummy task. This also advances
the state of the host, since the given states are determined
while using peek=True.
'''
# we return the values in the order they were originally
# specified in the given hosts array
rvals = []
display.debug("starting to advance hosts")
for host in hosts:
host_state_task = host_tasks[host.name]
if host_state_task is None:
continue
(s, t) = host_state_task
if t is None:
continue
if s.run_state == cur_state and s.cur_block == cur_block:
new_t = iterator.get_next_task_for_host(host)
rvals.append((host, t))
else:
rvals.append((host, noop_task))
display.debug("done advancing hosts to next task")
return rvals
# if any hosts are in ITERATING_SETUP, return the setup task
# while all other hosts get a noop
if num_setups:
display.debug("advancing hosts in ITERATING_SETUP")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
# if any hosts are in ITERATING_TASKS, return the next normal
# task for these hosts, while all other hosts get a noop
if num_tasks:
display.debug("advancing hosts in ITERATING_TASKS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
# if any hosts are in ITERATING_RESCUE, return the next rescue
# task for these hosts, while all other hosts get a noop
if num_rescue:
display.debug("advancing hosts in ITERATING_RESCUE")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
# if any hosts are in ITERATING_ALWAYS, return the next always
# task for these hosts, while all other hosts get a noop
if num_always:
display.debug("advancing hosts in ITERATING_ALWAYS")
return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
# at this point, everything must be ITERATING_COMPLETE, so we
# return None for all hosts in the list
display.debug("all hosts are done, so returning None's for all hosts")
return [(host, None) for host in hosts]
def run(self, iterator, play_context):
'''
The linear strategy is simple - get the next task and queue
it for all hosts, then wait for the queue to drain before
moving on to the next task
'''
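        # Rough flow, as a sketch: for each lock-stepped (host, task) batch we
        # queue the task per host, drain the results queue, then splice any
        # included files (or noop blocks for excluded hosts) back into the
        # iterator before moving on.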
        # iterate over each task, while there is one left to run
result = True
work_to_do = True
while work_to_do and not self._tqm._terminated:
try:
self._display.debug("getting the remaining hosts for this loop")
hosts_left = self._inventory.get_hosts(iterator._play.hosts)
self._display.debug("done getting the remaining hosts for this loop")
# queue up this task for each host in the inventory
callback_sent = False
work_to_do = False
host_results = []
host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
# skip control
skip_rest = False
choose_step = True
for (host, task) in host_tasks:
if not task:
continue
run_once = False
work_to_do = True
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
action = action_loader.get(task.action, class_only=True)
if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
# check to see if this task should be skipped, due to it being a member of a
# role which has already run (and whether that role allows duplicate execution)
if task._role and task._role.has_run(host):
# If there is no metadata, the default behavior is to not allow duplicates,
# if there is metadata, check to see if the allow_duplicates flag was set to true
if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
self._display.debug("'%s' skipped because role has already run" % task)
continue
if task.action == 'meta':
self._execute_meta(task, play_context, iterator)
else:
# handle step if needed, skip meta actions as they are used internally
if self._step and choose_step:
if self._take_step(task):
choose_step = False
else:
skip_rest = True
break
self._display.debug("getting variables")
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
self._display.debug("done getting variables")
if not callback_sent:
display.debug("sending task start callback, copying the task so we can template it temporarily")
saved_name = task.name
display.debug("done copying, going to template now")
try:
task.name = unicode(templar.template(task.name, fail_on_undefined=False))
display.debug("done templating")
except:
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason")
pass
display.debug("here goes the callback...")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
task.name = saved_name
callback_sent = True
display.debug("sending task start callback")
self._blocked_hosts[host.get_name()] = True
self._queue_task(host, task, task_vars, play_context)
results = self._process_pending_results(iterator)
host_results.extend(results)
# if we're bypassing the host loop, break out now
if run_once:
break
# go to next host/task group
if skip_rest:
continue
self._display.debug("done queuing things up, now waiting for results queue to drain")
results = self._wait_on_pending_results(iterator)
host_results.extend(results)
if not work_to_do and len(iterator.get_failed_hosts()) > 0:
self._display.debug("out of hosts to run on")
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
try:
included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
except AnsibleError, e:
return False
if len(included_files) > 0:
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError, e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._display.warning(str(e))
continue
for new_block in new_blocks:
noop_block = Block(parent_block=task._block)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
for host in hosts_left:
if host in included_file._hosts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
final_block = new_block.filter_tagged_tasks(play_context, task_vars)
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
self._display.debug("results queue empty")
except (IOError, EOFError), e:
self._display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
return False
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
| gpl-3.0 | -5,798,462,310,246,295,000 | 45.405844 | 179 | 0.547331 | false |
mramanathan/pydiary_notes | decorators/starter.py | 1 | 1078 | # _*_ coding: utf-8 _*_
#!/usr/bin/env python
def jewel(crown):
''' Novice decorator function '''
def necklace(*args, **kwargs):
print("necklace is always below the crown {}".format(jewel.__name__))
return crown(*args, **kwargs)
return necklace
def funcLog(crowns):
    ''' Real-world application: log each decorated call's args and kwargs to a log file '''
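    # Hedged example of the resulting log line (default basicConfig format):
    #   INFO:root:Ran with args: ('pearl-chain', 'gold-chain'), and kwargs: {}
    # Note that logging.basicConfig only takes effect on its first call, so
    # every decorated function here ends up logging to the same file.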
import logging
logging.basicConfig(filename='{}.log'.format(crowns.__name__), level=logging.INFO)
def necklace(*args, **kwargs):
logging.info("Ran with args: {}, and kwargs: {}".format(args, kwargs))
print("necklace is always below the crown {}".format(jewel.__name__))
return crowns(*args, **kwargs)
return necklace
@jewel
def king():
print("King was crowned without necklace")
@jewel
def queen(orn1, orn2):
print("Queen was crowned with ({}, {})".format(orn1, orn2))
@funcLog
def king():
print("King was crowned without necklace")
@funcLog
def queen(orn1, orn2):
print("Queen was crowned with ({}, {})".format(orn1, orn2))
king()
queen('pearl-chain', 'gold-chain')
| gpl-3.0 | 1,280,736,323,662,418,200 | 21 | 86 | 0.622449 | false |
mammique/django | tests/regressiontests/extra_regress/tests.py | 2 | 14184 | from __future__ import absolute_import, unicode_literals
import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils.datastructures import SortedDict
from .models import TestObject, Order, RevisionableModel
class ExtraRegressTests(TestCase):
def setUp(self):
self.u = User.objects.create_user(
username="fred",
password="secret",
email="[email protected]"
)
def test_regression_7314_7372(self):
"""
Regression tests for #7314 and #7372
"""
rm = RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertEqual(rm.pk, rm.base.pk)
rm2 = rm.new_revision()
rm2.title = "Second Revision"
rm.when = datetime.datetime(2008, 9, 28, 14, 25, 0)
rm2.save()
self.assertEqual(rm2.title, 'Second Revision')
self.assertEqual(rm2.base.title, 'First Revision')
self.assertNotEqual(rm2.pk, rm.pk)
self.assertEqual(rm2.base.pk, rm.pk)
# Queryset to match most recent revision:
qs = RevisionableModel.objects.extra(
where=["%(table)s.id IN (SELECT MAX(rev.id) FROM %(table)s rev GROUP BY rev.base_id)" % {
'table': RevisionableModel._meta.db_table,
}]
)
self.assertQuerysetEqual(qs,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title)
)
# Queryset to search for string in title:
qs2 = RevisionableModel.objects.filter(title__contains="Revision")
self.assertQuerysetEqual(qs2,
[
('First Revision', 'First Revision'),
('Second Revision', 'First Revision'),
],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
# Following queryset should return the most recent revision:
self.assertQuerysetEqual(qs & qs2,
[('Second Revision', 'First Revision')],
transform=lambda r: (r.title, r.base.title),
ordered=False
)
def test_extra_stay_tied(self):
# Extra select parameters should stay tied to their corresponding
# select portions. Applies when portions are updated or otherwise
# moved around.
qs = User.objects.extra(
select=SortedDict((("alpha", "%s"), ("beta", "2"), ("gamma", "%s"))),
select_params=(1, 3)
)
qs = qs.extra(select={"beta": 4})
qs = qs.extra(select={"alpha": "%s"}, select_params=[5])
self.assertEqual(
list(qs.filter(id=self.u.id).values('alpha', 'beta', 'gamma')),
[{'alpha': 5, 'beta': 4, 'gamma': 3}]
)
def test_regression_7957(self):
"""
Regression test for #7957: Combining extra() calls should leave the
corresponding parameters associated with the right extra() bit. I.e.
internal dictionary must remain sorted.
"""
self.assertEqual(
User.objects.extra(select={"alpha": "%s"}, select_params=(1,)
).extra(select={"beta": "%s"}, select_params=(2,))[0].alpha,
1)
self.assertEqual(
User.objects.extra(select={"beta": "%s"}, select_params=(1,)
).extra(select={"alpha": "%s"}, select_params=(2,))[0].alpha,
2)
def test_regression_7961(self):
"""
Regression test for #7961: When not using a portion of an
extra(...) in a query, remove any corresponding parameters from the
query as well.
"""
self.assertEqual(
list(User.objects.extra(select={"alpha": "%s"}, select_params=(-6,)
).filter(id=self.u.id).values_list('id', flat=True)),
[self.u.id]
)
def test_regression_8063(self):
"""
Regression test for #8063: limiting a query shouldn't discard any
extra() bits.
"""
qs = User.objects.all().extra(where=['id=%s'], params=[self.u.id])
self.assertQuerysetEqual(qs, ['<User: fred>'])
self.assertQuerysetEqual(qs[:1], ['<User: fred>'])
def test_regression_8039(self):
"""
Regression test for #8039: Ordering sometimes removed relevant tables
from extra(). This test is the critical case: ordering uses a table,
but then removes the reference because of an optimisation. The table
should still be present because of the extra() call.
"""
self.assertQuerysetEqual(
Order.objects.extra(where=["username=%s"],
params=["fred"],
tables=["auth_user"]
).order_by('created_by'),
[]
)
def test_regression_8819(self):
"""
Regression test for #8819: Fields in the extra(select=...) list
should be available to extra(order_by=...).
"""
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}).distinct(),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']),
['<User: fred>']
)
self.assertQuerysetEqual(
User.objects.filter(pk=self.u.id).extra(select={'extra_field': 1}, order_by=['extra_field']).distinct(),
['<User: fred>']
)
def test_dates_query(self):
"""
When calling the dates() method on a queryset with extra selection
        columns, we can (and should) ignore those columns. They don't change
        the result, and including them would otherwise produce incorrect SQL.
"""
rm = RevisionableModel.objects.create(
title='First Revision',
when=datetime.datetime(2008, 9, 28, 10, 30, 0)
)
self.assertQuerysetEqual(
RevisionableModel.objects.extra(select={"the_answer": 'id'}).dates('when', 'month'),
['datetime.datetime(2008, 9, 1, 0, 0)']
)
def test_values_with_extra(self):
"""
Regression test for #10256... If there is a values() clause, Extra
columns are only returned if they are explicitly mentioned.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values()),
[{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
)
# Extra clauses after an empty values clause are still included
self.assertEqual(
list(TestObject.objects.values().extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third'))))),
[{'bar': 'second', 'third': 'third', 'second': 'second', 'whiz': 'third', 'foo': 'first', 'id': obj.pk, 'first': 'first'}]
)
# Extra columns are ignored if not mentioned in the values() clause
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values('first', 'second')),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns after a non-empty values() clause are ignored
self.assertEqual(
list(TestObject.objects.values('first', 'second').extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third'))))),
[{'second': 'second', 'first': 'first'}]
)
# Extra columns can be partially returned
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values('first', 'second', 'foo')),
[{'second': 'second', 'foo': 'first', 'first': 'first'}]
)
# Also works if only extra columns are included
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values('foo', 'whiz')),
[{'foo': 'first', 'whiz': 'third'}]
)
# Values list works the same way
# All columns are returned for an empty values_list()
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list()),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns after an empty values_list() are still included
self.assertEqual(
list(TestObject.objects.values_list().extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third'))))),
[('first', 'second', 'third', obj.pk, 'first', 'second', 'third')]
)
# Extra columns ignored completely if not mentioned in values_list()
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('first', 'second')),
[('first', 'second')]
)
# Extra columns after a non-empty values_list() clause are ignored completely
self.assertEqual(
list(TestObject.objects.values_list('first', 'second').extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third'))))),
[('first', 'second')]
)
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('second', flat=True)),
['second']
)
# Only the extra columns specified in the values_list() are returned
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('first', 'second', 'whiz')),
[('first', 'second', 'third')]
)
# ...also works if only extra columns are included
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('foo','whiz')),
[('first', 'third')]
)
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('whiz', flat=True)),
['third']
)
# ... and values are returned in the order they are specified
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('whiz','foo')),
[('third', 'first')]
)
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('first','id')),
[('first', obj.pk)]
)
self.assertEqual(
list(TestObject.objects.extra(select=SortedDict((('foo','first'), ('bar','second'), ('whiz','third')))).values_list('whiz', 'first', 'bar', 'id')),
[('third', 'first', 'second', obj.pk)]
)
def test_regression_10847(self):
"""
Regression for #10847: the list of extra columns can always be
accurately evaluated. Using an inner query ensures that as_sql() is
producing correct output without requiring full evaluation and
execution of the inner query.
"""
obj = TestObject(first='first', second='second', third='third')
obj.save()
self.assertEqual(
list(TestObject.objects.extra(select={'extra': 1}).values('pk')),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.extra(select={'extra': 1}).values('pk')
),
['<TestObject: TestObject: first,second,third>']
)
self.assertEqual(
list(TestObject.objects.values('pk').extra(select={'extra': 1})),
[{'pk': obj.pk}]
)
self.assertQuerysetEqual(
TestObject.objects.filter(
pk__in=TestObject.objects.values('pk').extra(select={'extra': 1})
),
['<TestObject: TestObject: first,second,third>']
)
self.assertQuerysetEqual(
TestObject.objects.filter(pk=obj.pk) |
TestObject.objects.extra(where=["id > %s"], params=[obj.pk]),
['<TestObject: TestObject: first,second,third>']
)
def test_regression_17877(self):
"""
Ensure that extra WHERE clauses get correctly ANDed, even when they
contain OR operations.
"""
# Test Case 1: should appear in queryset.
t = TestObject(first='a', second='a', third='a')
t.save()
# Test Case 2: should appear in queryset.
t = TestObject(first='b', second='a', third='a')
t.save()
# Test Case 3: should not appear in queryset, bug case.
t = TestObject(first='a', second='a', third='b')
t.save()
# Test Case 4: should not appear in queryset.
t = TestObject(first='b', second='a', third='b')
t.save()
# Test Case 5: should not appear in queryset.
t = TestObject(first='b', second='b', third='a')
t.save()
# Test Case 6: should not appear in queryset, bug case.
t = TestObject(first='a', second='b', third='b')
t.save()
self.assertQuerysetEqual(
TestObject.objects.extra(
where=["first = 'a' OR second = 'a'", "third = 'a'"],
),
['<TestObject: TestObject: a,a,a>', '<TestObject: TestObject: b,a,a>'],
ordered=False
)
| bsd-3-clause | 247,268,189,135,698,620 | 39.641834 | 159 | 0.55062 | false |
Huyuwei/tvm | topi/python/topi/image/resize.py | 1 | 7184 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator input resize compute."""
from __future__ import absolute_import
import tvm
from .. import tag
def resize(data, size, layout="NCHW", method="bilinear", align_corners=True, out_dtype=None):
"""Perform resize operation on the data.
Parameters
----------
inputs : tvm.Tensor
inputs is a 4-D tensor with shape
[batch, channel, in_height, in_width]
or [batch, in_height, in_width, channel]
    size: Tuple
        Output resolution (height, width) to resize to
layout: string, optional
"NCHW", "NHWC", or "NCHWc".
    method: {"bilinear", "nearest_neighbor", "bicubic"}
        Method to be used for resizing.
    align_corners: Boolean, optional
        To preserve the values at the corner pixels.
out_dtype: string, optional
        Type to return. If left None, it will be the same as the input type.
Returns
-------
output : tvm.Tensor
4-D with shape [batch, channel, in_height*scale, in_width*scale]
or [batch, in_height*scale, in_width*scale, channel]
or 5-D with shape [batch, channel-major, in_height*scale, in_width*scale, channel-minor]
"""
method = method.lower()
if layout == 'NHWC':
in_n, in_h, in_w, in_c = data.shape
output_shape = [in_n, size[0], size[1], in_c]
elif layout == 'NCHW':
in_n, in_c, in_h, in_w = data.shape
output_shape = [in_n, in_c, size[0], size[1]]
# Otherwise layout must be NCHWxc
else:
in_n, in_c, in_h, in_w, in_cc = data.shape
output_shape = [in_n, in_c, size[0], size[1], in_cc]
if align_corners:
y_ratio = (in_h - 1).astype('float') / (size[0] - 1)
x_ratio = (in_w - 1).astype('float') / (size[1] - 1)
else:
y_ratio = (in_h).astype('float') / (size[0])
x_ratio = (in_w).astype('float') / (size[1])
def _get_pixel(n, c, y, x, cc):
y = tvm.max(tvm.min(y, in_h - 1), 0)
x = tvm.max(tvm.min(x, in_w - 1), 0)
if layout == 'NHWC':
return data(n, y, x, c).astype('float')
if layout == 'NCHW':
return data(n, c, y, x).astype('float')
# else must be NCHWxc
return data(n, c, y, x, cc).astype('float')
def _get_indices(*indices):
if layout == 'NHWC':
n, y, x, c = indices
cc = None
elif layout == 'NCHW':
n, c, y, x = indices
cc = None
else:
n, c, y, x, cc = indices
return n, c, y, x, cc
def _cast_output(value):
if out_dtype:
dtype = out_dtype
else:
dtype = data.dtype
return value.astype(dtype)
# Nearest neighbor computation
def _nearest_neighbor(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
if align_corners:
yint = tvm.round(in_y).astype('int32')
xint = tvm.round(in_x).astype('int32')
else:
# Add epsilon to floor to prevent gpu rounding errors.
epsilon = 1e-5
yint = tvm.floor(in_y + epsilon).astype('int32')
xint = tvm.floor(in_x + epsilon).astype('int32')
return _cast_output(_get_pixel(n, c, yint, xint, cc))
# Bilinear helper functions and computation.
def _lerp(A, B, t):
return A * (1.0 - t) + B * t
def _bilinear(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
xint = tvm.floor(in_x).astype('int32')
xfract = in_x - tvm.floor(in_x)
yint = tvm.floor(in_y).astype('int32')
yfract = in_y - tvm.floor(in_y)
p00 = _get_pixel(n, c, yint, xint, cc)
p10 = _get_pixel(n, c, yint, xint + 1, cc)
p01 = _get_pixel(n, c, yint + 1, xint, cc)
p11 = _get_pixel(n, c, yint + 1, xint + 1, cc)
col0 = _lerp(p00, p10, xfract)
col1 = _lerp(p01, p11, xfract)
value = _lerp(col0, col1, yfract)
return _cast_output(value)
# Bicubic helper function and computation.
def _cubic_kernel(A, B, C, D, t):
a = -A / 2.0 + (3.0*B) / 2.0 - (3.0*C) / 2.0 + D / 2.0
b = A - (5.0*B) / 2.0 + 2.0*C - D / 2.0
c = -A / 2.0 + C / 2.0
d = B
return a*t*t*t + b*t*t + c*t + d
def _bicubic(*indices):
n, c, y, x, cc = _get_indices(*indices)
in_y = y_ratio * y
in_x = x_ratio * x
xint = tvm.floor(in_x).astype('int32')
xfract = in_x - tvm.floor(in_x)
yint = tvm.floor(in_y).astype('int32')
yfract = in_y - tvm.floor(in_y)
# 1st row
p00 = _get_pixel(n, c, yint - 1, xint - 1, cc)
p10 = _get_pixel(n, c, yint - 1, xint + 0, cc)
p20 = _get_pixel(n, c, yint - 1, xint + 1, cc)
p30 = _get_pixel(n, c, yint - 1, xint + 2, cc)
# 2nd row
p01 = _get_pixel(n, c, yint + 0, xint - 1, cc)
p11 = _get_pixel(n, c, yint + 0, xint + 0, cc)
p21 = _get_pixel(n, c, yint + 0, xint + 1, cc)
p31 = _get_pixel(n, c, yint + 0, xint + 2, cc)
# 3rd row
p02 = _get_pixel(n, c, yint + 1, xint - 1, cc)
p12 = _get_pixel(n, c, yint + 1, xint + 0, cc)
p22 = _get_pixel(n, c, yint + 1, xint + 1, cc)
p32 = _get_pixel(n, c, yint + 1, xint + 2, cc)
# 4th row
p03 = _get_pixel(n, c, yint + 2, xint - 1, cc)
p13 = _get_pixel(n, c, yint + 2, xint + 0, cc)
p23 = _get_pixel(n, c, yint + 2, xint + 1, cc)
p33 = _get_pixel(n, c, yint + 2, xint + 2, cc)
# Interpolate bicubically
col0 = _cubic_kernel(p00, p10, p20, p30, xfract)
col1 = _cubic_kernel(p01, p11, p21, p31, xfract)
col2 = _cubic_kernel(p02, p12, p22, p32, xfract)
col3 = _cubic_kernel(p03, p13, p23, p33, xfract)
value = _cubic_kernel(col0, col1, col2, col3, yfract)
return _cast_output(value)
# Determine which interpolation method to use then run it.
if method == "nearest_neighbor":
compute_func = _nearest_neighbor
elif method == "bilinear":
compute_func = _bilinear
elif method == "bicubic":
compute_func = _bicubic
else:
raise ValueError('%s method is not supported.' % method)
return tvm.compute(output_shape, compute_func, name='resize', tag=tag.INJECTIVE)
| apache-2.0 | 8,464,358,940,680,116,000 | 32.886792 | 96 | 0.549415 | false |
yelizariev/addons-yelizariev | web_debranding/__manifest__.py | 1 | 1144 | # Copyright 2015-2020 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# Copyright 2017 Ilmir Karamov <https://it-projects.info/team/ilmir-k>
# Copyright 2018-2019 Kolushov Alexandr <https://it-projects.info/team/KolushovAlexandr>
# Copyright 2018 Ildar Nasyrov <https://it-projects.info/team/iledarn>
# Copyright 2018 WohthaN <https://github.com/WohthaN>
# Copyright 2019 Eugene Molotov <https://github.com/em230418>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
{
"name": "Backend debranding",
"version": "12.0.1.0.29",
"author": "IT-Projects LLC, Ivan Yelizariev",
"license": "LGPL-3",
"category": "Debranding",
"images": ["images/web_debranding.png"],
"website": "https://twitter.com/yelizariev",
"price": 250.00,
"currency": "EUR",
"depends": ["web", "mail", "access_settings_menu"],
"data": ["data.xml", "views.xml", "js.xml", "pre_install.xml"],
"qweb": ["static/src/xml/web.xml"],
"post_load": "post_load",
"auto_install": False,
"uninstall_hook": "uninstall_hook",
"installable": True,
"saas_demo_title": "Backend debranding demo",
}
| lgpl-3.0 | -7,172,393,133,491,129,000 | 43 | 88 | 0.664336 | false |
ictofnwi/coach | dashboard/views.py | 1 | 19639 | import random
import re
import json
import pytz
import dateutil.parser
from datetime import datetime, timedelta
from pprint import pformat
from hashlib import md5
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render, redirect
from django.conf import settings
from django.template import RequestContext, loader
from django.db.models import Q
from models import Activity, Recommendation, LogEvent, GroupAssignment
from recommendation import recommend
from tincan_api import TinCan
from helpers import *
# Fetch TinCan credentials from settings
USERNAME = settings.TINCAN['username']
PASSWORD = settings.TINCAN['password']
ENDPOINT = settings.TINCAN['endpoint']
# Reference to TinCan verbs
COMPLETED = TinCan.VERBS['completed']['id']
PROGRESSED = TinCan.VERBS['progressed']['id']
# Reference to TinCan activity types
ASSESSMENT = TinCan.ACTIVITY_TYPES['assessment']
MEDIA = TinCan.ACTIVITY_TYPES['media']
QUESTION = TinCan.ACTIVITY_TYPES['question']
# Reference to progress URI in result/extension
PROGRESS_T = "http://uva.nl/coach/progress"
# Default barcode height
BARCODE_HEIGHT = 35
## Decorators
def identity_required(func):
def inner(request, *args, **kwargs):
        # Fetch email from GET parameters if present and store in session.
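        # Hedged illustration (hypothetical values): for a request like
        #   ?paramlist=email,pw&email=jane@uva.nl&hash=...
        # the hash is rebuilt as md5("jane@uva.nl,<AUTHENTICATION_SECRET>")
        # in upper-case hex and must match the supplied hash before the
        # email is trusted and stored in the session.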
paramlist = request.GET.get('paramlist', None)
email = request.GET.get('email', None)
param_hash = request.GET.get('hash', None)
if paramlist is not None:
hash_contents = []
for param in paramlist.split(","):
if param == "pw":
hash_contents.append(settings.AUTHENTICATION_SECRET)
else:
hash_contents.append(request.GET.get(param, ""))
hash_string = md5(",".join(hash_contents)).hexdigest().upper()
if hash_string == param_hash and email is not None and email != "":
request.session['user'] = "mailto:%s" % (email, )
# Fetch user from session
user = request.session.get('user', None)
# If no user is specified, show information on how to login
if user is None:
return render(request, 'dashboard/loginfirst.html', {})
else:
return func(request, *args, **kwargs)
return inner
def check_group(func):
"""Decorator to check the group for A/B testing.
Users in group A see the dashboard and users in group B do not.
Users that are in no group will be assigned one, so that both groups differ
at most 1 in size. If both groups are the same size, the group will be
assigned pseudorandomly.
"""
def inner(request, *args, **kwargs):
# Fetch user from session
user = request.session.get('user', None)
# Case 1: Existing user
try:
assignment = GroupAssignment.objects.get(user=user)
if assignment.group == 'A':
return func(request, *args, **kwargs)
else:
return HttpResponse()
# Case 2: New user
except ObjectDoesNotExist:
# Case 2a: First half of new pair,
# randomly pick A or B for this user.
if GroupAssignment.objects.count() % 2 == 0:
group = random.choice(['A', 'B'])
if group == 'A':
assignment = GroupAssignment(user=user, group='A')
assignment.save()
return func(request, *args, **kwargs)
else:
assignment = GroupAssignment(user=user, group='B')
assignment.save()
return HttpResponse()
# Case 2b: Second half of new pair,
# choose the group that was not previously chosen.
else:
try:
last_group = GroupAssignment.objects.order_by('-id')[0].group
except:
last_group = random.choice(['A', 'B'])
if last_group == 'A':
assignment = GroupAssignment(user=user, group='B')
assignment.save()
return HttpResponse()
else:
assignment = GroupAssignment(user=user, group='A')
assignment.save()
return func(request, *args, **kwargs)
return inner
## Bootstrap
def bootstrap(request):
width = request.GET.get('width',0)
template = loader.get_template('dashboard/bootstrap.js')
return HttpResponse(
template.render(RequestContext(
request,
{ 'host': request.get_host(), 'width': width }
)),
content_type="application/javascript"
)
def bootstrap_recommend(request, milestones):
width = request.GET.get('width',0)
max_recs = int(request.GET.get('max', False))
return render(request, 'dashboard/bootstrap_recommend.js',
{'milestones': milestones,
'max_recs': max_recs,
'width': width,
'host': request.get_host()})
## Debug interface
def log(request):
logs = LogEvent.objects.order_by('-timestamp')[:100]
data = request.GET.get('data',"0") == "1"
return render(request, 'dashboard/log.html',
{ 'logs': logs, 'data': data, 'host': request.get_host()})
## Interface
@identity_required
@check_group
def barcode(request, default_width=170):
"""Return an svg representing progress of an individual vs the group."""
# Fetch user from session
user = request.session.get('user', None)
width = int(request.GET.get('width', default_width))
data = {'width': width, 'height': BARCODE_HEIGHT}
# Add values
markers = {}
activities = Activity.objects.filter(type=ASSESSMENT)
for activity in activities:
if activity.user in markers:
markers[activity.user] += min(80, activity.value)
else:
markers[activity.user] = min(80, activity.value)
if user in markers:
data['user'] = markers[user]
del markers[user]
else:
data['user'] = 0
data['people'] = markers.values()
# Normalise
if len(markers) > 0:
maximum = max(max(data['people']), data['user'])
data['user'] /= maximum
data['user'] *= width
data['user'] = int(data['user'])
for i in range(len(data['people'])):
data['people'][i] /= maximum
data['people'][i] *= width
data['people'][i] = int(data['people'][i])
else:
        # If no other persons have been active, then the user is assumed to be
        # in the lead, regardless of whether the user has done anything at all.
data['user'] = width
return render(request, 'dashboard/barcode.svg', data)
@identity_required
@check_group
def index(request):
# Fetch user from session
user = request.session.get('user', None)
# Fetch desired width of the dashboard
width = request.GET.get("width",300);
activities = Activity.objects.filter(user=user).order_by('time')
statements = map(lambda x: x._dict(), activities)
statements = aggregate_statements(statements)
for statement in statements:
statement['activity'] = fix_url(statement['activity'], request)
statements = split_statements(statements)
assignments = statements['assignments']
    assignments.sort(key=lambda x: x['time'], reverse=True)
    exercises = statements['exercises']
    exercises.sort(key=lambda x: x['value'])
    video = statements['video']
    video.sort(key=lambda x: x['time'], reverse=True)
template = loader.get_template('dashboard/index.html')
context = RequestContext(request, {
'width': width,
'barcode_height': BARCODE_HEIGHT,
'assignments': assignments,
'exercises': exercises,
'video': video,
'host': request.get_host()
})
response = HttpResponse(template.render(context))
response['Access-Control-Allow-Origin'] = "*"
event = LogEvent(type='D', user=user, data="{}")
event.save()
return response
@identity_required
@check_group
def get_recommendations(request, milestones, max_recommendations=False):
# Fetch user from session
user = request.session.get('user', None)
# Fetch desired width of the recommendations dashboard
width = request.GET.get("width", 300);
# Get maximum recommendations to be showed
max_recommendations = int(request.GET.get('max', max_recommendations))
# Fetch activities that can be perceived as seen by the user
seen = Activity.objects.filter(
Q(verb=COMPLETED) | Q(verb=PROGRESSED),
value__gte=30,
user=user
)
    # Further filter that list to narrow it down to activities that can be
# perceived as being done by the user.
done = seen.filter(value__gte=80)
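    # e.g. (illustrative): a PROGRESSED activity at 45% counts as seen but not
    # done; at 85% it counts as both.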
# Preprocess the seen and done sets to be used later
seen = set(map(lambda x: hash(x.activity), seen))
done = set(map(lambda x: x.activity, done))
# Init dict containing final recommendations
recommendations = {}
# For every milestone we want to make recommendations for:
for milestone in milestones.split(','):
# Alas this is necessary on some servers
        milestone = re.sub(r'http(s?):/([^/])', r'http\1://\2', milestone)
# Make sure the milestone is not already passed
if milestone not in done:
# Fetch list of rules from the context of this milestone.
# Rules contain antecedent => consequent associations with a
# certain amount of confidence and support. The antecedent is
# stored as a hash of the activities in the antecedent. The
            # consequent is the activity that is recommended if you did the
            # activities in the antecedent. At the moment only the trail
            # recommendation algorithm is used, which has antecedents of only
            # one activity. If this were different, the antecedent hash check
            # would have to include creating power sets of a certain length.
rules = Recommendation.objects.filter(milestone=milestone)
# For each recommendation rule
for rule in rules:
# If the LHS applies and the RHS is not already done
if rule.antecedent_hash in seen and \
rule.consequent not in done:
# If the consequent was already recommended earlier
if rule.consequent in recommendations:
# Fetch earlier recommendation
earlier_rule = recommendations[rule.consequent]
                        # Calculate the original total by which the support was
                        # divided in order to get the confidence of the
                        # earlier recommendation
earlier_total = earlier_rule['support']
earlier_total /= float(earlier_rule['confidence'])
total = earlier_total + rule.support/rule.confidence
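                        # Numeric sketch (hypothetical): an earlier rule with
                        # support 4 and confidence 0.8 contributes a total of
                        # 5; a new rule with support 2 and confidence 0.5 adds
                        # 4, so support becomes 6 and confidence 6 / 9, about
                        # 0.67.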
# Calculate combined values
support = earlier_rule['support'] + rule.support
confidence = support / float(total)
score = f_score(confidence, support, beta=1.5)
# Update the earlier recommendation to combine both
earlier_rule['support'] = support
earlier_rule['confidence'] = confidence
earlier_rule['score'] = score
# If the consequent is recommended for the first time
else:
# Calculate F-score
score = f_score(rule.confidence, rule.support, beta=1.5)
# Store recommendation for this consequent
recommendations[rule.consequent] = {
'milestone': milestone,
'url': rule.consequent,
'id': rand_id(),
'name': rule.name,
'desc': rule.description,
'm_name': rule.m_name,
'confidence': rule.confidence,
'support': rule.support,
'score': score
}
# Convert to a list of recommendations.
# The lookup per consequent is no longer necessary
recommendations = recommendations.values()
# If recommendations were found
if len(recommendations) > 0:
# Normalise score
max_score = max(map(lambda x: x['score'], recommendations))
for recommendation in recommendations:
recommendation['score'] /= max_score
# Sort the recommendations using their f-scores
        recommendations.sort(key=lambda x: x['score'], reverse=True)
# Cap the number of recommendations if applicable.
if max_recommendations:
recommendations = recommendations[:max_recommendations]
# Log Recommendations viewed
data = json.dumps({
"recs": map(lambda x: x['url'], recommendations),
"path": request.path,
"milestone_n": len(milestones.split(',')),
"milestones": milestones})
event = LogEvent(type='V', user=user, data=data)
event.save()
# Render the result
return render(request, 'dashboard/recommend.html',
{'recommendations': recommendations,
'context': event.id,
'width' : width,
'host': request.get_host()})
else:
return HttpResponse()
## Background processes
def cache_activities(request):
"""Create a cache of the Learning Record Store by getting all items since
the most recent one in the cache.
"""
# Dynamic interval retrieval settings
INTERVAL = timedelta(days=1)
EPOCH = datetime(2013, 9, 3, 0, 0, 0, 0, pytz.utc)
# Set aggregate to True if events concerning the same activity-person
    # should be aggregated into one row. This has an impact on recommendations.
aggregate = False
# Find most recent date
try:
        # Selecting the datetime of the latest stored item minus a margin
        # of 6 hours. The margin is there to be slightly more resilient to
        # variation (read: mistakes) in timezone handling and also to cope with
        # the situation that an event was stored later than it occurred. The
# latter situation is one of the use cases of the Experience API.
# TODO: The 6 hour margin is arbitrary and a hack.
# We should find a better solution for this.
t1 = Activity.objects.latest('time').time - timedelta(hours=6)
    except Activity.DoesNotExist:
t1 = EPOCH
# Get new data
tincan = TinCan(USERNAME, PASSWORD, ENDPOINT)
statements = tincan.dynamicIntervalStatementRetrieval(t1, INTERVAL)
created_statement_count = 0
for statement in statements:
statement_type = statement['object']['definition']['type']
user = statement['actor']['mbox']
activity = statement['object']['id']
verb = statement['verb']['id']
name = statement['object']['definition']['name']['en-US']
description = statement['object']['definition']['description']['en-US']
time = dateutil.parser.parse(statement['timestamp'])
try:
raw_score = statement['result']['score']['raw']
min_score = statement['result']['score']['min']
max_score = statement['result']['score']['max']
value = 100 * (raw_score - min_score) / max_score
except KeyError:
try:
value = 100 * float(statement['result']['extensions'][PROGRESS_T])
except KeyError:
# If no information is given about the end result then assume a
# perfect score was acquired when the activity was completed,
# and no score otherwise.
if verb == COMPLETED:
value = 100
else:
value = 0
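        # Illustrative values: raw 8 with min 0 and max 10 yields value 80, a
        # PROGRESS_T extension of "0.45" yields 45, and a bare COMPLETED
        # statement without any score falls back to 100.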
if aggregate:
a, created = Activity.objects.get_or_create(user=user,
activity=activity)
# Don't overwrite completed except with other completed events
            # and only overwrite with a more recent timestamp
if created or (time > a.time and
(verb == COMPLETED or a.verb != COMPLETED)):
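                # e.g. (illustrative): a newer PROGRESSED statement does not
                # replace a stored COMPLETED one, but a newer COMPLETED event
                # always wins.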
a.verb = verb
a.type = statement_type
a.value = value
a.name = name
a.description = description
a.time = time
a.save()
created_statement_count += 1
else:
a, created = Activity.objects.get_or_create(user=user,
verb=verb,
activity=activity,
time=time)
if created:
a.verb = verb
a.type = statement_type
a.value = value
a.name = name
a.description = description
a.time = time
a.save()
created_statement_count += 1
    data = json.dumps({'t1': t1.isoformat(), 'created': created_statement_count})
event = LogEvent(type='C', user='all', data=data)
event.save()
return HttpResponse()
def generate_recommendations(request):
minsup = int(request.GET.get('minsup', 2))
    minconf = float(request.GET.get('minconf', .3))
    gamma = float(request.GET.get('gamma', .8))
# Mine recommendations
recommendations, names = recommend(
minsup=minsup,
minconf=minconf,
gamma=gamma
)
# Add recommendations to database
Recommendation.objects.all().delete()
for recommendation in recommendations:
model = Recommendation(
            antecedent_hash=hash(recommendation['antecedent']),
            confidence=recommendation['confidence'],
            support=recommendation['support'],
            milestone=recommendation['milestone'],
            m_name=names[recommendation['milestone']][0],
            name=names[recommendation['consequent']][0],
            consequent=recommendation['consequent'],
            description=names[recommendation['consequent']][1])
model.save()
event = LogEvent(type='G', user='all', data=json.dumps(recommendations))
event.save()
return HttpResponse(pformat(recommendations))
@identity_required
def track(request, defaulttarget='index.html'):
"""Track user clicks so that we may be able to improve recommendation
relevance in the future.
"""
# Fetch user from session
user = request.session.get('user', None)
# Fetch target URL from GET parameters
target = request.GET.get('target', defaulttarget)
    # Fetch context log id from GET parameters
context = request.GET.get('context', None)
if context is not None:
try:
context = LogEvent.objects.get(pk=int(context))
except LogEvent.DoesNotExist:
context = None
event = LogEvent(type='T', user=user, data=target, context=context)
event.save()
return redirect(fix_url(target, request))
| agpl-3.0 | -7,548,557,729,014,595,000 | 38.594758 | 82 | 0.585671 | false |
mikacousin/olc | src/ascii_load.py | 1 | 25915 | """ASCII file: Load functions"""
import array
from olc.channel_time import ChannelTime
from olc.cue import Cue
from olc.define import MAX_CHANNELS, NB_UNIVERSES, App
from olc.device import Device, Parameter, Template
from olc.group import Group
from olc.independent import Independent
from olc.master import Master
from olc.sequence import Sequence
from olc.step import Step
def get_time(string):
"""String format : [[hours:]minutes:]seconds[.tenths]
Return time in seconds
"""
if ":" in string:
tsplit = string.split(":")
if len(tsplit) == 2:
time = int(tsplit[0]) * 60 + float(tsplit[1])
elif len(tsplit) == 3:
time = int(tsplit[0]) * 3600 + int(tsplit[1]) * 60 + float(tsplit[2])
else:
print("Time format Error")
time = 0
else:
time = float(string)
return time
class AsciiParser:
"""Parse ASCII files"""
def __init__(self):
self.default_time = App().settings.get_double("default-time")
def parse(self, readlines):
"""Parse stream"""
flag_seq = False
in_cue = False
flag_patch = False
flag_master = False
flag_group = False
flag_preset = False
flag_inde = False
flag_template = False
flag_parameter = False
type_seq = "MainPlayback"
playback = False
txt = False
t_in = False
t_out = False
d_in = False
d_out = False
wait = False
channels = False
mem = False
channel_time = {}
template = None
devices = {}
parameters = {}
console = ""
item = ""
for line in readlines:
# Remove not needed endline
line = line.replace("\r", "")
line = line.replace("\n", "")
# Marker for end of file
if line[:7].upper() == "ENDDATA":
break
# Console type
if line[:7].upper() == "CONSOLE":
console = line[8:]
# Clear all
if line[:9].upper() == "CLEAR ALL":
del App().memories[:]
del App().chasers[:]
del App().groups[:]
del App().masters[:]
for page in range(2):
for i in range(20):
App().masters.append(Master(page + 1, i + 1, 0, 0))
App().patch.patch_empty()
App().sequence.__init__(1, text="Main Playback")
del App().sequence.steps[1:]
App().independents.__init__()
# Sequence
if line[:9].upper() == "$SEQUENCE":
p = line[10:].split(" ")
if int(p[0]) < 2 and not playback:
playback = True
type_seq = "MainPlayback"
else:
type_seq = "Chaser"
index_seq = int(p[0])
App().chasers.append(Sequence(index_seq, type_seq=type_seq))
del App().chasers[-1].steps[1:]
flag_seq = True
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = False
# Chasers
if flag_seq and type_seq == "Chaser":
if line[:4].upper() == "TEXT":
App().chasers[-1].text = line[5:]
if line[:4].upper() == "$CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
p = line[5:].split(" ")
seq = p[0]
mem = float(p[1])
if in_cue:
if line[:4].upper() == "DOWN":
p = line[5:]
time = p.split(" ")[0]
delay = p.split(" ")[1]
t_out = get_time(time)
if t_out == 0:
t_out = self.default_time
d_out = get_time(delay)
if line[:2].upper() == "UP":
p = line[3:]
time = p.split(" ")[0]
delay = p.split(" ")[1]
t_in = get_time(time)
if t_in == 0:
t_in = self.default_time
d_in = get_time(delay)
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
channels[channel - 1] = level
if line == "":
if not wait:
wait = 0.0
if not txt:
txt = ""
if not t_out:
t_out = 5.0
if not t_in:
t_in = 5.0
cue = Cue(seq, mem, channels, text=txt)
step = Step(
seq,
cue,
time_in=t_in,
time_out=t_out,
delay_out=d_out,
delay_in=d_in,
wait=wait,
text=txt,
)
App().chasers[-1].add_step(step)
in_cue = False
t_out = False
t_in = False
channels = False
# Main Playback
if flag_seq and type_seq == "MainPlayback":
            if line[:1] == "!":
flag_seq = False
if line[:3].upper() == "CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
mem = float(line[4:])
if line[:4].upper() == "$CUE":
in_cue = True
channels = array.array("B", [0] * MAX_CHANNELS)
mem = float(line[5:])
if in_cue:
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT" and not txt:
txt = line[7:]
if line[:12].upper() == "$$PRESETTEXT":
txt = line[13:]
if line[:4].upper() == "DOWN":
p = line[5:]
time = p.split(" ")[0]
delay = p.split(" ")[1] if len(p.split(" ")) == 2 else "0"
t_out = get_time(time)
if t_out == 0:
t_out = self.default_time
d_out = get_time(delay)
if line[:2].upper() == "UP":
p = line[3:]
time = p.split(" ")[0]
delay = p.split(" ")[1] if len(p.split(" ")) == 2 else "0"
t_in = get_time(time)
if t_in == 0:
t_in = self.default_time
d_in = get_time(delay)
if line[:6].upper() == "$$WAIT":
time = line[7:].split(" ")[0]
wait = get_time(time)
if line[:11].upper() == "$$PARTTIME ":
p = line[11:]
d = p.split(" ")[0]
if d == ".":
d = 0
delay = float(d)
time_str = p.split(" ")[1]
time = get_time(time_str)
if line[:14].upper() == "$$PARTTIMECHAN":
p = line[15:].split(" ")
# We could have several channels
for chan in p:
if chan.isdigit():
channel_time[int(chan)] = ChannelTime(delay, time)
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
# Ignore channels greater than MAX_CHANNELS
if channel < MAX_CHANNELS:
level = int(r[1][1:], 16)
channels[channel - 1] = level
if line[:5].upper() == "$$AL ":
items = line[5:].split(" ")
channel = int(items[0])
if line[:4].upper() == "$$A ":
items = line[4:].split(" ")
channel = int(items[0])
param_number = int(items[1])
value = int(items[2])
if channel < MAX_CHANNELS:
device_number = abs(App().patch.channels[channel - 1][0][0])
device = App().patch.devices[device_number]
param = device.template.parameters.get(param_number)
high_byte = param.offset.get("High Byte")
low_byte = param.offset.get("Low Byte")
parameters[param_number] = {
"high byte": high_byte,
"low byte": low_byte,
"value": value,
}
devices[channel] = parameters
if line == "":
if not wait:
wait = 0.0
if not txt:
txt = ""
if not t_out:
t_out = 5.0
if not t_in:
t_in = 5.0
if not d_in:
d_in = 0.0
if not d_out:
d_out = 0.0
# Create Cue
cue = Cue(0, mem, channels, text=txt, devices=devices)
# Add cue to the list
App().memories.append(cue)
# Create Step
step = Step(
1,
cue,
time_in=t_in,
time_out=t_out,
delay_in=d_in,
delay_out=d_out,
wait=wait,
channel_time=channel_time,
text=txt,
)
# Add Step to the Sequence
App().sequence.add_step(step)
in_cue = False
txt = False
t_out = False
t_in = False
wait = False
mem = False
channels = False
channel_time = {}
devices = {}
parameters = {}
# Dimmers Patch
if line[:11].upper() == "CLEAR PATCH":
flag_seq = False
flag_patch = True
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = False
App().patch.patch_empty() # Empty patch
App().window.channels_view.flowbox.invalidate_filter()
        if flag_patch and line[:1] == "!":
flag_patch = False
if line[:7].upper() == "PATCH 1":
for p in line[8:].split(" "):
q = p.split("<")
if q[0]:
r = q[1].split("@")
channel = int(q[0])
output = int(r[0])
univ = int((output - 1) / 512)
level = int(r[1])
if univ < NB_UNIVERSES:
if channel < MAX_CHANNELS:
out = output - (512 * univ)
App().patch.add_output(channel, out, univ, level)
App().window.channels_view.flowbox.invalidate_filter()
else:
print("More than", MAX_CHANNELS, "channels")
else:
print("More than", NB_UNIVERSES, "universes")
# Parameter Definitions
if line[:9].upper() == "$PARAMDEF":
item = line[10:].split(" ")
number = int(item[0])
group = int(item[1])
name = ""
for i in range(2, len(item)):
name += item[i] + " "
name = name[:-1]
App().parameters[number] = [group, name]
# Device Template
if flag_template:
            if line[:1] == "!":
flag_template = False
if line[:14].upper() == "$$MANUFACTURER":
template.manufacturer = line[15:]
if line[:11].upper() == "$$MODELNAME":
template.model_name = line[12:]
if line[:10].upper() == "$$MODENAME":
template.mode_name = line[11:]
if line[:10].upper() == "$$COLORCAL":
pass
if line[:11].upper() == "$$FOOTPRINT":
template.footprint = int(line[12:])
if line[:11].upper() == "$$PARAMETER":
item = line[12:].split(" ")
param_number = int(item[0])
# param_type = int(item[1])
# param_xfade = int(item[2])
parameter = Parameter(param_number)
flag_parameter = True
if flag_parameter:
if line[:8].upper() == "$$OFFSET":
item = line[9:].split(" ")
parameter.offset = {
"High Byte": int(item[0]),
"Low Byte": int(item[1]),
"Step": int(item[2]),
}
if line[:9].upper() == "$$DEFAULT":
parameter.default = int(line[10:])
if line[:11].upper() == "$$HIGHLIGHT":
parameter.highlight = int(line[12:])
if line[:7].upper() == "$$TABLE":
item = line[8:].split(" ")
start = int(item[0])
stop = int(item[1])
flags = int(item[2])
range_name = ""
for i in range(3, len(item)):
range_name += item[i] + " "
range_name = range_name[:-1]
parameter.table.append([start, stop, flags, range_name])
if line[:8].upper() == "$$RANGE ":
item = line[8:].split(" ")
percent = int(item[2]) == 1
parameter.range = {
"Minimum": int(item[0]),
"Maximum": int(item[1]),
"Percent": percent,
}
if line[:12].upper() == "$$RANGEGROUP":
pass
if line == "":
template.parameters[parameter.number] = parameter
flag_parameter = False
if line[:9].upper() == "$TEMPLATE":
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = False
flag_template = True
name = line[10:]
template = Template(name)
App().templates.append(template)
# Devices
if line[:8].upper() == "$DEVICE ":
item = line[8:].split(" ")
channel = int(item[0])
output = int(item[1])
universe = int((output - 1) / 512)
output = output - (512 * universe)
template = ""
for i in range(6, len(item)):
template += item[i] + " "
template = template[:-1]
if channel < MAX_CHANNELS and universe < NB_UNIVERSES:
device = Device(channel, output, universe, template)
App().patch.add_device(device)
# Presets not in sequence
if line[:5].upper() == "GROUP" and console == "CONGO":
# On Congo, Preset not in sequence
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_preset = True
flag_template = False
channels = array.array("B", [0] * MAX_CHANNELS)
preset_nb = float(line[6:])
if line[:7].upper() == "$PRESET" and (console in ("DLIGHT", "VLC")):
# On DLight, Preset not in sequence
flag_seq = False
flag_patch = False
flag_master = False
flag_group = False
flag_inde = False
flag_template = False
flag_preset = True
channels = array.array("B", [0] * MAX_CHANNELS)
preset_nb = float(line[8:])
if flag_preset:
if line[:1] == "!":
flag_preset = False
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT":
txt = line[7:]
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if line == "":
# Find Preset's position
found = False
i = 0
for i, _ in enumerate(App().memories):
if App().memories[i].memory > preset_nb:
found = True
break
if not found:
# Preset is at the end
i += 1
if not txt:
txt = ""
# Create Preset
cue = Cue(0, preset_nb, channels, text=txt)
# Add preset to the list
App().memories.insert(i, cue)
flag_preset = False
txt = ""
# Groups
if line[:5].upper() == "GROUP" and console != "CONGO":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_inde = False
flag_template = False
flag_group = True
channels = array.array("B", [0] * MAX_CHANNELS)
group_nb = float(line[6:])
if line[:6].upper() == "$GROUP":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_inde = False
flag_template = False
flag_group = True
channels = array.array("B", [0] * MAX_CHANNELS)
group_nb = float(line[7:])
if flag_group:
if line[:1] == "!":
flag_group = False
if line[:4].upper() == "TEXT":
txt = line[5:]
if line[:6].upper() == "$$TEXT":
txt = line[7:]
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if line == "":
if not txt:
txt = ""
            # We don't create a group that already exists
group_exist = False
for grp in App().groups:
if group_nb == grp.index:
group_exist = True
if not group_exist:
App().groups.append(Group(group_nb, channels, txt))
flag_group = False
txt = ""
# Masters
if flag_master:
if line[:1] == "!":
flag_master = False
if line[:4].upper() == "CHAN":
p = line[5:].split(" ")
for q in p:
r = q.split("/")
if r[0] != "":
channel = int(r[0])
level = int(r[1][1:], 16)
if channel <= MAX_CHANNELS:
channels[channel - 1] = level
if (line == "" or line[:13].upper() == "$MASTPAGEITEM") and int(
item[1]
) <= 20:
index = int(item[1]) - 1 + ((int(item[0]) - 1) * 20)
App().masters[index] = Master(
int(item[0]), int(item[1]), item[2], channels
)
flag_master = False
if line[:13].upper() == "$MASTPAGEITEM":
item = line[14:].split(" ")
        # DLight uses Type "2" for Groups
if console == "DLIGHT" and item[2] == "2":
item[2] = "13"
if item[2] == "2":
flag_seq = False
flag_patch = False
flag_group = False
flag_preset = False
flag_inde = False
flag_template = False
flag_master = True
channels = array.array("B", [0] * MAX_CHANNELS)
            # Only 20 Masters per page
elif int(item[1]) <= 20:
index = int(item[1]) - 1 + ((int(item[0]) - 1) * 20)
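                # e.g. (illustrative): page 2, master 3 maps to index 22.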
App().masters[index] = Master(
int(item[0]), int(item[1]), item[2], item[3]
)
# Independents
if line[:16].upper() == "$SPECIALFUNCTION":
flag_seq = False
flag_patch = False
flag_master = False
flag_preset = False
flag_group = False
flag_template = False
flag_inde = True
channels = array.array("B", [0] * MAX_CHANNELS)
text = ""
items = line[17:].split(" ")
number = int(items[0])
# Parameters not implemented:
# ftype = items[1] # 0: inclusive, 1: Inhibit, 2: Exclusive
# button_mode = items[2] # 0: Momentary, 1: Toggling
if flag_inde:
if line[:1] == "!":
flag_inde = False
if line[:4].upper() == "TEXT":
text = line[5:]
if line[:6].upper() == "$$TEXT" and not text:
text = line[7:]
if line[:4].upper() == "CHAN":
chan_list = line[5:].split(" ")
for channel in chan_list:
item = channel.split("/")
if item[0]:
chan = int(item[0])
level = int(item[1][1:], 16)
if chan <= MAX_CHANNELS:
channels[chan - 1] = level
if line == "":
inde = Independent(number, text=text, levels=channels)
App().independents.update(inde)
flag_inde = False
# MIDI mapping
if line[:10].upper() == "$$MIDINOTE":
item = line[11:].split(" ")
App().midi.midi_notes.update({item[0]: [int(item[1]), int(item[2])]})
if line[:8].upper() == "$$MIDICC":
item = line[9:].split(" ")
App().midi.midi_cc.update({item[0]: [int(item[1]), int(item[2])]})
| gpl-3.0 | -7,586,853,367,817,942,000 | 41.001621 | 88 | 0.353849 | false |
mementum/backtrader | samples/vctest/vctest.py | 1 | 15011 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
# The above could be sent to an independent module
import backtrader as bt
from backtrader.utils import flushfile # win32 quick stdout flushing
from backtrader.utils.py3 import string_types
class TestStrategy(bt.Strategy):
params = dict(
smaperiod=5,
trade=False,
stake=10,
exectype=bt.Order.Market,
stopafter=0,
valid=None,
cancel=0,
donotsell=False,
price=None,
pstoplimit=None,
)
def __init__(self):
# To control operation entries
self.orderid = list()
self.order = None
self.counttostop = 0
self.datastatus = 0
# Create SMA on 2nd data
self.sma = bt.indicators.MovAv.SMA(self.data, period=self.p.smaperiod)
print('--------------------------------------------------')
print('Strategy Created')
print('--------------------------------------------------')
def notify_data(self, data, status, *args, **kwargs):
print('*' * 5, 'DATA NOTIF:', data._getstatusname(status), *args)
if status == data.LIVE:
self.counttostop = self.p.stopafter
self.datastatus = 1
def notify_store(self, msg, *args, **kwargs):
print('*' * 5, 'STORE NOTIF:', msg)
def notify_order(self, order):
if order.status in [order.Completed, order.Cancelled, order.Rejected]:
self.order = None
print('-' * 50, 'ORDER BEGIN', datetime.datetime.now())
print(order)
print('-' * 50, 'ORDER END')
def notify_trade(self, trade):
print('-' * 50, 'TRADE BEGIN', datetime.datetime.now())
print(trade)
print('-' * 50, 'TRADE END')
def prenext(self):
self.next(frompre=True)
def next(self, frompre=False):
txt = list()
txt.append('%04d' % len(self))
dtfmt = '%Y-%m-%dT%H:%M:%S.%f'
txt.append('%s' % self.data.datetime.datetime(0).strftime(dtfmt))
txt.append('{}'.format(self.data.open[0]))
txt.append('{}'.format(self.data.high[0]))
txt.append('{}'.format(self.data.low[0]))
txt.append('{}'.format(self.data.close[0]))
txt.append('{}'.format(self.data.volume[0]))
txt.append('{}'.format(self.data.openinterest[0]))
txt.append('{}'.format(self.sma[0]))
print(', '.join(txt))
if len(self.datas) > 1:
txt = list()
txt.append('%04d' % len(self))
dtfmt = '%Y-%m-%dT%H:%M:%S.%f'
txt.append('%s' % self.data1.datetime.datetime(0).strftime(dtfmt))
txt.append('{}'.format(self.data1.open[0]))
txt.append('{}'.format(self.data1.high[0]))
txt.append('{}'.format(self.data1.low[0]))
txt.append('{}'.format(self.data1.close[0]))
txt.append('{}'.format(self.data1.volume[0]))
txt.append('{}'.format(self.data1.openinterest[0]))
txt.append('{}'.format(float('NaN')))
print(', '.join(txt))
if self.counttostop: # stop after x live lines
self.counttostop -= 1
if not self.counttostop:
self.env.runstop()
return
if not self.p.trade:
return
# if True and len(self.orderid) < 1:
if self.datastatus and not self.position and len(self.orderid) < 1:
self.order = self.buy(size=self.p.stake,
exectype=self.p.exectype,
price=self.p.price,
plimit=self.p.pstoplimit,
valid=self.p.valid)
self.orderid.append(self.order)
elif self.position.size > 0 and not self.p.donotsell:
if self.order is None:
size = self.p.stake // 2
if not size:
size = self.position.size # use the remaining
self.order = self.sell(size=size, exectype=bt.Order.Market)
elif self.order is not None and self.p.cancel:
if self.datastatus > self.p.cancel:
self.cancel(self.order)
if self.datastatus:
self.datastatus += 1
def start(self):
header = ['Datetime', 'Open', 'High', 'Low', 'Close', 'Volume',
'OpenInterest', 'SMA']
print(', '.join(header))
self.done = False
def runstrategy():
args = parse_args()
# Create a cerebro
cerebro = bt.Cerebro()
storekwargs = dict()
if not args.nostore:
vcstore = bt.stores.VCStore(**storekwargs)
if args.broker:
brokerargs = dict(account=args.account, **storekwargs)
if not args.nostore:
broker = vcstore.getbroker(**brokerargs)
else:
broker = bt.brokers.VCBroker(**brokerargs)
cerebro.setbroker(broker)
timeframe = bt.TimeFrame.TFrame(args.timeframe)
if args.resample or args.replay:
datatf = bt.TimeFrame.Ticks
datacomp = 1
else:
datatf = timeframe
datacomp = args.compression
fromdate = None
if args.fromdate:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.fromdate))
fromdate = datetime.datetime.strptime(args.fromdate, dtformat)
todate = None
if args.todate:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.todate))
todate = datetime.datetime.strptime(args.todate, dtformat)
VCDataFactory = vcstore.getdata if not args.nostore else bt.feeds.VCData
datakwargs = dict(
timeframe=datatf, compression=datacomp,
fromdate=fromdate, todate=todate,
historical=args.historical,
qcheck=args.qcheck,
tz=args.timezone
)
if args.nostore and not args.broker: # neither store nor broker
datakwargs.update(storekwargs) # pass the store args over the data
data0 = VCDataFactory(dataname=args.data0, tradename=args.tradename,
**datakwargs)
data1 = None
if args.data1 is not None:
data1 = VCDataFactory(dataname=args.data1, **datakwargs)
rekwargs = dict(
timeframe=timeframe, compression=args.compression,
bar2edge=not args.no_bar2edge,
adjbartime=not args.no_adjbartime,
rightedge=not args.no_rightedge,
)
if args.replay:
cerebro.replaydata(data0, **rekwargs)
if data1 is not None:
cerebro.replaydata(data1, **rekwargs)
elif args.resample:
cerebro.resampledata(data0, **rekwargs)
if data1 is not None:
cerebro.resampledata(data1, **rekwargs)
else:
cerebro.adddata(data0)
if data1 is not None:
cerebro.adddata(data1)
if args.valid is None:
valid = None
else:
try:
valid = float(args.valid)
        except ValueError:
dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.valid))
valid = datetime.datetime.strptime(args.valid, dtformat)
else:
valid = datetime.timedelta(seconds=args.valid)
# Add the strategy
cerebro.addstrategy(TestStrategy,
smaperiod=args.smaperiod,
trade=args.trade,
exectype=bt.Order.ExecType(args.exectype),
stake=args.stake,
stopafter=args.stopafter,
valid=valid,
cancel=args.cancel,
donotsell=args.donotsell,
price=args.price,
pstoplimit=args.pstoplimit)
# Live data ... avoid long data accumulation by switching to "exactbars"
cerebro.run(exactbars=args.exactbars)
if args.plot and args.exactbars < 1: # plot if possible
cerebro.plot()
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Test Visual Chart 6 integration')
parser.add_argument('--exactbars', default=1, type=int,
required=False, action='store',
help='exactbars level, use 0/-1/-2 to enable plotting')
parser.add_argument('--plot',
required=False, action='store_true',
help='Plot if possible')
parser.add_argument('--stopafter', default=0, type=int,
required=False, action='store',
help='Stop after x lines of LIVE data')
parser.add_argument('--nostore',
required=False, action='store_true',
help='Do not Use the store pattern')
parser.add_argument('--qcheck', default=0.5, type=float,
required=False, action='store',
help=('Timeout for periodic '
'notification/resampling/replaying check'))
parser.add_argument('--no-timeoffset',
required=False, action='store_true',
help=('Do not Use TWS/System time offset for non '
'timestamped prices and to align resampling'))
parser.add_argument('--data0', default=None,
required=True, action='store',
help='data 0 into the system')
parser.add_argument('--tradename', default=None,
required=False, action='store',
help='Actual Trading Name of the asset')
parser.add_argument('--data1', default=None,
required=False, action='store',
help='data 1 into the system')
parser.add_argument('--timezone', default=None,
required=False, action='store',
help='timezone to get time output into (pytz names)')
parser.add_argument('--historical',
required=False, action='store_true',
help='do only historical download')
parser.add_argument('--fromdate',
required=False, action='store',
help=('Starting date for historical download '
'with format: YYYY-MM-DD[THH:MM:SS]'))
parser.add_argument('--todate',
required=False, action='store',
help=('End date for historical download '
'with format: YYYY-MM-DD[THH:MM:SS]'))
parser.add_argument('--smaperiod', default=5, type=int,
required=False, action='store',
help='Period to apply to the Simple Moving Average')
pgroup = parser.add_mutually_exclusive_group(required=False)
pgroup.add_argument('--replay',
required=False, action='store_true',
help='replay to chosen timeframe')
pgroup.add_argument('--resample',
required=False, action='store_true',
help='resample to chosen timeframe')
parser.add_argument('--timeframe', default=bt.TimeFrame.Names[0],
choices=bt.TimeFrame.Names,
required=False, action='store',
help='TimeFrame for Resample/Replay')
parser.add_argument('--compression', default=1, type=int,
required=False, action='store',
help='Compression for Resample/Replay')
parser.add_argument('--no-bar2edge',
required=False, action='store_true',
help='no bar2edge for resample/replay')
parser.add_argument('--no-adjbartime',
required=False, action='store_true',
help='no adjbartime for resample/replay')
parser.add_argument('--no-rightedge',
required=False, action='store_true',
help='no rightedge for resample/replay')
parser.add_argument('--broker',
required=False, action='store_true',
help='Use VisualChart as broker')
parser.add_argument('--account', default=None,
required=False, action='store',
help='Choose broker account (else first)')
parser.add_argument('--trade',
required=False, action='store_true',
help='Do Sample Buy/Sell operations')
parser.add_argument('--donotsell',
required=False, action='store_true',
help='Do not sell after a buy')
parser.add_argument('--exectype', default=bt.Order.ExecTypes[0],
choices=bt.Order.ExecTypes,
required=False, action='store',
help='Execution to Use when opening position')
parser.add_argument('--price', default=None, type=float,
required=False, action='store',
help='Price in Limit orders or Stop Trigger Price')
parser.add_argument('--pstoplimit', default=None, type=float,
required=False, action='store',
help='Price for the limit in StopLimit')
parser.add_argument('--stake', default=10, type=int,
required=False, action='store',
help='Stake to use in buy operations')
parser.add_argument('--valid', default=None,
required=False, action='store',
help='Seconds or YYYY-MM-DD')
parser.add_argument('--cancel', default=0, type=int,
required=False, action='store',
help=('Cancel a buy order after n bars in operation,'
' to be combined with orders like Limit'))
return parser.parse_args()
if __name__ == '__main__':
runstrategy()
| gpl-3.0 | 3,710,281,282,891,024,000 | 35.612195 | 79 | 0.541136 | false |
dansanderson/picotool | tests/pico8/music/music_test.py | 1 | 3789 | #!/usr/bin/env python3
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from pico8.music import music
VALID_MUSIC_LINES = [b'00 41424344\n'] * 64
class TestMusic(unittest.TestCase):
def testFromLines(self):
m = music.Music.from_lines(VALID_MUSIC_LINES, 4)
self.assertEqual(b'\x41\x42\x43\x44' * 64, m._data)
self.assertEqual(4, m._version)
def testToLines(self):
m = music.Music.from_lines(VALID_MUSIC_LINES, 4)
self.assertEqual(list(m.to_lines()), VALID_MUSIC_LINES)
def testSetChannel(self):
m = music.Music.empty(version=4)
m.set_channel(0, 0, 0)
self.assertEqual(b'\x00\x42\x43\x44', m._data[0:4])
m.set_channel(0, 1, 1)
self.assertEqual(b'\x00\x01\x43\x44', m._data[0:4])
m.set_channel(0, 2, 2)
self.assertEqual(b'\x00\x01\x02\x44', m._data[0:4])
m.set_channel(0, 3, 3)
self.assertEqual(b'\x00\x01\x02\x03', m._data[0:4])
m.set_channel(1, 0, 0)
self.assertEqual(b'\x00\x42\x43\x44', m._data[4:8])
m.set_channel(0, 0, None)
m.set_channel(0, 1, None)
m.set_channel(0, 2, None)
m.set_channel(0, 3, None)
self.assertEqual(b'\x41\x42\x43\x44', m._data[0:4])
def testGetChannel(self):
m = music.Music.empty(version=4)
self.assertIsNone(m.get_channel(0, 0))
m.set_channel(0, 0, 0)
self.assertEqual(0, m.get_channel(0, 0))
self.assertIsNone(m.get_channel(0, 1))
m.set_channel(0, 1, 1)
self.assertEqual(1, m.get_channel(0, 1))
self.assertIsNone(m.get_channel(0, 2))
m.set_channel(0, 2, 2)
self.assertEqual(2, m.get_channel(0, 2))
self.assertIsNone(m.get_channel(0, 3))
m.set_channel(0, 3, 3)
self.assertEqual(3, m.get_channel(0, 3))
self.assertIsNone(m.get_channel(1, 0))
m.set_channel(1, 0, 0)
self.assertEqual(0, m.get_channel(1, 0))
def testSetProperties(self):
m = music.Music.empty(version=4)
m.set_channel(0, 0, 0)
m.set_channel(0, 1, 1)
m.set_channel(0, 2, 2)
m.set_channel(0, 3, 3)
self.assertEqual(b'\x00\x01\x02\x03', m._data[0:4])
m.set_properties(0)
self.assertEqual(b'\x00\x01\x02\x03', m._data[0:4])
m.set_properties(0, begin=True)
self.assertEqual(b'\x80\x01\x02\x03', m._data[0:4])
m.set_properties(0, end=True)
self.assertEqual(b'\x80\x81\x02\x03', m._data[0:4])
m.set_properties(0, stop=True)
self.assertEqual(b'\x80\x81\x82\x03', m._data[0:4])
m.set_properties(0, begin=False, stop=False)
self.assertEqual(b'\x00\x81\x02\x03', m._data[0:4])
m.set_properties(0, begin=True, end=False)
self.assertEqual(b'\x80\x01\x02\x03', m._data[0:4])
m.set_channel(1, 0, 0)
m.set_channel(1, 1, 1)
m.set_channel(1, 2, 2)
m.set_channel(1, 3, 3)
self.assertEqual(b'\x00\x01\x02\x03', m._data[4:8])
m.set_properties(1, begin=True)
self.assertEqual(b'\x80\x01\x02\x03', m._data[4:8])
def testGetProperties(self):
m = music.Music.empty(version=4)
self.assertEqual((False, False, False), m.get_properties(0))
m.set_properties(0, begin=True)
self.assertEqual((True, False, False), m.get_properties(0))
m.set_properties(0, end=True)
self.assertEqual((True, True, False), m.get_properties(0))
m.set_properties(0, stop=True)
self.assertEqual((True, True, True), m.get_properties(0))
m.set_properties(0, begin=False, stop=False)
self.assertEqual((False, True, False), m.get_properties(0))
if __name__ == '__main__':
unittest.main()
| mit | 476,180,073,317,295,040 | 36.147059 | 68 | 0.584851 | false |
vivisect/synapse | synapse/lib/trigger.py | 1 | 1918 | import logging
import synapse.lib.cache as s_cache
logger = logging.getLogger(__name__)
class Triggers:
def __init__(self):
self._trig_list = []
self._trig_match = s_cache.MatchCache()
self._trig_byname = s_cache.Cache(onmiss=self._onTrigNameMiss)
def clear(self):
'''
Clear all previously registered triggers
'''
self._trig_list = []
self._trig_byname.clear()
def add(self, func, perm):
'''
Add a new callback to the triggers.
Args:
func (function): The function to call
perm (str,dict): The permission tufo
Returns:
(None)
'''
self._trig_list.append((perm, func))
self._trig_byname.clear()
def _onTrigNameMiss(self, name):
retn = []
for perm, func in self._trig_list:
if self._trig_match.match(name, perm[0]):
retn.append((perm, func))
return retn
def _cmpperm(self, perm, must):
for prop, match in must[1].items():
valu = perm[1].get(prop)
if valu is None:
return False
if not self._trig_match.match(valu, match):
return False
return True
def trigger(self, perm, *args, **kwargs):
'''
Fire any matching trigger functions for the given perm.
Args:
perm ((str,dict)): The perm tufo to trigger
*args (list): args list to use calling the trigger function
**kwargs (dict): kwargs dict to use calling the trigger function
Returns:
(None)
'''
for must, func in self._trig_byname.get(perm[0]):
if self._cmpperm(perm, must):
try:
func(*args, **kwargs)
except Exception as e:
logger.exception(e)
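# Minimal usage sketch (hypothetical names, based only on the API above):
#
#   trigs = Triggers()
#   trigs.add(onNodeAdd, ('node:add', {'form': 'inet:fqdn'}))
#   trigs.trigger(('node:add', {'form': 'inet:fqdn', 'user': 'visi'}), node)
#
# The perm string is matched against each registered pattern and every prop
# in the registered perm dict must also match for the callback to fire.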
| apache-2.0 | -5,746,460,234,072,582,000 | 25.638889 | 79 | 0.519291 | false |
vlegoff/tsunami | src/secondaires/navigation/equipage/postes/chirurgien.py | 1 | 1796 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le poste chirurgien."""
from . import Poste
class Chirurgien(Poste):
"""Classe définissant le poste chirurgien."""
nom = "chirurgien"
autorite = 15
points = 4
nom_parent = "officier"
| bsd-3-clause | -7,182,430,875,838,210,000 | 41.738095 | 79 | 0.764903 | false |
allenai/allennlp | tests/training/metrics/covariance_test.py | 1 | 7843 | import numpy as np
import torch
from torch.testing import assert_allclose
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
# global_distributed_metric,
# run_distributed_test,
)
from allennlp.training.metrics import Covariance
class CovarianceTest(AllenNlpTestCase):
@multi_device
def test_covariance_unmasked_computation(self, device: str):
covariance = Covariance()
batch_size = 100
num_labels = 10
predictions = torch.randn(batch_size, num_labels, device=device)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels, device=device)
stride = 10
for i in range(batch_size // stride):
timestep_predictions = predictions[stride * i : stride * (i + 1), :]
timestep_labels = labels[stride * i : stride * (i + 1), :]
# Flatten the predictions and labels thus far, so numpy treats them as
# independent observations.
expected_covariance = np.cov(
predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
)[0, 1]
covariance(timestep_predictions, timestep_labels)
assert_allclose(expected_covariance, covariance.get_metric())
# Test reset
covariance.reset()
covariance(predictions, labels)
assert_allclose(
np.cov(predictions.view(-1).cpu().numpy(), labels.view(-1).cpu().numpy())[0, 1],
covariance.get_metric(),
)
@multi_device
def test_covariance_masked_computation(self, device: str):
covariance = Covariance()
batch_size = 100
num_labels = 10
predictions = torch.randn(batch_size, num_labels, device=device)
labels = 0.5 * predictions + torch.randn(batch_size, num_labels, device=device)
# Random binary mask
mask = torch.randint(0, 2, size=(batch_size, num_labels), device=device).bool()
stride = 10
for i in range(batch_size // stride):
timestep_predictions = predictions[stride * i : stride * (i + 1), :]
timestep_labels = labels[stride * i : stride * (i + 1), :]
timestep_mask = mask[stride * i : stride * (i + 1), :]
# Flatten the predictions, labels, and mask thus far, so numpy treats them as
# independent observations.
expected_covariance = np.cov(
predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
fweights=mask[: stride * (i + 1), :].view(-1).cpu().numpy(),
)[0, 1]
covariance(timestep_predictions, timestep_labels, timestep_mask)
assert_allclose(expected_covariance, covariance.get_metric())
# Test reset
covariance.reset()
covariance(predictions, labels, mask)
assert_allclose(
np.cov(
predictions.view(-1).cpu().numpy(),
labels.view(-1).cpu().numpy(),
fweights=mask.view(-1).cpu().numpy(),
)[0, 1],
covariance.get_metric(),
)
# Commenting in order to revisit distributed covariance later.
# def test_distributed_covariance(self):
# batch_size = 10
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# # Random binary mask
# mask = torch.randint(0, 2, size=(batch_size, num_labels)).bool()
# expected_covariance = np.cov(
# predictions.view(-1).cpu().numpy(),
# labels.view(-1).cpu().numpy(),
# fweights=mask.view(-1).cpu().numpy(),
# )[0, 1]
# predictions = [predictions[:5], predictions[5:]]
# labels = [labels[:5], labels[5:]]
# mask = [mask[:5], mask[5:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels, "mask": mask}
# run_distributed_test(
# [-1, -1],
# global_distributed_metric,
# Covariance(),
# metric_kwargs,
# expected_covariance,
# exact=(0.0001, 1e-01),
# )
# def test_distributed_covariance_unequal_batches(self):
# batch_size = 10
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# # Random binary mask
# mask = torch.randint(0, 2, size=(batch_size, num_labels)).bool()
# expected_covariance = np.cov(
# predictions.view(-1).cpu().numpy(),
# labels.view(-1).cpu().numpy(),
# fweights=mask.view(-1).cpu().numpy(),
# )[0, 1]
# predictions = [predictions[:6], predictions[6:]]
# labels = [labels[:6], labels[6:]]
# mask = [mask[:6], mask[6:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels, "mask": mask}
# run_distributed_test(
# [-1, -1],
# global_distributed_metric,
# Covariance(),
# metric_kwargs,
# expected_covariance,
# exact=(0.0001, 1e-01),
# )
# def test_multiple_runs(self):
# batch_size = 12
# num_labels = 10
# predictions = torch.randn(batch_size, num_labels)
# labels = 0.5 * predictions + torch.randn(batch_size, num_labels)
# stride = 1
# expected_covariances = []
# for i in range(batch_size // stride):
# timestep_predictions = predictions[stride * i : stride * (i + 1), :]
# timestep_labels = labels[stride * i : stride * (i + 1), :]
# # Flatten the predictions and labels thus far, so numpy treats them as
# # independent observations.
# expected_covariance = np.cov(
# predictions[: stride * (i + 1), :].view(-1).cpu().numpy(),
# labels[: stride * (i + 1), :].view(-1).cpu().numpy(),
# )[0, 1]
# expected_covariances.append(expected_covariance)
# predictions = [predictions[:6], predictions[6:]]
# labels = [labels[:6], labels[6:]]
# metric_kwargs = {"predictions": predictions, "gold_labels": labels}
# run_distributed_test(
# [-1, -1],
# multiple_runs,
# Covariance(),
# batch_size,
# stride,
# metric_kwargs,
# expected_covariances,
# exact=(0.0001, 1e-01),
# )
# def multiple_runs(
# global_rank: int,
# world_size: int,
# gpu_id: Union[int, torch.device],
# covariance: Covariance,
# batch_size: int,
# stride: int,
# metric_kwargs: Dict[str, List[Any]],
# expected_covariances: List[float],
# exact: Union[bool, Tuple[float, float]] = True,
# ):
# kwargs = {}
# # Use the arguments meant for the process with rank `global_rank`.
# for argname in metric_kwargs:
# kwargs[argname] = metric_kwargs[argname][global_rank]
# predictions = kwargs["predictions"]
# labels = kwargs["gold_labels"]
# batch_size = predictions.shape[0]
# stride = stride // world_size
# for i in range(batch_size // stride):
# timestep_predictions = predictions[stride * i : stride * (i + 1), :]
# timestep_labels = labels[stride * i : stride * (i + 1), :]
# # Flatten the predictions and labels thus far, so numpy treats them as
# # independent observations.
# covariance(timestep_predictions, timestep_labels)
# assert_allclose(expected_covariances[i], covariance.get_metric(), rtol=exact[0], atol=exact[1])
| apache-2.0 | -7,190,731,554,335,734,000 | 38.812183 | 105 | 0.553742 | false |
orionzhou/robin | utils/counter.py | 1 | 6732 | """
Counter class for py2.6 back compat.
<http://code.activestate.com/recipes/576611/>
"""
from operator import itemgetter
from heapq import nlargest
from itertools import repeat, ifilter
class Counter(dict):
'''Dict subclass for counting hashable objects. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> Counter('zyzygy')
Counter({'y': 3, 'z': 2, 'g': 1})
'''
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
if n is None:
return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
return nlargest(n, self.iteritems(), key=itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in self.iteritems():
for _ in repeat(None, count):
yield elem
# Override dict methods where the meaning changes for Counter objects.
@classmethod
def fromkeys(cls, iterable, v=None):
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
if iterable is not None:
if hasattr(iterable, 'iteritems'):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_max = max
result = Counter()
for elem in set(self) | set(other):
newcount = _max(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
_min = min
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in ifilter(self.__contains__, other):
newcount = _min(self[elem], other[elem])
if newcount > 0:
result[elem] = newcount
return result
def report(self, sep=", ", percentage=False):
total = sum(self.values())
items = []
for k, v in sorted(self.items(), key=lambda x: -x[-1]):
item = "{0}:{1}".format(k, v)
if percentage:
item += " ({0:.1f}%)".format(v * 100. / total)
items.append(item)
return sep.join(items)
if __name__ == '__main__':
import doctest
print(doctest.testmod())
| gpl-2.0 | -6,222,579,013,777,309,000 | 32 | 85 | 0.531491 | false |
wjakob/layerlab | recipes/utils/materials.py | 1 | 6279 | # Complex-valued IOR curves for a few metals
from scipy import interpolate
lambda_gold = [298.75705, 302.400421, 306.133759, 309.960449, 313.884003, 317.908142,
322.036835, 326.274139, 330.624481, 335.092377, 339.682678, 344.400482,
349.251221, 354.240509, 359.37442, 364.659332, 370.10202, 375.709625,
381.489777, 387.450562, 393.600555, 399.948975, 406.505493, 413.280579,
420.285339, 427.531647, 435.032196, 442.800629, 450.851562, 459.200653,
467.864838, 476.862213, 486.212463, 495.936707, 506.057861, 516.600769,
527.592224, 539.061646, 551.040771, 563.564453, 576.670593, 590.400818,
604.800842, 619.920898, 635.816284, 652.548279, 670.184753, 688.800964,
708.481018, 729.318665, 751.41925, 774.901123, 799.897949, 826.561157,
855.063293, 885.601257]
eta_gold = [1.795+1.920375j, 1.812+1.92j, 1.822625+1.918875j, 1.83+1.916j,
1.837125+1.911375j, 1.84+1.904j, 1.83425+1.891375j,
1.824+1.878j, 1.812+1.86825j, 1.798+1.86j, 1.782+1.85175j,
1.766+1.846j, 1.7525+1.84525j, 1.74+1.848j, 1.727625+1.852375j,
1.716+1.862j, 1.705875+1.883j, 1.696+1.906j, 1.68475+1.9225j,
1.674+1.936j, 1.666+1.94775j, 1.658+1.956j, 1.64725+1.959375j,
1.636+1.958j, 1.628+1.951375j, 1.616+1.94j, 1.59625+1.9245j,
1.562+1.904j, 1.502125+1.875875j, 1.426+1.846j,
1.345875+1.814625j, 1.242+1.796j, 1.08675+1.797375j,
0.916+1.84j, 0.7545+1.9565j, 0.608+2.12j, 0.49175+2.32625j,
0.402+2.54j, 0.3455+2.730625j, 0.306+2.88j, 0.267625+2.940625j,
0.236+2.97j, 0.212375+3.015j, 0.194+3.06j, 0.17775+3.07j,
0.166+3.15j, 0.161+3.445812j, 0.16+3.8j, 0.160875+4.087687j,
0.164+4.357j, 0.1695+4.610188j, 0.176+4.86j,
0.181375+5.125813j, 0.188+5.39j, 0.198125+5.63125j, 0.21+5.88j]
lambda_aluminium = [298.75705, 302.400421, 306.133759, 309.960449, 313.884003,
317.908142, 322.036835, 326.274139, 330.624481, 335.092377, 339.682678,
344.400482, 349.251221, 354.240509, 359.37442, 364.659332, 370.10202,
375.709625, 381.489777, 387.450562, 393.600555, 399.948975, 406.505493,
413.280579, 420.285339, 427.531647, 435.032196, 442.800629, 450.851562,
459.200653, 467.864838, 476.862213, 486.212463, 495.936707, 506.057861,
516.600769, 527.592224, 539.061646, 551.040771, 563.564453, 576.670593,
590.400818, 604.800842, 619.920898, 635.816284, 652.548279, 670.184753,
688.800964, 708.481018, 729.318665, 751.41925, 774.901123, 799.897949,
826.561157, 855.063293, 885.601257]
eta_aluminium = [(0.273375+3.59375j), (0.28+3.64j), (0.286813+3.689375j),
(0.294+3.74j), (0.301875+3.789375j), (0.31+3.84j),
(0.317875+3.894375j), (0.326+3.95j), (0.33475+4.005j), (0.344+4.06j),
(0.353813+4.11375j), (0.364+4.17j), (0.374375+4.23375j), (0.385+4.3j),
(0.39575+4.365j), (0.407+4.43j), (0.419125+4.49375j), (0.432+4.56j),
(0.445688+4.63375j), (0.46+4.71j), (0.474688+4.784375j), (0.49+4.86j),
(0.506188+4.938125j), (0.523+5.02j), (0.540063+5.10875j), (0.558+5.2j),
(0.577313+5.29j), (0.598+5.38j), (0.620313+5.48j), (0.644+5.58j),
(0.668625+5.69j), (0.695+5.8j), (0.72375+5.915j), (0.755+6.03j),
(0.789+6.15j), (0.826+6.28j), (0.867+6.42j), (0.912+6.55j),
(0.963+6.7j), (1.02+6.85j), (1.08+7j), (1.15+7.15j), (1.22+7.31j),
(1.3+7.48j), (1.39+7.65j), (1.49+7.82j), (1.6+8.01j), (1.74+8.21j),
(1.91+8.39j), (2.14+8.57j), (2.41+8.62j), (2.63+8.6j), (2.8+8.45j),
(2.74+8.31j), (2.58+8.21j), (2.24+8.21j)]
lambda_copper = [302.400421, 306.133759, 309.960449, 313.884003, 317.908142,
322.036835, 326.274139, 330.624481, 335.092377, 339.682678, 344.400482,
349.251221, 354.240509, 359.37442, 364.659332, 370.10202, 375.709625,
381.489777, 387.450562, 393.600555, 399.948975, 406.505493, 413.280579,
420.285339, 427.531647, 435.032196, 442.800629, 450.851562, 459.200653,
467.864838, 476.862213, 486.212463, 495.936707, 506.057861, 516.600769,
527.592224, 539.061646, 551.040771, 563.564453, 576.670593, 590.400818,
604.800842, 619.920898, 635.816284, 652.548279, 670.184753, 688.800964,
708.481018, 729.318665, 751.41925, 774.901123, 799.897949, 826.561157,
855.063293, 885.601257]
eta_copper = [(1.38+1.687j), (1.358438+1.703313j), (1.34+1.72j),
(1.329063+1.744563j), (1.325+1.77j), (1.3325+1.791625j), (1.34+1.81j),
(1.334375+1.822125j), (1.325+1.834j), (1.317812+1.85175j),
(1.31+1.872j), (1.300313+1.89425j), (1.29+1.916j),
(1.281563+1.931688j), (1.27+1.95j), (1.249062+1.972438j),
(1.225+2.015j), (1.2+2.121562j), (1.18+2.21j), (1.174375+2.177188j),
(1.175+2.13j), (1.1775+2.160063j), (1.18+2.21j), (1.178125+2.249938j),
(1.175+2.289j), (1.172812+2.326j), (1.17+2.362j), (1.165312+2.397625j),
(1.16+2.433j), (1.155312+2.469187j), (1.15+2.504j),
(1.142812+2.535875j), (1.135+2.564j), (1.131562+2.589625j),
(1.12+2.605j), (1.092437+2.595562j), (1.04+2.583j), (0.950375+2.5765j),
(0.826+2.599j), (0.645875+2.678062j), (0.468+2.809j),
(0.35125+3.01075j), (0.272+3.24j), (0.230813+3.458187j), (0.214+3.67j),
(0.20925+3.863125j), (0.213+4.05j), (0.21625+4.239563j), (0.223+4.43j),
(0.2365+4.619563j), (0.25+4.817j), (0.254188+5.034125j), (0.26+5.26j),
(0.28+5.485625j), (0.3+5.717j)]
lambda_chrome = [300.194, 307.643005, 316.276001, 323.708008, 333.279999,
341.542999, 351.217987, 362.514984, 372.312012, 385.031006, 396.10202,
409.175018, 424.58902, 438.09201, 455.80899, 471.406982, 490.040009,
512.314026, 532.102966, 558.468018, 582.06604, 610.739014, 700.452026,
815.65802, 826.53302, 849.17804, 860.971985, 885.570984]
eta_chrome = [(0.98+2.67j), (1.02+2.76j), (1.06+2.85j), (1.12+2.95j),
(1.18+3.04j), (1.26+3.12j), (1.33+3.18j), (1.39+3.24j), (1.43+3.31j),
(1.44+3.4j), (1.48+3.54j), (1.54+3.71j), (1.65+3.89j), (1.8+4.06j),
(1.99+4.22j), (2.22+4.36j), (2.49+4.44j), (2.75+4.46j), (2.98+4.45j),
(3.18+4.41j), (3.34+4.38j), (3.48+4.36j), (3.84+4.37j), (4.23+4.34j),
(4.27+4.33j), (4.31+4.32j), (4.33+4.32j), (4.38+4.31j)]
gold = interpolate.interp1d(lambda_gold, eta_gold, kind='cubic')
copper = interpolate.interp1d(lambda_copper, eta_copper, kind='cubic')
aluminium = interpolate.interp1d(lambda_aluminium, eta_aluminium, kind='cubic')
chrome = interpolate.interp1d(lambda_chrome, eta_chrome, kind='cubic')
| bsd-2-clause | 1,225,152,362,439,384,800 | 60.558824 | 85 | 0.637363 | false |
angus-ai/angus-jumpingsumo | wrapper.py | 1 | 3347 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import subprocess
import threading
import time
import angus
WIDTH = 640
def img_generator(file_path):
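    # Scan the (M)JPEG byte stream and yield one complete JPEG frame at a
    # time, delimited by the start-of-image (0xFFD8) / end-of-image (0xFFD9)
    # markers.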
with open(file_path, "rb") as f:
buff = ""
for chunk in f:
buff += chunk
s = buff.find('\xff\xd8')
e = buff.find('\xff\xd9')
if s != -1 and e != -1:
jpg = buff[s:e + 2]
buff = buff[e + 2:]
yield jpg
def command(img, service):
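    # Run face detection on the frame and derive a steering command: turn
    # toward an off-center face, back up if the face looks close (wide),
    # move forward if it looks far (narrow), otherwise return None.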
file_path = '/tmp/imgtmp.jpg'
with open(file_path, 'wb') as f:
f.write(img)
job = service.process({'image': open(file_path, 'rb')})
result = job.result['faces']
if len(result) > 0 and result[0]['roi_confidence'] > 0.5:
roi = result[0]['roi']
x = roi[0]
w = roi[2]
cmd_angle = (x + w * 0.5) - WIDTH / 2
print w
if abs(cmd_angle) > WIDTH / 8:
if cmd_angle > 0:
return "Right"
else:
return "Left"
elif w > 100:
return "Back"
elif w < 80:
return "Forw"
return None
def command_loop(singleton, sub, service):
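    # Translate the latest steering command into a keystroke for the
    # JumpingSumo interface process ('u'/'y' to turn, 'i' back, 'o' forward).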
img = singleton[0]
if img is None:
return
cmd = command(img, service)
if cmd == "Right":
sub.stdin.write("u")
sub.stdin.flush()
elif cmd == "Left":
sub.stdin.write("y")
sub.stdin.flush()
elif cmd == "Back":
sub.stdin.write("i")
sub.stdin.flush()
elif cmd == "Forw":
sub.stdin.write("o")
sub.stdin.flush()
def loop(singleton, sub, service):
while True:
command_loop(singleton, sub, service)
# print "Loop"
time.sleep(1)
def launch(input_path, sub, service):
singleton = [None]
count = 0
thread = threading.Thread(target=loop, args=(singleton, sub, service))
thread.daemon = True
thread.start()
for img in img_generator(input_path):
singleton[0] = img
count += 1
if count > 600:
break
sub.stdin.write("q")
sub.stdin.flush()
def main():
os.environ[
'LD_LIBRARY_PATH'] = "../ARSDKBuildUtils/Targets/Unix/Install/lib"
sub = subprocess.Popen(
["./JumpingSumoInterface"],
stdin=subprocess.PIPE,
stdout=None,
stderr=subprocess.STDOUT)
time.sleep(2)
conn = angus.connect()
service = conn.services.get_service('face_detection', 1)
launch("./video_fifo", sub, service)
if __name__ == "__main__":
main()
| apache-2.0 | -9,205,598,472,601,172,000 | 24.356061 | 74 | 0.58052 | false |
nhatbui/pysuite | pookeeper/pookeeper/pookeeper.py | 1 | 7385 | import os
from collections import defaultdict, OrderedDict
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
class ZooKeeper(LineReceiver):
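    """Minimal ZooKeeper-like node store speaking a line protocol.
    Each request line is '<COMMAND>:<args>' with COMMAND one of CREATE,
    ECREATE, DELETE, EXISTS, GET, SET, CHILDREN or WATCH; replies are
    'true'/'false' lines (optionally with details) or the requested data.
    """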
def __init__(self, connection_addr, znodes, ephem_nodes):
self.address = connection_addr
self.znodes = znodes
self.ephem_nodes = ephem_nodes
def connectionMade(self):
self.sendLine("true:Connected")
    def connectionLost(self, reason=None):
# Delete all ephemeral nodes associated
# with this connection/address.
for node in self.ephem_nodes[self.address]:
self.delete_node(node)
del self.ephem_nodes[self.address]
def delete_node(self, node):
# Delete node from parent's children listing
parent, child_name = os.path.split(node)
del self.znodes[parent]['children'][child_name]
# Delete node and all its children :(
stack = [node]
while len(stack):
curr_node = stack.pop()
            stack.extend(os.path.join(curr_node, c)
                         for c in self.znodes[curr_node]['children'])
# Notify watchers
self.notify_watchers(curr_node)
del self.znodes[curr_node]
def notify_watchers(self, node):
# Notify watchers
while len(self.znodes[node]['watchers']):
watcher = self.znodes[node]['watchers'].pop()
watcher.sendLine('true:WATCHER_NOTICE:DELETED:{}'.format(node))
def lineReceived(self, msg):
# Check command
idx = msg.find(':')
        if idx == -1:
            self.sendLine('false:bad message')
            return
cmd = msg[:idx]
if cmd == 'CREATE':
self.handle_CREATENODE(msg[(idx+1):])
elif cmd == 'ECREATE':
self.handle_CREATEEPHEMERALNODE(msg[(idx+1):])
elif cmd == 'DELETE':
self.handle_DELETENODE(msg[(idx+1):])
elif cmd == 'EXISTS':
self.handle_EXISTSNODE(msg[(idx+1):])
elif cmd == 'GET':
self.handle_GET(msg[(idx+1):])
elif cmd == 'SET':
self.handle_SET(msg[(idx+1):])
elif cmd == 'CHILDREN':
self.handle_GETCHILDREN(msg[(idx+1):])
elif cmd == 'WATCH':
self.handle_WATCH(msg[(idx+1):])
else:
self.sendLine('false:unknown command')
def handle_CREATENODE(self, node):
# Check if znode path starts with a slash
        if node[0] != '/':
            self.sendLine('false')
            return
# Check path up to node exists
p, _ = os.path.split(node)
if p not in self.znodes:
self.sendLine('false')
return
# Check if node already exists
if node in self.znodes:
self.sendLine('false:node already exists')
return
parent, child = os.path.split(node)
self.znodes[node] = { 'parent': parent, 'children': {}, 'watchers': []}
self.znodes[parent]['children'][child] = True
self.sendLine('true:CREATED:{}'.format(node))
def handle_CREATEEPHEMERALNODE(self, node):
# Check if znode path starts with a slash
        if node[0] != '/':
            self.sendLine('false:bad node name')
            return
# Check path up to node exists
p, _ = os.path.split(node)
if p not in self.znodes:
self.sendLine('false:path up to node does not exist')
else:
parent, child = os.path.split(node)
self.znodes[node] = { 'parent': parent, 'children': {}, 'watchers': []}
self.znodes[parent]['children'][child] = True
# Add as ephemeral node
self.ephem_nodes[self.address].append(node)
self.sendLine('true:CREATED_ENODE:{}'.format(node))
def handle_DELETENODE(self, node):
# Check if znode path starts with a slash
        if node[0] != '/':
            self.sendLine('false')
            return
# Check that node exists
if node in self.znodes:
# Delete node from parent's children listing
parent, child_name = os.path.split(node)
del self.znodes[parent]['children'][child_name]
# Delete node and all its children :(
stack = [node]
while len(stack):
curr_node = stack.pop()
                stack.extend(os.path.join(curr_node, c)
                             for c in self.znodes[curr_node]['children'])
# Notify watchers
while len(self.znodes[curr_node]['watchers']):
watcher = self.znodes[curr_node]['watchers'].pop()
watcher.sendLine('true:WATCHER_NOTICE:DELETED:{}'.format(curr_node))
del self.znodes[curr_node]
self.sendLine('true:DELETED:{}'.format(node))
else:
self.sendLine('false:NOT DELETED:{}'.format(node))
def handle_EXISTSNODE(self, node):
# Check if znode path starts with a slash
        if node[0] != '/':
            self.sendLine('false')
            return
# Check that node exists
if node in self.znodes:
self.sendLine('true')
else:
self.sendLine('false')
def handle_GET(self, node):
# Check if znode path starts with a slash
        if node[0] != '/':
            self.sendLine('false')
            return
# Check that node exists
if node in self.znodes:
self.sendLine(self.znodes[node]['data'])
else:
self.sendLine('false')
def handle_SET(self, msg):
idx = msg.find(':')
        if idx == -1:
            self.sendLine('false')
            return
node = msg[:idx]
data = msg[(idx+1):]
# Check if znode path starts with a slash
        if node[0] != '/':
            self.sendLine('false')
            return
# Check that node exists
if node in self.znodes:
self.znodes[node]['data'] = data
# Notify watchers
while len(self.znodes[node]['watchers']):
watcher = self.znodes[node]['watchers'].pop()
                watcher.sendLine('true:WATCHER_NOTIFY:CHANGED:{}'.format(node))
self.sendLine('true:SET:{}'.format(node))
else:
self.sendLine('false')
def handle_GETCHILDREN(self, node):
# Check if znode path starts with a slash
        if node[0] != '/':
            self.sendLine('false')
            return
# Check that node exists
if node in self.znodes:
self.sendLine(','.join(self.znodes[node]['children'].keys()))
else:
self.sendLine('false')
def handle_WATCH(self, node):
# Check if znode path starts with a slash
        if node[0] != '/':
            self.sendLine('false:WATCHING:improper naming:{}'.format(node))
            return
# Check that node exists
if node in self.znodes:
self.znodes[node]['watchers'].append(self)
self.sendLine('true:WATCHING:{}'.format(node))
else:
self.sendLine('false:WATCHING:node does not exist:{}'.format(node))
class ZooKeeperFactory(Factory):
def __init__(self):
self.znodes = {'/': { 'parent': None, 'children': OrderedDict(), 'watchers': [] } }
self.ephem_nodes = defaultdict(list)
def buildProtocol(self, addr):
return ZooKeeper(addr, self.znodes, self.ephem_nodes)
if __name__ == '__main__':
reactor.listenTCP(8123, ZooKeeperFactory())
print('Starting on port 8123')
reactor.run()
| mit | -7,960,860,430,897,966,000 | 29.899582 | 91 | 0.55545 | false |
FluidityStokes/fluidity | tests/mms_tracer_P1dg_cdg_diff_steady_3d_cjc_inhNmnnbc/cdg3d.py | 1 | 1504 | import os
from fluidity_tools import stat_parser
from sympy import *
from numpy import array,max,abs
meshtemplate='''
Point(1) = {0.0,0.0,0,0.1};
Extrude {1,0,0} {
Point{1}; Layers{<layers>};
}
Extrude {0,1,0} {
Line{1}; Layers{<layers>};
}
Extrude {0,0,1} {
Surface{5}; Layers{<layers>};
}
//Z-normal surface, z=0
Physical Surface(28) = {5};
//Z-normal surface, z=1
Physical Surface(29) = {27};
//Y-normal surface, y=0
Physical Surface(30) = {14};
//Y-normal surface, y=1
Physical Surface(31) = {22};
//X-normal surface, x=0
Physical Surface(32) = {26};
//X-normal surface, x=1
Physical Surface(33) = {18};
Physical Volume(34) = {1};
'''
def generate_meshfile(name,layers):
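    """Write a Gmsh .geo file for the unit cube with <layers> divisions per
    edge, mesh it with gmsh and convert the result to triangle format."""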
geo = meshtemplate.replace('<layers>',str(layers))
open(name+".geo",'w').write(geo)
os.system("gmsh -3 "+name+".geo")
os.system("../../bin/gmsh2triangle "+name+".msh")
def run_test(layers, binary):
'''run_test(layers, binary)
Run a single test of the channel problem. Layers is the number of mesh
points in the cross-channel direction. The mesh is unstructured and
isotropic. binary is a string containing the fluidity command to run.
The return value is the error in u and p at the end of the simulation.'''
generate_meshfile("channel",layers)
os.system(binary+" channel_viscous.flml")
s=stat_parser("channel-flow-dg.stat")
return (s["Water"]['AnalyticUVelocitySolutionError']['l2norm'][-1],
s["Water"]['AnalyticPressureSolutionError']['l2norm'][-1])
| lgpl-2.1 | -6,174,876,977,776,289,000 | 25.857143 | 77 | 0.664229 | false |
hzlf/openbroadcast | website/apps/__rework_in_progress/importer/api.py | 1 | 7486 | from django.conf import settings
from django.conf.urls.defaults import *
from django.contrib.auth.models import User
from django.db.models import Count
import json
from tastypie import fields
from tastypie.authentication import *
from tastypie.authorization import *
from tastypie.resources import ModelResource, Resource, ALL, ALL_WITH_RELATIONS
from tastypie.cache import SimpleCache
from tastypie.utils import trailing_slash
from tastypie.exceptions import ImmediateHttpResponse
from django.http import HttpResponse
from importer.models import Import, ImportFile
from alibrary.api import MediaResource
# file = request.FILES[u'files[]']
class ImportFileResource(ModelResource):
import_session = fields.ForeignKey('importer.api.ImportResource', 'import_session', null=True, full=False)
media = fields.ForeignKey('alibrary.api.MediaResource', 'media', null=True, full=True)
class Meta:
queryset = ImportFile.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
resource_name = 'importfile'
# excludes = ['type','results_musicbrainz']
excludes = ['type',]
authentication = Authentication()
authorization = Authorization()
always_return_data = True
filtering = {
'import_session': ALL_WITH_RELATIONS,
'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
}
def dehydrate(self, bundle):
bundle.data['status'] = bundle.obj.get_status_display().lower();
# offload json parsing to the backend
# TODO: remove in js, enable here
"""
bundle.data['import_tag'] = json.loads(bundle.data['import_tag'])
bundle.data['results_acoustid'] = json.loads(bundle.data['results_acoustid'])
bundle.data['results_musicbrainz'] = json.loads(bundle.data['results_musicbrainz'])
bundle.data['results_discogs'] = json.loads(bundle.data['results_discogs'])
bundle.data['results_tag'] = json.loads(bundle.data['results_tag'])
"""
return bundle
def obj_update(self, bundle, request, **kwargs):
#import time
#time.sleep(3)
return super(ImportFileResource, self).obj_update(bundle, request, **kwargs)
def obj_create(self, bundle, request, **kwargs):
"""
Little switch to play with jquery fileupload
"""
try:
#import_id = request.GET['import_session']
import_id = request.GET.get('import_session', None)
uuid_key = request.GET.get('uuid_key', None)
print "####################################"
print request.FILES[u'files[]']
if import_id:
imp = Import.objects.get(pk=import_id)
bundle.data['import_session'] = imp
elif uuid_key:
imp, created = Import.objects.get_or_create(uuid_key=uuid_key, user=request.user)
bundle.data['import_session'] = imp
else:
bundle.data['import_session'] = None
bundle.data['file'] = request.FILES[u'files[]']
except Exception, e:
print e
return super(ImportFileResource, self).obj_create(bundle, request, **kwargs)
class ImportResource(ModelResource):
files = fields.ToManyField('importer.api.ImportFileResource', 'files', full=True, null=True)
class Meta:
queryset = Import.objects.all()
list_allowed_methods = ['get', 'post']
detail_allowed_methods = ['get', 'post', 'put', 'delete']
#list_allowed_methods = ['get',]
#detail_allowed_methods = ['get',]
resource_name = 'import'
excludes = ['updated',]
include_absolute_url = True
authentication = Authentication()
authorization = Authorization()
always_return_data = True
filtering = {
#'channel': ALL_WITH_RELATIONS,
'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'],
}
def save_related(self, obj):
return True
# additional methods
def prepend_urls(self):
return [
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/import-all%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('import_all'), name="importer_api_import_all"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/apply-to-all%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('apply_to_all'), name="importer_api_apply_to_all"),
]
def import_all(self, request, **kwargs):
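        """Move every file of the import session from status 2 to status 6
        (batch update plus per-object save so post-save import actions fire)
        and return the refreshed session bundle."""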
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
import_session = Import.objects.get(**self.remove_api_resource_names(kwargs))
import_files = import_session.files.filter(status=2)
        # first do a batch update
        import_files.update(status=6)
        # save again to trigger post-save actions
for import_file in import_files:
import_file.status = 6
import_file.save()
bundle = self.build_bundle(obj=import_session, request=request)
bundle = self.full_dehydrate(bundle)
self.log_throttled_access(request)
return self.create_response(request, bundle)
"""
    mass apply import tag
"""
def apply_to_all(self, request, **kwargs):
self.method_check(request, allowed=['post'])
self.is_authenticated(request)
self.throttle_check(request)
import_session = Import.objects.get(**self.remove_api_resource_names(kwargs))
item_id = request.POST.get('item_id', None)
ct = request.POST.get('ct', None)
print 'item_id: %s' % item_id
print 'ct: %s' % ct
if not (ct and item_id):
raise ImmediateHttpResponse(response=HttpResponse(status=410))
import_files = import_session.files.filter(status__in=(2,4))
source = import_files.filter(pk=item_id)
# exclude current one
import_files = import_files.exclude(pk=item_id)
try:
source = source[0]
print source
# print source.import_tag
except:
source = None
if source:
sit = source.import_tag
for import_file in import_files:
dit = import_file.import_tag
if ct == 'artist':
map = ('artist', 'alibrary_artist_id', 'mb_artist_id', 'force_artist')
if ct == 'release':
map = ('release', 'alibrary_release_id', 'mb_release_id', 'force_release')
for key in map:
src = sit.get(key, None)
if src:
dit[key] = src
else:
dit.pop(key, None)
import_file.import_tag = dit
import_file.save()
bundle = self.build_bundle(obj=import_session, request=request)
bundle = self.full_dehydrate(bundle)
self.log_throttled_access(request)
return self.create_response(request, bundle)
| gpl-3.0 | 2,424,500,771,755,327,000 | 31.837719 | 190 | 0.56786 | false |
crempp/mdweb | mdweb/SiteMapView.py | 1 | 2696 | """MDWeb SiteMap View Object."""
import datetime
import logging
import numbers
import os
import pytz
import time
from flask import (
current_app as app,
make_response,
render_template_string,
url_for,
)
from flask.views import View
#: Template string to use for the sitemap generation
# (is there a better place to put this?, not in the theme)
# pylint: disable=C0301
SITEMAP_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
{% for page in pages -%}
<url>
<loc>{{page.loc|safe}}</loc>
<lastmod>{{page.lastmod|safe}}</lastmod>
{%- if page.changefreq %}
<changefreq>{{page.changefreq|safe}}</changefreq>
{%- endif %}
{%- if page.priority %}
<priority>{{page.priority|safe}}</priority>
{%- endif %}
</url>
{%- endfor %}
</urlset>
"""
class SiteMapView(View):
"""Sitemap View Object."""
sitemap_cache = None
def dispatch_request(self):
"""Flask dispatch method."""
if self.sitemap_cache is None:
self.sitemap_cache = self.generate_sitemap()
response = make_response(self.sitemap_cache)
response.headers["Content-Type"] = "application/xml"
return response
@classmethod
def generate_sitemap(cls):
"""Generate sitemap.xml. Makes a list of urls and date modified."""
logging.info("Generating sitemap...")
start = time.time()
pages = []
index_url = url_for('index', _external=True)
for url, page in app.navigation.get_page_dict().items():
if page.meta_inf.published:
mtime = os.path.getmtime(page.page_path)
if isinstance(mtime, numbers.Real):
mtime = datetime.datetime.fromtimestamp(mtime)
                    mtime = mtime.replace(tzinfo=pytz.UTC)
# lastmod = mtime.strftime('%Y-%m-%dT%H:%M:%S%z')
lastmod = mtime.strftime('%Y-%m-%d')
pages.append({
'loc': "%s%s" % (index_url, url),
'lastmod': lastmod,
'changefreq': page.meta_inf.sitemap_changefreq,
'priority': page.meta_inf.sitemap_priority,
})
sitemap_xml = render_template_string(SITEMAP_TEMPLATE, pages=pages)
end = time.time()
logging.info("completed sitemap generation in %s seconds",
(end - start))
return sitemap_xml
| mit | -4,282,121,319,264,830,000 | 30.717647 | 124 | 0.585682 | false |
TemoaProject/temoa | temoa_model/temoa_config.py | 1 | 19245 | """
Tools for Energy Model Optimization and Analysis (Temoa):
An open source framework for energy systems optimization modeling
Copyright (C) 2015, NC State University
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
A complete copy of the GNU General Public License v2 (GPLv2) is available
in LICENSE.txt. Users uncompressing this from an archive may not have
received this license file. If not, see <http://www.gnu.org/licenses/>.
"""
from os.path import abspath, isfile, splitext, dirname
from os import sep
import re
def db_2_dat(ifile, ofile, options):
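    # Dump the contents of the sqlite database `ifile` into the DAT file
    # `ofile`, writing out each set/param table listed in table_list below.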
# Adapted from DB_to_DAT.py
import sqlite3
import sys
import re
import getopt
def write_tech_mga(f):
cur.execute("SELECT tech FROM technologies")
f.write("set tech_mga :=\n")
for row in cur:
f.write(row[0] + '\n')
f.write(';\n\n')
def write_tech_sector(f):
sectors = set()
cur.execute("SELECT sector FROM technologies")
for row in cur:
sectors.add(row[0])
for s in sectors:
cur.execute("SELECT tech FROM technologies WHERE sector == '" + s + "'")
f.write("set tech_" + s + " :=\n")
for row in cur:
f.write(row[0] + '\n')
f.write(';\n\n')
def query_table (t_properties, f):
t_type = t_properties[0] #table type (set or param)
t_name = t_properties[1] #table name
t_dtname = t_properties[2] #DAT table name when DB table must be subdivided
t_flag = t_properties[3] #table flag, if any
t_index = t_properties[4] #table column index after which '#' should be specified
if type(t_flag) is list: #tech production table has a list for flags; this is currently hard-wired
db_query = "SELECT * FROM " + t_name + " WHERE flag=='p' OR flag=='pb' OR flag=='ps'"
cur.execute(db_query)
if cur.fetchone() is None:
return
if t_type == "set":
f.write("set " + t_dtname + " := \n")
else:
f.write("param " + t_dtname + " := \n")
elif t_flag != '': #check to see if flag is empty, if not use it to make table
db_query = "SELECT * FROM " + t_name + " WHERE flag=='" + t_flag + "'"
cur.execute(db_query)
if cur.fetchone() is None:
return
if t_type == "set":
f.write("set " + t_dtname + " := \n")
else:
f.write("param " + t_dtname + " := \n")
        else: #Only other possible case is empty flag, then 1-to-1 correspondence between DB and DAT table names
db_query = "SELECT * FROM " + t_name
cur.execute(db_query)
if cur.fetchone() is None:
return
if t_type == "set":
f.write("set " + t_name + " := \n")
else:
f.write("param " + t_name + " := \n")
cur.execute(db_query)
if t_index == 0: #make sure that units and descriptions are commented out in DAT file
for line in cur:
str_row = str(line[0]) + "\n"
f.write(str_row)
print(str_row)
else:
for line in cur:
before_comments = line[:t_index+1]
before_comments = re.sub('[(]', '', str(before_comments))
before_comments = re.sub('[\',)]', ' ', str(before_comments))
after_comments = line[t_index+2:]
after_comments = re.sub('[(]', '', str(after_comments))
after_comments = re.sub('[\',)]', ' ', str(after_comments))
search_afcom = re.search(r'^\W+$', str(after_comments)) #Search if after_comments is empty.
if not search_afcom :
str_row = before_comments + "# " + after_comments + "\n"
else :
str_row = before_comments + "\n"
f.write(str_row)
print(str_row)
f.write(';\n\n')
    #[set or param, table_name, DAT fieldname, flag (if any), index (where to insert '#')]
table_list = [
['set', 'time_periods', 'time_exist', 'e', 0],
['set', 'time_periods', 'time_future', 'f', 0],
['set', 'time_season', '', '', 0],
['set', 'time_of_day', '', '', 0],
['set', 'regions', '', '', 0],
['set', 'tech_curtailment', '', '', 0],
['set', 'tech_flex', '', '', 0],
['set', 'tech_reserve', '', '', 0],
['set', 'technologies', 'tech_resource', 'r', 0],
['set', 'technologies', 'tech_production', ['p','pb','ps'], 0],
['set', 'technologies', 'tech_baseload', 'pb', 0],
['set', 'technologies', 'tech_storage', 'ps', 0],
['set', 'tech_ramping', '', '', 0],
['set', 'tech_exchange', '', '', 0],
['set', 'commodities', 'commodity_physical', 'p', 0],
['set', 'commodities', 'commodity_emissions', 'e', 0],
['set', 'commodities', 'commodity_demand', 'd', 0],
['set', 'tech_groups', '', '', 0],
['set', 'tech_annual', '', '', 0],
['set', 'groups', '', '', 0],
['param','MinGenGroupTarget', '', '', 2],
['param','MinGenGroupWeight', '', '', 3],
['param','LinkedTechs', '', '', 3],
['param','SegFrac', '', '', 2],
['param','DemandSpecificDistribution','', '', 4],
['param','CapacityToActivity', '', '', 2],
['param','PlanningReserveMargin', '', '', 2],
['param','GlobalDiscountRate', '', '', 0],
['param','MyopicBaseyear', '', '', 0],
['param','DiscountRate', '', '', 3],
['param','EmissionActivity', '', '', 6],
['param','EmissionLimit', '', '', 3],
['param','Demand', '', '', 3],
['param','TechOutputSplit', '', '', 4],
['param','TechInputSplit', '', '', 4],
['param','MinCapacity', '', '', 3],
['param','MaxCapacity', '', '', 3],
['param','MaxActivity', '', '', 3],
['param','MinActivity', '', '', 3],
['param','MaxResource', '', '', 2],
['param','GrowthRateMax', '', '', 2],
['param','GrowthRateSeed', '', '', 2],
['param','LifetimeTech', '', '', 2],
['param','LifetimeProcess', '', '', 3],
['param','LifetimeLoanTech', '', '', 2],
['param','CapacityFactorTech', '', '', 4],
['param','CapacityFactorProcess', '', '', 5],
['param','Efficiency', '', '', 5],
['param','ExistingCapacity', '', '', 3],
['param','CostInvest', '', '', 3],
['param','CostFixed', '', '', 4],
['param','CostVariable', '', '', 4],
['param','CapacityCredit', '', '', 4],
['param','RampUp', '', '', 2],
['param','RampDown', '', '', 2],
['param','StorageInitFrac', '', '', 3],
['param','StorageDuration', '', '', 2]]
with open(ofile, 'w') as f:
f.write('data ;\n\n')
#connect to the database
con = sqlite3.connect(ifile, isolation_level=None)
cur = con.cursor() # a database cursor is a control structure that enables traversal over the records in a database
con.text_factory = str #this ensures data is explored with the correct UTF-8 encoding
# Return the full list of existing tables.
table_exist = cur.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
table_exist = [i[0] for i in table_exist]
for table in table_list:
if table[1] in table_exist:
query_table(table, f)
if options.mga_weight == 'integer':
write_tech_mga(f)
if options.mga_weight == 'normalized':
write_tech_sector(f)
        # Making sure the database is empty from the beginning for a myopic solve
if options.myopic:
cur.execute("DELETE FROM Output_CapacityByPeriodAndTech WHERE scenario="+"'"+str(options.scenario)+"'")
cur.execute("DELETE FROM Output_Emissions WHERE scenario="+"'"+str(options.scenario)+"'")
cur.execute("DELETE FROM Output_Costs WHERE scenario="+"'"+str(options.scenario)+"'")
cur.execute("DELETE FROM Output_Objective WHERE scenario="+"'"+str(options.scenario)+"'")
cur.execute("DELETE FROM Output_VFlow_In WHERE scenario="+"'"+str(options.scenario)+"'")
cur.execute("DELETE FROM Output_VFlow_Out WHERE scenario="+"'"+str(options.scenario)+"'")
cur.execute("DELETE FROM Output_V_Capacity WHERE scenario="+"'"+str(options.scenario)+"'")
cur.execute("DELETE FROM Output_Curtailment WHERE scenario="+"'"+str(options.scenario)+"'")
cur.execute("VACUUM")
con.commit()
cur.close()
con.close()
class TemoaConfig( object ):
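    """Parses a Temoa configuration file with a PLY lexer and stores the
    recognized options (input/output files, scenario, solver, MGA settings,
    ...) as attributes."""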
states = (
('mga', 'exclusive'),
)
tokens = (
'dot_dat',
'output',
'scenario',
'how_to_cite',
'version',
'solver',
'neos',
'keep_pyomo_lp_file',
'saveEXCEL',
        'myopic',
        'keep_myopic_databases',
'saveTEXTFILE',
'mgaslack',
'mgaiter',
'path_to_data',
'path_to_logs',
'mgaweight'
)
t_ANY_ignore = '[ \t]'
def __init__(self, **kwargs):
# Make compatible with Python 2.7 and 3
try:
import queue
except:
import Queue as queue
self.__error = list()
self.__mga_todo = queue.Queue()
self.__mga_done = queue.Queue()
self.file_location = None
self.dot_dat = list() # Use Kevin's name.
self.output = None # May update to a list if multiple output is required.
self.scenario = None
self.saveEXCEL = False
self.myopic = False
self.KeepMyopicDBs = False
self.saveTEXTFILE = False
self.how_to_cite = None
self.version = False
self.neos = False
self.generateSolverLP = False
self.keepPyomoLP = False
self.mga = None # mga slack value
self.mga_iter = None
self.mga_weight = None
        # To keep consistent with Kevin's argument parser, will be removed in the future.
self.graph_format = None
self.show_capacity = False
self.graph_type = 'separate_vintages'
self.use_splines = False
#Introduced during UI Development
        self.path_to_data = re.sub('temoa_model$', 'data_files', dirname(abspath(__file__)))# Path to where the automated excel and text log folder will be saved as output.
self.path_to_logs = self.path_to_data+sep+"debug_logs" #Path to where debug logs will be generated for each run. By default in debug_logs folder in db_io.
self.path_to_lp_files = None
self.abort_temoa = False
if 'd_solver' in kwargs.keys():
self.solver = kwargs['d_solver']
else:
self.solver = None
def __repr__(self):
width = 25
spacer = '\n' + '-'*width + '\n'
msg = spacer
msg += '{:>{}s}: {}\n'.format('Config file', width, self.file_location)
for i in self.dot_dat:
if self.dot_dat.index(i) == 0:
msg += '{:>{}s}: {}\n'.format('Input file', width, i)
else:
msg += '{:>25s} {}\n'.format(' ', i)
msg += '{:>{}s}: {}\n'.format('Output file', width, self.output)
msg += '{:>{}s}: {}\n'.format('Scenario', width, self.scenario)
msg += '{:>{}s}: {}\n'.format('Spreadsheet output', width, self.saveEXCEL)
msg += '{:>{}s}: {}\n'.format('Myopic scheme', width, self.myopic)
msg += '{:>{}s}: {}\n'.format('Retain myopic databases', width, self.KeepMyopicDBs)
msg += spacer
msg += '{:>{}s}: {}\n'.format('Citation output status', width, self.how_to_cite)
msg += '{:>{}s}: {}\n'.format('NEOS status', width, self.neos)
msg += '{:>{}s}: {}\n'.format('Version output status', width, self.version)
msg += spacer
msg += '{:>{}s}: {}\n'.format('Selected solver status', width, self.solver)
msg += '{:>{}s}: {}\n'.format('Solver LP write status', width, self.generateSolverLP)
msg += '{:>{}s}: {}\n'.format('Pyomo LP write status', width, self.keepPyomoLP)
msg += spacer
msg += '{:>{}s}: {}\n'.format('MGA slack value', width, self.mga)
msg += '{:>{}s}: {}\n'.format('MGA # of iterations', width, self.mga_iter)
msg += '{:>{}s}: {}\n'.format('MGA weighting method', width, self.mga_weight)
msg += '**NOTE: If you are performing MGA runs, navigate to the DAT file and make any modifications to the MGA sets before proceeding.'
return msg
def t_ANY_COMMENT(self, t):
r'\#.*'
pass
def t_dot_dat(self, t):
r'--input[\s\=]+[-\\\/\:\.\~\w]+(\.dat|\.db|\.sqlite)\b'
self.dot_dat.append(abspath(t.value.replace('=', ' ').split()[1]))
def t_output(self, t):
r'--output[\s\=]+[-\\\/\:\.\~\w]+(\.db|\.sqlite)\b'
self.output = abspath(t.value.replace('=', ' ').split()[1])
def t_scenario(self, t):
r'--scenario[\s\=]+\w+\b'
self.scenario = t.value.replace('=', ' ').split()[1]
def t_saveEXCEL(self, t):
r'--saveEXCEL\b'
self.saveEXCEL = True
def t_myopic(self, t):
r'--myopic\b'
self.myopic = True
def t_keep_myopic_databases(self, t):
r'--keep_myopic_databases\b'
self.KeepMyopicDBs = True
def t_saveTEXTFILE(self, t):
r'--saveTEXTFILE\b'
self.saveTEXTFILE = True
def t_path_to_data(self, t):
r'--path_to_data[\s\=]+[-\\\/\:\.\~\w\ ]+\b'
self.path_to_data = abspath(t.value.replace('=', ',').split(",")[1])
def t_path_to_logs(self, t):
r'--path_to_logs[\s\=]+[-\\\/\:\.\~\w\ ]+\b'
self.path_to_logs = abspath(t.value.replace('=', ',').split(",")[1])
def t_how_to_cite(self, t):
r'--how_to_cite\b'
self.how_to_cite = True
def t_version(self, t):
r'--version\b'
self.version = True
def t_neos(self, t):
r'--neos\b'
self.neos = True
def t_solver(self, t):
r'--solver[\s\=]+\w+\b'
self.solver = t.value.replace('=', ' ').split()[1]
def t_keep_pyomo_lp_file(self, t):
r'--keep_pyomo_lp_file\b'
self.keepPyomoLP = True
def t_begin_mga(self, t):
r'--mga[\s\=]+\{'
t.lexer.push_state('mga')
t.lexer.level = 1
def t_mga_mgaslack(self, t):
r'slack[\s\=]+[\.\d]+'
self.mga = float(t.value.replace('=', ' ').split()[1])
def t_mga_mgaiter(self, t):
r'iteration[\s\=]+[\d]+'
self.mga_iter = int(t.value.replace('=', ' ').split()[1])
def t_mga_mgaweight(self, t):
r'weight[\s\=]+(integer|normalized|distance)\b'
self.mga_weight = t.value.replace('=', ' ').split()[1]
def t_mga_end(self, t):
r'\}'
t.lexer.pop_state()
t.lexer.level -= 1
def t_ANY_newline(self,t):
r'\n+|(\r\n)+|\r+' # '\n' (In linux) = '\r\n' (In Windows) = '\r' (In Mac OS)
t.lexer.lineno += len(t.value)
def t_ANY_error(self, t):
if not self.__error:
self.__error.append({'line': [t.lineno, t.lineno], 'index': [t.lexpos, t.lexpos], 'value': t.value[0]})
elif t.lexpos - self.__error[-1]['index'][-1] == 1:
self.__error[-1]['line' ][-1] = t.lineno
self.__error[-1]['index'][-1] = t.lexpos
self.__error[-1]['value'] += t.value[0]
else:
self.__error.append({'line': [t.lineno, t.lineno], 'index': [t.lexpos, t.lexpos], 'value': t.value[0]})
t.lexer.skip(1)
def next_mga(self):
if not self.__mga_todo.empty():
self.__mga_done.put(self.scenario)
self.scenario = self.__mga_todo.get()
return True
else:
return False
def build(self,**kwargs):
import ply.lex as lex, os, sys
db_or_dat = True # True means input file is a db file. False means input is a dat file.
if 'config' in kwargs:
if isfile(kwargs['config']):
self.file_location= abspath(kwargs.pop('config'))
else:
msg = 'No such file exists: {}'.format(kwargs.pop('config'))
raise Exception( msg )
self.lexer = lex.lex(module=self, **kwargs)
if self.file_location:
try:
with open(self.file_location, encoding="utf8") as f:
self.lexer.input(f.read())
except:
with open(self.file_location, 'r') as f:
self.lexer.input(f.read())
while True:
tok = self.lexer.token()
if not tok: break
if self.__error:
width = 25
msg = '\nIllegal character(s) in config file:\n'
msg += '-'*width + '\n'
for e in self.__error:
msg += "Line {} to {}: '{}'\n".format(e['line'][0], e['line'][1], e['value'])
msg += '-'*width + '\n'
sys.stderr.write(msg)
try:
txt_file = open(self.path_to_logs+os.sep+"Complete_OutputLog.log", "w")
except BaseException as io_exc:
sys.stderr.write("Log file cannot be opened. Please check path. Trying to find:\n"+self.path_to_logs+" folder\n")
txt_file = open("OutputLog.log", "w")
txt_file.write( msg )
txt_file.close()
self.abort_temoa = True
if not self.dot_dat:
raise Exception('Input file not specified.')
for i in self.dot_dat:
if not isfile(i):
raise Exception('Cannot locate input file: {}'.format(i))
i_name, i_ext = splitext(i)
if (i_ext == '.dat') or (i_ext == '.txt'):
db_or_dat = False
            elif (i_ext == '.db') or (i_ext == '.sqlite') or (i_ext == '.sqlite3') or (i_ext == '.sqlitedb'):
db_or_dat = True
if not self.output and db_or_dat:
raise Exception('Output file not specified.')
if db_or_dat and not isfile(self.output):
raise Exception('Cannot locate output file: {}.'.format(self.output))
if not self.scenario and db_or_dat:
raise Exception('Scenario name not specified.')
if self.mga_iter:
for i in range(self.mga_iter):
self.__mga_todo.put(self.scenario + '_mga_' + str(i))
f = open(os.devnull, 'w');
sys.stdout = f # Suppress the original DB_to_DAT.py output
counter = 0
for ifile in self.dot_dat:
i_name, i_ext = splitext(ifile)
if i_ext != '.dat':
ofile = i_name + '.dat'
db_2_dat(ifile, ofile, self)
self.dot_dat[self.dot_dat.index(ifile)] = ofile
counter += 1
f.close()
sys.stdout = sys.__stdout__
if counter > 0:
sys.stderr.write("\n{} .db DD file(s) converted\n".format(counter)) | gpl-2.0 | -3,519,481,013,285,294,600 | 37.959514 | 164 | 0.52663 | false |
armyofevilrobots/reticulatus | reticulatus/gui/reticulate_main.py | 1 | 13375 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'reticulate_main.ui'
#
# Created: Thu Oct 25 21:48:45 2012
# by: pyside-uic 0.2.13 running on PySide 1.1.0
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_main_window(object):
def setupUi(self, main_window):
main_window.setObjectName("main_window")
main_window.resize(925, 633)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(main_window.sizePolicy().hasHeightForWidth())
main_window.setSizePolicy(sizePolicy)
main_window.setMinimumSize(QtCore.QSize(512, 384))
main_window.setAutoFillBackground(False)
self.centralwidget = QtGui.QWidget(main_window)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.object_tabs = QtGui.QTabWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.object_tabs.sizePolicy().hasHeightForWidth())
self.object_tabs.setSizePolicy(sizePolicy)
self.object_tabs.setObjectName("object_tabs")
self.object_3d = QtGui.QWidget()
self.object_3d.setCursor(QtCore.Qt.CrossCursor)
self.object_3d.setLayoutDirection(QtCore.Qt.RightToLeft)
self.object_3d.setObjectName("object_3d")
self.object_3d_layout = QtGui.QHBoxLayout(self.object_3d)
self.object_3d_layout.setObjectName("object_3d_layout")
self.frame = QtGui.QFrame(self.object_3d)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setMaximumSize(QtCore.QSize(50, 16777215))
self.frame.setLayoutDirection(QtCore.Qt.RightToLeft)
self.frame.setFrameShape(QtGui.QFrame.NoFrame)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setLineWidth(0)
self.frame.setObjectName("frame")
self.slider_container_layout = QtGui.QVBoxLayout(self.frame)
self.slider_container_layout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.slider_container_layout.setObjectName("slider_container_layout")
self.layer_slider = QtGui.QSlider(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.layer_slider.sizePolicy().hasHeightForWidth())
self.layer_slider.setSizePolicy(sizePolicy)
self.layer_slider.setMaximumSize(QtCore.QSize(50, 16777215))
self.layer_slider.setMinimum(0)
self.layer_slider.setMaximum(9999)
self.layer_slider.setProperty("value", 0)
self.layer_slider.setOrientation(QtCore.Qt.Vertical)
self.layer_slider.setInvertedAppearance(False)
self.layer_slider.setObjectName("layer_slider")
self.slider_container_layout.addWidget(self.layer_slider)
self.layer_lcd = QtGui.QLCDNumber(self.frame)
self.layer_lcd.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setWeight(75)
font.setBold(True)
self.layer_lcd.setFont(font)
self.layer_lcd.setNumDigits(4)
self.layer_lcd.setObjectName("layer_lcd")
self.slider_container_layout.addWidget(self.layer_lcd)
self.object_3d_layout.addWidget(self.frame)
self.object_tabs.addTab(self.object_3d, "")
self.gcode = QtGui.QWidget()
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gcode.sizePolicy().hasHeightForWidth())
self.gcode.setSizePolicy(sizePolicy)
self.gcode.setObjectName("gcode")
self.gcode_hlayout = QtGui.QHBoxLayout(self.gcode)
self.gcode_hlayout.setObjectName("gcode_hlayout")
self.gcode_editor = QtGui.QTextEdit(self.gcode)
self.gcode_editor.setObjectName("gcode_editor")
self.gcode_hlayout.addWidget(self.gcode_editor)
self.object_tabs.addTab(self.gcode, "")
self.horizontalLayout.addWidget(self.object_tabs)
main_window.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(main_window)
self.menubar.setGeometry(QtCore.QRect(0, 0, 925, 23))
self.menubar.setObjectName("menubar")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menu_edit = QtGui.QMenu(self.menubar)
self.menu_edit.setObjectName("menu_edit")
self.menu_Settings = QtGui.QMenu(self.menubar)
self.menu_Settings.setObjectName("menu_Settings")
self.menu_Help = QtGui.QMenu(self.menubar)
self.menu_Help.setObjectName("menu_Help")
self.menuActions = QtGui.QMenu(self.menubar)
self.menuActions.setObjectName("menuActions")
self.menu_Windows = QtGui.QMenu(self.menubar)
self.menu_Windows.setObjectName("menu_Windows")
main_window.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(main_window)
self.statusbar.setEnabled(True)
self.statusbar.setSizeGripEnabled(True)
self.statusbar.setObjectName("statusbar")
main_window.setStatusBar(self.statusbar)
self.layers_dock = QtGui.QDockWidget(main_window)
self.layers_dock.setMinimumSize(QtCore.QSize(120, 160))
self.layers_dock.setMaximumSize(QtCore.QSize(1024, 1024))
self.layers_dock.setObjectName("layers_dock")
self.dock_contents = QtGui.QWidget()
self.dock_contents.setObjectName("dock_contents")
self.verticalLayout = QtGui.QVBoxLayout(self.dock_contents)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtGui.QLabel(self.dock_contents)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.layer_list_widget = QtGui.QListWidget(self.dock_contents)
self.layer_list_widget.setObjectName("layer_list_widget")
self.verticalLayout.addWidget(self.layer_list_widget)
self.layers_dock.setWidget(self.dock_contents)
main_window.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.layers_dock)
self.tools_dock = QtGui.QDockWidget(main_window)
self.tools_dock.setMinimumSize(QtCore.QSize(120, 160))
self.tools_dock.setObjectName("tools_dock")
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName("dockWidgetContents")
self.tools_dock.setWidget(self.dockWidgetContents)
main_window.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.tools_dock)
self.action_file = QtGui.QAction(main_window)
self.action_file.setObjectName("action_file")
self.action_new = QtGui.QAction(main_window)
self.action_new.setObjectName("action_new")
self.action_open = QtGui.QAction(main_window)
self.action_open.setObjectName("action_open")
self.action_save = QtGui.QAction(main_window)
self.action_save.setObjectName("action_save")
self.action_quit = QtGui.QAction(main_window)
self.action_quit.setObjectName("action_quit")
self.action_print_settings = QtGui.QAction(main_window)
self.action_print_settings.setObjectName("action_print_settings")
self.action_slice_settings = QtGui.QAction(main_window)
self.action_slice_settings.setObjectName("action_slice_settings")
self.action_help = QtGui.QAction(main_window)
self.action_help.setObjectName("action_help")
self.action_about = QtGui.QAction(main_window)
self.action_about.setObjectName("action_about")
self.action_display_settings = QtGui.QAction(main_window)
self.action_display_settings.setObjectName("action_display_settings")
self.action_slice = QtGui.QAction(main_window)
self.action_slice.setObjectName("action_slice")
self.action_Layers = QtGui.QAction(main_window)
self.action_Layers.setObjectName("action_Layers")
self.action_Toolbox = QtGui.QAction(main_window)
self.action_Toolbox.setObjectName("action_Toolbox")
self.menuFile.addAction(self.action_new)
self.menuFile.addAction(self.action_open)
self.menuFile.addAction(self.action_save)
self.menuFile.addSeparator()
self.menuFile.addAction(self.action_quit)
self.menu_Settings.addAction(self.action_print_settings)
self.menu_Settings.addAction(self.action_slice_settings)
self.menu_Settings.addAction(self.action_display_settings)
self.menu_Help.addAction(self.action_help)
self.menu_Help.addSeparator()
self.menu_Help.addAction(self.action_about)
self.menuActions.addAction(self.action_slice)
self.menu_Windows.addAction(self.action_Layers)
self.menu_Windows.addAction(self.action_Toolbox)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menu_edit.menuAction())
self.menubar.addAction(self.menuActions.menuAction())
self.menubar.addAction(self.menu_Settings.menuAction())
self.menubar.addAction(self.menu_Windows.menuAction())
self.menubar.addAction(self.menu_Help.menuAction())
self.retranslateUi(main_window)
self.object_tabs.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(main_window)
def retranslateUi(self, main_window):
main_window.setWindowTitle(QtGui.QApplication.translate("main_window", "Reticulatus", None, QtGui.QApplication.UnicodeUTF8))
self.layer_slider.setToolTip(QtGui.QApplication.translate("main_window", "Layer clip plane", None, QtGui.QApplication.UnicodeUTF8))
self.object_tabs.setTabText(self.object_tabs.indexOf(self.object_3d), QtGui.QApplication.translate("main_window", "3D Object", None, QtGui.QApplication.UnicodeUTF8))
self.object_tabs.setTabText(self.object_tabs.indexOf(self.gcode), QtGui.QApplication.translate("main_window", "GCode", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("main_window", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.menu_edit.setTitle(QtGui.QApplication.translate("main_window", "&Edit", None, QtGui.QApplication.UnicodeUTF8))
self.menu_Settings.setTitle(QtGui.QApplication.translate("main_window", "&Settings", None, QtGui.QApplication.UnicodeUTF8))
self.menu_Help.setTitle(QtGui.QApplication.translate("main_window", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.menuActions.setTitle(QtGui.QApplication.translate("main_window", "&Actions", None, QtGui.QApplication.UnicodeUTF8))
self.menu_Windows.setTitle(QtGui.QApplication.translate("main_window", "&Windows", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("main_window", "Layers", None, QtGui.QApplication.UnicodeUTF8))
self.action_file.setText(QtGui.QApplication.translate("main_window", "&file", None, QtGui.QApplication.UnicodeUTF8))
self.action_new.setText(QtGui.QApplication.translate("main_window", "&New", None, QtGui.QApplication.UnicodeUTF8))
self.action_open.setText(QtGui.QApplication.translate("main_window", "&Open", None, QtGui.QApplication.UnicodeUTF8))
self.action_save.setText(QtGui.QApplication.translate("main_window", "&Save", None, QtGui.QApplication.UnicodeUTF8))
self.action_quit.setText(QtGui.QApplication.translate("main_window", "&Quit", None, QtGui.QApplication.UnicodeUTF8))
self.action_print_settings.setText(QtGui.QApplication.translate("main_window", "&Printer", None, QtGui.QApplication.UnicodeUTF8))
self.action_slice_settings.setText(QtGui.QApplication.translate("main_window", "S&licing", None, QtGui.QApplication.UnicodeUTF8))
self.action_help.setText(QtGui.QApplication.translate("main_window", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.action_about.setText(QtGui.QApplication.translate("main_window", "&About", None, QtGui.QApplication.UnicodeUTF8))
self.action_display_settings.setText(QtGui.QApplication.translate("main_window", "&Display", None, QtGui.QApplication.UnicodeUTF8))
self.action_slice.setText(QtGui.QApplication.translate("main_window", "&Slice", None, QtGui.QApplication.UnicodeUTF8))
self.action_Layers.setText(QtGui.QApplication.translate("main_window", "&Layers", None, QtGui.QApplication.UnicodeUTF8))
self.action_Toolbox.setText(QtGui.QApplication.translate("main_window", "&Toolbox", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | -5,630,036,228,393,934,000 | 61.209302 | 173 | 0.717458 | false |
Ichimonji10/robottelo | robottelo/ui/container.py | 1 | 8490 | # -*- encoding: utf-8 -*-
from robottelo.constants import FILTER
from robottelo.ui.base import Base, UINoSuchElementError, UIError
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
class Container(Base):
"""Provides the CRUD functionality for Docker Containers."""
def navigate_to_entity(self):
"""Navigate to All Containers page"""
Navigator(self.browser).go_to_all_containers()
def _search_locator(self):
"""Specify locator for Container entity search procedure"""
return locators['container.search_entity']
def _configure_orgs(self, orgs, org_select):
"""Provides configuration capabilities for docker container
organization. The following format should be used::
orgs=['Aoes6V', 'JIFNPC'], org_select=True
"""
self.configure_entity(
orgs,
FILTER['container_org'],
tab_locator=tab_locators['tab_org'],
entity_select=org_select,
)
def _configure_locations(self, locations, loc_select):
"""Provides configuration capabilities for docker container location
The following format should be used::
locations=['Default Location'], loc_select=True
"""
self.configure_entity(
locations,
FILTER['container_loc'],
tab_locator=tab_locators['tab_loc'],
entity_select=loc_select,
)
def _form_locator_name(self, partial_locator):
"""Form locator using provided friendly UI name, e.g. 'Content View'"""
return '.'.join((
'container',
(partial_locator.lower()).replace(' ', '_')
))
def create(self, resource_name, name, command, parameter_list):
"""Creates a docker container. All values should be passed in absolute
correspondence to UI. Parameters names created in self-descriptive
manner. Of course, we can easily expand list of parameters and create
custom flows for specific situations. Here are some examples of
parameter_list values from each main tab::
[
{'main_tab_name': 'Preliminary', 'sub_tab_name': 'Location',
'name': ['Default Location']},
{'main_tab_name': 'Image', 'sub_tab_name': 'Content View',
'name': 'Lifecycle Environment', 'value': self.lce.name},
{'main_tab_name': 'Image', 'sub_tab_name': 'Docker Hub',
'name': 'Docker Hub Tag', 'value': 'latest'},
{'main_tab_name': 'Configuration', 'name': 'Memory',
'value': '512m'},
{'main_tab_name': 'Environment', 'name': 'TTY', 'value': True},
]
"""
# send_keys() can't send left parenthesis (see SeleniumHQ/selenium#674)
# which is used in compute resource name (e.g. 'test (Docker)')
if ' (' in resource_name:
self.click(locators['container.resource_name'])
# typing compute resource name without parenthesis part
self.text_field_update(
common_locators['select_list_search_box'],
resource_name.split(' (')[0]
)
strategy, value = common_locators['entity_select_list']
# selecting compute resource by its full name (with parenthesis
# part)
self.click((strategy, value % resource_name))
else:
self.assign_value(
locators['container.resource_name'], resource_name)
for parameter in parameter_list:
if parameter['main_tab_name'] == 'Preliminary':
if parameter['sub_tab_name'] == 'Organization':
self._configure_orgs(parameter['name'], True)
elif parameter['sub_tab_name'] == 'Location':
self._configure_locations(parameter['name'], True)
self.click(locators['container.next_section'])
current_tab = self._form_locator_name('Content View Tab')
for parameter in parameter_list:
if parameter['main_tab_name'] == 'Image':
current_tab = self._form_locator_name(
parameter['sub_tab_name'] + ' Tab')
self.click(locators[current_tab])
self.assign_value(
locators[
self._form_locator_name(
'registry.' + parameter['name'])
]
if parameter['sub_tab_name'] == 'External registry' else
locators[self._form_locator_name(parameter['name'])],
parameter['value']
)
self.click(locators[current_tab + '_next'])
self.assign_value(locators['container.name'], name)
self.assign_value(locators['container.command'], command)
for parameter in parameter_list:
if parameter['main_tab_name'] == 'Configuration':
self.assign_value(
locators[self._form_locator_name(parameter['name'])],
parameter['value']
)
self.click(locators['container.next_section'])
self.browser.refresh()
for parameter in parameter_list:
if parameter['main_tab_name'] == 'Environment':
self.assign_value(
locators[self._form_locator_name(parameter['name'])],
parameter['value']
)
self.click(locators['container.next_section'])
strategy, value = locators['container.created_container_name']
element = self.wait_until_element((strategy, value % name))
if element is None:
raise UINoSuchElementError(
'Container with name {0} was not created successfully'
.format(name)
)
def search(self, resource_name, container_name):
"""Searches for existing container from particular compute resource. It
is necessary to use custom search here as we need to select compute
resource tab before searching for particular container and also, there
is no search button to click
"""
self.navigate_to_entity()
strategy, value = locators['container.resource_search_tab']
self.click((strategy, value % resource_name))
self.text_field_update(
locators['container.search_filter'], container_name)
strategy, value = self._search_locator()
return self.wait_until_element((strategy, value % container_name))
def delete(self, resource_name, container_name, really=True):
"""Removes the container entity"""
element = self.search(resource_name, container_name)
if element is None:
raise UIError(
'Could not find container "{0}"'.format(container_name))
element.click()
self.wait_for_ajax()
self.click(locators['container.delete'], wait_for_ajax=False)
self.handle_alert(really)
def set_power_status(self, resource_name, cont_name, power_on):
"""Perform power on or power off for container
:param bool power_on: True - for On, False - for Off
"""
status = None
locator_on = (locators['container.power_on'][0],
locators['container.power_on'][1] % cont_name)
locator_off = (locators['container.power_off'][0],
locators['container.power_off'][1] % cont_name)
locator_status = (locators['container.power_status'][0],
locators['container.power_status'][1] % cont_name)
element = self.search(resource_name, cont_name)
if element is None:
raise UIError(
'Could not find container "{0}"'.format(cont_name))
self.wait_for_ajax()
if power_on is True:
self.click(locator_on)
self.search(resource_name, cont_name)
if self.wait_until_element(locator_off):
status = self.wait_until_element(locator_status).text
elif power_on is False:
self.click(locator_off, wait_for_ajax=False)
self.handle_alert(True)
self.search(resource_name, cont_name)
if self.wait_until_element(locator_on):
status = self.wait_until_element(locator_status).text
return status
| gpl-3.0 | 8,288,520,776,015,055,000 | 42.989637 | 79 | 0.578445 | false |
mitodl/micromasters | cms/migrations/0025_infolinks.py | 1 | 1226 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-05 22:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0024_programtabpage'),
]
operations = [
migrations.CreateModel(
name='InfoLinks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('url', models.URLField(blank=True, help_text='A url for an external page. There will be a link to this url from the program page.', null=True)),
('title_url', models.TextField(blank=True, help_text='The text for the link to an external homepage.')),
('program_page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='info_links', to='cms.ProgramPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| bsd-3-clause | -5,478,774,938,762,284,000 | 38.548387 | 161 | 0.604405 | false |
eagle00789/PythonMiniProbe | test_sensors.py | 1 | 3294 | #!/usr/bin/env python
from nose.tools import *
from sensors import nmap,adns,apt,cpuload,cputemp
def test_nmap_get_kind():
"""nmap returns the correct kind"""
test_nmap = nmap.NMAP()
assert_equal(test_nmap.get_kind(), 'mpnmap')
def test_nmap_icmp_echo_request():
"""nmap const ICMP_ECHO_REQUEST is set correct"""
test_nmap = nmap.NMAP()
assert_equal(test_nmap.ICMP_ECHO_REQUEST, 8)
def test_nmap_dec2bin():
"""nmap dec2bin results"""
test_nmap = nmap.NMAP()
assert_equal(test_nmap.dec2bin(255,8),'11111111')
assert_equal(test_nmap.dec2bin(254,8),'11111110')
assert_equal(test_nmap.dec2bin(128,8),'10000000')
assert_equal(test_nmap.dec2bin(127,8),'01111111')
assert_equal(test_nmap.dec2bin(0,8),'00000000')
def test_nmap_ip2bin():
"""nmap ip2bin results"""
test_nmap = nmap.NMAP()
assert_equal(test_nmap.ip2bin('255.255.255.255'),'11111111111111111111111111111111')
assert_equal(test_nmap.ip2bin('254.254.254.254'),'11111110111111101111111011111110')
assert_equal(test_nmap.ip2bin('128.128.128.128'),'10000000100000001000000010000000')
assert_equal(test_nmap.ip2bin('127.127.127.127'),'01111111011111110111111101111111')
assert_equal(test_nmap.ip2bin('0.0.0.0'),'00000000000000000000000000000000')
def test_nmap_bin2ip():
"""nmap bin2ip results"""
test_nmap = nmap.NMAP()
assert_equal(test_nmap.bin2ip('11111111111111111111111111111111'),'255.255.255.255')
assert_equal(test_nmap.bin2ip('11111110111111101111111011111110'),'254.254.254.254')
assert_equal(test_nmap.bin2ip('10000000100000001000000010000000'),'128.128.128.128')
assert_equal(test_nmap.bin2ip('01111111011111110111111101111111'),'127.127.127.127')
assert_equal(test_nmap.bin2ip('00000000000000000000000000000000'),'0.0.0.0')
def test_nmap_validateCIDRBlock():
"""nmap validateCIDRBlock results"""
test_nmap = nmap.NMAP()
assert_equal(test_nmap.validateCIDRBlock('127.0.0.0'),'Error: Invalid CIDR format!')
assert_equal(test_nmap.validateCIDRBlock('256.256.256.256/8'),'Error: quad 256 wrong size.')
assert_equal(test_nmap.validateCIDRBlock('127.0.0.0/33'),'Error: subnet 33 wrong size.')
assert_true(test_nmap.validateCIDRBlock('127.0.0.0/8'))
def test_nmap_returnCIDR():
"""nmap returnCIDR results"""
test_nmap = nmap.NMAP()
assert_equal(test_nmap.returnCIDR('127.0.0.0/30'),['127.0.0.0', '127.0.0.1', '127.0.0.2', '127.0.0.3'])
def test_nmap_checksum():
"""nmap checksum results"""
test_nmap = nmap.NMAP()
assert_equal(test_nmap.checksum('test'),6182)
assert_equal(test_nmap.checksum('python'),43951)
assert_equal(test_nmap.checksum('prtg'),6950)
def test_adns_get_kind():
"""dns returns the correct kind"""
test_adns = adns.aDNS()
assert_equal(test_adns.get_kind(), 'mpdns')
def test_apt_get_kind():
"""apt returns the correct kind"""
test_apt = apt.APT()
assert_equal(test_apt.get_kind(), 'mpapt')
def test_cpuload_get_kind():
"""cpuload returns the correct kind"""
test_cpuload = cpuload.CPULoad()
assert_equal(test_cpuload.get_kind(), 'mpcpuload')
def test_cputemp_get_kind():
"""cputemp returns the correct kind"""
test_cputemp = cputemp.CPUTemp()
assert_equal(test_cputemp.get_kind(), 'mpcputemp')
| bsd-3-clause | -7,744,472,642,238,725,000 | 39.666667 | 107 | 0.693989 | false |
vpelletier/neoppod | neo/master/backup_app.py | 1 | 16200 | #
# Copyright (C) 2012-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random, weakref
from bisect import bisect
from collections import defaultdict
from neo.lib import logging
from neo.lib.bootstrap import BootstrapManager
from neo.lib.exception import PrimaryFailure
from neo.lib.handler import EventHandler
from neo.lib.node import NodeManager
from neo.lib.protocol import CellStates, ClusterStates, \
NodeStates, NodeTypes, Packets, uuid_str, INVALID_TID, ZERO_TID
from neo.lib.util import add64, dump
from .app import StateChangedException
from .pt import PartitionTable
from .handlers.backup import BackupHandler
"""
Backup algorithm
This implementation relies on normal storage replication.
Storage nodes that are specialised for backup are not in the same NEO cluster,
but are managed by another master in a different cluster.
When the cluster is in BACKINGUP state, its master acts like a client to the
master of the main cluster. It gets notified of new data thanks to invalidation,
and notifies in turn its storage nodes what/when to replicate.
Storages stay in UP_TO_DATE state, even if partitions are synchronized up to
different tids. Storage nodes remember they are in such state and when
switching into RUNNING state, the cluster cuts the DB at the "backup TID", which
is the last TID for which we have all data. This TID can't be guessed from
'trans' and 'obj' tables, like it is done in normal mode, so:
- The master must even notify storages of transactions that don't modify their
partitions: see Replicate packets without any source.
- 'backup_tid' properties exist in many places, on the master and the storages,
so that the DB can be made consistent again at any moment, without losing
any (or little) data.
Out of backup storage nodes assigned to a partition, one is chosen as primary
for that partition. It means only this node will fetch data from the upstream
cluster, to minimize bandwidth between clusters. Other replicas will
synchronize from the primary node.
There is no UUID conflict between the 2 clusters:
- Storage nodes connect anonymously to upstream.
- Master node receives a new UUID from upstream master and uses it only when
communicating with it.
"""
class BackupApplication(object):
pt = None
def __init__(self, app, name, master_addresses):
self.app = weakref.proxy(app)
self.name = name
self.nm = NodeManager()
for master_address in master_addresses:
self.nm.createMaster(address=master_address)
em = property(lambda self: self.app.em)
ssl = property(lambda self: self.app.ssl)
def close(self):
self.nm.close()
del self.__dict__
def log(self):
self.nm.log()
if self.pt is not None:
self.pt.log()
def provideService(self):
logging.info('provide backup')
poll = self.em.poll
app = self.app
pt = app.pt
while True:
app.changeClusterState(ClusterStates.STARTING_BACKUP)
bootstrap = BootstrapManager(self, self.name, NodeTypes.CLIENT)
# {offset -> node}
self.primary_partition_dict = {}
# [[tid]]
self.tid_list = tuple([] for _ in xrange(pt.getPartitions()))
try:
while True:
for node in pt.getNodeSet(readable=True):
if not app.isStorageReady(node.getUUID()):
break
else:
break
poll(1)
node, conn, uuid, num_partitions, num_replicas = \
bootstrap.getPrimaryConnection()
try:
app.changeClusterState(ClusterStates.BACKINGUP)
del bootstrap, node
if num_partitions != pt.getPartitions():
raise RuntimeError("inconsistent number of partitions")
self.pt = PartitionTable(num_partitions, num_replicas)
conn.setHandler(BackupHandler(self))
conn.ask(Packets.AskNodeInformation())
conn.ask(Packets.AskPartitionTable())
conn.ask(Packets.AskLastTransaction())
# debug variable to log how big 'tid_list' can be.
self.debug_tid_count = 0
while True:
poll(1)
except PrimaryFailure, msg:
logging.error('upstream master is down: %s', msg)
finally:
app.backup_tid = pt.getBackupTid()
try:
conn.close()
except PrimaryFailure:
pass
try:
del self.pt
except AttributeError:
pass
except StateChangedException, e:
if e.args[0] != ClusterStates.STOPPING_BACKUP:
raise
app.changeClusterState(*e.args)
tid = app.backup_tid
# Wait for non-primary partitions to catch up,
# so that all UP_TO_DATE cells are really UP_TO_DATE.
# XXX: Another possibility could be to outdate such cells, and
# they would be quickly updated at the beginning of the
# RUNNING phase. This may simplify code.
# Any unfinished replication from upstream will be truncated.
while pt.getBackupTid(min) < tid:
poll(1)
last_tid = app.getLastTransaction()
handler = EventHandler(app)
if tid < last_tid:
assert tid != ZERO_TID
logging.warning("Truncating at %s (last_tid was %s)",
dump(app.backup_tid), dump(last_tid))
else:
# We will do a dummy truncation, just to leave backup mode,
# so it's fine to start automatically if there's any
# missing storage.
# XXX: Consider using another method to leave backup mode,
# at least when there's nothing to truncate. Because
# in case of StoppedOperation during VERIFYING state,
# this flag will be wrongly set to False.
app._startup_allowed = True
# If any error happened before reaching this line, we'd go back
# to backup mode, which is the right mode to recover.
del app.backup_tid
# Now back to RECOVERY...
return tid
finally:
del self.primary_partition_dict, self.tid_list
pt.clearReplicating()
def nodeLost(self, node):
getCellList = self.app.pt.getCellList
trigger_set = set()
for offset, primary_node in self.primary_partition_dict.items():
if primary_node is not node:
continue
cell_list = getCellList(offset, readable=True)
cell = max(cell_list, key=lambda cell: cell.backup_tid)
tid = cell.backup_tid
self.primary_partition_dict[offset] = primary_node = cell.getNode()
p = Packets.Replicate(tid, '', {offset: primary_node.getAddress()})
for cell in cell_list:
cell.replicating = tid
if cell.backup_tid < tid:
logging.debug(
"ask %s to replicate partition %u up to %s from %s",
uuid_str(cell.getUUID()), offset, dump(tid),
uuid_str(primary_node.getUUID()))
cell.getNode().getConnection().notify(p)
trigger_set.add(primary_node)
for node in trigger_set:
self.triggerBackup(node)
def invalidatePartitions(self, tid, partition_set):
app = self.app
prev_tid = app.getLastTransaction()
app.setLastTransaction(tid)
pt = app.pt
trigger_set = set()
untouched_dict = defaultdict(dict)
for offset in xrange(pt.getPartitions()):
try:
last_max_tid = self.tid_list[offset][-1]
except IndexError:
last_max_tid = prev_tid
if offset in partition_set:
self.tid_list[offset].append(tid)
node_list = []
for cell in pt.getCellList(offset, readable=True):
node = cell.getNode()
assert node.isConnected(), node
if cell.backup_tid == prev_tid:
                        # Given 4 TIDs t0,t1,t2,t3: if a cell is only
# modified by t0 & t3 and has all data for t0, 4 values
# are possible for its 'backup_tid' until it replicates
# up to t3: t0, t1, t2 or t3 - 1
# Choosing the smallest one (t0) is easier to implement
# but when leaving backup mode, we would always lose
# data if the last full transaction does not modify
# all partitions. t1 is wrong for the same reason.
# So we have chosen the highest one (t3 - 1).
# t2 should also work but maybe harder to implement.
cell.backup_tid = add64(tid, -1)
logging.debug(
"partition %u: updating backup_tid of %r to %s",
offset, cell, dump(cell.backup_tid))
else:
assert cell.backup_tid < last_max_tid, (
cell.backup_tid, last_max_tid, prev_tid, tid)
if app.isStorageReady(node.getUUID()):
node_list.append(node)
assert node_list
trigger_set.update(node_list)
# Make sure we have a primary storage for this partition.
if offset not in self.primary_partition_dict:
self.primary_partition_dict[offset] = \
random.choice(node_list)
else:
# Partition not touched, so increase 'backup_tid' of all
# "up-to-date" replicas, without having to replicate.
for cell in pt.getCellList(offset, readable=True):
if last_max_tid <= cell.backup_tid:
cell.backup_tid = tid
untouched_dict[cell.getNode()][offset] = None
elif last_max_tid <= cell.replicating:
# Same for 'replicating' to avoid useless orders.
logging.debug("silently update replicating order"
" of %s for partition %u, up to %s",
uuid_str(cell.getUUID()), offset, dump(tid))
cell.replicating = tid
for node, untouched_dict in untouched_dict.iteritems():
if app.isStorageReady(node.getUUID()):
node.notify(Packets.Replicate(tid, '', untouched_dict))
for node in trigger_set:
self.triggerBackup(node)
count = sum(map(len, self.tid_list))
if self.debug_tid_count < count:
logging.debug("Maximum number of tracked tids: %u", count)
self.debug_tid_count = count
def triggerBackup(self, node):
tid_list = self.tid_list
tid = self.app.getLastTransaction()
replicate_list = []
for offset, cell in self.app.pt.iterNodeCell(node):
max_tid = tid_list[offset]
if max_tid and self.primary_partition_dict[offset] is node and \
max(cell.backup_tid, cell.replicating) < max_tid[-1]:
cell.replicating = tid
replicate_list.append(offset)
if not replicate_list:
return
getCellList = self.pt.getCellList
source_dict = {}
address_set = set()
for offset in replicate_list:
cell_list = getCellList(offset, readable=True)
random.shuffle(cell_list)
assert cell_list, offset
for cell in cell_list:
addr = cell.getAddress()
if addr in address_set:
break
else:
address_set.add(addr)
source_dict[offset] = addr
logging.debug("ask %s to replicate partition %u up to %s from %r",
uuid_str(node.getUUID()), offset, dump(tid), addr)
node.getConnection().notify(Packets.Replicate(
tid, self.name, source_dict))
def notifyReplicationDone(self, node, offset, tid):
app = self.app
cell = app.pt.getCell(offset, node.getUUID())
tid_list = self.tid_list[offset]
if tid_list: # may be empty if the cell is out-of-date
# or if we're not fully initialized
if tid < tid_list[0]:
cell.replicating = tid
else:
try:
tid = add64(tid_list[bisect(tid_list, tid)], -1)
except IndexError:
last_tid = app.getLastTransaction()
if tid < last_tid:
tid = last_tid
node.notify(Packets.Replicate(tid, '', {offset: None}))
logging.debug("partition %u: updating backup_tid of %r to %s",
offset, cell, dump(tid))
cell.backup_tid = tid
# Forget tids we won't need anymore.
cell_list = app.pt.getCellList(offset, readable=True)
del tid_list[:bisect(tid_list, min(x.backup_tid for x in cell_list))]
primary_node = self.primary_partition_dict.get(offset)
primary = primary_node is node
result = None if primary else app.pt.setUpToDate(node, offset)
assert cell.isReadable()
if result: # was out-of-date
if primary_node is not None:
max_tid, = [x.backup_tid for x in cell_list
if x.getNode() is primary_node]
if tid < max_tid:
cell.replicating = max_tid
logging.debug(
"ask %s to replicate partition %u up to %s from %s",
uuid_str(node.getUUID()), offset, dump(max_tid),
uuid_str(primary_node.getUUID()))
node.notify(Packets.Replicate(max_tid, '',
{offset: primary_node.getAddress()}))
else:
if app.getClusterState() == ClusterStates.BACKINGUP:
self.triggerBackup(node)
if primary:
# Notify secondary storages that they can replicate from
# primary ones, even if they are already replicating.
p = Packets.Replicate(tid, '', {offset: node.getAddress()})
for cell in cell_list:
if max(cell.backup_tid, cell.replicating) < tid:
cell.replicating = tid
logging.debug(
"ask %s to replicate partition %u up to %s from %s",
uuid_str(cell.getUUID()), offset,
dump(tid), uuid_str(node.getUUID()))
cell.getNode().notify(p)
return result
| gpl-2.0 | -8,403,937,226,814,069,000 | 45.685879 | 80 | 0.555 | false |
redhat-openstack/glance | glance/cmd/registry.py | 1 | 2664 | #!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Reference implementation server for Glance Registry
"""
import eventlet
import os
import sys
# Monkey patch socket, time and thread
eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo.config import cfg
import osprofiler.notifier
import osprofiler.web
from glance.common import config
from glance.common import wsgi
from glance import notifier
from glance.openstack.common import log
from glance.openstack.common import systemd
CONF = cfg.CONF
CONF.import_group("profiler", "glance.common.wsgi")
def main():
try:
config.parse_args()
wsgi.set_eventlet_hub()
log.setup('glance')
if cfg.CONF.profiler.enabled:
_notifier = osprofiler.notifier.create("Messaging",
notifier.messaging, {},
notifier.get_transport(),
"glance", "registry",
cfg.CONF.bind_host)
osprofiler.notifier.set(_notifier)
else:
osprofiler.web.disable()
server = wsgi.Server()
server.start(config.load_paste_app('glance-registry'),
default_port=9191)
systemd.notify_once()
server.wait()
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()
| apache-2.0 | -6,918,632,415,636,315,000 | 32.3 | 78 | 0.626502 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations/_vpn_server_configurations_operations.py | 1 | 28218 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnServerConfigurationsOperations:
"""VpnServerConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
**kwargs
) -> "_models.VpnServerConfiguration":
"""Retrieves the details of a VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being retrieved.
:type vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnServerConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
vpn_server_configuration_parameters: "_models.VpnServerConfiguration",
**kwargs
) -> "_models.VpnServerConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_server_configuration_parameters, 'VpnServerConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
vpn_server_configuration_parameters: "_models.VpnServerConfiguration",
**kwargs
) -> AsyncLROPoller["_models.VpnServerConfiguration"]:
"""Creates a VpnServerConfiguration resource if it doesn't exist else updates the existing
VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being created or
updated.
:type vpn_server_configuration_name: str
:param vpn_server_configuration_parameters: Parameters supplied to create or update
VpnServerConfiguration.
:type vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnServerConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
vpn_server_configuration_name=vpn_server_configuration_name,
vpn_server_configuration_parameters=vpn_server_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
vpn_server_configuration_parameters: "_models.TagsObject",
**kwargs
) -> "_models.VpnServerConfiguration":
"""Updates VpnServerConfiguration tags.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being updated.
:type vpn_server_configuration_name: str
:param vpn_server_configuration_parameters: Parameters supplied to update
VpnServerConfiguration tags.
:type vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnServerConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.VpnServerConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpn_server_configuration_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
vpn_server_configuration_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a VpnServerConfiguration.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:param vpn_server_configuration_name: The name of the VpnServerConfiguration being deleted.
:type vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vpn_server_configuration_name=vpn_server_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vpnServerConfigurationName': self._serialize.url("vpn_server_configuration_name", vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations/{vpnServerConfigurationName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ListVpnServerConfigurationsResult"]:
"""Lists all the vpnServerConfigurations in a resource group.
:param resource_group_name: The resource group name of the VpnServerConfiguration.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.ListVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnServerConfigurations'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.ListVpnServerConfigurationsResult"]:
"""Lists all the VpnServerConfigurations in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.ListVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnServerConfigurations'} # type: ignore
| mit | 9,197,678,067,185,579,000 | 50.871324 | 215 | 0.658941 | false |
adusca/treeherder | treeherder/perf/models.py | 1 | 2417 | from django.core.validators import MinLengthValidator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from jsonfield import JSONField
from treeherder.model.models import (MachinePlatform,
OptionCollection,
Repository)
SIGNATURE_HASH_LENGTH = 40L
@python_2_unicode_compatible
class PerformanceFramework(models.Model):
name = models.SlugField(max_length=255L, unique=True)
class Meta:
db_table = 'performance_framework'
def __str__(self):
return self.name
@python_2_unicode_compatible
class PerformanceSignature(models.Model):
signature_hash = models.CharField(max_length=SIGNATURE_HASH_LENGTH,
validators=[
MinLengthValidator(SIGNATURE_HASH_LENGTH)
],
unique=True,
db_index=True)
framework = models.ForeignKey(PerformanceFramework)
platform = models.ForeignKey(MachinePlatform)
option_collection = models.ForeignKey(OptionCollection)
suite = models.CharField(max_length=80L)
test = models.CharField(max_length=80L, blank=True)
# extra properties to distinguish the test (that don't fit into
# option collection for whatever reason)
extra_properties = JSONField(max_length=1024)
class Meta:
db_table = 'performance_signature'
def __str__(self):
return self.signature_hash
@python_2_unicode_compatible
class PerformanceDatum(models.Model):
repository = models.ForeignKey(Repository)
job_id = models.PositiveIntegerField(db_index=True)
result_set_id = models.PositiveIntegerField(db_index=True)
signature = models.ForeignKey(PerformanceSignature)
value = models.FloatField()
push_timestamp = models.DateTimeField(db_index=True)
class Meta:
db_table = 'performance_datum'
index_together = [('repository', 'signature', 'push_timestamp'),
('repository', 'job_id'),
('repository', 'result_set_id')]
unique_together = ('repository', 'job_id', 'result_set_id',
'signature', 'push_timestamp')
def __str__(self):
return "{} {}".format(self.value, self.push_timestamp)
| mpl-2.0 | -5,983,352,198,108,781,000 | 33.042254 | 83 | 0.620604 | false |
lovetox/gajim | src/common/crypto.py | 1 | 4823 | # common crypto functions (mostly specific to XEP-0116, but useful elsewhere)
# -*- coding:utf-8 -*-
## src/common/crypto.py
##
## Copyright (C) 2007 Brendan Taylor <whateley AT gmail.com>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
import sys
import os
import math
from hashlib import sha256 as SHA256
# convert a large integer to a big-endian bitstring
def encode_mpi(n):
if n >= 256:
return encode_mpi(n // 256) + bytes([n % 256])
else:
return bytes([n])
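# e.g. encode_mpi(65537) == b'\x01\x00\x01'; decode_mpi() below is its inverse.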
# convert a large integer to a big-endian bitstring, padded with \x00s to
# a multiple of 16 bytes
def encode_mpi_with_padding(n):
    return pad_to_multiple(encode_mpi(n), 16, b'\x00', True)
# pad 'string' to a multiple of 'multiple_of' with 'char'.
# pad on the left if 'left', otherwise pad on the right.
def pad_to_multiple(string, multiple_of, char, left):
mod = len(string) % multiple_of
if mod == 0:
return string
else:
padding = (multiple_of - mod) * char
if left:
return padding + string
else:
return string + padding
# convert a big-endian bitstring to an integer
def decode_mpi(s):
if len(s) == 0:
return 0
else:
return 256 * decode_mpi(s[:-1]) + s[-1]
def sha256(string):
sh = SHA256()
sh.update(string)
return sh.digest()
base28_chr = "acdefghikmopqruvwxy123456789"
def sas_28x5(m_a, form_b):
sha = sha256(m_a + form_b + b'Short Authentication String')
lsb24 = decode_mpi(sha[-3:])
return base28(lsb24)
def base28(n):
if n >= 28:
return base28(n // 28) + base28_chr[n % 28]
else:
return base28_chr[n]
def add_entropy_sources_OpenSSL():
    # Other possibly variable data. These are very low quality sources of
# entropy, but some of them are installation dependent and can be hard
# to guess for the attacker.
# Data available on all platforms Unix, Windows
sources = [sys.argv, sys.builtin_module_names,
sys.copyright, sys.getfilesystemencoding(), sys.hexversion,
sys.modules, sys.path, sys.version, sys.api_version,
os.environ, os.getcwd(), os.getpid()]
for s in sources:
OpenSSL.rand.add(str(s).encode('utf-8'), 1)
# On Windows add the current contents of the screen to the PRNG state.
# if os.name == 'nt':
# OpenSSL.rand.screen()
# The /proc filesystem on POSIX systems contains many random variables:
# memory statistics, interrupt counts, network packet counts
if os.name == 'posix':
dirs = ['/proc', '/proc/net', '/proc/self']
for d in dirs:
if os.access(d, os.R_OK):
for filename in os.listdir(d):
OpenSSL.rand.add(filename.encode('utf-8'), 0)
try:
with open(d + os.sep + filename, "r") as fp:
                            # Limit the amount of read bytes, in case a memory
# file was opened
OpenSSL.rand.add(str(fp.read(5000)).encode('utf-8'),
1)
except:
# Ignore all read and access errors
pass
PYOPENSSL_PRNG_PRESENT = False
try:
import OpenSSL.rand
PYOPENSSL_PRNG_PRESENT = True
except ImportError:
# PyOpenSSL PRNG not available
pass
def random_bytes(bytes_):
if PYOPENSSL_PRNG_PRESENT:
OpenSSL.rand.add(os.urandom(bytes_), bytes_)
return OpenSSL.rand.bytes(bytes_)
else:
return os.urandom(bytes_)
def generate_nonce():
return random_bytes(8)
# generate a random number between 'bottom' and 'top'
def srand(bottom, top):
# minimum number of bytes needed to represent that range
bytes = int(math.ceil(math.log(top - bottom, 256)))
# in retrospect, this is horribly inadequate.
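    # (the modulo step biases the result toward the low end of the range
    # unless (top - bottom) evenly divides 256 ** bytes)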
return (decode_mpi(random_bytes(bytes)) % (top - bottom)) + bottom
# a faster version of (base ** exp) % mod
# taken from <http://lists.danga.com/pipermail/yadis/2005-September/001445.html>
def powmod(base, exp, mod):
square = base % mod
result = 1
while exp > 0:
if exp & 1: # exponent is odd
result = (result * square) % mod
square = (square * square) % mod
exp //= 2
return result
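# e.g. powmod(5, 117, 19) == pow(5, 117, 19); only the running remainders are
# kept, so intermediate values never exceed mod ** 2.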
| gpl-3.0 | 1,936,273,987,310,581,000 | 30.940397 | 94 | 0.6243 | false |
python-dirbtuves/it-brandos-egzaminai | exams/E2018/pagrindinis/u2/u2.py | 1 | 1377 | from itertools import islice
from pathlib import Path
from typing import Dict
def seconds(v: int, m: int, s: int) -> int:
    # This function converts hours, minutes and seconds into seconds.
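    # e.g. seconds(1, 2, 3) == 1 * 3600 + 2 * 60 + 3 == 3723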
return v * 3600 + m * 60 + s
def save_results(path: Path, pabaiga: Dict[str, int]) -> None:
with path.open('w') as f:
        # Sort the skiers by time and then by name.
for laikas, slidininkas in sorted((v, k) for k, v in pabaiga.items()):
            # Convert the seconds into minutes and seconds.
m, s = divmod(laikas, 60)
print(f'{slidininkas:<20}{m} {s}', file=f)
def main(path: Path) -> None:
startas: Dict[str, int] = {}
pabaiga: Dict[str, int] = {}
with open(path / 'U2.txt') as f:
        # Read the start data.
n = int(next(f))
for eilute in islice(f, n):
slidininkas = eilute[:20]
laikas = map(int, eilute[20:].split())
startas[slidininkas] = seconds(*laikas)
        # Read the finish data.
m = int(next(f))
for eilute in islice(f, m):
slidininkas = eilute[:20]
laikas = map(int, eilute[20:].split())
            # Record how many seconds it took the skier to reach the finish.
pabaiga[slidininkas] = seconds(*laikas) - startas[slidininkas]
save_results(path / 'U2rez.txt', pabaiga)
| agpl-3.0 | -4,870,201,727,215,256,000 | 33.075 | 78 | 0.590609 | false |
Woraufhin/logic | formula.py | 1 | 1112 | import itertools
import string
from abc import ABCMeta, abstractproperty
import attr
def is_valid_formula(inst, attr, value):
if not isinstance(value, (Formula, str)):
raise ValueError('{} is not a valid formula type.'.format(value))
class Formula(object):
__metaclass__ = ABCMeta
group = {'open': '(', 'close': ')'}
@abstractproperty
def token(self):
pass
@attr.s
class Atomic(Formula):
token = list(itertools.chain.from_iterable(
[string.uppercase, string.lowercase]))
exp = attr.ib(validator=is_valid_formula)
@attr.s
class And(Formula):
token = ['^', '&']
left = attr.ib(validator=is_valid_formula)
right = attr.ib(validator=is_valid_formula)
@attr.s
class Or(Formula):
token = ['|']
left = attr.ib(validator=is_valid_formula)
right = attr.ib(validator=is_valid_formula)
@attr.s
class Imply(Formula):
token = ['>']
left = attr.ib(validator=is_valid_formula)
right = attr.ib(validator=is_valid_formula)
@attr.s
class Not(Formula):
token = ['~']
exp = attr.ib(validator=is_valid_formula)
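# Illustrative example (not part of the original module): the formula "p ^ ~q"
# can be built as And(Atomic('p'), Not(Atomic('q'))); each constructor runs
# is_valid_formula on its operands via the attrs validators.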
| mit | 1,520,875,086,732,139,300 | 19.592593 | 73 | 0.642986 | false |
googleapis/python-dataflow-client | google/cloud/dataflow_v1beta3/types/snapshots.py | 1 | 5677 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.dataflow.v1beta3",
manifest={
"SnapshotState",
"PubsubSnapshotMetadata",
"Snapshot",
"GetSnapshotRequest",
"DeleteSnapshotRequest",
"DeleteSnapshotResponse",
"ListSnapshotsRequest",
"ListSnapshotsResponse",
},
)
class SnapshotState(proto.Enum):
r"""Snapshot state."""
UNKNOWN_SNAPSHOT_STATE = 0
PENDING = 1
RUNNING = 2
READY = 3
FAILED = 4
DELETED = 5
class PubsubSnapshotMetadata(proto.Message):
r"""Represents a Pubsub snapshot.
Attributes:
topic_name (str):
The name of the Pubsub topic.
snapshot_name (str):
The name of the Pubsub snapshot.
expire_time (google.protobuf.timestamp_pb2.Timestamp):
The expire time of the Pubsub snapshot.
"""
topic_name = proto.Field(proto.STRING, number=1,)
snapshot_name = proto.Field(proto.STRING, number=2,)
expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
class Snapshot(proto.Message):
r"""Represents a snapshot of a job.
Attributes:
id (str):
The unique ID of this snapshot.
project_id (str):
The project this snapshot belongs to.
source_job_id (str):
The job this snapshot was created from.
creation_time (google.protobuf.timestamp_pb2.Timestamp):
The time this snapshot was created.
ttl (google.protobuf.duration_pb2.Duration):
The time after which this snapshot will be
automatically deleted.
state (google.cloud.dataflow_v1beta3.types.SnapshotState):
State of the snapshot.
pubsub_metadata (Sequence[google.cloud.dataflow_v1beta3.types.PubsubSnapshotMetadata]):
PubSub snapshot metadata.
description (str):
User specified description of the snapshot.
Maybe empty.
disk_size_bytes (int):
The disk byte size of the snapshot. Only
available for snapshots in READY state.
region (str):
Cloud region where this snapshot lives in,
e.g., "us-central1".
"""
id = proto.Field(proto.STRING, number=1,)
project_id = proto.Field(proto.STRING, number=2,)
source_job_id = proto.Field(proto.STRING, number=3,)
creation_time = proto.Field(
proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,
)
ttl = proto.Field(proto.MESSAGE, number=5, message=duration_pb2.Duration,)
state = proto.Field(proto.ENUM, number=6, enum="SnapshotState",)
pubsub_metadata = proto.RepeatedField(
proto.MESSAGE, number=7, message="PubsubSnapshotMetadata",
)
description = proto.Field(proto.STRING, number=8,)
disk_size_bytes = proto.Field(proto.INT64, number=9,)
region = proto.Field(proto.STRING, number=10,)
class GetSnapshotRequest(proto.Message):
r"""Request to get information about a snapshot
Attributes:
project_id (str):
The ID of the Cloud Platform project that the
snapshot belongs to.
snapshot_id (str):
The ID of the snapshot.
location (str):
The location that contains this snapshot.
"""
project_id = proto.Field(proto.STRING, number=1,)
snapshot_id = proto.Field(proto.STRING, number=2,)
location = proto.Field(proto.STRING, number=3,)
class DeleteSnapshotRequest(proto.Message):
r"""Request to delete a snapshot.
Attributes:
project_id (str):
The ID of the Cloud Platform project that the
snapshot belongs to.
snapshot_id (str):
The ID of the snapshot.
location (str):
The location that contains this snapshot.
"""
project_id = proto.Field(proto.STRING, number=1,)
snapshot_id = proto.Field(proto.STRING, number=2,)
location = proto.Field(proto.STRING, number=3,)
class DeleteSnapshotResponse(proto.Message):
r"""Response from deleting a snapshot. """
class ListSnapshotsRequest(proto.Message):
r"""Request to list snapshots.
Attributes:
project_id (str):
The project ID to list snapshots for.
job_id (str):
If specified, list snapshots created from
this job.
location (str):
The location to list snapshots in.
"""
project_id = proto.Field(proto.STRING, number=1,)
job_id = proto.Field(proto.STRING, number=3,)
location = proto.Field(proto.STRING, number=2,)
class ListSnapshotsResponse(proto.Message):
r"""List of snapshots.
Attributes:
snapshots (Sequence[google.cloud.dataflow_v1beta3.types.Snapshot]):
Returned snapshots.
"""
snapshots = proto.RepeatedField(proto.MESSAGE, number=1, message="Snapshot",)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 4,473,963,810,072,339,500 | 31.815029 | 95 | 0.648758 | false |
invariantor/ImageSplit-Classification | image split and classification/image_split.py | 1 | 6276 | import numpy as np
import pylab
import mahotas as mh
import types
# constants
upper_distance = 100  # row offset where the downward search starts
approxWidth = 40
threshold = 300
border = 1
def pre_process(image):
"""
    pre_process returns a binary (black-and-white) image, given a color image as input.
"""
T = mh.thresholding.otsu(image)
image1 =image > T
image2 = [[0]* image1.shape[1] for i in range(image1.shape[0])]
for i in range(image1.shape[0]):
for j in range(image1.shape[1]):
if (image1[i][j] != [0,0,0]).any():
image2[i][j] = 1
image2 = np.array(image2, dtype = np.uint8)
return image2
def locate(image):
"""
    Given a screenshot as input, return the position of the matching game,
    the size of the game in cells (num_x, num_y),
    and the pixel size of each grid cell (size_x, size_y).
"""
image = pre_process(image)
height,width = image.shape
# stop going down when a grid is found
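    # (a row with more than approxWidth/2 white pixels is taken as the top edge of the grid area)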
up = upper_distance
while True:
num_white =0
for j in range(width):
num_white+=image[up][j]
if num_white>(approxWidth/2):
break
up +=1
# stop going up when a grid is found
down = height-1
pre_num_white =0 #the number of white pixels in the last step
for j in range(width):
pre_num_white+=image[down][j]
while True:
num_white =0
for j in range(width):
num_white+=image[down][j]
if num_white-pre_num_white>(approxWidth/2):
break
pre_num_white = num_white
down -=1
current_image = image[up:]
"""cut the top part(including the time bar, all sorts of buttons) away
which will interfere with our searching process"""
current_image = np.array(current_image)
c_height,c_width = current_image.shape
# stop going right when a grid is found
left = 0
pre_num_white =0
for i in range(c_height):
pre_num_white+=current_image[i][left]
while True:
num_white =0
for i in range(c_height):
num_white+=current_image[i][left]
if num_white-pre_num_white>(approxWidth/2):
break
pre_num_white = num_white
left +=1
# stop going left when a grid is found
right = c_width-1
pre_num_white =0
for i in range(c_height):
pre_num_white+=current_image[i][right]
while True:
num_white =0
for i in range(c_height):
num_white+=current_image[i][right]
if num_white-pre_num_white>(approxWidth/2):
break
pre_num_white = num_white
right -=1
temp = [0]*(down+1-up)
for i in range(len(temp)):
temp[i] = current_image[i][left:right+1]
current_image = np.array(temp)
height,width = current_image.shape
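    # Rows (and, below, columns) that are almost entirely black are the gaps
    # between grid cells; record their indices as candidate dividing lines.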
divd_x = []
for i in range(height):
num_white = sum(current_image[i])
if num_white < approxWidth/2:
divd_x.append(i)
temp_x = [divd_x[i] for i in range(len(divd_x)) if ((i==0) or (i==len(divd_x)-1)) or not (divd_x[i-1]+1==divd_x[i] and divd_x[i+1]-1==divd_x[i])]
# only keep the truly dividing lines, namely those marginal lines.
divd_x =temp_x
divd_y = []
for j in range(width):
num_white = 0
for i in range(height):
num_white += current_image[i][j]
if num_white < approxWidth/2:
divd_y.append(j)
temp_y = [divd_y[i] for i in range(len(divd_y)) if ((i==0) or (i==len(divd_y)-1)) or not (divd_y[i-1]+1==divd_y[i] and divd_y[i+1]-1==divd_y[i])]
# only keep the truly dividing lines, namely those marginal lines.
divd_y = temp_y
#print divd_x
#print divd_y
"""
This part needs further refinement.
"""
if len(divd_x):
size_x = divd_x[0]
num_x = divd_x[-1] / size_x +1
else:
size_x = height - 1
num_x = 1
if len(divd_y):
size_y = divd_y[0]
num_y = divd_y[-1] / size_y +1
else:
        size_y = width - 1
num_y = 1
position = (up,down,left,right)
info = (size_x,size_y,num_x,num_y)
return position, info
def split(image,position,info):
"""
Return a 2d matrix label, which labels different kinds of grids using natural numbers.
(By convention, the empty grid is labeled 0)
"""
size_x, size_y, num_x, num_y = info
up, down, left, right = position
T = mh.thresholding.otsu(image)
image = image >T
temp = [0]* (down+1-up)
for i in range(len(temp)):
temp[i] = image[up+i][left:right+1]
temp = np.array(temp)
image = temp
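    # Each cell spans size_x (resp. size_y) pixels followed by a one-pixel
    # dividing line, hence the (size + 1) strides in the slicing below.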
game = [[0]* num_y for j in range(num_x)]
for i in range(num_x):
for j in range(num_y):
grid = [0]* size_x
for k in range(size_x):
grid[k] = image[i*(size_x+1)+k][j*(size_y+1):(j+1)*(size_y+1)-1]
game[i][j] = grid
# using a quite naive method -- calculating the statistical distance between two grids
# improvement is needed here, to speed up the program
black = [[[0]*3]*size_y]*size_x
records = [black]
label = [[0]* num_y for j in range(num_x)]
for i in range(num_x):
for j in range(num_y):
find = False
for index in range(len(records)):
if distance(records[index],game[i][j])< threshold:
label[i][j] = index
find =True
break
if not find:
records.append(game[i][j])
label[i][j] = len(records)-1
return label
def distance(a1,a2):
"""
recursively calculate the distance between a1 and a2
"""
if (type(a1)== np.uint8) or (type(a1) == types.IntType) or (type(a1)==np.bool_):
return abs(int(a1)-int(a2))
if len(a1)!= len(a2):
print "Wrong Format","len(a1)=",len(a1),"len(a2)=",len(a2)
return
dis =0
for i in range(len(a1)):
dis += distance(a1[i],a2[i])
return dis | mit | -6,850,167,160,207,502,000 | 28.608491 | 149 | 0.53362 | false |
Maselkov/GW2Bot | guildwars2/evtc.py | 1 | 12990 | import datetime
import aiohttp
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from .exceptions import APIError
from .utils.chat import (embed_list_lines, en_space, magic_space,
zero_width_space)
UTC_TZ = datetime.timezone.utc
BASE_URL = "https://dps.report/"
UPLOAD_URL = BASE_URL + "uploadContent"
JSON_URL = BASE_URL + "getJson"
TOKEN_URL = BASE_URL + "getUserToken"
ALLOWED_FORMATS = (".evtc", ".zevtc", ".zip")
class EvtcMixin:
async def get_dpsreport_usertoken(self, user):
doc = await self.bot.database.get(user, self)
token = doc.get("dpsreport_token")
if not token:
try:
async with self.session.get(TOKEN_URL) as r:
data = await r.json()
token = data["userToken"]
await self.bot.database.set(
user, {"dpsreport_token": token}, self)
return token
except:
                return None
        return token
async def upload_log(self, file, user):
params = {"json": 1}
token = await self.get_dpsreport_usertoken(user)
if token:
params["userToken"] = token
data = aiohttp.FormData()
data.add_field("file", await file.read(), filename=file.filename)
async with self.session.post(
UPLOAD_URL, data=data, params=params) as r:
resp = await r.json()
error = resp["error"]
if error:
raise APIError(error)
return resp
async def find_duplicate_dps_report(self, doc):
margin_of_error = datetime.timedelta(seconds=10)
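        # An upload is considered a duplicate when an encounter with the same
        # boss and player roster is already stored with start and end times
        # within +/- 10 seconds of this one.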
doc = await self.db.encounters.find_one({
"boss_id": doc["boss_id"],
"players": {
"$eq": doc["players"]
},
"date": {
"$gte": doc["date"] - margin_of_error,
"$lt": doc["date"] + margin_of_error
},
"start_date": {
"$gte": doc["start_date"] - margin_of_error,
"$lt": doc["start_date"] + margin_of_error
},
})
return True if doc else False
async def upload_embed(self, ctx, result):
if not result["encounter"]["jsonAvailable"]:
return None
async with self.session.get(
JSON_URL, params={"id": result["id"]}) as r:
data = await r.json()
lines = []
targets = data["phases"][0]["targets"]
group_dps = 0
for target in targets:
group_dps += sum(
p["dpsTargets"][target][0]["dps"] for p in data["players"])
def get_graph(percentage):
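            # Render a 20-character bar in which each filled segment stands for 5%.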
bar_count = round(percentage / 5)
bars = ""
bars += "▀" * bar_count
bars += "━" * (20 - bar_count)
return bars
def get_dps(player):
bars = ""
dps = player["dps"]
if not group_dps or not dps:
percentage = 0
else:
percentage = round(100 / group_dps * dps)
bars = get_graph(percentage)
bars += f"` **{dps}** DPS | **{percentage}%** of group DPS"
return bars
players = []
for player in data["players"]:
dps = 0
for target in targets:
dps += player["dpsTargets"][target][0]["dps"]
player["dps"] = dps
players.append(player)
players.sort(key=lambda p: p["dps"], reverse=True)
for player in players:
down_count = player["defenses"][0]["downCount"]
prof = self.get_emoji(ctx, player["profession"])
line = f"{prof} **{player['name']}** *({player['account']})*"
if down_count:
line += (f" | {self.get_emoji(ctx, 'downed')}Downed "
f"count: **{down_count}**")
lines.append(line)
dpses = []
charater_name_max_length = 19
for player in players:
line = self.get_emoji(ctx, player["profession"])
align = (charater_name_max_length - len(player["name"])) * " "
line += "`" + player["name"] + align + get_dps(player)
dpses.append(line)
dpses.append(f"> Group DPS: **{group_dps}**")
color = discord.Color.green(
) if data["success"] else discord.Color.red()
minutes, seconds = data["duration"].split()[:2]
minutes = int(minutes[:-1])
seconds = int(seconds[:-1])
duration_time = (minutes * 60) + seconds
duration = f"**{minutes}** minutes, **{seconds}** seconds"
embed = discord.Embed(
title="DPS Report",
description="Encounter duration: " + duration,
url=result["permalink"],
color=color)
boss_lines = []
for target in targets:
target = data["targets"][target]
if data["success"]:
health_left = 0
else:
percent_burned = target["healthPercentBurned"]
health_left = 100 - percent_burned
health_left = round(health_left, 2)
if len(targets) > 1:
boss_lines.append(f"**{target['name']}**")
boss_lines.append(f"Health: **{health_left}%**")
boss_lines.append(get_graph(health_left))
embed.add_field(name="> **BOSS**", value="\n".join(boss_lines))
buff_lines = []
sought_buffs = ["Might", "Fury", "Quickness", "Alacrity"]
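        # Resolve the boon names above to their numeric ids (and stacking
        # behaviour) using the log's buffMap.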
buffs = []
for buff in sought_buffs:
for key, value in data["buffMap"].items():
if value["name"] == buff:
buffs.append({
"name": value["name"],
"id": int(key[1:]),
"stacking": value["stacking"]
})
break
separator = 2 * en_space
line = zero_width_space + (en_space * (charater_name_max_length + 6))
for buff in sought_buffs:
line += self.get_emoji(
ctx, buff, fallback=True,
fallback_fmt="{:1.1}") + f"{separator}{2 * en_space}"
buff_lines.append(line)
groups = []
for player in players:
if player["group"] not in groups:
groups.append(player["group"])
if len(groups) > 1:
players.sort(key=lambda p: p["group"])
current_group = None
for player in players:
if "buffUptimes" not in player:
continue
if len(groups) > 1:
if not current_group or player["group"] != current_group:
current_group = player["group"]
buff_lines.append(f"> **GROUP {current_group}**")
line = "`"
line = self.get_emoji(ctx, player["profession"])
align = (3 + charater_name_max_length - len(player["name"])) * " "
line += "`" + player["name"] + align
for buff in buffs:
for buff_uptime in player["buffUptimes"]:
if buff["id"] == buff_uptime["id"]:
uptime = str(buff_uptime["buffData"][0]["uptime"])
break
else:
uptime = "0"
if not buff["stacking"]:
uptime += "%"
line += uptime
line += separator + ((6 - len(uptime)) * magic_space)
line += '`'
buff_lines.append(line)
embed = embed_list_lines(embed, lines, "> **PLAYERS**")
embed = embed_list_lines(embed, dpses, "> **DPS**")
embed = embed_list_lines(embed, buff_lines, "> **BUFFS**")
boss = self.gamedata["bosses"].get(str(result["encounter"]["bossId"]))
date_format = "%Y-%m-%d %H:%M:%S %z"
date = datetime.datetime.strptime(data["timeEnd"] + "00", date_format)
start_date = datetime.datetime.strptime(data["timeStart"] + "00",
date_format)
date = date.astimezone(datetime.timezone.utc)
start_date = start_date.astimezone(datetime.timezone.utc)
doc = {
"boss_id": result["encounter"]["bossId"],
"start_date": start_date,
"date": date,
"players":
sorted([player["account"] for player in data["players"]]),
"permalink": result["permalink"],
"success": data["success"],
"duration": duration_time
}
duplicate = await self.find_duplicate_dps_report(doc)
if not duplicate:
await self.db.encounters.insert_one(doc)
embed.timestamp = date
embed.set_footer(text="Recorded at", icon_url=self.bot.user.avatar_url)
if boss:
embed.set_author(name=data["fightName"], icon_url=boss["icon"])
return embed
@commands.group(case_insensitive=True)
async def evtc(self, ctx):
"""Process an EVTC combat log or enable automatic processing
Simply upload your file and in the "add a comment" field type $evtc,
in other words invoke this command while uploading a file.
Use this command ($evtc) without uploading a file to see other commands
Accepted formats are: .evtc, .zevtc, .zip
It's highly recommended to enable compression in your Arc settings.
        With the setting enabled, log sizes will rarely, if ever, be
        higher than the Discord upload limit.
"""
if ctx.invoked_subcommand is None and not ctx.message.attachments:
return await ctx.send_help(ctx.command)
for attachment in ctx.message.attachments:
if attachment.filename.endswith(ALLOWED_FORMATS):
break
else:
return await ctx.send_help(ctx.command)
if ctx.guild:
doc = await self.bot.database.get(ctx.channel, self)
settings = doc.get("evtc", {})
enabled = settings.get("enabled")
if not ctx.channel.permissions_for(ctx.me).embed_links:
return await ctx.send(
"I need embed links permission to process logs.")
if enabled:
return
await self.process_evtc(ctx.message)
@commands.cooldown(1, 5, BucketType.guild)
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
@evtc.command(name="channel")
async def evtc_channel(self, ctx):
"""Sets this channel to be automatically used to process logs"""
doc = await self.bot.database.get(ctx.channel, self)
enabled = not doc.get("evtc.enabled", False)
await self.bot.database.set(ctx.channel, {"evtc.enabled": enabled},
self)
if enabled:
msg = ("Automatic EVTC processing enabled. Simply upload the file "
"wish to be processed in this channel. Accepted "
"formats: `.evtc`, `.zevtc`, `.zip` ")
if not ctx.channel.permissions_for(ctx.me).embed_links:
await ctx.send("I won't be able to process logs without Embed "
"Links permission.")
else:
msg = ("Automatic EVTC processing diasbled")
await ctx.send(msg)
async def process_evtc(self, message):
embeds = []
prompt = await message.channel.send("Processing logs... " +
self.get_emoji(message, "loading"))
for attachment in message.attachments:
if attachment.filename.endswith(ALLOWED_FORMATS):
try:
resp = await self.upload_log(attachment, message.author)
embeds.append(await self.upload_embed(message, resp))
except Exception as e:
self.log.exception(
"Exception processing EVTC log ", exc_info=e)
return await prompt.edit(
content="Error processing your log! :x:")
for embed in embeds:
await message.channel.send(embed=embed)
try:
await prompt.delete()
await message.delete()
except discord.HTTPException:
pass
@commands.Cog.listener()
async def on_message(self, message):
if not message.attachments:
return
if not message.guild:
return
for attachment in message.attachments:
if attachment.filename.endswith(ALLOWED_FORMATS):
break
else:
return
doc = await self.bot.database.get(message.channel, self)
settings = doc.get("evtc", {})
enabled = settings.get("enabled")
if not enabled:
return
await self.process_evtc(message)
| mit | -8,968,317,482,185,168,000 | 39.702194 | 79 | 0.522027 | false |
juhnowski/FishingRod | production/pygsl-0.9.5/tests/block_test.py | 1 | 60228 | #!/usr/bin/env python
# Author : Pierre Schnizer
import types
import tempfile
import pygsl
import pygsl._numobj as nummodule
from pygsl import vector, ArrayType
from pygsl import matrix_pierre
matrix = matrix_pierre
from pygsl import _block, get_typecode
from array_check import myord, myorda, array_check
import unittest
import sys
sys.stderr = sys.stdout
#pygsl.set_debug_level(10)
def getopentmpfile(mode='rb'):
file = tempfile.TemporaryFile(mode)
assert(type(file.file) == types.FileType)
return file.file
class _DefaultTestCase(unittest.TestCase):
_type = ''
_base = None
_reference_value = 137
#_retrieve = None
def setUp(self):
#print "Testing class ", self.__class__.__name__
sys.stdout.flush()
sys.stderr.flush()
self._mysetUp()
def _get_reference_value(self):
return self._reference_value
def _get_format(self):
return self._format
def _get_function_direct(self, suffix=None):
"""
        Translate the given suffix into the fully qualified function name in the _block module.
"""
if suffix == None:
suffix = self.function
if self._type == '':
tmp = '_'
else:
tmp = '_' + self._type + '_'
# base is matrix or vector or .....
assert self._base != None, 'Use a derived class!'
base = self._base
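        # e.g. base='matrix', _type='float', suffix='set_zero' resolves to
        # _block.gsl_matrix_float_set_zero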
function = eval('_block.gsl_' + base + tmp + suffix)
return function
def _get_function_ui(self, suffix=None):
"""
Get the method to the underlying function from the UI.
"""
if suffix == None:
suffix = self.function
if self._type == '': tmp = '.'
else:
tmp = '.' + self._type + '.'
# base is matrix or vector or .....
assert self._base != None, 'Use a derived class!'
base = self._base
function = eval(base + tmp + suffix)
return function
def _get_function(self, suffix=None):
if self._retrieve == 'direct':
return self._get_function_direct(suffix)
elif self._retrieve == 'UI':
return self._get_function_ui(suffix)
else:
tmp = str(self._retrieve)
raise ValueError, "Unknown switch for _retrieve: " + tmp
def test_0_matrixtype(self):
test = 0
try:
assert type(self.array) == ArrayType, "Not an array type"
test = 1
finally:
if test == 0:
print "Expected a type of %s but got a type of %s" %(ArrayType, type(self.array))
def tearDown(self):
self._mytearDown()
class _DirectAccess:
_retrieve = 'direct'
class _UIAccess:
_retrieve = 'UI'
class _DefaultMatrixTestCase(_DefaultTestCase):
_base = 'matrix'
class _DefaultVectorTestCase(_DefaultTestCase):
_base = 'vector'
class _DoubleMatrixTestCase(_DefaultMatrixTestCase):
_type = ''
_format = '%f'
class _FloatMatrixTestCase(_DefaultMatrixTestCase):
_type = 'float'
_format = '%f'
class _ComplexMatrixTestCase(_DefaultMatrixTestCase):
_type = 'complex'
_format = '%f'
class _ComplexFloatMatrixTestCase(_DefaultMatrixTestCase):
_type = 'complex_float'
_format = '%f'
class _LongMatrixTestCase(_DefaultMatrixTestCase):
_type = 'long'
_format = '%ld'
class _IntMatrixTestCase(_DefaultMatrixTestCase):
_type = 'int'
_format = '%d'
class _ShortMatrixTestCase(_DefaultMatrixTestCase):
_type = 'short'
_format = '%d'
class _CharMatrixTestCase(_DefaultMatrixTestCase):
_type = 'char'
_format = '%c'
class _DoubleVectorTestCase(_DefaultVectorTestCase):
_type = ''
_format = '%f'
class _FloatVectorTestCase(_DefaultVectorTestCase):
_type = 'float'
_format = '%f'
class _ComplexVectorTestCase(_DefaultVectorTestCase):
_type = 'complex'
_format = '%f'
class _ComplexFloatVectorTestCase(_DefaultVectorTestCase):
_type = 'complex_float'
_format = '%f'
class _LongVectorTestCase(_DefaultVectorTestCase):
_type = 'long'
_format = '%ld'
class _IntVectorTestCase(_DefaultVectorTestCase):
_type = 'int'
_format = '%d'
class _ShortVectorTestCase(_DefaultVectorTestCase):
_type = 'short'
_format = '%d'
class _CharVectorTestCase(_DefaultVectorTestCase):
_type = 'char'
_format = '%c'
_reference_value = chr(137)
class _SetIdentityMatrixTestCase(_DefaultMatrixTestCase):
function = 'set_identity'
size = 10
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp((self.size, self.size))
def test_1_matrixsize(self):
array_check(self.array, None, (self.size, self.size))
def test_2_diagonale(self):
for i in range(self.size):
assert self.array[i,i] == 1, "Diagonale not one !"
def test_3_diagonale(self):
for i in range(self.size):
for j in range(self.size):
if i == j :
continue
assert self.array[i,j] == 0, "Of Diagonale not zero!"
def _mytearDown(self):
del self.array
self.array = None
class SetIdentityMatrixTestCase(_DoubleMatrixTestCase,
_DirectAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityMatrixUITestCase(_DoubleMatrixTestCase,
_UIAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityFloatMatrixTestCase(_FloatMatrixTestCase,
_DirectAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityComplexMatrixTestCase(_ComplexMatrixTestCase,
_DirectAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityComplexFloatMatrixTestCase(_ComplexFloatMatrixTestCase,
_DirectAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityLongMatrixTestCase(_LongMatrixTestCase,
_DirectAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityIntMatrixTestCase(_IntMatrixTestCase,
_DirectAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityShortMatrixTestCase(_ShortMatrixTestCase,
_DirectAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityFloatMatrixUITestCase(_FloatMatrixTestCase,
_UIAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityComplexMatrixUITestCase(_ComplexMatrixTestCase,
_UIAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityComplexFloatMatrixUITestCase(_ComplexFloatMatrixTestCase,
_UIAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityLongMatrixUITestCase(_LongMatrixTestCase,
_UIAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityIntMatrixUITestCase(_IntMatrixTestCase,
_UIAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityShortMatrixUITestCase(_ShortMatrixTestCase,
_UIAccess,
_SetIdentityMatrixTestCase,
):
pass
class SetIdentityCharMatrixTestCase(_CharMatrixTestCase,
_UIAccess,
_SetIdentityMatrixTestCase,
):
def test_2_diagonale(self):
for i in range(self.size):
assert myord(self.array[i,i][0]) == 1, "Diagonale not one !"
def test_3_diagonale(self):
for i in range(self.size):
for j in range(self.size):
if i == j :
continue
test = 0
try:
assert myord(self.array[i,j][0]) == 0, "Of Diagonale not zero!"
test = 1
finally:
if test == 0:
print self.array
print self.array[i,j]
class SetIdentityCharMatrixTestCase(_CharMatrixTestCase,
_DirectAccess,
_SetIdentityMatrixTestCase,
):
def test_2_diagonale(self):
for i in range(self.size):
assert myorda(self.array[i,i]) == 1, "Diagonale not one !"
def test_3_diagonale(self):
for i in range(self.size):
for j in range(self.size):
if i == j :
continue
assert myorda(self.array[i,j]) == 0, "Of Diagonale not zero!"
class _SetZeroMatrixTestCase(_DefaultMatrixTestCase):
function = 'set_zero'
size = 10
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp((self.size, self.size))
def test_1_matrixsize(self):
array_check(self.array, None, (self.size, self.size))
def test_2_all(self):
for i in range(self.size):
for j in range(self.size):
assert self.array[i,j] == 0, "Off Diagonale not zero!"
def test_2_isnull(self):
tmp = self._get_function('isnull')
test = 0
try:
a = tmp(self.array)
test = 1
finally:
if test == 0:
print self, tmp
assert tmp(self.array)
def _mytearDown(self):
del self.array
self.array = None
class SetZeroMatrixTestCase(_DoubleMatrixTestCase,
_DirectAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroFloatMatrixTestCase(_FloatMatrixTestCase,
_DirectAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroComplexMatrixTestCase(_ComplexMatrixTestCase,
_DirectAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroComplexFloatMatrixTestCase(_ComplexFloatMatrixTestCase,
_DirectAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroLongMatrixTestCase(_LongMatrixTestCase,
_DirectAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroIntMatrixTestCase(_IntMatrixTestCase,
_DirectAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroShortMatrixTestCase(_ShortMatrixTestCase,
_DirectAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroMatrixUITestCase(_DoubleMatrixTestCase,
_UIAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroFloatMatrixUITestCase(_FloatMatrixTestCase,
_UIAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroComplexMatrixUITestCase(_ComplexMatrixTestCase,
_UIAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroComplexFloatMatrixUITestCase(_ComplexFloatMatrixTestCase,
_UIAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroLongMatrixUITestCase(_LongMatrixTestCase,
_UIAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroIntMatrixUITestCase(_IntMatrixTestCase,
_UIAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroShortMatrixUITestCase(_ShortMatrixTestCase,
_UIAccess,
_SetZeroMatrixTestCase,
):
pass
class SetZeroCharMatrixTestCase(_CharMatrixTestCase,
_DirectAccess,
_SetZeroMatrixTestCase,
):
def test_2_all(self):
for i in range(self.size):
for j in range(self.size):
test = 0
try:
assert myorda(self.array[i,j]) == 0, "Of Diagonale not zero!"
test = 1
finally:
if test == 0:
print repr(self.array[i,j])
class SetZeroCharMatrixUITestCase(_CharMatrixTestCase,
_UIAccess,
_SetZeroMatrixTestCase,
):
def test_2_all(self):
for i in range(self.size):
for j in range(self.size):
test = 0
try:
assert myorda(self.array[i,j]) == 0, "Of Diagonale not zero!"
test = 1
finally:
if test == 0:
print repr(self.array[i,j])
class _SetAllMatrixTestCase(_DefaultMatrixTestCase):
function = 'set_all'
size = 10
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp((self.size, self.size), self._get_reference_value())
def test_1_matrixsize(self):
array_check(self.array, None, (self.size, self.size))
def test_2_all(self):
for i in range(self.size):
for j in range(self.size):
assert self.array[i,j] == self._get_reference_value(), "Value not 137!"
def _mytearDown(self):
del self.array
self.array = None
class SetAllFloatMatrixTestCase(_FloatMatrixTestCase,
_DirectAccess,
_SetAllMatrixTestCase,
):
pass
class SetAllComplexMatrixTestCase(_ComplexMatrixTestCase,
_DirectAccess,
_SetAllMatrixTestCase,
):
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp((self.size, self.size), self._get_reference_value()+0j)
class SetAllComplexFloatMatrixTestCase(_ComplexFloatMatrixTestCase,
_DirectAccess,
_SetAllMatrixTestCase,
):
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp((self.size, self.size), 137+0j)
class SetAllLongMatrixTestCase(_LongMatrixTestCase,
_DirectAccess,
_SetAllMatrixTestCase,
):
pass
class SetAllIntMatrixTestCase(_IntMatrixTestCase,
_DirectAccess,
_SetAllMatrixTestCase,
):
pass
class SetAllShortMatrixTestCase(_ShortMatrixTestCase,
_DirectAccess,
_SetAllMatrixTestCase,
):
pass
class SetAllFloatMatrixUITestCase(_FloatMatrixTestCase,
_UIAccess,
_SetAllMatrixTestCase,
):
pass
class SetAllComplexMatrixUITestCase(_ComplexMatrixTestCase,
_UIAccess,
_SetAllMatrixTestCase,
):
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp((self.size, self.size), 137+0j)
class SetAllComplexFloatMatrixUITestCase(_ComplexFloatMatrixTestCase,
_UIAccess,
_SetAllMatrixTestCase,
):
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp((self.size, self.size), 137+0j)
class SetAllLongMatrixUITestCase(_LongMatrixTestCase,
_UIAccess,
_SetAllMatrixTestCase,
):
pass
class SetAllIntMatrixUITestCase(_IntMatrixTestCase,
_UIAccess,
_SetAllMatrixTestCase,
):
pass
class SetAllShortMatrixUITestCase(_ShortMatrixTestCase,
_UIAccess,
_SetAllMatrixTestCase,
):
pass
class _MatrixSetup:
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp((self.size, self.size), chr(137))
def test_2_all(self):
for i in range(self.size):
for j in range(self.size):
assert myorda(self.array[i,j]) == 137, "Of Diagonale not zero!"
class SetAllCharMatrixTestCase(_CharMatrixTestCase,
_DirectAccess,
_MatrixSetup,
_SetAllMatrixTestCase,
):
pass
class SetAllCharMatrixUITestCase(_CharMatrixTestCase,
_UIAccess,
_MatrixSetup,
_SetAllMatrixTestCase,
):
pass
class _DiagonalMatrixTestCase(_DefaultMatrixTestCase):
size = 4
def _mysetUp(self):
tmp = self._get_function('set_zero')
array = tmp((self.size, self.size))
type = get_typecode(array)
array = nummodule.zeros((self.size,self.size)).astype(type)
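        # Fill the matrix so entries on or below the diagonal hold +i and
        # entries strictly above it hold -i (i being the row index).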
for i in range(self.size):
for j in range(self.size):
if i < j:
array[i,j] = -i
else:
array[i,j] = i
self.array = array
def test_1_matrixsize(self):
array_check(self.array, None, (self.size, self.size))
def _gettranspose(self):
function = self._get_function('transpose')
tmp = function(self.array)
assert(tmp[0] == 0)
return tmp[1]
def test_2_matrixsizetranspose(self):
tmp = self._gettranspose()
assert tmp.shape == (self.size, self.size), "Not of size 10, 10"
def test_3_diagonal(self):
function = self._get_function('diagonal')
tmp = function(self.array)
for i in range(self.size):
msg = "Error in getting diagonal! tmp[+"+`i`+"] = " + `tmp`
#assert tmp[i] == i, msg
def test_4_diagonaltranspose(self):
tmp = self._gettranspose()
for i in range(self.size):
msg = "Error in getting diagonal! tmp[+"+`i`+"] = " + `tmp`
#assert tmp[i,i] == i, msg
def test_5_super_diagonal(self):
function = self._get_function('superdiagonal')
for j in range(1,self.size):
tmp = function(self.array, j)
for i in range(self.size - j):
#assert tmp[i,j] == i*-1, "Error in getting super diagonal!"
pass
def test_6_super_diagonaltranspose(self):
function = self._get_function('superdiagonal')
array = self._gettranspose()
for j in range(1,self.size):
tmp = function(array, j)
for i in range(self.size - j):
msg = "Error in getting super diagonal! tmp[+"+`i`+"] = " + `tmp`
#assert tmp[i,j] == i*1+j, msg
def test_7_sub_diagonal(self):
function = self._get_function('subdiagonal')
for j in range(1,self.size):
tmp = function(self.array, j)
for i in range(self.size - j):
assert tmp[i] == i+j, "Error in getting sub diagonal!"
def _mytearDown(self):
del self.array
self.array = None
class DiagonaMatrixTestCase(_DoubleMatrixTestCase,
_DirectAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalFloatMatrixTestCase(_FloatMatrixTestCase,
_DirectAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalComplexMatrixTestCase(_ComplexMatrixTestCase,
_DirectAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalComplexFloatMatrixTestCase(_ComplexFloatMatrixTestCase,
_DirectAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalLongMatrixTestCase(_LongMatrixTestCase,
_DirectAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalIntMatrixTestCase(_IntMatrixTestCase,
_DirectAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalShortMatrixTestCase(_ShortMatrixTestCase,
_DirectAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonaMatrixUITestCase(_DoubleMatrixTestCase,
_UIAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalFloatMatrixUITestCase(_FloatMatrixTestCase,
_UIAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalComplexMatrixUITestCase(_ComplexMatrixTestCase,
_UIAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalComplexFloatMatrixUITestCase(_ComplexFloatMatrixTestCase,
_UIAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalLongMatrixUITestCase(_LongMatrixTestCase,
_UIAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalIntMatrixUITestCase(_IntMatrixTestCase,
_UIAccess,
_DiagonalMatrixTestCase,
):
pass
class DiagonalShortMatrixUITestCase(_ShortMatrixTestCase,
_UIAccess,
_DiagonalMatrixTestCase,
):
pass
class _MinMaxMatrixTestCase(_DefaultMatrixTestCase):
size = 10
def _mysetUp(self):
tmp = self._get_function('set_zero')
array = tmp((self.size, self.size))
type = get_typecode(array)
array = nummodule.zeros((self.size,self.size)).astype(type)
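        # Plant the minimum (-1) at (5, 4) and the maximum (+1) at (8, 7);
        # the min/max tests below assert these exact values and positions.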
array[5,4] = -1
array[8,7] = 1
self.array = array
def test_max(self):
function = self._get_function('max')
assert(function(self.array)== 1)
def test_min(self):
function = self._get_function('min')
assert(function(self.array)== -1)
def test_minmax(self):
function = self._get_function('minmax')
tmp = function(self.array)
assert(tmp[0] == -1)
assert(tmp[1] == 1)
def test_minmax(self):
function = self._get_function('minmax')
tmp = function(self.array)
assert(tmp[0] == -1)
assert(tmp[1] == 1)
def test_maxindex(self):
function = self._get_function('max_index')
tmp = function(self.array)
assert(tmp[0] == 8)
assert(tmp[1] == 7)
def test_minindex(self):
function = self._get_function('min_index')
tmp = function(self.array)
assert(tmp[0] == 5)
assert(tmp[1] == 4)
def test_minmaxindex(self):
function = self._get_function('minmax_index')
tmp = function(self.array)
assert(tmp[0] == 5)
assert(tmp[1] == 4)
assert(tmp[2] == 8)
assert(tmp[3] == 7)
def _mytearDown(self):
pass
class MinMaxMatrixTestCase(_DoubleMatrixTestCase,
_DirectAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxFloatMatrixTestCase(_FloatMatrixTestCase,
_DirectAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxLongMatrixTestCase(_LongMatrixTestCase,
_DirectAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxIntMatrixTestCase(_IntMatrixTestCase,
_DirectAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxShortMatrixTestCase(_ShortMatrixTestCase,
_DirectAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxMatrixUITestCase(_DoubleMatrixTestCase,
_UIAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxFloatMatrixUITestCase(_FloatMatrixTestCase,
_UIAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxLongMatrixUITestCase(_LongMatrixTestCase,
_UIAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxIntMatrixUITestCase(_IntMatrixTestCase,
_UIAccess,
_MinMaxMatrixTestCase,
):
pass
class MinMaxShortMatrixUITestCase(_ShortMatrixTestCase,
_UIAccess,
_MinMaxMatrixTestCase,
):
pass
class _SwapMatrixTestCase(_DefaultMatrixTestCase):
size = 10
def _mysetUp(self):
tmp = self._get_function('set_zero')
array = tmp((self.size, self.size))
type = get_typecode(array)
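        # Build array[i, j] == size*i + j; array1 holds ten times those
        # values, which the swap tests compare against.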
array = nummodule.fromfunction(lambda x,y,size=self.size : x*size + y,
(self.size, self.size))
self.array = array.astype(type)
self.array1 = (array*10).astype(type)
def test_1_swap(self):
function = self._get_function('swap')
type = get_typecode(self.array)
tmp = function(self.array, self.array1)
function = self._get_function('isnull')
assert(function((tmp[1]/10).astype(type) - tmp[2]))
def test_2_swap_columns(self):
function = self._get_function('swap_columns')
tmp = function(self.array, 3, 5)
assert(tmp[0] == 0)
for i in range(self.size):
assert(tmp[1][i,3]==10*i+5)
assert(tmp[1][i,5]==10*i+3)
def test_3_swap_rows(self):
function = self._get_function('swap_rows')
tmp = function(self.array, 3, 5)
assert(tmp[0] == 0)
for i in range(self.size):
assert(tmp[1][3,i]==i+50)
assert(tmp[1][5,i]==i+30)
def test_4_swap_rowcol(self):
function = self._get_function('swap_rowcol')
tmp = function(self.array, 3, 5)
assert(tmp[0] == 0)
for i in range(self.size):
assert(tmp[1][3,i]==10*i+5)
for i in range(self.size):
if i == 3:
assert(tmp[1][3,5] == 55)
elif i == 5:
assert(tmp[1][5,5] == 33)
else:
assert(tmp[1][i,5]==30+i)
# def test_5_fwrite(self):
# print "Seek finished "
# file = getopentmpfile('w')
# function = self._get_function('fwrite')
# tmp = function(file, self.array)
#
# def test_6_fread(self):
#
# file = getopentmpfile('w+')
# function = self._get_function('fwrite')
# tmp = function(file, (self.array * 2).astype(self.get_typecode(array)))
# assert(tmp == 0)
# file.seek(0)
#
# function = self._get_function('fread')
# tmp = function(file, self.array.shape)
# assert(tmp[0] == 0)
# for i in range(self.size):
# for j in range(self.size):
# assert(tmp[1][i,j] == self.array[i,j] * 2)
#
#
# def test_7_fprintf(self):
# file = getopentmpfile('w')
# function = self._get_function('fprintf')
# tmp = function(file, self.array, self._get_format())
# assert(tmp == 0)
#
# def test_8_fscanf(self):
# file = getopentmpfile('w+')
# function = self._get_function('fprintf')
# ttype = self.get_typecode(array)
# tmp = function(file, (self.array*2).astype(ttype), self._get_format())
#
# function = self._get_function('fscanf')
# file.seek(0)
#
# tmp = function(file, self.array.shape)
# assert(tmp[0] == 0)
# for i in range(self.size):
# for j in range(self.size):
# assert(tmp[1][i,j] == self.array[i,j] * 2)
def _mytearDown(self):
pass
class SwapMatrixTestCase(_DoubleMatrixTestCase,
_DirectAccess,
_SwapMatrixTestCase,
):
pass
class SwapFloatMatrixTestCase(_FloatMatrixTestCase,
_DirectAccess,
_SwapMatrixTestCase,
):
pass
class SwapComplexMatrixTestCase(_ComplexMatrixTestCase,
_DirectAccess,
_SwapMatrixTestCase,
):
pass
class SwapComplexFloatMatrixTestCase(_ComplexFloatMatrixTestCase,
_DirectAccess,
_SwapMatrixTestCase,
):
pass
class SwapLongMatrixTestCase(_LongMatrixTestCase,
_DirectAccess,
_SwapMatrixTestCase,
):
pass
class SwapIntMatrixTestCase(_IntMatrixTestCase,
_DirectAccess,
_SwapMatrixTestCase,
):
pass
class SwapShortMatrixTestCase(_ShortMatrixTestCase,
_DirectAccess,
_SwapMatrixTestCase,
):
pass
class SwapMatrixUITestCase(_DoubleMatrixTestCase,
_UIAccess,
_SwapMatrixTestCase,
):
pass
class SwapFloatMatrixUITestCase(_FloatMatrixTestCase,
_UIAccess,
_SwapMatrixTestCase,
):
pass
class SwapComplexMatrixUITestCase(_ComplexMatrixTestCase,
_UIAccess,
_SwapMatrixTestCase,
):
pass
class SwapComplexFloatMatrixUITestCase(_ComplexFloatMatrixTestCase,
_UIAccess,
_SwapMatrixTestCase,
):
pass
class SwapLongMatrixUITestCase(_LongMatrixTestCase,
_UIAccess,
_SwapMatrixTestCase,
):
pass
class SwapIntMatrixUITestCase(_IntMatrixTestCase,
_UIAccess,
_SwapMatrixTestCase,
):
pass
class SwapShortMatrixUITestCase(_ShortMatrixTestCase,
_UIAccess,
_SwapMatrixTestCase,
):
pass
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Vectors
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
class _SetBasisVectorTestCase(_DefaultVectorTestCase):
function = 'set_basis'
size = 10
basis = 5
def _mysetUp(self):
tmp = self._get_function()
basis = self.basis
tmp1 = tmp(self.size, basis)
assert(tmp1[0] == 0)
self.array = tmp1[1]
def test_1_matrixsize(self):
array_check(self.array, None, (self.size,))
def test_2_diagonale(self):
assert self.array[self.basis] == 1, "Basis not one !"
def test_3_diagonale(self):
for i in range(self.size):
if i == self.basis :
continue
assert self.array[i] == 0, "Basis not zero!"
def _mytearDown(self):
del self.array
self.array = None
class SetBasisVectorTestCase(_DoubleVectorTestCase,
_DirectAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisVectorUITestCase(_DoubleVectorTestCase,
_UIAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisFloatVectorTestCase(_FloatVectorTestCase,
_DirectAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisComplexVectorTestCase(_ComplexVectorTestCase,
_DirectAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisComplexFloatVectorTestCase(_ComplexFloatVectorTestCase,
_DirectAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisLongVectorTestCase(_LongVectorTestCase,
_DirectAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisIntVectorTestCase(_IntVectorTestCase,
_DirectAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisShortVectorTestCase(_ShortVectorTestCase,
_DirectAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisFloatVectorUITestCase(_FloatVectorTestCase,
_UIAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisComplexVectorUITestCase(_ComplexVectorTestCase,
_UIAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisComplexFloatVectorUITestCase(_ComplexFloatVectorTestCase,
_UIAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisLongVectorUITestCase(_LongVectorTestCase,
_UIAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisIntVectorUITestCase(_IntVectorTestCase,
_UIAccess,
_SetBasisVectorTestCase,
):
pass
class SetBasisShortVectorUITestCase(_ShortVectorTestCase,
_UIAccess,
_SetBasisVectorTestCase,
):
pass
class _CharVectorSetup:
    def _mysetup(self):
        tmp = self._get_function()
        self.array = tmp(self.size, chr(137))
#def test_2_diagonale(self):
# assert ord(self.array[self.basis][0]) == 1, "Diagonale not one !"
#def test_3_diagonale(self):
# for i in range(self.size):
# if i == self.basis :
# continue
# assert ord(self.array[i][0]) == 0, \
# "Off Diagonale not zero!"
class SetBasisCharVectorUITestCase(_CharVectorTestCase,
_UIAccess,
_CharVectorSetup,
_SetBasisVectorTestCase,
):
def test_2_diagonale(self):
assert myord(self.array[self.basis]) == 1, "Basis not one !"
def test_3_diagonale(self):
for i in range(self.size):
if i == self.basis :
continue
assert myord(self.array[i]) == 0, "Basis not zero!"
class SetBasisCharVectorTestCase(_CharVectorTestCase,
_DirectAccess,
_CharVectorSetup,
_SetBasisVectorTestCase,
):
def test_2_diagonale(self):
assert myord(self.array[self.basis]) == 1, "Basis not one !"
def test_3_diagonale(self):
for i in range(self.size):
if i == self.basis :
continue
assert myord(self.array[i]) == 0, "Basis not zero!"
class _SetZeroVectorTestCase(_DefaultVectorTestCase):
function = 'set_zero'
size = 10
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp(self.size)
def test_1_matrixsize(self):
assert self.array.shape == (self.size,), "Not of size 10, 10"
def test_2_all(self):
for i in range(self.size):
assert self.array[i] == 0, "Off Diagonale not zero!"
def test_2_isnull(self):
tmp = self._get_function('isnull')
assert tmp(self.array)
def _mytearDown(self):
del self.array
self.array = None
class SetZeroVectorTestCase(_DoubleVectorTestCase,
_DirectAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroFloatVectorTestCase(_FloatVectorTestCase,
_DirectAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroComplexVectorTestCase(_ComplexVectorTestCase,
_DirectAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroComplexFloatVectorTestCase(_ComplexFloatVectorTestCase,
_DirectAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroLongVectorTestCase(_LongVectorTestCase,
_DirectAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroIntVectorTestCase(_IntVectorTestCase,
_DirectAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroShortVectorTestCase(_ShortVectorTestCase,
_DirectAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroVectorUITestCase(_DoubleVectorTestCase,
_UIAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroFloatVectorUITestCase(_FloatVectorTestCase,
_UIAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroComplexVectorUITestCase(_ComplexVectorTestCase,
_UIAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroComplexFloatVectorUITestCase(_ComplexFloatVectorTestCase,
_UIAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroLongVectorUITestCase(_LongVectorTestCase,
_UIAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroIntVectorUITestCase(_IntVectorTestCase,
_UIAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroShortVectorUITestCase(_ShortVectorTestCase,
_UIAccess,
_SetZeroVectorTestCase,
):
pass
class SetZeroCharVectorTestCase(_CharVectorTestCase,
_DirectAccess,
_SetZeroVectorTestCase,
):
def test_2_all(self):
for i in range(self.size):
test = 0
cztmp = myorda(self.array[i])
try:
assert cztmp == 0, "Of Diagonale not zero!"
test = 1
finally:
if test == 0:
print "Of Diagonale not zero (but %s) for class %s !" (cztmp, self)
class SetZeroCharVectorUITestCase(_CharVectorTestCase,
_UIAccess,
_SetZeroVectorTestCase,
):
def test_2_all(self):
for i in range(self.size):
assert myorda(self.array[i]) == 0, "Of Diagonale not zero!"
class _SetAllVectorTestCase(_DefaultVectorTestCase):
function = 'set_all'
size = 10
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp(self.size, self._get_reference_value())
def test_1_matrixsize(self):
array_check(self.array, None, (self.size,))
def test_2_all(self):
for i in range(self.size):
tmp = self.array[i]
try:
test = 0
assert tmp == self._get_reference_value(), "Value not 137!"
test = 1
finally:
if test == 0:
print type(self.array), get_typecode(self.array)
print "self.array[%d] was %s" %(i, tmp)
def _mytearDown(self):
del self.array
self.array = None
class SetAllFloatVectorTestCase(_FloatVectorTestCase,
_DirectAccess,
_SetAllVectorTestCase,
):
pass
class _ComplexVectorSetup:
def _mysetUp(self):
tmp = self._get_function()
self.array = tmp(self.size, 137+0j)
class SetAllComplexVectorTestCase(_ComplexVectorTestCase,
_DirectAccess,
_ComplexVectorSetup,
_SetAllVectorTestCase,
):
pass
class SetAllComplexFloatVectorTestCase(_ComplexFloatVectorTestCase,
_DirectAccess,
_ComplexVectorSetup,
_SetAllVectorTestCase,
):
pass
class SetAllLongVectorTestCase(_LongVectorTestCase,
_DirectAccess,
_SetAllVectorTestCase,
):
pass
class SetAllIntVectorTestCase(_IntVectorTestCase,
_DirectAccess,
_SetAllVectorTestCase,
):
pass
class SetAllShortVectorTestCase(_ShortVectorTestCase,
_DirectAccess,
_SetAllVectorTestCase,
):
pass
class SetAllFloatVectorUITestCase(_FloatVectorTestCase,
_UIAccess,
_SetAllVectorTestCase,
):
pass
class SetAllComplexVectorUITestCase(_ComplexVectorTestCase,
_UIAccess,
_ComplexVectorSetup,
_SetAllVectorTestCase,
):
pass
class SetAllComplexFloatVectorUITestCase(_ComplexFloatVectorTestCase,
_UIAccess,
_ComplexVectorSetup,
_SetAllVectorTestCase,
):
pass
class SetAllLongVectorUITestCase(_LongVectorTestCase,
_UIAccess,
_SetAllVectorTestCase,
):
pass
class SetAllIntVectorUITestCase(_IntVectorTestCase,
_UIAccess,
_SetAllVectorTestCase,
):
pass
class SetAllShortVectorUITestCase(_ShortVectorTestCase,
_UIAccess,
_SetAllVectorTestCase,
):
pass
class SetAllCharVectorTestCase(_CharVectorTestCase,
_DirectAccess,
_CharVectorSetup,
_SetAllVectorTestCase,
):
pass
class SetAllCharVectorUITestCase(_CharVectorTestCase,
_UIAccess,
_CharVectorSetup,
_SetAllVectorTestCase,
):
pass
class _MinMaxVectorTestCase(_DefaultVectorTestCase):
size = 10
def _mysetUp(self):
tmp = self._get_function('set_zero')
array = tmp((self.size))
type = get_typecode(array)
array = nummodule.zeros((self.size,)).astype(type)
array[5] = -1
array[8] = 1
self.array = array
def test_max(self):
function = self._get_function('max')
assert(function(self.array)== 1)
def test_min(self):
function = self._get_function('min')
assert(function(self.array)== -1)
def test_minmax(self):
function = self._get_function('minmax')
tmp = function(self.array)
assert(tmp[0] == -1)
assert(tmp[1] == 1)
def test_maxindex(self):
function = self._get_function('max_index')
tmp = function(self.array)
assert(tmp == 8)
def test_minindex(self):
function = self._get_function('min_index')
tmp = function(self.array)
assert(tmp == 5)
def test_minmaxindex(self):
function = self._get_function('minmax_index')
tmp = function(self.array)
assert(tmp[0] == 5)
assert(tmp[1] == 8)
def _mytearDown(self):
pass
class MinMaxVectorTestCase(_DoubleVectorTestCase,
_DirectAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxFloatVectorTestCase(_FloatVectorTestCase,
_DirectAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxLongVectorTestCase(_LongVectorTestCase,
_DirectAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxIntVectorTestCase(_IntVectorTestCase,
_DirectAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxShortVectorTestCase(_ShortVectorTestCase,
_DirectAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxVectorUITestCase(_DoubleVectorTestCase,
_UIAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxFloatVectorUITestCase(_FloatVectorTestCase,
_UIAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxLongVectorUITestCase(_LongVectorTestCase,
_UIAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxIntVectorUITestCase(_IntVectorTestCase,
_UIAccess,
_MinMaxVectorTestCase,
):
pass
class MinMaxShortVectorUITestCase(_ShortVectorTestCase,
_UIAccess,
_MinMaxVectorTestCase,
):
pass
class _SwapVectorTestCase(_DefaultVectorTestCase):
size = 10
def _mysetUp(self):
tmp = self._get_function('set_zero')
array = tmp(self.size)
type = get_typecode(array)
array = nummodule.arange(self.size)
self.array = array.astype(type)
self.array1 = (array*10).astype(type)
def testswap(self):
function = self._get_function('swap')
type = get_typecode(self.array)
tmp = function(self.array, self.array1)
function = self._get_function('isnull')
assert(function((tmp[1]/10).astype(type) - tmp[2]))
def testswap_elements(self):
function = self._get_function('swap_elements')
tmp = function(self.array, 3, 5)
assert(tmp[0] == 0)
for i in range(self.size):
if i == 3:
assert(tmp[1][3] == 5)
elif i == 5:
assert(tmp[1][5] == 3)
else:
assert(tmp[1][i]==i)
def test_reverse(self):
function = self._get_function('reverse')
tmp = function(self.array)
assert(tmp[0] == 0)
for i in range(self.size):
assert(tmp[1][-(i+1)]==i)
# def test_fwrite(self):
# file = getopentmpfile('w')
# function = self._get_function('fwrite')
# #print "Testing fwrite!"
# tmp = function(file, self.array)
#
# def test_fread(self):
# file = getopentmpfile('w+')
# function = self._get_function('fwrite')
# tmp = function(file, (self.array * 2).astype(self.get_typecode(array)))
# assert(tmp == 0)
# file.seek(0)
# function = self._get_function('fread')
# tmp = function(file, self.array.shape[0])
# assert(tmp[0] == 0)
# for i in range(self.size):
# assert(tmp[1][i] == self.array[i] * 2)
#
# def test_fprintf(self):
# file = getopentmpfile('w')
# function = self._get_function('fprintf')
# tmp = function(file, self.array, self._get_format())
# assert(tmp == 0)
#
# def test_fscanf(self):
# file = getopentmpfile('w+')
# function = self._get_function('fprintf')
# ttype = self.get_typecode(array)
# tmp = function(file, (self.array*2).astype(ttype), self._get_format())
#
# function = self._get_function('fscanf')
# file.seek(0)
#
# tmp = function(file, self.array.shape[0])
# assert(tmp[0] == 0)
# for i in range(self.size):
# assert(tmp[1][i] == self.array[i] * 2)
def _mytearDown(self):
pass
class SwapVectorTestCase(_DoubleVectorTestCase,
_DirectAccess,
_SwapVectorTestCase,
):
pass
class SwapFloatVectorTestCase(_FloatVectorTestCase,
_DirectAccess,
_SwapVectorTestCase,
):
pass
class SwapComplexVectorTestCase(_ComplexVectorTestCase,
_DirectAccess,
_SwapVectorTestCase,
):
pass
class SwapComplexFloatVectorTestCase(_ComplexFloatVectorTestCase,
_DirectAccess,
_SwapVectorTestCase,
):
pass
class SwapLongVectorTestCase(_LongVectorTestCase,
_DirectAccess,
_SwapVectorTestCase,
):
pass
class SwapIntVectorTestCase(_IntVectorTestCase,
_DirectAccess,
_SwapVectorTestCase,
):
pass
class SwapShortVectorTestCase(_ShortVectorTestCase,
_DirectAccess,
_SwapVectorTestCase,
):
pass
class SwapVectorUITestCase(_DoubleVectorTestCase,
_UIAccess,
_SwapVectorTestCase,
):
pass
class SwapFloatVectorUITestCase(_FloatVectorTestCase,
_UIAccess,
_SwapVectorTestCase,
):
pass
class SwapComplexVectorUITestCase(_ComplexVectorTestCase,
_UIAccess,
_SwapVectorTestCase,
):
pass
class SwapComplexFloatVectorUITestCase(_ComplexFloatVectorTestCase,
_UIAccess,
_SwapVectorTestCase,
):
pass
class SwapLongVectorUITestCase(_LongVectorTestCase,
_UIAccess,
_SwapVectorTestCase,
):
pass
class SwapIntVectorUITestCase(_IntVectorTestCase,
_UIAccess,
_SwapVectorTestCase,
):
pass
class SwapShortVectorUITestCase(_ShortVectorTestCase,
_UIAccess,
_SwapVectorTestCase,
):
pass
#del DiagonalComplexFloatMatrixTestCase
#del DiagonalComplexFloatMatrixUITestCase
# del SwapComplexFloatMatrixTestCase
# del SwapComplexFloatMatrixUITestCase
# del SwapComplexFloatVectorTestCase
# del SwapComplexFloatVectorUITestCase
# del SwapComplexMatrixTestCase
# del SwapComplexMatrixUITestCase
# del SwapComplexVectorTestCase
# del SwapComplexVectorUITestCase
# del SwapFloatMatrixTestCase
# del SwapFloatMatrixUITestCase
# del SwapFloatVectorTestCase
# del SwapFloatVectorUITestCase
# del SwapIntMatrixTestCase
# del SwapIntMatrixUITestCase
# del SwapIntVectorTestCase
# del SwapIntVectorUITestCase
# del SwapLongMatrixTestCase
# del SwapLongMatrixUITestCase
# del SwapLongVectorTestCase
# del SwapLongVectorUITestCase
# del SwapMatrixTestCase
# del SwapMatrixUITestCase
# del SwapVectorTestCase
# del SwapVectorUITestCase
# del SwapShortMatrixTestCase
# del SwapShortMatrixUITestCase
# del SwapShortVectorTestCase
# del SwapShortVectorUITestCase
# del SetZeroComplexFloatVectorUITestCase
# del SetZeroComplexFloatVectorTestCase
# del SetZeroComplexVectorUITestCase
# del SetZeroComplexVectorTestCase
# del SetZeroComplexFloatMatrixUITestCase
# del SetZeroComplexFloatMatrixTestCase
# del SetZeroComplexMatrixUITestCase
# del SetZeroComplexMatrixTestCase
# del SetZeroIntMatrixTestCase
# del SetZeroIntMatrixUITestCase
# del SetZeroIntVectorTestCase
# del SetZeroIntVectorUITestCase
# del SetZeroLongMatrixTestCase
# del SetZeroLongMatrixUITestCase
# del SetZeroLongVectorTestCase
# del SetZeroLongVectorUITestCase
# del SetZeroMatrixTestCase
# del SetZeroMatrixUITestCase
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Remove ..
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# These just provide a few values.....
del _DefaultTestCase
del _DefaultVectorTestCase
del _DefaultMatrixTestCase
del _DoubleMatrixTestCase
del _FloatMatrixTestCase
del _ComplexMatrixTestCase
del _ComplexFloatMatrixTestCase
del _LongMatrixTestCase
del _ShortMatrixTestCase
del _IntMatrixTestCase
del _CharMatrixTestCase
del _DoubleVectorTestCase
del _FloatVectorTestCase
del _ComplexVectorTestCase
del _ComplexFloatVectorTestCase
del _LongVectorTestCase
del _ShortVectorTestCase
del _IntVectorTestCase
del _CharVectorTestCase
del _DirectAccess
del _UIAccess
del _SetIdentityMatrixTestCase
del _MinMaxMatrixTestCase
del _DiagonalMatrixTestCase
del _SetZeroMatrixTestCase
del _SetAllMatrixTestCase
del _SwapMatrixTestCase
del _SetBasisVectorTestCase
del _MinMaxVectorTestCase
del _SetZeroVectorTestCase
del _SetAllVectorTestCase
del _SwapVectorTestCase
if __name__ == '__main__':
unittest.main()
| mit | -2,381,157,358,845,461,500 | 32.201764 | 97 | 0.473467 | false |
rawodb/bitcoin | test/functional/interface_bitcoin_cli.py | 1 | 4196 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
cli_response = self.nodes[0].cli("-version").send_cli()
assert("Bitcoin Core RPC client version" in cli_response)
self.log.info("Compare responses from gewalletinfo RPC and `bitcoin-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `bitcoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
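        # Read the node's cookie credentials so the -stdinrpcpass tests below
        # have a known user/password pair to work with.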
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
self.log.info("Test connecting with non-existing RPC cookie file")
assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `bitcoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
| mit | -2,408,801,850,134,865,400 | 55.702703 | 160 | 0.676358 | false |
CitoEngine/cito_engine | app/tests/test_comments_view.py | 1 | 2629 | """Copyright 2014 Cyrus Dasadia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.test import TestCase, Client
from django.contrib.auth.models import User
from appauth.models import Perms
from . import factories
class TestCommentsView(TestCase):
def setUp(self):
self.client = Client()
self.user = User.objects.create_user(username='hodor', password='hodor',
first_name='Hodor', last_name='HodorHodor',
email='[email protected]')
def test_add_comments_without_login(self):
"""Testing adding comments without login"""
response = self.client.post('/comments/add/')
self.assertRedirects(response, '/login/?next=/comments/add/')
def login(self):
self.client.login(username='hodor', password='hodor')
def test_add_comments_without_perms(self):
"""Testing adding comments without login"""
Perms.objects.create(user=self.user, access_level=5).save()
self.login()
response = self.client.post('/comments/add/')
self.assertTemplateUsed(response, 'unauthorized.html')
def test_doing_get_to_comments_add_view(self):
"""Doing an HTTP get instead of POST to comments view should silently
fail to /incidents/
"""
Perms.objects.create(user=self.user, access_level=4).save()
self.login()
response = self.client.get('/comments/add/')
self.assertRedirects(response, '/incidents/')
def test_post_a_comment(self):
"""Actual comments post"""
incident = factories.IncidentFactory()
Perms.objects.create(user=self.user, access_level=4).save()
self.login()
response = self.client.post('/comments/add/', data={'incident': incident.id,
'user': self.user.id,
'text': 'Do you remember bo2k?'}, follow=True)
self.assertRedirects(response, '/incidents/view/%s/' % incident.id)
self.assertContains(response, 'Do you remember bo2k?') | apache-2.0 | -662,581,660,333,275,400 | 41.419355 | 106 | 0.637124 | false |
mit-ll/LO-PHI | lophi-automation/lophi_automation/dataconsumers/logudp.py | 1 | 1294 | """
Class to handle logging over UDP
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import socket
import logging
logger = logging.getLogger(__name__)
class LogUDP:
def __init__(self,address,port):
"""
        Initialize our UDP logger
@param address: Address of remote server
@param port: port of listening server
"""
self.address = address
self.port = port
self.SOCK = None
self.connected = False
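    # Editor's illustrative sketch, not part of the original class; the host,
    # port and payload below are assumptions for illustration only:
    #
    #   udp_log = LogUDP('192.168.1.10', 5140)
    #   udp_log.append('lophi: sensor heartbeat\n')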
def _connect(self):
"""
Create our socket
"""
if self.connected:
return True
try:
self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.connected = True
return True
except:
logger.error("Could not open UDP socket")
return False
def append(self, data):
"""
Write raw data to the UDP socket
@param data: Data to be written to the UDP socket
"""
assert self._connect()
try:
self.SOCK.sendto(data,(self.address,self.port))
except:
logger.error("Could not send UDP packet") | bsd-3-clause | -5,603,330,343,461,050,000 | 22.125 | 72 | 0.506955 | false |
ageis/securedrop | securedrop/store.py | 1 | 6181 | # -*- coding: utf-8 -*-
import os
import re
import config
import zipfile
import crypto_util
import uuid
import tempfile
import subprocess
from cStringIO import StringIO
import gzip
from werkzeug import secure_filename
from secure_tempfile import SecureTemporaryFile
import logging
log = logging.getLogger(__name__)
VALIDATE_FILENAME = re.compile(
"^(?P<index>\d+)\-[a-z0-9-_]*(?P<file_type>msg|doc\.(gz|zip)|reply)\.gpg$").match
class PathException(Exception):
"""An exception raised by `util.verify` when it encounters a bad path. A path
can be bad when it is not absolute or not normalized.
"""
pass
def verify(p):
"""Assert that the path is absolute, normalized, inside `config.STORE_DIR`, and
matches the filename format.
"""
if not os.path.isabs(config.STORE_DIR):
raise PathException("config.STORE_DIR(%s) is not absolute" % (
config.STORE_DIR, ))
# os.path.abspath makes the path absolute and normalizes '/foo/../bar' to
# '/bar', etc. We have to check that the path is normalized before checking
# that it starts with the `config.STORE_DIR` or else a malicious actor could
# append a bunch of '../../..' to access files outside of the store.
if not p == os.path.abspath(p):
raise PathException("The path is not absolute and/or normalized")
# Check that the path p is in config.STORE_DIR
if os.path.relpath(p, config.STORE_DIR).startswith('..'):
raise PathException("Invalid directory %s" % (p, ))
if os.path.isfile(p):
filename = os.path.basename(p)
ext = os.path.splitext(filename)[-1]
if filename == '_FLAG':
return True
if ext != '.gpg':
# if there's an extension, verify it's a GPG
raise PathException("Invalid file extension %s" % (ext, ))
if not VALIDATE_FILENAME(filename):
raise PathException("Invalid filename %s" % (filename, ))
def path(*s):
"""Get the normalized, absolute file path, within `config.STORE_DIR`."""
joined = os.path.join(os.path.abspath(config.STORE_DIR), *s)
absolute = os.path.abspath(joined)
verify(absolute)
return absolute
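# --- Editor's illustrative sketch (not part of the original module) ---
# A small, unused helper showing how `path` and `verify` are meant to be used
# together; the sid and filename defaults are assumptions for illustration.
def _example_store_path(sid='abc123', filename='1-msg.gpg'): # pragma: no cover
    # Joins the pieces under config.STORE_DIR and validates the result; a
    # traversal attempt such as path(sid, '../../etc/passwd') would raise
    # PathException because the normalized path escapes the store directory.
    return path(sid, filename)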
def get_bulk_archive(selected_submissions, zip_directory=''):
"""Generate a zip file from the selected submissions"""
zip_file = tempfile.NamedTemporaryFile(prefix='tmp_securedrop_bulk_dl_',
dir=config.TEMP_DIR,
delete=False)
sources = set([i.source.journalist_designation for i in selected_submissions])
# The below nested for-loops are there to create a more usable
# folder structure per #383
with zipfile.ZipFile(zip_file, 'w') as zip:
for source in sources:
submissions = [s for s in selected_submissions if s.source.journalist_designation == source]
for submission in submissions:
filename = path(submission.source.filesystem_id,
submission.filename)
verify(filename)
document_number = submission.filename.split('-')[0]
zip.write(filename, arcname=os.path.join(
zip_directory,
source,
"%s_%s" % (document_number,
submission.source.last_updated.date()),
os.path.basename(filename)
))
return zip_file
def save_file_submission(sid, count, journalist_filename, filename, stream):
sanitized_filename = secure_filename(filename)
# We store file submissions in a .gz file for two reasons:
#
# 1. Downloading large files over Tor is very slow. If we can
# compress the file, we can speed up future downloads.
#
# 2. We want to record the original filename because it might be
# useful, either for context about the content of the submission
# or for figuring out which application should be used to open
# it. However, we'd like to encrypt that info and have the
# decrypted file automatically have the name of the original
# file. Given various usability constraints in GPG and Tails, this
# is the most user-friendly way we have found to do this.
encrypted_file_name = "{0}-{1}-doc.gz.gpg".format(
count,
journalist_filename)
encrypted_file_path = path(sid, encrypted_file_name)
with SecureTemporaryFile("/tmp") as stf:
with gzip.GzipFile(filename=sanitized_filename, mode='wb', fileobj=stf) as gzf:
# Buffer the stream into the gzip file to avoid excessive
# memory consumption
while True:
buf = stream.read(1024 * 8)
if not buf:
break
gzf.write(buf)
crypto_util.encrypt(stf, config.JOURNALIST_KEY, encrypted_file_path)
return encrypted_file_name
def save_message_submission(sid, count, journalist_filename, message):
filename = "{0}-{1}-msg.gpg".format(count, journalist_filename)
msg_loc = path(sid, filename)
crypto_util.encrypt(message, config.JOURNALIST_KEY, msg_loc)
return filename
def rename_submission(sid, orig_filename, journalist_filename):
check_submission_name = VALIDATE_FILENAME(orig_filename)
if check_submission_name:
parsed_filename = check_submission_name.groupdict()
if parsed_filename.get('file_type'):
new_filename = "{}-{}-{}.gpg".format(
parsed_filename['index'], journalist_filename,
parsed_filename['file_type'])
try:
os.rename(path(sid, orig_filename), path(sid, new_filename))
except OSError:
pass
else:
return new_filename # Only return new filename if successful
return orig_filename
def secure_unlink(fn, recursive=False):
verify(fn)
command = ['srm']
if recursive:
command.append('-r')
command.append(fn)
subprocess.check_call(command)
return "success"
def delete_source_directory(source_id):
secure_unlink(path(source_id), recursive=True)
return "success"
| agpl-3.0 | 4,611,774,342,310,068,000 | 36.011976 | 104 | 0.629672 | false |
jprawiharjo/Nerddit | Storm/Streaming/Push_to_Cassandra_Bolt.py | 1 | 3976 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 13:37:20 2016
@author: jprawiharjo
"""
from cassandra.cluster import Cluster
import cassandra
from collections import namedtuple
from pyleus.storm import SimpleBolt
from Streaming.Doc_Processor import DataFrame
import logging
log = logging.getLogger('cassandra_bolt')
# create CassandraCluster
CassandraCluster = Cluster(["ec2-52-27-157-187.us-west-2.compute.amazonaws.com",
"ec2-52-34-178-13.us-west-2.compute.amazonaws.com",
"ec2-52-35-186-215.us-west-2.compute.amazonaws.com",
'ec2-52-10-19-240.us-west-2.compute.amazonaws.com'])
keyspace = 'wikidata'
tablename = "titlelinks"
class Push_to_Cassandra(SimpleBolt):
def initialize(self):
self.session = CassandraCluster.connect(keyspace)
self.session.default_consistency_level = cassandra.ConsistencyLevel.ALL
#self.session.encoder.mapping[tuple] = self.session.encoder.cql_encode_set_collection
queryAddNew1 = "INSERT INTO {} (id, title, linksto) VALUES (?, ?, ?) IF NOT EXISTS".format(tablename)
self.preparedAddNew1 = self.session.prepare(queryAddNew1)
queryAddNew2 = "INSERT INTO {} (id, title, linksto, referredby) VALUES (?, ?, ?, ?) IF NOT EXISTS".format(tablename)
self.preparedAddNew2 = self.session.prepare(queryAddNew2)
queryUpdateReferredbyTitle = "UPDATE {} SET id = ?, linksto = ? WHERE title = ? IF EXISTS".format(tablename)
self.preparedReferredbyTitle = self.session.prepare(queryUpdateReferredbyTitle)
queryUpdateReferredbyOnly = "UPDATE {} SET referredby = referredby + ? WHERE title = ? IF EXISTS".format(tablename)
self.preparedReferredbyOnly = self.session.prepare(queryUpdateReferredbyOnly)
queryAddNewReferredBy = "INSERT INTO {} (title, referredby) VALUES (?, ?) IF NOT EXISTS".format(tablename)
self.preparedAddNewReferredBy = self.session.prepare(queryAddNewReferredBy)
self.bulk_data = []
log.debug("Initialized")
def process_tick(self):
log.debug("Process Tick")
log.debug(len(self.bulk_data))
linkage = {}
for row in self.bulk_data:
if len(row.Links) > 0:
log.debug('Processing Links')
for link in row.Links:
if link in linkage.keys():
linkage[link].add(row.Title)
else:
linkage[link] = set([row.Title])
for row in self.bulk_data:
log.debug(row.Title)
if row.Title in linkage.keys():
bound1 = self.preparedAddNew2.bind((str(row.Id), str(row.Title), row.Links, linkage[row.Title]))
else:
bound1 = self.preparedAddNew1.bind((str(row.Id), str(row.Title), row.Links))
res = self.session.execute(bound1)
res = res.current_rows[0].applied
#log.debug("Insertion Result = " + str(res))
if not(res):
bound2 = self.preparedReferredbyTitle.bind((str(row.Id), row.Links, str(row.Title)))
self.session.execute_async(bound2)
#Inserting into database
for k,v in linkage.iteritems():
log.debug(k)
log.debug(v)
bound3 = self.preparedReferredbyOnly.bind((v, k))
res = self.session.execute(bound3)
res = res.current_rows[0].applied
if not(res):
bound4 = self.preparedAddNewReferredBy.bind((k, v))
res = self.session.execute_async(bound4)
self.bulk_data = []
def process_tuple(self, tup):
result = DataFrame(*tup.values)
self.bulk_data.append(result)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
filename='/tmp/cassandra_bolt.log',
filemode='a',
)
Push_to_Cassandra().run() | gpl-3.0 | 9,044,771,838,797,030,000 | 37.240385 | 124 | 0.608903 | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/enums/types/placeholder_type.py | 1 | 1630 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.enums',
marshal='google.ads.googleads.v7',
manifest={
'PlaceholderTypeEnum',
},
)
class PlaceholderTypeEnum(proto.Message):
r"""Container for enum describing possible placeholder types for
a feed mapping.
"""
class PlaceholderType(proto.Enum):
r"""Possible placeholder types for a feed mapping."""
UNSPECIFIED = 0
UNKNOWN = 1
SITELINK = 2
CALL = 3
APP = 4
LOCATION = 5
AFFILIATE_LOCATION = 6
CALLOUT = 7
STRUCTURED_SNIPPET = 8
MESSAGE = 9
PRICE = 10
PROMOTION = 11
AD_CUSTOMIZER = 12
DYNAMIC_EDUCATION = 13
DYNAMIC_FLIGHT = 14
DYNAMIC_CUSTOM = 15
DYNAMIC_HOTEL = 16
DYNAMIC_REAL_ESTATE = 17
DYNAMIC_TRAVEL = 18
DYNAMIC_LOCAL = 19
DYNAMIC_JOB = 20
IMAGE = 21
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -9,196,301,452,706,389,000 | 27.103448 | 74 | 0.634969 | false |
julierthanjulie/PedestrianTracking | generate_frames.py | 1 | 3979 |
"""
This code generates frames from CSV values that can be stitched together using FFMPEG
to animate pedestrian data. This version produces an animation at 4x speed.
"""
print "Importing..."
# Please ensure the following dependencies are installed before use:
import pylab
import numpy as np
import itertools
import sys, getopt
import operator
import collections
drawing_by_frame = []
#
def generate_frames(argv):
# Some default values if nothing is provided in command line arguments.
traces = 'bubble_pop_traces.csv'
background = 'trails_480.png'
# Get command line arguments.
# -f specify a file name. This code expects csv files in the format PedestrianID, X, Y, FrameNum
    # -b specify a background image. Any format available to pylab is acceptable.
try:
opts,args = getopt.getopt(argv, "f:b:")
except getopt.GetoptError:
print "Getopt Error"
exit(2)
for opt, arg in opts:
if opt == "-f":
traces = arg
elif opt == "-b":
background = arg
# Name each frame based on the filename
figure_name = traces.split("/")[-1].split(".")[-2]
# Load up csv file
trace = np.loadtxt(traces, comments=';', delimiter=',')
traces = itertools.groupby(trace, lambda x:x[0])
# These values should match those in pedestrian_tracking.py
w,h=640,360
border=20
# Some values from trail validation
valid = 0
avg_length = 0
num_traces = 0
# Load up background image.
background = pylab.imread(background)
pylab.imshow(background)
for id,t in traces:
pts = np.array(list(t))
invalid = False
# Validate Trails
if (pts[0,1]>border and pts[0,1]<w-border) and (pts[0,2]>border and pts[0,2]<h-border):
invalid = True
if (pts[-1,1]>border and pts[-1,1]<w-border) and (pts[-1,2]>border and pts[-1,2]<h-border):
invalid = True
if len(pts) < 200:
invalid = True
if ((pts[0,2] > h-border) and (pts[0,1] > w/2-75 and pts[0,1] < w/2+75) or (pts[-1,2] > h-border) and (pts[-1,1] > w/2-75 and pts[-1,1] < w/2+75)):
invalid = True
# For all valid trails, prepare them for generating animated trails by frame number
if not invalid:
num_traces += 1
avg_length += len(pts)
# Drawing colour for traces given as RGB
colour = (0,0,1)
for pt in pts:
this_frame = [pt[3], pt[1], pt[2], pt[0]]
drawing_by_frame.append(this_frame)
valid += 1
x = np.clip(pts[:,1],0,w)
y = np.clip(pts[:,2],0,h)
print "Valid Trails: " , valid, " Average Length:" , avg_length/num_traces
drawing_by_frame.sort()
last_frame = drawing_by_frame[-1][0]
current_frame = drawing_by_frame[0][0]
drawing_dict = collections.defaultdict(list)
count = 0
while len(drawing_by_frame) > 0:
#print "Next Frame, " , current_frame
pylab.imshow(background)
while drawing_by_frame[0][0] == current_frame:
list_one = drawing_by_frame.pop(0)
x = drawing_dict[list_one[3]]
x.append([list_one[1], list_one[2]])
drawing_dict[list_one[3]] = x
# Adjust mod value here to adjust frame drawing frequency
# Draw stuff here
if (current_frame % 10 ==0):
print "Percentage Complete: " , (current_frame/last_frame)*100
draw_dict(drawing_dict, w, h, border, figure_name, current_frame, count)
count += 1
pylab.clf()
current_frame = drawing_by_frame[0][0]
def draw_dict(dict, w, h, border, figure_name, frame, count):
for trace in dict:
print trace
pts = dict[trace]
pylab.plot([p[0] for p in pts], [p[1] for p in pts],'-',color=(0,0,1),alpha=0.5, linewidth=2)
pylab.xlim(0,w)
pylab.ylim(h,0)
pylab.axis('off')
pylab.subplots_adjust(0,0,1,1,0,0)
pylab.savefig("Frames/" + figure_name + "_" + str(count).zfill(6) + '.png', dpi=150,bbox_inches='tight', pad_inches=0)
#pylab.savefig("Frames/" + 'frame' + str(int(frame)) + '.png', dpi=150,bbox_inches='tight', pad_inches=0)
if __name__ == "__main__":
print "Starting Frame Generation"
generate_frames(sys.argv[1:])
| mit | -6,176,049,097,840,916,000 | 22.96988 | 149 | 0.643629 | false |
by46/simplekit | simplekit/email/__init__.py | 1 | 4151 | import httplib
import os.path
import requests
import six
from simplekit import settings
from simplekit.exceptions import MailException
PRIORITY_NORMAL = 0
PRIORITY_LOW = 1
PRIORITY_HIGH = 2
CONTENT_TYPE_HTML = 0
CONTENT_TYPE_TEXT = 1
ENCODING_UTF8 = 0
ENCODING_ASCII = 1
ENCODING_UTF32 = 2
ENCODING_UNICODE = 3
MEDIA_TYPE_GIF = 0
MEDIA_TYPE_JPEG = 1
MEDIA_TYPE_TIFF = 2
MEDIA_TYPE_PDF = 3
MEDIA_TYPE_RTF = 4
MEDIA_TYPE_SOAP = 5
MEDIA_TYPE_ZIP = 6
MEDIA_TYPE_OTHER = 7
MAIL_TYPE_SMTP = 1
MAIL_TYPE_LONDON2 = 0
class SmtpSetting(dict):
def __init__(self, subject_encoding, body_encoding, attachments=None):
kwargs = dict(SubjectEncoding=subject_encoding,
BodyEncoding=body_encoding,
Attachments=attachments)
super(SmtpSetting, self).__init__(**kwargs)
self.__dict__ = self
class MailAttachment(dict):
def __init__(self, filename, file_content, media_type=MEDIA_TYPE_OTHER):
kwargs = dict(FileName=filename,
FileContent=file_content,
MediaType=media_type)
super(MailAttachment, self).__init__(**kwargs)
self.__dict__ = self
class LondonIISetting(dict):
def __init__(self, company_code, country_code, language_code, system_id, template_id, mail_template_variables):
kwargs = dict(CompanyCode=company_code,
CountryCode=country_code,
LanguageCode=language_code,
SystemID=system_id,
TemplateID=template_id,
MailTemplateVariables=mail_template_variables)
super(LondonIISetting, self).__init__(**kwargs)
self.__dict__ = self
class MailTemplateVariable(dict):
def __init__(self, key, value):
kwargs = dict(Key=key, Value=value)
super(MailTemplateVariable, self).__init__(**kwargs)
def send_email_inner(sender, to, subject, body, cc=None, bcc=None, priority=PRIORITY_NORMAL,
content_type=CONTENT_TYPE_TEXT,
mail_type=None, smtp_setting=None, london_2_setting=None):
if isinstance(to, (list, tuple)):
to = ';'.join(to)
body = dict(From=sender,
To=to,
CC=cc,
BCC=bcc,
Subject=subject,
Body=body,
Priority=priority,
ContentType=content_type,
MailType=mail_type,
SmtpSetting=smtp_setting,
LondonIISetting=london_2_setting)
response = requests.post(settings.URL_EMAIL, json=body,
headers={'Content-Type': 'Application/json', 'accept': 'application/json'})
if response.status_code != httplib.OK:
del body['SmtpSetting']
raise MailException("Send mail use api {0} status code: {1}\n body : {2}\n response content : {3}".format(
settings.URL_EMAIL, response.status_code, body, response.content))
def send_email(sender, to, subject, body, cc=None, bcc=None, priority=PRIORITY_NORMAL,
content_type=CONTENT_TYPE_TEXT,
files=None):
attachments = []
import base64
if files:
for item in files:
if isinstance(item, six.string_types):
filename = os.path.basename(item)
file_content = open(item, 'rb').read()
file_content = base64.b64encode(file_content)
media_type = MEDIA_TYPE_OTHER
attachment = MailAttachment(filename, file_content, media_type)
attachments.append(attachment)
else:
attachments.append(item)
smtp_setting = SmtpSetting(ENCODING_UTF8, ENCODING_UTF8, attachments)
send_email_inner(sender, to, subject, body, cc, bcc, priority, content_type, MAIL_TYPE_SMTP, smtp_setting)
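# --- Editor's illustrative sketch (not part of the original module) ---
# Shows that a pre-built MailAttachment can be passed in `files` instead of a
# file path; every address and file name below is an assumption.
def _example_send_with_attachment(): # pragma: no cover - example only
    import base64
    pdf_bytes = base64.b64encode(open('report.pdf', 'rb').read())
    attachment = MailAttachment('report.pdf', pdf_bytes, MEDIA_TYPE_PDF)
    send_email('[email protected]', ['[email protected]'], '(info) report',
               'See attached report.', files=[attachment])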
if __name__ == '__main__':
send_email('[email protected]', '[email protected]', '(info) testing', 'testing body',
files=['__init__.py'])
| mit | -3,068,504,204,268,324,000 | 33.177966 | 115 | 0.582751 | false |
bixbydev/Bixby_v3 | database/mysql/base.py | 1 | 2501 | #!/usr/bin/env python
# Filename: base.py
#=====================================================================#
# Copyright (c) 2015 Bradley Hilton <[email protected]> #
# Distributed under the terms of the GNU GENERAL PUBLIC LICENSE V3. #
#=====================================================================#
import os
import sys
import time
from config import config
from logger.log import log
log.debug('mysql.base Loaded')
print config.MYSQL_BACKUPPATH
try:
import MySQLdb
except ImportError, e:
'The Module MySQLdb is not installed'
log.critical('Failed to load MySQLdb Module: '+str(e))
sys.exit(1)
def backup_mysql():
"""Backups the DB until things get very large I am going to do this every
time. Or until I am sure my code is good."""
dnsdt = str(time.strftime('%Y%m%d%H%M%S', time.localtime()))
dump_file = os.path.join(config.MYSQL_BACKUPPATH
, config.MYSQL_DB+'_bak_'+dnsdt+'.sql')
log.info("""Creating mysqldump: '%s'""" %dump_file)
os.system("""mysqldump -h '%s' -u '%s' -p'%s' '%s' > '%s'""" \
%(config.MYSQL_HOST,
config.MYSQL_USER,
config.MYSQL_PASSWORD,
config.MYSQL_DB,
dump_file))
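# Editor's illustrative sketch (not part of the original module); the dump file
# name below is an assumption:
#
#   backup_mysql() # writes <MYSQL_DB>_bak_<YYYYmmddHHMMSS>.sql to MYSQL_BACKUPPATH
#   restore_mysql(config.MYSQL_DB, '/backups/bixby_bak_20150101120000.sql')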
def restore_mysql(db, sqlfile):
if not os.path.exists(sqlfile):
raise TypeError("""This is totally the wrong error because
I don't know the right error""")
log.info("Restoring DB: %s from File: %s" %(db, sqlfile))
os.system("""mysql -h '%s' -u '%s' -p'%s' '%s' < %s""" \
%(config.MYSQL_HOST,
config.MYSQL_USER,
config.MYSQL_PASSWORD,
db, sqlfile))
class CursorWrapper(object):
"""Wrapper to open a MySQL Connection and creates a Cursor"""
def __init__(self, host=config.MYSQL_HOST,
user=config.MYSQL_USER,
passwd=config.MYSQL_PASSWORD,
db=config.MYSQL_DB):
self.example = 'Testing'
self.connection = MySQLdb.connect (host = host,
user = user,
passwd = passwd,
db = db)
self.cursor = self.connection.cursor()
log.info("""Setting autocommit = \"True\"""")
self.connection.autocommit(True)
log.info("Connected to MySQL Host: %s Database: %s" % (host, db))
def close(self):
self.cursor.close()
log.info('MySQL Cursor Closed')
# self.connection.commit()
self.connection.close()
log.info('MySQL Connection Closed')
| gpl-3.0 | -985,612,127,546,954,500 | 30.658228 | 75 | 0.566973 | false |
pentestfail/TA-FireEye_TAP | bin/input_module_fireeye_tap_incidents.py | 1 | 4568 |
# encoding = utf-8
import os
import sys
import time
import datetime
import json
def validate_input(helper, definition):
api_env = definition.parameters.get('api_env', None)
instanceid = definition.parameters.get('instance_id', None)
apikey = definition.parameters.get('apikey', None)
api_limit = definition.parameters.get('api_limit', None)
api_timeout = definition.parameters.get('api_timeout', None)
pass
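# Editor's note (illustrative, not from the original add-on): collect_events
# below builds its checkpoint key as "incidents_<api_env>_<instance_id>" and
# sends a query filtered on the last checkpoint time, e.g. (value assumed):
#
#   {"updateDate": {"$gte": "2019-01-01T00:00:00Z"}}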
def collect_events(helper, ew):
# Retrieve runtime variables
opt_environment = helper.get_arg('api_env')
opt_instanceid = helper.get_arg('instance_id')
opt_apikey = helper.get_arg('apikey')
opt_limit = helper.get_arg('api_limit')
opt_timeout = float(helper.get_arg('api_timeout'))
# Create checkpoint key
opt_checkpoint = "incidents_" + opt_environment + "_" + opt_instanceid
#Create last status entry for storage as checkpoint
current_status = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
#Check for last query execution data in kvstore & generate if not present
try:
last_status = helper.get_check_point(opt_checkpoint) or time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(0))
helper.log_debug("[" + opt_instanceid + "] TAP Incidents - Last successful checkpoint time: " + str(last_status))
except Exception as e:
helper.log_error("[" + opt_instanceid + "] TAP Incidents - Unable to retrieve last execution checkpoint!")
raise e
# use simple rest call to load the events
header = {}
data = {}
parameter = {}
parameter['limit'] = opt_limit
parameter['sort'] = "-createDate"
parameter['withCount'] = "1"
parameter['includes'] = "revisions._updatedBy"
parameter['query'] = str('{"updateDate":{"$gte":"' + last_status + '"}}')
url = "https://" + opt_environment + ".fireeye.com/tap/id/" + opt_instanceid + "/api/v1/incidents"
method = 'GET'
header['x-mansfield-key'] = opt_apikey
try:
# Leverage helper function to send http request
response = helper.send_http_request(url, method, parameters=parameter, payload=None, headers=header, cookies=None, verify=True, cert=None, timeout=opt_timeout, use_proxy=True)
# Return API response code
r_status = response.status_code
# Return API request status_code
        if r_status != 200:
helper.log_error("[" + opt_instanceid + "] Incidents API unsuccessful status_code=" + str(r_status))
response.raise_for_status()
# Return API request as JSON
obj = response.json()
if obj is None:
helper.log_info("[" + opt_instanceid + "] No new incidents retrieved from TAP.")
# Iterate over incidents in array & index
i=0
for incident in obj.get("incidents"):
singleIncident = (obj.get("incidents")[i])
singleIncident['tap_instance'] = opt_instanceid
singleIncident['tap_environment'] = opt_environment
# Rename underscore fields so Splunk will index values
singleIncident['_alert'] = singleIncident['_alert']
singleIncident['updatedBy'] = singleIncident['_updatedBy']
singleIncident['createdBy'] = singleIncident['_createdBy']
singleIncident['assignedTo'] = singleIncident['_assignedTo']
# Remove underscore fieldnames and values
del singleIncident['_alert']
del singleIncident['_updatedBy']
del singleIncident['_createdBy']
del singleIncident['_assignedTo']
event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=json.dumps(singleIncident))
try:
ew.write_event(event)
helper.log_debug("[" + opt_instanceid + "] Added incident:" + str(singleIncident['id']))
except Exception as error:
helper.log_error("[" + opt_instanceid + "] Unable to add incident:" + str(singleIncident['id']))
i = i + 1
#Update last completed execution time
helper.save_check_point(opt_checkpoint, current_status)
helper.log_info("[" + opt_instanceid + "] Incidents collection complete. Records added: " + str(i))
helper.log_debug("[" + opt_instanceid + "] TAP Incidents - Storing checkpoint time: " + current_status)
except Exception as error:
helper.log_error("[" + opt_instanceid + "] TAP Incidents - An unknown error occurred!")
raise error | mit | -2,108,804,593,542,460,400 | 43.794118 | 183 | 0.632443 | false |
ODInfoBiz/csvengine-ui | csvengine/data_cache.py | 1 | 7704 | '''
Created on Dec 7, 2015
@author: jumbrich
'''
import hashlib
import requests
from StringIO import StringIO
import os
import urllib
import urlnorm
from werkzeug.exceptions import RequestEntityTooLarge
from pyyacp.yacp import YACParser
from csvengine.utils import assure_path_exists
import structlog
log =structlog.get_logger()
class DataCache(object):
DB="db"
WEB="web"
TMP="tmp"
def __init__(self, config, max_file_size):
self.submit_folder = {
DataCache.WEB: assure_path_exists(config['web_submit']),
DataCache.DB: assure_path_exists(config['db_submit']),
DataCache.TMP: assure_path_exists(config['tmp_submit'])
}
self.cleaned_folder = {
DataCache.WEB: assure_path_exists(config['web_cleaned']),
DataCache.DB: assure_path_exists(config['db_cleaned']),
DataCache.TMP: assure_path_exists(config['tmp_cleaned'])
}
self.max_file_size = max_file_size
def submitToWeb(self, file=None, url=None, content=None):
return self.submit(file=file, url=url, content=content, toFolder=DataCache.WEB)
def submitToTmp(self, file=None, url=None, content=None):
return self.submit(file=file, url=url, content=content, toFolder=DataCache.TMP)
def submitToDB(self, file=None, url=None, content=None):
return self.submit(file=file, url=url, content=content, toFolder=DataCache.DB)
def submit(self, file=None, url=None, content=None, toFolder=None):
"""
1) retrieve and compute hash of original content
2) store submitted content using hash as filename IFF not exist
optional URL as symlink
:param file:
:param url:
:param content:
:param toFolder:
:return: the md5 of the original file
"""
if toFolder in self.submit_folder and toFolder in self.cleaned_folder:
s_folder = self.submit_folder[toFolder]
c_folder = self.cleaned_folder[toFolder]
else:
return None
if file:
md5 = storeFile(file, s_folder)
elif url:
md5 = storeURL(url, s_folder, max_file_size=self.max_file_size)
elif content:
md5 = storeContent(content, s_folder)
else:
return None
# check if cleaned exists
submitted_path=os.path.join(s_folder, md5)
cleaned_path = os.path.join(c_folder, md5)
# at first look for stored cleaned version
if os.path.exists(cleaned_path):
return md5
else:
# generate and store cleaned version
table = YACParser(filename=submitted_path)
cleaned = table.generate()
storeContent(cleaned, c_folder, md5=md5)
return md5
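    # Editor's illustrative sketch, not part of the original class; the config
    # keys, size limit and URL below are assumptions for illustration only:
    #
    #   cache = DataCache(config, max_file_size=10 * 1024 * 1024)
    #   md5 = cache.submitToWeb(url='http://example.org/data.csv')
    #   parser = cache.getParser(md5)  # cleaned table, ready to iterate
    #   raw = cache.getSubmit(md5)     # originally submitted bytes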
def getParser(self, fileHash, folder=None, original=False):
"""
returns a parser and stores cleaned file if not already available
"""
if folder:
file_path = os.path.join(folder, fileHash)
if os.path.exists(file_path):
if folder in self.cleaned_folder.values():
return YACParser(filename=file_path, skip_guess_encoding=True)
else:
return YACParser(filename=file_path)
else:
if not original:
for f in self.cleaned_folder:
cleaned_path = os.path.join(self.cleaned_folder[f], fileHash)
if os.path.exists(cleaned_path):
return YACParser(filename=cleaned_path, skip_guess_encoding=True)
for f in self.submit_folder:
submit_path = os.path.join(self.submit_folder[f], fileHash)
cleaned_path = os.path.join(self.cleaned_folder[f], fileHash)
if os.path.exists(submit_path):
table = YACParser(filename=submit_path)
if not os.path.exists(cleaned_path):
cleaned = table.generate()
                        storeContent(cleaned, self.cleaned_folder[f], md5=fileHash)
return table
return None
def exists(self, fileHash):
for f in self.cleaned_folder:
cleaned_path = os.path.join(self.cleaned_folder[f], fileHash)
if os.path.exists(cleaned_path):
return True
return False
def getSubmit(self, fileHash, folder=False):
if folder:
submit_file = getFileContent(fileHash, self.submit_folder[folder])
if submit_file:
return submit_file
else:
for f in self.submit_folder:
submit_file = getFileContent(fileHash, self.submit_folder[f])
if submit_file:
return submit_file
return None
def getFileName(self, url, folder=None):
url_norm = urlnorm.norm(url.strip())
url_fname = urllib.quote_plus(url_norm)
if folder:
submit_path = os.path.join(self.submit_folder[folder], url_fname)
if os.path.exists(submit_path):
return os.readlink(submit_path)
else:
for f in self.submit_folder:
submit_path = os.path.join(self.submit_folder[f], url_fname)
if os.path.exists(submit_path):
return os.readlink(submit_path)
return None
def getFileContent(fileID, path=None):
if path:
fileID = os.path.join(path,fileID)
if not os.path.exists(fileID):
return None
with open(fileID) as f:
return f.read()
def getFileName(url, path):
url_norm = urlnorm.norm(url.strip())
url_fname = urllib.quote_plus(url_norm)
f=os.path.join(path,url_fname)
return os.readlink(f)
def getURLContent(url, path):
with open(getFileName(url, path)) as f:
return f.read()
def storeFile(f, path):
c = f.read()
md5 = hashlib.md5(c).hexdigest()
fpath = os.path.join(path, md5)
log.debug("storing file", file=fpath)
with open(fpath,'w') as f:
f.write(c)
log.info("file stored", file=fpath)
return md5
def storeContent(content, path, md5=None):
if not md5:
md5 = hashlib.md5(content).hexdigest()
fpath = os.path.join(path, md5)
log.debug("storing content", file=fpath)
with open(fpath,'w') as f:
f.write(content)
log.info("content stored", file=fpath)
return md5
def storeURL(url, path, max_file_size):
#download URL and send fileID
log.debug("downloading url", url=url, max_file_size=max_file_size )
try:
r = requests.get(url, stream=True)
size = 0
ctt = StringIO()
sig = hashlib.md5()
for chunk in r.iter_content(2048):
size += len(chunk)
ctt.write(chunk)
sig.update(chunk)
if size > max_file_size:
r.close()
raise RequestEntityTooLarge()
md5 = sig.hexdigest()
ctt.seek(0)
fpath=os.path.join(path, md5)
if os.path.exists(fpath):
print 'file exists', fpath
return md5
log.debug("storing url", url=url, file=fpath)
with open (fpath,'w') as fd:
t = ctt.read(1048576)
while t:
fd.write(t)
t = ctt.read(1048576)
url_norm = urlnorm.norm(url.strip())
url_fname = urllib.quote_plus(url_norm)
f = os.path.join(path, url_fname)
os.symlink(fpath,f)
log.debug("url stored", url=url, file=fpath)
return md5
except Exception as e:
raise e | gpl-3.0 | 3,413,626,609,846,684,000 | 30.970954 | 89 | 0.579439 | false |
turtlewit/GSHS_RPG | AdventureEngine/CoreEngine/input.py | 2 | 3088 | #------------------------------------------------------------------------------#
# Copyright 2016-2017 Golden Sierra Game Development Class #
# This file is part of Verloren (GSHS_RPG). #
# #
# Verloren (GSHS_RPG) is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# Verloren (GSHS_RPG) is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with Verloren (GSHS_RPG). If not, see <http://www.gnu.org/licenses/>. #
#------------------------------------------------------------------------------#
import sys
import curses
class Input:
#renderer = None
commandHistory = []
command = None
unf_command = ""
cheese = "cheese"
takeTextInput = False
char = None
def Update(self, renderer):
Input.command = None
Input.char = None
if renderer:
currentCharacter = renderer.m_screen.getch()
if currentCharacter != -1:
if currentCharacter != curses.KEY_RESIZE:
Input.char = currentCharacter
if Input.takeTextInput:
if currentCharacter == ord('\n'):
if len(Input.unf_command.split()) > 0:
Input.commandHistory.insert(0,Input.command)
Input.command = Input.unf_command
else:
Input.command = 10
renderer.m_cmd = ""
Input.unf_command = ""
if sys.platform == 'linux' \
or sys.platform == 'linux2' \
or sys.platform == 'linux-armv7l':
if currentCharacter == 127 \
or currentCharacter == curses.KEY_BACKSPACE:
renderer.m_cmd = renderer.m_cmd[:-1]
Input.unf_command = Input.unf_command[:-1]
else:
if currentCharacter == 8:
renderer.m_cmd = renderer.m_cmd[:-1]
Input.unf_command = Input.unf_command[:-1]
if currentCharacter >=32 and currentCharacter <= 126:
if renderer.m_vorCmd:
if len(Input.unf_command) \
< renderer.BUFFER_X \
- len(renderer.m_vorCmd) \
- 1:
renderer.m_cmd += chr(currentCharacter)
Input.unf_command += chr(currentCharacter)
if currentCharacter in [
curses.KEY_UP,
curses.KEY_DOWN,
curses.KEY_LEFT,
curses.KEY_RIGHT,
27
]:
Input.command = currentCharacter
| gpl-3.0 | 1,061,857,247,540,585,600 | 35.204819 | 80 | 0.510687 | false |
TheOriginalBDM/Lazy-Cleaner-9000 | code/clean_sweep_vision.py | 1 | 6258 | #!/usr/bin/env python
from picamera.array import PiRGBArray
from picamera import PiCamera
import cv2
import time
from colormath.color_diff import delta_e_cie2000
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
def nothing(*arg):
pass
def is_allowed_color(cur_int, avg_int, m_val):
b = abs(cur_int[0] - avg_int[0])
g = abs(cur_int[1] - avg_int[1])
r = abs(cur_int[2] - avg_int[2])
if (b > m_val or g > m_val or r > m_val):
return True
else:
return False
def make_gt_val(val, min_val):
if val < min_val:
val = min_val
return val
def make_odd(val):
if val % 2 == 0:
val += 1
return val
def get_avg_bgr(in_img, in_cntrs):
ttlA = 0
sum_roiA_mean = (0, 0, 0)
avg_roiA_mean = (0, 0, 0)
ttlA = len(in_cntrs)
for cnt2 in in_cntrs:
x2, y2, w2, h2 = cv2.boundingRect(cnt2)
        roiA = in_img[y2:y2+h2, x2:x2+w2]
roiA_mean = cv2.mean(roiA)
int_roiA_mean = (int(roiA_mean[0]), int(roiA_mean[1]), int(roiA_mean[2]))
sum_roiA_mean = (int_roiA_mean[0] + sum_roiA_mean[0], int_roiA_mean[1] + sum_roiA_mean[1], int_roiA_mean[2] + sum_roiA_mean[2])
if ttlA > 0:
avg_roiA_mean = (sum_roiA_mean[0]/ttlA, sum_roiA_mean[1]/ttlA, sum_roiA_mean[2]/ttlA)
return avg_roiA_mean
window_nm = 'img_cntrls'
cam_res_w = 640
cam_res_h = 480
cam_fr_rt = 32
cv2.namedWindow(window_nm)
cv2.createTrackbar('blur_size', window_nm, 7 , 21, nothing)
cv2.createTrackbar('canny_min', window_nm, 156, 255, nothing)
cv2.createTrackbar('thresh_min', window_nm, 7 , 255, nothing)
cv2.createTrackbar('min_area', window_nm, 5 , 2000, nothing)
cv2.createTrackbar('max_area', window_nm, 40000 , 90000, nothing)
cv2.createTrackbar('max_delta', window_nm, 20 , 100, nothing)
cv2.createTrackbar('get_avg', window_nm, 0 , 1, nothing)
cv2.createTrackbar('get_mode', window_nm, 0, 7, nothing)
camera = PiCamera()
camera.resolution = (cam_res_w, cam_res_h)
camera.framerate = cam_fr_rt
rawCapture = PiRGBArray(camera, size=(cam_res_w, cam_res_h))
time.sleep(0.2)
avg_roi_mean = (0, 0, 0) #b, g, r
delta_color = 000.0000
for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True):
#############################################
### GET THE CURRENT FRAME FROM THE CAMERA ###
#############################################
im = frame.array
im_raw = im #keep a copy in case we want to look at it later
####################
### GET SETTINGS ###
####################
s = cv2.getTrackbarPos('get_avg', window_nm)
blur_size = cv2.getTrackbarPos('blur_size',window_nm)
canny_min = cv2.getTrackbarPos('canny_min',window_nm)
thresh_min = cv2.getTrackbarPos('thresh_min',window_nm)
min_area = cv2.getTrackbarPos('min_area',window_nm)
max_area = cv2.getTrackbarPos('max_area',window_nm)
max_delta = cv2.getTrackbarPos('max_delta',window_nm)
mode = cv2.getTrackbarPos('get_mode', window_nm)
############################
### ENSURE CORRECT VALUE ###
############################
blur_size = make_odd(blur_size)
blur_size = make_gt_val(blur_size, 0)
thresh_min = make_odd(thresh_min)
thresh_min = make_gt_val(thresh_min, 0)
########################################################
### START IMAGE PROCESSING TO FIND OBJECTS IN RANGE ###
########################################################
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(imgray, (blur_size, blur_size))
#edged = cv2.Canny(blur, canny_min, 255)
ret3, thresh = cv2.threshold(blur, thresh_min, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
###S = 1 means get an average of the overall RGB picture
if s == 1:
blur_size == 0
thresh_size = 1
min_area = 0
ovr_avg = get_avg_bgr(im, contours)
avg_roi_mean = ovr_avg
print avg_roi_mean
cv2.setTrackbarPos('get_avg', window_nm, 0)
else:
ttl_area = 0
ttl_cntrs = len(contours)
ttl_color = 0
sum_roi_mean = (0, 0, 0)
for cnt in contours:
a = cv2.contourArea(cnt)
### DO WE HAVE SOMETHING IN THE RIGHT SIZE (NO NEED TO PICK UP CARS) ###
if min_area < a < max_area:
ttl_area += 1
                x, y, w, h = cv2.boundingRect(cnt)
roi = im[y:y+h, x:x+w]
roi_mean = cv2.mean(roi)
int_roi_mean = (int(roi_mean[0]), int(roi_mean[1]), int(roi_mean[2]))
b, g, r = avg_roi_mean
bckgrnd_lab = convert_color(sRGBColor(r, g, b), LabColor)
contColor_lab = convert_color(sRGBColor(roi_mean[2],roi_mean[1], roi_mean[0]), LabColor)
delta_color = round(delta_e_cie2000(bckgrnd_lab, contColor_lab),1)
if delta_color >= max_delta:
# if is_allowed_color(int_roi_mean, avg_roi_mean, max_dev):
                    cv2.rectangle(im, (x, y), (x+w, y+h), int_roi_mean, 2)
ttl_color += 1
strLoc = str(x) + ',' + str(y) + ':' + str(delta_color)
cv2.putText(im, strLoc, (x,y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0,0,0), 1)
strTTL = str(ttl_cntrs) + ' - ' + str(ttl_area) + ' - ' + str(ttl_color)
cv2.putText(im, str(strTTL), (20,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 0), 2)
cv2.putText(im, str(avg_roi_mean), (20, cam_res_h - 20) ,cv2.FONT_HERSHEY_PLAIN, 2.0, avg_roi_mean, 2)
if mode == 0:
cv2.imshow('imgview', im_raw)
print 'Raw image view'
elif mode == 1:
cv2.imshow('imgview', imgray)
print 'Grayscale view'
elif mode == 2:
cv2.imshow('imgview', blur)
print 'Blur view'
elif mode == 3:
cv2.imshow('imgview', blur)
print 'Blur view'
elif mode == 4:
cv2.imshow('imgview', thresh)
print 'Threshold view'
else:
cv2.imshow('imgview', im)
print 'Contour overlay on raw view'
ch = cv2.waitKey(5)
rawCapture.truncate(0)
if ch == 27:
break
cv2.destroyAllWindows()
| gpl-3.0 | 5,175,269,980,451,224,000 | 30.606061 | 135 | 0.563279 | false |
raiden-network/raiden | raiden/utils/upgrades.py | 1 | 8374 | import os
import sqlite3
import sys
from contextlib import closing
from glob import escape, glob
from pathlib import Path
import filelock
import structlog
from raiden.constants import RAIDEN_DB_VERSION
from raiden.storage.sqlite import SQLiteStorage
from raiden.storage.versions import VERSION_RE, filter_db_names, latest_db_file
from raiden.utils.typing import Any, Callable, DatabasePath, List, NamedTuple
class UpgradeRecord(NamedTuple):
from_version: int
function: Callable
UPGRADES_LIST: List[UpgradeRecord] = []
log = structlog.get_logger(__name__)
def get_file_lock(db_filename: Path) -> filelock.FileLock:
lock_file_name = f"{db_filename}.lock"
return filelock.FileLock(lock_file_name)
def update_version(storage: SQLiteStorage, version: int) -> None:
cursor = storage.conn.cursor()
cursor.execute(
'INSERT OR REPLACE INTO settings(name, value) VALUES("version", ?)', (str(version),)
)
def get_file_version(db_path: Path) -> int:
match = VERSION_RE.match(os.path.basename(db_path))
assert match, f'Database name "{db_path}" does not match our format'
file_version = int(match.group(1))
return file_version
def get_db_version(db_filename: Path) -> int:
"""Return the version value stored in the db"""
msg = f"Path '{db_filename}' expected, but not found"
assert os.path.exists(db_filename), msg
# Perform a query directly through SQL rather than using
# storage.get_version()
# as get_version will return the latest version if it doesn't
# find a record in the database.
conn = sqlite3.connect(str(db_filename), detect_types=sqlite3.PARSE_DECLTYPES)
cursor = conn.cursor()
try:
cursor.execute('SELECT value FROM settings WHERE name="version";')
result = cursor.fetchone()
except sqlite3.OperationalError:
raise RuntimeError("Corrupted database. Database does not the settings table.")
if not result:
raise RuntimeError(
"Corrupted database. Settings table does not contain an entry the db version."
)
return int(result[0])
def _copy(old_db_filename: Path, current_db_filename: Path) -> None:
old_conn = sqlite3.connect(old_db_filename, detect_types=sqlite3.PARSE_DECLTYPES)
current_conn = sqlite3.connect(current_db_filename, detect_types=sqlite3.PARSE_DECLTYPES)
with closing(old_conn), closing(current_conn):
old_conn.backup(current_conn)
def delete_dbs_with_failed_migrations(valid_db_names: List[Path]) -> None:
for db_path in valid_db_names:
file_version = get_file_version(db_path)
with get_file_lock(db_path):
db_version = get_db_version(db_path)
# The version matches, nothing to do.
if db_version == file_version:
continue
elif db_version > file_version:
raise RuntimeError(
f"Impossible database version. "
f"The database {db_path} has too high a version ({db_version}), "
f"this should never happen."
)
# The version number in the database is smaller then the current
# target, this means that a migration failed to execute and the db
# is partially upgraded.
else:
os.remove(db_path)
class UpgradeManager:
"""Run migrations when a database upgrade is necessary.
Skip the upgrade if either:
- There is no previous DB
- There is a current DB file and the version in settings matches.
Upgrade procedure:
- Delete corrupted databases.
- Copy the old file to the latest version (e.g. copy version v16 as v18).
- In a transaction: Run every migration. Each migration must decide whether
to proceed or not.
"""
def __init__(self, db_filename: DatabasePath, **kwargs: Any) -> None:
base_name = os.path.basename(db_filename)
match = VERSION_RE.match(base_name)
assert match, f'Database name "{base_name}" does not match our format'
self._current_db_filename = Path(db_filename)
self._kwargs = kwargs
def run(self) -> None:
# First clear up any partially upgraded databases.
#
# A database will be partially upgraded if the process receives a
# SIGKILL/SIGINT while executing migrations. NOTE: It's very probable
# the content of the database remains consistent, because the upgrades
# are executed inside a migration, however making a second copy of the
# database does no harm.
escaped_path = escape(str(self._current_db_filename.parent))
paths = glob(f"{escaped_path}/v*_log.db")
valid_db_names = filter_db_names(paths)
delete_dbs_with_failed_migrations(valid_db_names)
# At this point we know every file version and db version match
# (assuming there are no concurrent runs).
paths = glob(f"{escaped_path}/v*_log.db")
valid_db_names = filter_db_names(paths)
latest_db_path = latest_db_file(valid_db_names)
# First run, there is no database file available
if latest_db_path is None:
return
file_version = get_file_version(latest_db_path)
# The latest version matches our target version, nothing to do.
if file_version == RAIDEN_DB_VERSION:
return
if file_version > RAIDEN_DB_VERSION:
raise RuntimeError(
f"Conflicting database versions detected, latest db version is v{file_version}, "
f"Raiden client version is v{RAIDEN_DB_VERSION}."
f"\n\n"
f"Running a downgraded version of Raiden after an upgrade is not supported, "
f"because the transfers done with the new client are not understandable by the "
f"older."
)
if RAIDEN_DB_VERSION >= 27 and file_version <= 26 and file_version > 1:
msg = (
f"Your Raiden database is version {file_version} and there is no compatible "
f"migration to version {RAIDEN_DB_VERSION} available.\n"
"You need to either start a new Raiden node with a different account, or "
"close and settle all channels, and start over with a fresh database.\n\n"
"More information on this topic at "
"https://raiden-network.readthedocs.io/en/latest/other/known-issues.html"
"#database-upgrades\n\n"
"If you are on **mainnet** and affected by this, please create an issue at "
"https://github.com/raiden-network/raiden/issues/new?title=Mainnet%20Migration%20"
f"{file_version}%20{RAIDEN_DB_VERSION}"
)
log.warning(msg)
sys.exit(msg)
self._upgrade(
target_file=self._current_db_filename,
from_file=latest_db_path,
from_version=file_version,
)
def _upgrade(self, target_file: Path, from_file: Path, from_version: int) -> None:
with get_file_lock(from_file), get_file_lock(target_file):
_copy(from_file, target_file)
# Only instantiate `SQLiteStorage` after the copy. Otherwise
# `_copy` will deadlock because only one connection is allowed to
# `target_file`.
with SQLiteStorage(target_file) as storage:
log.debug(f"Upgrading database from v{from_version} to v{RAIDEN_DB_VERSION}")
try:
version_iteration = from_version
with storage.transaction():
for upgrade_record in UPGRADES_LIST:
if upgrade_record.from_version < from_version:
continue
version_iteration = upgrade_record.function(
storage=storage,
old_version=version_iteration,
current_version=RAIDEN_DB_VERSION,
**self._kwargs,
)
update_version(storage, RAIDEN_DB_VERSION)
except BaseException as e:
log.error(f"Failed to upgrade database: {e}")
raise
| mit | -4,492,969,264,185,019,000 | 36.891403 | 98 | 0.614999 | false |
tymmothy/dds3x25 | dds3x25/dds.py | 1 | 12274 | #!/usr/bin/env python
"""
This is an interface library for Hantek DDS-3X25 arbitrary waveform generator.
Licenced LGPL2+
Copyright (C) 2013 Domas Jokubauskis ([email protected])
Copyright (C) 2014 Tymm Twillman ([email protected])
"""
import struct
import math
import collections
# dds3x25 imports...
from usb_interface import *
from packet import *
def samplepoint_encode(value):
SIGN_BIT = (1 << 11)
encoded = abs(value)
if encoded > DDS.MAX_POINT_VALUE:
msg = "Value {0} is out of range ({1}-{2})".format(value, -DDS.MAX_POINT_VALUE, DDS.MAX_POINT_VALUE)
raise ValueError(msg)
# Note: 0 is negative value
if value > 0:
encoded = (DDS.MAX_POINT_VALUE + 1) - encoded
else:
encoded = encoded | SIGN_BIT
return struct.pack("<H", encoded)
def samplepoint_chunks(data):
"""Cut samplepoint data into 32-point chunks.
If necessary, add padding to the last chunk to make it 64 bytes.
"""
SAMPLEPOINT_CHUNK_SIZE=32
for i in xrange(0, len(data), SAMPLEPOINT_CHUNK_SIZE):
chunkdata = data[i:i+SAMPLEPOINT_CHUNK_SIZE]
chunk = "".join([ samplepoint_encode(x) for x in chunkdata ])
if len(chunk) < SAMPLEPOINT_CHUNK_SIZE * 2:
chunk += "\x91\x1c" * ((SAMPLEPOINT_CHUNK_SIZE - (len(chunk) / 2)))
yield chunk
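# Editor's worked example (not part of the original module), assuming the
# 12-bit range DDS.MAX_POINT_VALUE == 2047 used by samplepoint_encode above:
#
#   samplepoint_encode(2047) -> packs 2048 - 2047 = 1
#   samplepoint_encode(0)    -> packs 0 | 0x800 (sign bit set, "negative zero")
#   samplepoint_encode(-100) -> packs 100 | 0x800 == 2148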
class DDS(object):
# Hantek 3x25 USB Vendor & Product IDs
USB_VID = 0x0483
USB_PID = 0x5721
# Core DAC clock -> 200 MHz
DAC_CLOCK = int(200e6)
# Maximum DAC clock divider
DAC_CLOCK_DIV_MAX = 131070
# Maximum # of sample points
MAX_POINTS = 4095
# Maximum value of a point
MAX_POINT_VALUE = (1 << 11) - 1
NUM_DIGITAL_OUTPUTS = 12
NUM_DIGITAL_INPUTS = 6
def __init__(self, idVendor=USB_VID, idProduct=USB_PID, **kwargs):
"""Initialize a DDS instance and connect to the hardware.
Args:
idVendor (int): 16-bit USB Vendor ID (VID) for the DDS hardware.
idProduct (int): 16-bit USB Product ID (PID) for the DDS hardware.
Kwargs:
See DDS.configure() for the list of kwargs that __init__ understands.
"""
# Set up defaults for instance variables.
self._ext_trigger = None
self._oneshot = False
self._counter_mode = False
self._programmable_output = True
self._digital_output = 0
self._clock_divider = 128
# do not initialize USB device if used for unit testing
if kwargs.get('testing', False):
return
self._in_ep, self._out_ep = dds_usb_open(idVendor, idProduct)
self.configure(**kwargs)
def transfer(self, data):
self._out_ep.write(data)
return self._in_ep.read(self._in_ep.wMaxPacketSize)
def configure(self, **kwargs):
"""Update the 3x25's configuration settings.
Kwargs:
reset_trig (bool): If True, reset the DDS external trigger.
reset_counter (bool): If True, reset the DDS counter.
oneshot (bool): If True, only output one wave (not continuous).
counter_mode (bool): Set true to enable counter mode.
If True, the 3x25 counts pulses.
If False, the 3x25 measures frequency.
programmable_output (bool): Set true to enable programmable digital output.
If True, digital output pins are controlled by setting digital_output.
If False, digital output pins follow the DAC output value.
ext_trigger ([None, 0 or 1]): Configure external trigger mode.
If None, external triggering is disabled.
If 1, external triggering occurs on rising pulse edges.
If 0, external triggering occurs on falling pulse edges.
digital_output (int): 12-bit unsigned value whose bits are written
to the 3x25's digital output pins.
Note: Only used when programmable_output is enabled.
clock_divider (int): Divisor to use for 200Mhz DAC clock to generate
sample output clock.
Must be an even value from 0-131070
"""
reset_trigger = bool(kwargs.get('reset_trig', False))
reset_counter = bool(kwargs.get('reset_counter', False))
oneshot = bool(kwargs.get('oneshot', self._oneshot))
counter_mode = bool(kwargs.get('counter_mode', self._counter_mode))
programmable_output = bool(kwargs.get('programmable_output', self._programmable_output))
ext_trigger = kwargs.get('ext_trigger', self._ext_trigger)
if ext_trigger not in [ None, 0, 1 ]:
raise ValueError("Invalid value for ext_trigger (must be 1, 0 or None)")
digital_output = int(kwargs.get('digital_output', self._digital_output))
clock_divider = int(kwargs.get('clock_divider', self._clock_divider))
if (clock_divider < 1) or (clock_divider > 131070) or (clock_divider > 1 and clock_divider & 1):
msg = "Clock divider ({0}) must be 1 or an even value between 2 and {1}.".format(clock_divider, DDS.DAC_CLOCK_DIV_MAX)
raise ValueError(msg)
self._oneshot = oneshot
self._counter_mode = counter_mode
self._programmable_output = programmable_output
self._ext_trigger = ext_trigger
self._digital_output = digital_output
self._clock_divider = clock_divider
configure_packet = ConfigurePacket(self, reset_trigger=reset_trigger, reset_counter=reset_counter)
response = self.transfer(str(configure_packet))
response = self._parse_configure_packet_response(response)
return response
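    # Editor's illustrative sketch, not part of the original driver; the values
    # below are assumptions for illustration only:
    #
    #   dds.configure(clock_divider=2,       # 200 MHz / 2 = 100 MHz sample clock
    #                 oneshot=False,
    #                 ext_trigger=1,          # trigger on rising edges
    #                 programmable_output=True,
    #                 digital_output=0b000000001111)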
def _parse_configure_packet_response(self, packet):
vals = struct.unpack("<HII", packet)
return {
'digital_input' : vals[0],
'frequency' : vals[1] * 2 if self._counter_mode is False else None,
'ticks' : None if vals[2] == 0xffffffff else vals[2],
'counts' : vals[1] if self._counter_mode is True else None,
}
def set_waveform(self, points, clock_divider=None, shift_points=0):
count = len(points)
if shift_points:
points = collections.deque(points)
points.rotate(shift_points)
response = self.transfer(str(PointCountPacket(count, is_start=True)))
assert response[0] == 0xcc
for chunk in samplepoint_chunks(points):
response = self.transfer(chunk)
assert response[0] == 0xcc
response = self.transfer(str(PointCountPacket(count)))
assert response[0] == 0xcc
if clock_divider is not None:
self.configure(clock_divider=clock_divider)
def reset_counter(self):
"""Reset the 3x25 counter state."""
self.configure(reset_counter=True)
def reset_trigger(self):
"""Reset the 3x25 external trigger."""
self.configure(reset_trigger=True)
def digital_write(self, pin, pin_state):
"""Set the output state of a digital output pin.
Args:
pin (int): Number of pin to control.
pin_state (int/bool): If 1/True, pin will be set high.
If 0/False, pin will be set low.
"""
pin_state = 1 if pin_state else 0
digital_output = self._digital_output & ~(1 << pin)
digital_output |= (pin_state << pin)
self.configure(digital_output=digital_output)
def digital_write_port(self, pin_states):
"""Set the output states of all digital output pins.
Args:
pin_states (int): Value comprised of bits to write to
the digital output pins.
"""
        self.configure(digital_output=pin_states)
def digital_read(self, pin):
"""Read the state of a digital input pin.
Args:
pin (int): Input pin # to read.
Returns:
0 if the pin is low, 1 if the pin is high.
"""
digital_in = self.configure()['digital_input']
return 1 if (digital_in & (1 << pin)) else 0
def digital_read_port(self):
"""Read the state of all input pins as one integer value.
Returns:
Integer w/bits set to the states of the input pins.
"""
return self.configure()['digital_input']
def count_in_frequency(self):
"""Get the input frequency at the 3x25's COUNT IN port.
The frequency is only available when the 3x25 is NOT in counter mode.
Returns:
Frequency (in Hz) at the COUNT IN port, or None if in counter mode.
"""
return self.configure()['frequency']
def count_in_counts(self):
"""Get the # of pulses counted at the 3x25's COUNT IN port since last reset.
The count is only available when the 3x25 IS in counter mode.
use .reset_counter() to reset the value to 0.
Returns:
# of pulses counted at the COUNT IN port, or None if not in counter mode.
"""
return self.configure()['counts']
def count_in_ticks(self):
return self.configure()['ticks']
@property
def ext_trigger(self):
return self._ext_trigger
@ext_trigger.setter
def ext_trigger(self, trig):
if trig is not None and trig != 0 and trig != 1:
raise ValueError("Invalid value for external trigger (should be 1, 0 or None)")
self.configure(ext_trigger=trig)
@property
def oneshot_mode(self):
return self._oneshot
@oneshot_mode.setter
def oneshot_mode(self, val):
val = True if val else False
self.configure(oneshot=val)
@property
def counter_mode(self):
return self._counter_mode
@counter_mode.setter
def counter_mode(self, val):
val = True if val else False
self.configure(counter_mode=val)
@property
def programmable_output(self):
return self._programmable_output
@programmable_output.setter
def programmable_output(self, val):
self.configure(programmable_output=val)
@staticmethod
def points_and_div_for_freq(freq):
# Calculate divisor based on using max # of available samples possible.
# -- ceil( DAC_CLOCK / (frequency * MAX_POINTS) )
freq = int(freq)
div = (DDS.DAC_CLOCK + (freq - 1) * DDS.MAX_POINTS) / (freq * DDS.MAX_POINTS)
# Adjust if odd value -- divisor has to be 1 or a multiple of 2
if div > 1 and div & 1:
div += 1
# Calculate # of sample points to use w/this divider to get closest
# to requested frequency
# -- round( DAC_CLOCK / (divider * frequency) )
npoints = (DDS.DAC_CLOCK + (div * freq / 2)) / (div * freq)
# Calculate actual frequency
actual = (DDS.DAC_CLOCK / div) / npoints
return (npoints, div, actual)
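    # Editor's worked example (not part of the original source), assuming the
    # 200 MHz DAC clock and 4095-point limit defined above:
    #
    #   points_and_div_for_freq(1000) -> (4000, 50, 1000)
    #     divider 50 yields a 4 MHz sample clock, and 4000 samples per period
    #     reproduce 1000 Hz exactly, so the returned `actual` equals the request.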
def generate_sine(self, freq, amplitude=(1<<11)-1, offset=0, phase=0.0, shift=0):
phase = float(phase)
npoints, div, actual = DDS.points_and_div_for_freq(freq)
points = []
for i in range(npoints):
i = float(i)
point = (amplitude * math.sin((2.0 * math.pi * i / npoints) + phase)) + offset
points.append(int(point))
self.set_waveform(points, clock_divider=div, shift_points=shift)
return actual
def generate_square(self, freq, duty_cycle=0.5, amplitude=(1<<11)-1, offset=0, phase=0.0, shift=0):
phase = float(phase)
npoints, div, actual = DDS.points_and_div_for_freq(freq)
points = []
for i in range(npoints):
shifted = int(i + (phase * npoints) / (2.0 * math.pi)) % npoints
point = amplitude if shifted < (duty_cycle * npoints) else -amplitude
points.append(int(point + offset))
self.set_waveform(points, clock_divider=div, shift_points=shift)
return actual
if __name__ == "__main__":
import time
freq = 6000000
d = DDS()
# print "Generating square wave @ {0} hz".format(freq)
# d.generate_square(25000000, 0.50)
# time.sleep(10)
print "Generating sine wave @ {0} hz".format(freq)
d.generate_sine(freq)
d.programmable_output=True
d.reset_counter()
d.counter_mode = True
| lgpl-2.1 | 106,578,187,649,829,000 | 32.535519 | 130 | 0.607952 | false |