code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 3-1.05M)
---|---|---|---|---|---
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
from xml.dom import minidom
import io
"""
using xml.etree.ElementTree
"""
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
"""
rough_string = etree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="\t")
root = Element('person')
tree = ElementTree(root)
name = Element('name')
root.append(name)
name.text = 'Julie'
root.set('id', '123')
# print etree.tostring(root)
print(prettify(root))
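# For reference, prettify(root) returns roughly the following (a sketch; the
# exact whitespace is up to minidom, which also prepends an XML declaration):
#   <?xml version="1.0" ?>
#   <person id="123">
#       <name>Julie</name>
#   </person>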
tree.write('person.xml')  # pass a filename (or a binary-mode file) so ElementTree can write bytes
with io.open('person2.xml', 'w', encoding='utf-8') as f2:
    f2.write(prettify(root))
| hoaibang07/Webscrap | sources/xmldemo/xmlcreate.py | Python | gpl-2.0 | 731 |
table = [None]*10000
def store(string):
    """Store the input string in the
    hash table."""
hash_index = calculate_hash_value(string)
if table[hash_index]:
table[hash_index].append(string)
else:
        table[hash_index] = [string]
def lookup(string):
"""Return the hash value if the
string is already in the table.
Return -1 otherwise."""
index = calculate_hash_value(string)
if table[index]:
for each_key in table[index]:
if each_key == string:
                return index
return -1
def calculate_hash_value(string):
"""Helper function to calulate a
hash value from a string."""
# hash_value is calculated as ** unicode(A)*100 + unicode(h) **
return ord(string[0])*100+ord(string[1])
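# Worked example of the scheme above (illustrative, assumes ASCII code points):
# ord('A') = 65 and ord('k') = 107, so 'Akash' lands in bucket
# 65 * 100 + 107 = 6607, well within the 10000-slot table. Strings shorter
# than two characters would raise an IndexError with this scheme.
assert calculate_hash_value('Akash') == 65 * 100 + 107  # == 6607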
# Setup
print (calculate_hash_value('Akash'))
print (lookup('Akash'))
# Test store
store('Akash')
print (lookup('Akash'))
| Akash1684/ScriptsPy | hash_map.py | Python | mit | 1,000 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""
Counts words in UTF8 encoded, '\n' delimited text received from the
network every second.
Usage: stateful_network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Spark Streaming
would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/streaming/stateful_network_wordcount.py \
localhost 9999`
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: stateful_network_wordcount.py <hostname> <port>", file=sys.stderr)
sys.exit(-1)
sc = SparkContext(appName="PythonStreamingStatefulNetworkWordCount")
ssc = StreamingContext(sc, 1)
ssc.checkpoint("checkpoint")
# RDD with initial state (key, value) pairs
initialStateRDD = sc.parallelize([(u'hello', 1), (u'world', 1)])
def updateFunc(new_values, last_sum):
return sum(new_values) + (last_sum or 0)
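    # Quick sanity check of the update semantics (an illustrative aside, not
    # part of the original example): last_sum is None the first time a key is
    # seen, so the running count is this batch's hits plus the previous sum,
    # treating a missing previous sum as 0.
    assert updateFunc([1, 1, 1], 2) == 5
    assert updateFunc([1], None) == 1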
lines = ssc.socketTextStream(sys.argv[1], int(sys.argv[2]))
running_counts = lines.flatMap(lambda line: line.split(" "))\
.map(lambda word: (word, 1))\
.updateStateByKey(updateFunc, initialRDD=initialStateRDD)
running_counts.pprint()
ssc.start()
ssc.awaitTermination()
| brad-kaiser/spark | examples/src/main/python/streaming/stateful_network_wordcount.py | Python | apache-2.0 | 2,274 |
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
from nose.tools import (assert_true, assert_equal, assert_raises,
assert_not_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import numpy as np
import copy as cp
import warnings
from scipy import fftpack
import matplotlib
from mne import (io, Epochs, read_events, pick_events, read_epochs,
equalize_channels, pick_types, pick_channels, read_evokeds,
write_evokeds)
from mne.epochs import (
bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
EpochsArray, concatenate_epochs, _BaseEpochs)
from mne.utils import (_TempDir, requires_pandas, slow_test,
clean_warning_registry, run_tests_if_main,
requires_scipy_version)
from mne.io.meas_info import create_info
from mne.io.proj import _has_eeg_average_ref_proj
from mne.event import merge_events
from mne.io.constants import FIFF
from mne.externals.six import text_type
from mne.externals.six.moves import zip, cPickle as pickle
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = 2
def _get_data():
raw = io.Raw(raw_fname, add_eeg_ref=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
return raw, events, picks
reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
flat = dict(grad=1e-15, mag=1e-15)
clean_warning_registry() # really clean warning stack
def test_reject():
"""Test epochs rejection
"""
raw, events, picks = _get_data()
# cull the list just to contain the relevant event
events = events[events[:, 2] == event_id, :]
selection = np.arange(3)
drop_log = [[]] * 3 + [['MEG 2443']] * 4
assert_raises(TypeError, pick_types, raw)
picks_meg = pick_types(raw.info, meg=True, eeg=False)
assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject='foo')
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks_meg, preload=False, reject=dict(eeg=1.))
assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject=dict(foo=1.))
data_7 = dict()
keep_idx = [0, 1, 2]
for preload in (True, False):
for proj in (True, False, 'delayed'):
# no rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
assert_raises(ValueError, epochs.drop_bad_epochs, reject='foo')
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.selection, np.arange(len(events)))
assert_array_equal(epochs.drop_log, [[]] * 7)
if proj not in data_7:
data_7[proj] = epochs.get_data()
assert_array_equal(epochs.get_data(), data_7[proj])
# with rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection post-hoc
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.get_data(), data_7[proj])
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_equal(len(epochs), len(epochs.get_data()))
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection twice
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject_part, preload=preload)
epochs.drop_bad_epochs()
assert_equal(len(epochs), len(events) - 1)
epochs.drop_bad_epochs(reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# ensure that thresholds must become more stringent, not less
assert_raises(ValueError, epochs.drop_bad_epochs, reject_part)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
epochs.drop_bad_epochs(flat=dict(mag=1.))
assert_equal(len(epochs), 0)
assert_raises(ValueError, epochs.drop_bad_epochs,
flat=dict(mag=0.))
# rejection of subset of trials (ensure array ownership)
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=None, preload=preload)
epochs = epochs[:-1]
epochs.drop_bad_epochs(reject=reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
def test_decim():
"""Test epochs decimation
"""
# First with EpochsArray
n_epochs, n_channels, n_times = 5, 10, 20
dec_1, dec_2 = 2, 3
decim = dec_1 * dec_2
sfreq = 1000.
sfreq_new = sfreq / decim
data = np.random.randn(n_epochs, n_channels, n_times)
events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
info = create_info(n_channels, sfreq, 'eeg')
info['lowpass'] = sfreq_new / float(decim)
epochs = EpochsArray(data, info, events)
data_epochs = epochs.decimate(decim, copy=True).get_data()
data_epochs_2 = epochs.decimate(dec_1).decimate(dec_2).get_data()
assert_array_equal(data_epochs, data[:, :, ::decim])
assert_array_equal(data_epochs, data_epochs_2)
# Now let's do it with some real data
raw, events, picks = _get_data()
sfreq_new = raw.info['sfreq'] / decim
raw.info['lowpass'] = sfreq_new / 4. # suppress aliasing warnings
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=False)
assert_raises(ValueError, epochs.decimate, -1)
expected_data = epochs.get_data()[:, :, ::decim]
expected_times = epochs.times[::decim]
for preload in (True, False):
# at init
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
preload=preload)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload).decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload).decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload)
epochs.preload_data()
epochs = epochs.decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload).decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload)
epochs.preload_data()
epochs.decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
def test_base_epochs():
"""Test base epochs class
"""
raw = _get_data()[0]
epochs = _BaseEpochs(raw.info, None, np.ones((1, 3), int),
event_id, tmin, tmax)
assert_raises(NotImplementedError, epochs.get_data)
# events with non integers
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3), float), event_id, tmin, tmax)
assert_raises(ValueError, _BaseEpochs, raw.info, None,
np.ones((1, 3, 2), int), event_id, tmin, tmax)
@requires_scipy_version('0.14')
def test_savgol_filter():
"""Test savgol filtering
"""
h_freq = 10.
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.savgol_filter, 10.)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
data = np.abs(fftpack.fft(epochs.get_data()))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
epochs.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(epochs.get_data()))
# decent in pass-band
assert_allclose(np.mean(data[:, :, match_mask], 0),
np.mean(data_filt[:, :, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, :, mismatch_mask]) >
np.mean(data_filt[:, :, mismatch_mask]) * 5)
def test_epochs_hash():
"""Test epoch hashing
"""
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.__hash__)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs))
epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
epochs_2._data[0, 0, 0] -= 1
assert_not_equal(hash(epochs), hash(epochs_2))
def test_event_ordering():
"""Test event order"""
raw, events = _get_data()[:2]
events2 = events.copy()
np.random.shuffle(events2)
for ii, eve in enumerate([events, events2]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, eve, event_id, tmin, tmax,
baseline=(None, 0), reject=reject, flat=flat)
assert_equal(len(w), ii)
if ii > 0:
assert_true('chronologically' in '%s' % w[-1].message)
def test_epochs_bad_baseline():
"""Test Epochs initialization with bad baseline parameters
"""
raw, events = _get_data()[:2]
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
def test_epoch_combine_ids():
"""Test combining event ids in epochs compared to events
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
'd': 4, 'e': 5, 'f': 32},
tmin, tmax, picks=picks, preload=False)
events_new = merge_events(events, [1, 2], 12)
epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
assert_equal(epochs_new['ab'].name, 'ab')
assert_array_equal(events_new, epochs_new.events)
# should probably add test + functionality for non-replacement XXX
def test_epoch_multi_ids():
"""Test epoch selection via multiple/partial keys
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
'b/d': 4, 'a_b': 5},
tmin, tmax, picks=picks, preload=False)
epochs_regular = epochs[['a', 'b']]
epochs_multi = epochs[['a/b/a', 'a/b/b']]
assert_array_equal(epochs_regular.events, epochs_multi.events)
def test_read_epochs_bad_events():
"""Test epochs when events are at the beginning or the end of the file
"""
raw, events, picks = _get_data()
# Event at the beginning
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
assert_true(repr(epochs)) # test repr
epochs.drop_bad_epochs()
assert_true(repr(epochs))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
# Event at the end
epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks, baseline=(None, 0))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
assert evoked
warnings.resetwarnings()
@slow_test
def test_read_write_epochs():
"""Test epochs from raw files with IO as fif file
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test-epo.fif')
temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
baseline = (None, 0)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, preload=True)
epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, preload=True)
assert_true(epochs_no_bl.baseline is None)
evoked = epochs.average()
data = epochs.get_data()
# Bad tmin/tmax parameters
assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
baseline=None)
epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
None, tmin, tmax, picks=picks,
baseline=(None, 0))
assert_array_equal(data, epochs_no_id.get_data())
eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, exclude='bads')
eog_ch_names = [raw.ch_names[k] for k in eog_picks]
epochs.drop_channels(eog_ch_names)
epochs_no_bl.drop_channels(eog_ch_names)
assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
epochs.get_data().shape[1])
data_no_eog = epochs.get_data()
assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
# test decim kwarg
with warnings.catch_warnings(record=True) as w:
# decim with lowpass
warnings.simplefilter('always')
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 1)
# decim without lowpass
lowpass = raw.info['lowpass']
raw.info['lowpass'] = None
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), decim=4)
assert_equal(len(w), 2)
raw.info['lowpass'] = lowpass
data_dec = epochs_dec.get_data()
assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
atol=1e-12)
evoked_dec = epochs_dec.average()
assert_allclose(evoked.data[:, epochs_dec._decim_slice],
evoked_dec.data, rtol=1e-12)
n = evoked.data.shape[1]
n_dec = evoked_dec.data.shape[1]
n_dec_min = n // 4
assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
# test IO
epochs.save(temp_fname)
epochs_no_bl.save(temp_fname_no_bl)
epochs_read = read_epochs(temp_fname)
epochs_no_bl_read = read_epochs(temp_fname_no_bl)
assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
epochs_no_bl_read.apply_baseline(baseline)
assert_true(epochs_no_bl_read.baseline == baseline)
assert_true(str(epochs_read).startswith('<Epochs'))
assert_array_equal(epochs_no_bl_read.times, epochs.times)
assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
assert_array_almost_equal(epochs.get_data(), epochs_no_bl_read.get_data())
assert_array_equal(epochs_read.times, epochs.times)
assert_array_almost_equal(epochs_read.average().data, evoked.data)
assert_equal(epochs_read.proj, epochs.proj)
bmin, bmax = epochs.baseline
if bmin is None:
bmin = epochs.times[0]
if bmax is None:
bmax = epochs.times[-1]
baseline = (bmin, bmax)
assert_array_almost_equal(epochs_read.baseline, baseline)
assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
assert_equal(epochs_read.event_id, epochs.event_id)
epochs.event_id.pop('1')
epochs.event_id.update({'a:a': 1}) # test allow for ':' in key
epochs.save(op.join(tempdir, 'foo-epo.fif'))
epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'))
assert_equal(epochs_read2.event_id, epochs.event_id)
# add reject here so some of the epochs get dropped
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
epochs.save(temp_fname)
# ensure bad events are not saved
epochs_read3 = read_epochs(temp_fname)
assert_array_equal(epochs_read3.events, epochs.events)
data = epochs.get_data()
assert_true(epochs_read3.events.shape[0] == data.shape[0])
# test copying loaded one (raw property)
epochs_read4 = epochs_read3.copy()
assert_array_almost_equal(epochs_read4.get_data(), data)
# test equalizing loaded one (drop_log property)
epochs_read4.equalize_event_counts(epochs.event_id)
epochs.drop_epochs([1, 2], reason='can we recover orig ID?')
epochs.save(temp_fname)
epochs_read5 = read_epochs(temp_fname)
assert_array_equal(epochs_read5.selection, epochs.selection)
assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
# Test that one can drop channels on read file
epochs_read5.drop_channels(epochs_read5.ch_names[:1])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
epochs.save(epochs_badname)
read_epochs(epochs_badname)
assert_true(len(w) == 2)
# test loading epochs with missing events
epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax, picks=picks,
on_missing='ignore')
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_equal(set(epochs.event_id.keys()),
set(text_type(x) for x in epochs_read.event_id.keys()))
# test saving split epoch files
epochs.save(temp_fname, split_size='7MB')
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs.get_data(), epochs_read.get_data())
assert_array_equal(epochs.events, epochs_read.events)
assert_array_equal(epochs.selection, epochs_read.selection)
assert_equal(epochs.drop_log, epochs_read.drop_log)
# Test that having a single time point works
epochs.preload_data()
epochs.crop(0, 0, copy=False)
assert_equal(len(epochs.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_equal(len(epochs_read.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
def test_epochs_proj():
"""Test handling projection (apply proj in Raw or in Epochs)
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(all(p['active'] is True for p in epochs.info['projs']))
evoked = epochs.average()
assert_true(all(p['active'] is True for p in evoked.info['projs']))
data = epochs.get_data()
raw_proj = io.Raw(raw_fname, proj=True)
epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
picks=this_picks, baseline=(None, 0), proj=False)
data_no_proj = epochs_no_proj.get_data()
assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
evoked_no_proj = epochs_no_proj.average()
assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
assert_true(epochs_no_proj.proj is True) # as projs are active from Raw
assert_array_almost_equal(data, data_no_proj, decimal=8)
# make sure we can exclude avg ref
this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=True)
assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True, add_eeg_ref=False)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# make sure we don't add avg ref when a custom ref has been applied
raw.info['custom_ref_applied'] = True
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=True)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# From GH#2200:
# This has no problem
proj = raw.info['projs']
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
baseline=(None, 0), proj=False)
epochs.info['projs'] = []
data = epochs.copy().add_proj(proj).apply_proj().get_data()
# save and reload data
fname_epo = op.join(tempdir, 'temp-epo.fif')
epochs.save(fname_epo) # Save without proj added
epochs_read = read_epochs(fname_epo)
epochs_read.add_proj(proj)
epochs_read.apply_proj() # This used to bomb
data_2 = epochs_read.get_data() # Let's check the result
assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
def test_evoked_arithmetic():
"""Test arithmetic of evoked data
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked1 = epochs1.average()
epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked2 = epochs2.average()
epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = epochs.average()
evoked_sum = evoked1 + evoked2
assert_array_equal(evoked.data, evoked_sum.data)
assert_array_equal(evoked.times, evoked_sum.times)
assert_true(evoked_sum.nave == (evoked1.nave + evoked2.nave))
evoked_diff = evoked1 - evoked1
assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
def test_evoked_io_from_epochs():
"""Test IO of evoked data made from epochs
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# offset our tmin so we don't get exactly a zero value when decimating
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
picks=picks, baseline=(None, 0), decim=5)
assert_true(len(w) == 1)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
atol=1 / evoked.info['sfreq'])
# now let's do one with negative time
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
# should be equivalent to a cropped original
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.crop(0.099, None)
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_evoked_standard_error():
"""Test calculation and read/write of standard error
"""
raw, events, picks = _get_data()
tempdir = _TempDir()
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
evoked = [epochs.average(), epochs.standard_error()]
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown'),
read_evokeds(op.join(tempdir, 'evoked-ave.fif'), 'Unknown',
kind='standard_error')]
for evoked_new in [evoked2, evoked3]:
assert_true(evoked_new[0]._aspect_kind ==
FIFF.FIFFV_ASPECT_AVERAGE)
assert_true(evoked_new[0].kind == 'average')
assert_true(evoked_new[1]._aspect_kind ==
FIFF.FIFFV_ASPECT_STD_ERR)
assert_true(evoked_new[1].kind == 'standard_error')
for ave, ave2 in zip(evoked, evoked_new):
assert_array_almost_equal(ave.data, ave2.data)
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
def test_reject_epochs():
"""Test of epochs rejection
"""
raw, events, picks = _get_data()
events1 = events[events[:, 2] == event_id]
epochs = Epochs(raw, events1,
event_id, tmin, tmax, baseline=(None, 0),
reject=reject, flat=flat)
assert_raises(RuntimeError, len, epochs)
n_events = len(epochs.events)
data = epochs.get_data()
n_clean_epochs = len(data)
# Should match
# mne_process_raw --raw test_raw.fif --projoff \
# --saveavetag -ave --ave test.ave --filteroff
assert_true(n_events > n_clean_epochs)
assert_true(n_clean_epochs == 3)
assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
['MEG 2443'], ['MEG 2443']])
# Ensure epochs are not dropped based on a bad channel
raw_2 = raw.copy()
raw_2.info['bads'] = ['MEG 2443']
reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
epochs = Epochs(raw_2, events1, event_id, tmin, tmax, baseline=(None, 0),
reject=reject_crazy, flat=flat)
epochs.drop_bad_epochs()
assert_true(all('MEG 2442' in e for e in epochs.drop_log))
assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
# Invalid reject_tmin/reject_tmax/detrend
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=1., reject_tmax=0)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=tmin - 1, reject_tmax=1.)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=0., reject_tmax=tmax + 1)
epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, flat=flat,
reject_tmin=0., reject_tmax=.1)
data = epochs.get_data()
n_clean_epochs = len(data)
assert_true(n_clean_epochs == 7)
assert_true(len(epochs) == 7)
assert_true(epochs.times[epochs._reject_time][0] >= 0.)
assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
# Invalid data for _is_good_epoch function
epochs = Epochs(raw, events1, event_id, tmin, tmax, reject=None, flat=None)
assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
(False, ['TOO_SHORT']))
data = epochs[0].get_data()[0]
assert_equal(epochs._is_good_epoch(data), (True, None))
def test_preload_epochs():
"""Test preload of epochs
"""
raw, events, picks = _get_data()
epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
data_preload = epochs_preload.get_data()
epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data = epochs.get_data()
assert_array_equal(data_preload, data)
assert_array_almost_equal(epochs_preload.average().data,
epochs.average().data, 18)
def test_indexing_slicing():
"""Test of indexing and slicing operations
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
data_normal = epochs.get_data()
n_good_events = data_normal.shape[0]
# indices for slicing
start_index = 1
end_index = n_good_events - 1
assert((end_index - start_index) > 0)
for preload in [True, False]:
epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=preload,
reject=reject, flat=flat)
if not preload:
epochs2.drop_bad_epochs()
# using slicing
epochs2_sliced = epochs2[start_index:end_index]
data_epochs2_sliced = epochs2_sliced.get_data()
assert_array_equal(data_epochs2_sliced,
data_normal[start_index:end_index])
# using indexing
pos = 0
for idx in range(start_index, end_index):
data = epochs2_sliced[pos].get_data()
assert_array_equal(data[0], data_normal[idx])
pos += 1
# using indexing with an int
data = epochs2[data_epochs2_sliced.shape[0]].get_data()
assert_array_equal(data, data_normal[[idx]])
# using indexing with an array
idx = np.random.randint(0, data_epochs2_sliced.shape[0], 10)
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
# using indexing with a list of indices
idx = [0]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
idx = [0, 1]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
def test_comparison_with_c():
"""Test of average obtained vs C code
"""
raw, events = _get_data()[:2]
c_evoked = read_evokeds(evoked_nf_name, condition=0)
epochs = Epochs(raw, events, event_id, tmin, tmax,
baseline=None, preload=True,
reject=None, flat=None)
evoked = epochs.average()
sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
evoked_data = evoked.data
c_evoked_data = c_evoked.data[sel]
assert_true(evoked.nave == c_evoked.nave)
assert_array_almost_equal(evoked_data, c_evoked_data, 10)
assert_array_almost_equal(evoked.times, c_evoked.times, 12)
def test_crop():
"""Test of crop of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.crop, None, 0.2) # not preloaded
data_normal = epochs.get_data()
epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), preload=True,
reject=reject, flat=flat)
with warnings.catch_warnings(record=True) as w:
epochs2.crop(-20, 200)
assert_true(len(w) == 2)
# indices for slicing
tmin_window = tmin + 0.1
tmax_window = tmax - 0.1
tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
assert_true(tmin_window > tmin)
assert_true(tmax_window < tmax)
epochs3 = epochs2.crop(tmin_window, tmax_window, copy=True)
data3 = epochs3.get_data()
epochs2.crop(tmin_window, tmax_window)
data2 = epochs2.get_data()
assert_array_equal(data2, data_normal[:, :, tmask])
assert_array_equal(data3, data_normal[:, :, tmask])
# test time info is correct
epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
np.ones((1, 3), int), tmin=-0.2)
epochs.crop(-.200, .700)
last_time = epochs.times[-1]
with warnings.catch_warnings(record=True): # not LP filtered
epochs.decimate(10)
assert_allclose(last_time, epochs.times[-1])
def test_resample():
"""Test of resample of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.resample, 100)
epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs = epochs_o.copy()
data_normal = cp.deepcopy(epochs.get_data())
times_normal = cp.deepcopy(epochs.times)
sfreq_normal = epochs.info['sfreq']
# upsample by 2
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, npad=0)
data_up = cp.deepcopy(epochs.get_data())
times_up = cp.deepcopy(epochs.times)
sfreq_up = epochs.info['sfreq']
    # downsample by 2, which should match
epochs.resample(sfreq_normal, npad=0)
data_new = cp.deepcopy(epochs.get_data())
times_new = cp.deepcopy(epochs.times)
sfreq_new = epochs.info['sfreq']
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_true(sfreq_up == 2 * sfreq_normal)
assert_true(sfreq_new == sfreq_normal)
assert_true(len(times_up) == 2 * len(times_normal))
assert_array_almost_equal(times_new, times_normal, 10)
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_array_almost_equal(data_new, data_normal, 5)
# use parallel
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
# test copy flag
epochs = epochs_o.copy()
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=True)
assert_true(epochs_resampled is not epochs)
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0, copy=False)
assert_true(epochs_resampled is epochs)
def test_detrend():
"""Test detrending of epochs
"""
raw, events, picks = _get_data()
# test first-order
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=1)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=None)
data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
exclude='bads')
evoked_1 = epochs_1.average()
evoked_2 = epochs_2.average()
evoked_2.detrend(1)
# Due to roundoff these won't be exactly equal, but they should be close
assert_true(np.allclose(evoked_1.data, evoked_2.data,
rtol=1e-8, atol=1e-20))
# test zeroth-order case
for preload in [True, False]:
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, None), preload=preload)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, preload=preload, detrend=0)
a = epochs_1.get_data()
b = epochs_2.get_data()
# All data channels should be almost equal
assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
rtol=1e-16, atol=1e-20))
# There are non-M/EEG channels that should not be equal:
assert_true(not np.allclose(a, b))
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
detrend=2)
def test_bootstrap():
"""Test of bootstrapping of epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
epochs2 = bootstrap(epochs, random_state=0)
assert_true(len(epochs2.events) == len(epochs.events))
assert_true(epochs._data.shape == epochs2._data.shape)
def test_epochs_copy():
"""Test copy epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=reject, flat=flat)
copied = epochs.copy()
assert_array_equal(epochs._data, copied._data)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=False,
reject=reject, flat=flat)
copied = epochs.copy()
data = epochs.get_data()
copied_data = copied.get_data()
assert_array_equal(data, copied_data)
def test_iter_evoked():
"""Test the iterator for epochs -> evoked
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
for ii, ev in enumerate(epochs.iter_evoked()):
x = ev.data
y = epochs.get_data()[ii, :, :]
assert_array_equal(x, y)
def test_subtract_evoked():
"""Test subtraction of Evoked from Epochs
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
    # make sure subtraction fails if data channels are missing
assert_raises(ValueError, epochs.subtract_evoked,
epochs.average(picks[:5]))
    # do the subtraction using the default argument
epochs.subtract_evoked()
# apply SSP now
epochs.apply_proj()
# use preloading and SSP from the start
epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=True)
evoked = epochs2.average()
epochs2.subtract_evoked(evoked)
# this gives the same result
assert_allclose(epochs.get_data(), epochs2.get_data())
# if we compute the evoked response after subtracting it we get zero
zero_evoked = epochs.average()
data = zero_evoked.data
assert_allclose(data, np.zeros_like(data), atol=1e-15)
def test_epoch_eq():
"""Test epoch count equalization and condition combining
"""
raw, events, picks = _get_data()
# equalizing epochs objects
epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
epochs_1.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs_1.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
# equalizing conditions
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, reject=reject)
epochs.drop_bad_epochs() # make sure drops are logged
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
drop_log1 = deepcopy(epochs.drop_log)
old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
epochs.equalize_event_counts(['a', 'b'], copy=False)
# undo the eq logging
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] == new_shapes[1])
    assert_true(new_shapes[2] == old_shapes[2])
    assert_true(new_shapes[3] == old_shapes[3])
# now with two conditions collapsed
old_shapes = new_shapes
epochs.equalize_event_counts([['a', 'b'], 'c'], copy=False)
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
assert_true(new_shapes[3] == old_shapes[3])
assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
# now let's combine conditions
old_shapes = new_shapes
epochs = epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])[0]
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'],
{'ab': 1})
combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
caught = 0
for key in ['a', 'b']:
try:
epochs[key]
except KeyError:
caught += 1
    assert_equal(caught, 2)
assert_true(not np.any(epochs.events[:, 2] == 1))
assert_true(not np.any(epochs.events[:, 2] == 2))
epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
epochs.events[:, 2] == 34)))
assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
def test_access_by_name():
"""Test accessing epochs by event name and on_missing for rare events
"""
tempdir = _TempDir()
raw, events, picks = _get_data()
# Test various invalid inputs
assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
picks=picks)
assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
picks=picks)
# Test accessing non-existent events (assumes 12345678 does not exist)
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
tmin, tmax)
# Test on_missing
assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
on_missing='foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
nw = len(w)
assert_true(1 <= nw <= 2)
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
assert_equal(len(w), nw)
# Test constructing epochs with a list of ints as events
epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
for k, v in epochs.event_id.items():
assert_equal(int(k), v)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(KeyError, epochs.__getitem__, 'bar')
data = epochs['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
preload=True)
assert_raises(KeyError, epochs.__getitem__, 'bar')
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
for ep in [epochs, epochs2]:
data = ep['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
assert_array_equal(epochs2['a'].events, epochs['a'].events)
epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, preload=True)
assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
[1, 2])
epochs4 = epochs['a']
epochs5 = epochs3['a']
assert_array_equal(epochs4.events, epochs5.events)
# 20 is our tolerance because epochs are written out as floats
assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
epochs6 = epochs3[['a', 'b']]
assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
epochs6.events[:, 2] == 2)))
assert_array_equal(epochs.events, epochs6.events)
assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
# Make sure we preserve names
assert_equal(epochs['a'].name, 'a')
assert_equal(epochs[['a', 'b']]['a'].name, 'a')
@requires_pandas
def test_to_data_frame():
"""Test epochs Pandas exporter"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
assert_raises(ValueError, epochs.to_data_frame, index='qux')
assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
picks=list(range(epochs.info['nchan'])))
# Default index and picks
df2 = epochs.to_data_frame()
assert_equal(df.index.names, df2.index.names)
assert_array_equal(df.columns.values, epochs.ch_names)
data = np.hstack(epochs.get_data())
assert_true((df.columns == epochs.ch_names).all())
assert_array_equal(df.values[:, 0], data[0] * 1e13)
assert_array_equal(df.values[:, 2], data[2] * 1e15)
for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
df = epochs.to_data_frame(index=ind)
assert_true(df.index.names == ind if isinstance(ind, list) else [ind])
    # test that non-indexed data are present as categorical variables
assert_array_equal(sorted(df.reset_index().columns[:3]),
sorted(['time', 'condition', 'epoch']))
def test_epochs_proj_mixin():
"""Test SSP proj methods from ProjMixin class
"""
raw, events, picks = _get_data()
for proj in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=proj)
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
# test adding / deleting proj
if proj:
epochs.get_data()
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
{'remove_existing': True})
assert_raises(ValueError, epochs.add_proj, 'spam')
assert_raises(ValueError, epochs.del_proj, 0)
else:
projs = deepcopy(epochs.info['projs'])
n_proj = len(epochs.info['projs'])
epochs.del_proj(0)
assert_true(len(epochs.info['projs']) == n_proj - 1)
epochs.add_proj(projs, remove_existing=False)
assert_true(len(epochs.info['projs']) == 2 * n_proj - 1)
epochs.add_proj(projs, remove_existing=True)
assert_true(len(epochs.info['projs']) == n_proj)
# catch no-gos.
# wrong proj argument
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='crazy')
# delayed without reject params
assert_raises(RuntimeError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, baseline=(None, 0), proj='delayed', reject=None)
for preload in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj='delayed', preload=preload,
add_eeg_ref=True, reject=reject)
epochs2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=True, preload=preload,
add_eeg_ref=True, reject=reject)
assert_allclose(epochs.copy().apply_proj().get_data()[0],
epochs2.get_data()[0], rtol=1e-10, atol=1e-25)
# make sure data output is constant across repeated calls
# e.g. drop bads
assert_array_equal(epochs.get_data(), epochs.get_data())
assert_array_equal(epochs2.get_data(), epochs2.get_data())
# test epochs.next calls
data = epochs.get_data().copy()
data2 = np.array([e for e in epochs])
assert_array_equal(data, data2)
# cross application from processing stream 1 to 2
epochs.apply_proj()
assert_array_equal(epochs._projector, epochs2._projector)
assert_allclose(epochs._data, epochs2.get_data())
# test mixin against manual application
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, proj=False, add_eeg_ref=True)
data = epochs.get_data().copy()
epochs.apply_proj()
assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
def test_delayed_epochs():
"""Test delayed projection
"""
raw, events, picks = _get_data()
events = events[:10]
picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
pick_types(raw.info, meg=False, eeg=False,
ecg=True, eog=True)])
picks = np.sort(picks)
raw.info['lowpass'] = 40. # fake the LP info so no warnings
for preload in (True, False):
for proj in (True, False, 'delayed'):
for decim in (1, 3):
for ii in range(2):
epochs = Epochs(raw, events, event_id, tmin, tmax,
picks=picks, proj=proj, reject=reject,
preload=preload, decim=decim)
if ii == 1:
epochs.preload_data()
picks_data = pick_types(epochs.info, meg=True, eeg=True)
evoked = epochs.average(picks=picks_data)
if proj is True:
evoked.apply_proj()
epochs_data = epochs.get_data().mean(axis=0)[picks_data]
assert_array_equal(evoked.ch_names,
np.array(epochs.ch_names)[picks_data])
assert_allclose(evoked.times, epochs.times)
assert_allclose(evoked.data, epochs_data,
rtol=1e-5, atol=1e-15)
def test_drop_epochs():
"""Test dropping of epochs.
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
events1 = events[events[:, 2] == event_id]
# Bound checks
assert_raises(IndexError, epochs.drop_epochs, [len(epochs.events)])
assert_raises(IndexError, epochs.drop_epochs, [-1])
assert_raises(ValueError, epochs.drop_epochs, [[1, 2], [3, 4]])
# Test selection attribute
assert_array_equal(epochs.selection,
np.where(events[:, 2] == event_id)[0])
assert_equal(len(epochs.drop_log), len(events))
assert_true(all(epochs.drop_log[k] == ['IGNORED']
for k in set(range(len(events))) - set(epochs.selection)))
selection = epochs.selection.copy()
n_events = len(epochs.events)
epochs.drop_epochs([2, 4], reason='d')
assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
assert_equal(len(epochs.drop_log), len(events))
assert_equal([epochs.drop_log[k]
for k in selection[[2, 4]]], [['d'], ['d']])
assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
def test_drop_epochs_mult():
"""Test that subselecting epochs or making less epochs is equivalent"""
raw, events, picks = _get_data()
for preload in [True, False]:
epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
tmin, tmax, picks=picks, reject=reject,
preload=preload)['a']
epochs2 = Epochs(raw, events, {'a': 1},
tmin, tmax, picks=picks, reject=reject,
preload=preload)
if preload:
# In the preload case you cannot know the bads if already ignored
assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
if d1 == ['IGNORED']:
assert_true(d2 == ['IGNORED'])
if d1 != ['IGNORED'] and d1 != []:
assert_true((d2 == d1) or (d2 == ['IGNORED']))
if d1 == []:
assert_true(d2 == [])
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
else:
            # In the non-preload case it should be exactly the same
assert_equal(epochs1.drop_log, epochs2.drop_log)
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
def test_contains():
"""Test membership API"""
raw, events = _get_data()[:2]
tests = [(('mag', False), ('grad', 'eeg')),
(('grad', False), ('mag', 'eeg')),
((False, True), ('grad', 'mag'))]
for (meg, eeg), others in tests:
picks_contains = pick_types(raw.info, meg=meg, eeg=eeg)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
picks=picks_contains, reject=None,
preload=False)
test = 'eeg' if eeg is True else meg
assert_true(test in epochs)
assert_true(not any(o in epochs for o in others))
assert_raises(ValueError, epochs.__contains__, 'foo')
assert_raises(ValueError, epochs.__contains__, 1)
def test_drop_channels_mixin():
"""Test channels-dropping functionality
"""
raw, events = _get_data()[:2]
# here without picks to get additional coverage
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=None,
baseline=(None, 0), preload=True)
drop_ch = epochs.ch_names[:3]
ch_names = epochs.ch_names[3:]
ch_names_orig = epochs.ch_names
dummy = epochs.drop_channels(drop_ch, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.drop_channels(drop_ch)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
def test_pick_channels_mixin():
"""Test channel-picking functionality
"""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
ch_names = epochs.ch_names[:3]
epochs.preload = False
assert_raises(RuntimeError, epochs.drop_channels, ['foo'])
epochs.preload = True
ch_names_orig = epochs.ch_names
dummy = epochs.pick_channels(ch_names, copy=True)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.pick_channels(ch_names)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
# Invalid picks
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=[])
def test_equalize_channels():
"""Test equalization of channels
"""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), proj=False, preload=True)
epochs2 = epochs1.copy()
ch_names = epochs1.ch_names[2:]
epochs1.drop_channels(epochs1.ch_names[:1])
epochs2.drop_channels(epochs2.ch_names[1:2])
my_comparison = [epochs1, epochs2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_illegal_event_id():
"""Test handling of invalid events ids"""
raw, events, picks = _get_data()
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
tmax, picks=picks, baseline=(None, 0), proj=False)
def test_add_channels_epochs():
"""Test adding channels"""
raw, events, picks = _get_data()
def make_epochs(picks, proj):
return Epochs(raw, events, event_id, tmin, tmax, baseline=(None, 0),
reject=None, preload=True, proj=proj, picks=picks)
picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
for proj in (False, True):
epochs = make_epochs(picks=picks, proj=proj)
epochs_meg = make_epochs(picks=picks_meg, proj=proj)
epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
epochs.info._check_consistency()
epochs_meg.info._check_consistency()
epochs_eeg.info._check_consistency()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
data1 = epochs.get_data()
data2 = epochs2.get_data()
data3 = np.concatenate([e.get_data() for e in
[epochs_meg, epochs_eeg]], axis=1)
assert_array_equal(data1.shape, data2.shape)
assert_allclose(data1, data3, atol=1e-25)
assert_allclose(data1, data2, atol=1e-25)
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['meas_date'] += 10
add_channels_epochs([epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs2.info['filename'] = epochs2.info['filename'].upper()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.events[3, 2] -= 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
assert_raises(ValueError, add_channels_epochs,
[epochs_meg, epochs_eeg[:2]])
epochs_meg.info['chs'].pop(0)
epochs_meg.info['ch_names'].pop(0)
epochs_meg.info['nchan'] -= 1
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] = None
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] += 10
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['ch_names'][1] = epochs_meg2.info['ch_names'][0]
epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][1]
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['expimenter'] = 'foo'
assert_raises(RuntimeError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.preload = False
assert_raises(ValueError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.4
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.tmin += 0.5
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.baseline = None
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.event_id['b'] = 2
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
def test_array_epochs():
"""Test creating epochs from array
"""
import matplotlib.pyplot as plt
tempdir = _TempDir()
# creating
rng = np.random.RandomState(42)
data = rng.random_sample((10, 20, 300))
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
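    # MNE-style events array: one row per event with columns
    # [sample index, previous trigger value, event id]; here 10 events
    # spaced 60 samples apart with alternating ids 1 and 2.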
events = np.c_[np.arange(1, 600, 60),
np.zeros(10, int),
[1, 2] * 5]
event_id = {'a': 1, 'b': 2}
epochs = EpochsArray(data, info, events, tmin, event_id)
assert_true(str(epochs).startswith('<EpochsArray'))
# From GH#1963
assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
event_id)
assert_raises(ValueError, EpochsArray, data, info, events, tmin,
dict(a=1))
# saving
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
data2 = epochs2.get_data()
assert_allclose(data, data2)
assert_allclose(epochs.times, epochs2.times)
assert_equal(epochs.event_id, epochs2.event_id)
assert_array_equal(epochs.events, epochs2.events)
# plotting
epochs[0].plot()
plt.close('all')
# indexing
assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
assert_equal(len(epochs[:2]), 2)
data[0, 5, 150] = 3000
data[1, :, :] = 0
data[2, 5, 210] = 3000
data[3, 5, 260] = 0
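    # With sfreq=1 kHz and tmin=0, the rejection window [0.1, 0.2] s covers
    # samples 100-200: epoch 0 (amplitude spike at sample 150) and epoch 1
    # (flat everywhere) should be dropped, while the artifacts at samples 210
    # and 260 fall outside the window, so epochs 2 and 3 survive.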
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
reject_tmin=0.1, reject_tmax=0.2)
assert_equal(len(epochs), len(events) - 2)
assert_equal(epochs.drop_log[0], ['EEG 006'])
assert_equal(len(epochs.drop_log), 10)
assert_equal(len(epochs.events), len(epochs.selection))
# baseline
data = np.ones((10, 20, 300))
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=-.2, baseline=(None, 0))
ep_data = epochs.get_data()
assert_array_equal(np.zeros_like(ep_data), ep_data)
# one time point
epochs = EpochsArray(data[:, :, :1], info, events=events,
event_id=event_id, tmin=0., baseline=None)
assert_allclose(epochs.times, [0.])
assert_allclose(epochs.get_data(), data[:, :, :1])
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs_read.times, [0.])
assert_allclose(epochs_read.get_data(), data[:, :, :1])
# event as integer (#2435)
mask = (events[:, 2] == 1)
data_1 = data[mask]
events_1 = events[mask]
epochs = EpochsArray(data_1, info, events=events_1, event_id=1,
tmin=-0.2, baseline=(None, 0))
def test_concatenate_epochs():
"""Test concatenate epochs"""
raw, events, picks = _get_data()
epochs = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epochs2 = epochs.copy()
epochs_list = [epochs, epochs2]
epochs_conc = concatenate_epochs(epochs_list)
assert_array_equal(
epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
expected_shape = list(epochs.get_data().shape)
expected_shape[0] *= 2
expected_shape = tuple(expected_shape)
assert_equal(epochs_conc.get_data().shape, expected_shape)
assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
epochs2 = epochs.copy()
epochs2._data = epochs2.get_data()
epochs2.preload = True
assert_raises(
ValueError, concatenate_epochs,
[epochs, epochs2.drop_channels(epochs2.ch_names[:1], copy=True)])
epochs2.times = np.delete(epochs2.times, 1)
assert_raises(
ValueError,
concatenate_epochs, [epochs, epochs2])
assert_equal(epochs_conc._raw, None)
# check if baseline is same for all epochs
epochs2.baseline = (-0.1, None)
assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
def test_add_channels():
"""Test epoch splitting / re-appending channel types
"""
raw, events, picks = _get_data()
epoch_nopre = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epoch = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks, preload=True)
epoch_eeg = epoch.pick_types(meg=False, eeg=True, copy=True)
epoch_meg = epoch.pick_types(meg=True, copy=True)
epoch_stim = epoch.pick_types(meg=False, stim=True, copy=True)
epoch_eeg_meg = epoch.pick_types(meg=True, eeg=True, copy=True)
epoch_new = epoch_meg.add_channels([epoch_eeg, epoch_stim], copy=True)
assert_true(all(ch in epoch_new.ch_names
for ch in epoch_stim.ch_names + epoch_meg.ch_names))
epoch_new = epoch_meg.add_channels([epoch_eeg], copy=True)
assert_true(ch in epoch_new.ch_names for ch in epoch.ch_names)
assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
assert_true(all(ch not in epoch_new.ch_names
for ch in epoch_stim.ch_names))
# Now test errors
epoch_badsf = epoch_eeg.copy()
epoch_badsf.info['sfreq'] = 3.1415927
epoch_eeg = epoch_eeg.crop(-.1, .1)
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
run_tests_if_main()
| andyh616/mne-python | mne/tests/test_epochs.py | Python | bsd-3-clause | 71,695 |
from mathtools import list_product
def parse_file(file):
fin = open(file)
l = [[int(i) for i in line.strip().split()] for line in fin]
return l
def horizontal_max(l, adjacent):
largest = 0
for line in l:
for i in xrange(len(line) - adjacent + 1):
current = list_product(line[i:i + adjacent])
if current > largest:
largest = current
return largest
def vertical_max(l, adjacent):
largest = 0
for column in xrange(len(l[0])):
for row in xrange(len(l) - adjacent + 1):
current = list_product([l[j][column]
for j in range(row, row + adjacent)])
if current > largest:
largest = current
return largest
def right_diagonal_max(l, adjacent):
largest = 0
for column in xrange(len(l[0]) - adjacent + 1):
for row in xrange(len(l) - adjacent + 1):
current = list_product([l[row + i][column + i]
for i in range(adjacent)])
if current > largest:
largest = current
return largest
def left_diagonal_max(l, adjacent):
largest = 0
for column in xrange(adjacent - 1, len(l[0])):
for row in xrange(len(l) - adjacent + 1):
current = list_product([l[row + i][column - i]
for i in range(adjacent)])
if current > largest:
largest = current
return largest
def all_max(l, adjacent):
    return max([horizontal_max(l, adjacent), vertical_max(l, adjacent),
                right_diagonal_max(l, adjacent), left_diagonal_max(l, adjacent)])
print all_max(parse_file('pr11.txt'), 4)
| pbjr23/project-euler-python | pr11.py | Python | gpl-2.0 | 1,704 |
# -*- coding: utf-8 -*-
"""
Tests for the path module.
This suite runs on Linux, OS X, and Windows right now. To extend the
platform support, just add appropriate pathnames for your
platform (os.name) in each place where the p() function is called.
Then report the result. If you can't get the test to run at all on
your platform, there's probably a bug in path.py -- please report the issue
in the issue tracker at https://github.com/jaraco/path.py.
TestScratchDir.test_touch() takes a while to run. It sleeps a few
seconds to allow some time to pass between calls to check the modify
time on files.
"""
from __future__ import unicode_literals, absolute_import, print_function
import codecs
import os
import sys
import shutil
import time
import ntpath
import posixpath
import textwrap
import platform
import importlib
import pytest
from path import Path, tempdir
from path import CaseInsensitivePattern as ci
from path import SpecialResolver
from path import Multi
def p(**choices):
""" Choose a value from several possible values, based on os.name """
return choices[os.name]
class TestBasics:
def test_relpath(self):
root = Path(p(nt='C:\\', posix='/'))
foo = root / 'foo'
quux = foo / 'quux'
bar = foo / 'bar'
boz = bar / 'Baz' / 'Boz'
up = Path(os.pardir)
# basics
assert root.relpathto(boz) == Path('foo')/'bar'/'Baz'/'Boz'
assert bar.relpathto(boz) == Path('Baz')/'Boz'
assert quux.relpathto(boz) == up/'bar'/'Baz'/'Boz'
assert boz.relpathto(quux) == up/up/up/'quux'
assert boz.relpathto(bar) == up/up
# Path is not the first element in concatenation
assert root.relpathto(boz) == 'foo'/Path('bar')/'Baz'/'Boz'
# x.relpathto(x) == curdir
assert root.relpathto(root) == os.curdir
assert boz.relpathto(boz) == os.curdir
# Make sure case is properly noted (or ignored)
assert boz.relpathto(boz.normcase()) == os.curdir
# relpath()
cwd = Path(os.getcwd())
assert boz.relpath() == cwd.relpathto(boz)
if os.name == 'nt':
# Check relpath across drives.
d = Path('D:\\')
assert d.relpathto(boz) == boz
def test_construction_from_none(self):
"""
"""
try:
Path(None)
except TypeError:
pass
else:
raise Exception("DID NOT RAISE")
def test_construction_from_int(self):
"""
Path class will construct a path as a string of the number
"""
assert Path(1) == '1'
def test_string_compatibility(self):
""" Test compatibility with ordinary strings. """
x = Path('xyzzy')
assert x == 'xyzzy'
assert x == str('xyzzy')
# sorting
items = [Path('fhj'),
Path('fgh'),
'E',
Path('d'),
'A',
Path('B'),
'c']
items.sort()
assert items == ['A', 'B', 'E', 'c', 'd', 'fgh', 'fhj']
        # Test p1/p2.
p1 = Path("foo")
p2 = Path("bar")
assert p1/p2 == p(nt='foo\\bar', posix='foo/bar')
def test_properties(self):
# Create sample path object.
f = p(nt='C:\\Program Files\\Python\\Lib\\xyzzy.py',
posix='/usr/local/python/lib/xyzzy.py')
f = Path(f)
# .parent
nt_lib = 'C:\\Program Files\\Python\\Lib'
posix_lib = '/usr/local/python/lib'
expected = p(nt=nt_lib, posix=posix_lib)
assert f.parent == expected
# .name
assert f.name == 'xyzzy.py'
assert f.parent.name == p(nt='Lib', posix='lib')
# .ext
assert f.ext == '.py'
assert f.parent.ext == ''
# .drive
assert f.drive == p(nt='C:', posix='')
def test_methods(self):
# .abspath()
assert Path(os.curdir).abspath() == os.getcwd()
# .getcwd()
cwd = Path.getcwd()
assert isinstance(cwd, Path)
assert cwd == os.getcwd()
def test_UNC(self):
if hasattr(os.path, 'splitunc'):
p = Path(r'\\python1\share1\dir1\file1.txt')
assert p.uncshare == r'\\python1\share1'
assert p.splitunc() == os.path.splitunc(str(p))
def test_explicit_module(self):
"""
The user may specify an explicit path module to use.
"""
nt_ok = Path.using_module(ntpath)(r'foo\bar\baz')
posix_ok = Path.using_module(posixpath)(r'foo/bar/baz')
posix_wrong = Path.using_module(posixpath)(r'foo\bar\baz')
assert nt_ok.dirname() == r'foo\bar'
assert posix_ok.dirname() == r'foo/bar'
assert posix_wrong.dirname() == ''
assert nt_ok / 'quux' == r'foo\bar\baz\quux'
assert posix_ok / 'quux' == r'foo/bar/baz/quux'
def test_explicit_module_classes(self):
"""
Multiple calls to path.using_module should produce the same class.
"""
nt_path = Path.using_module(ntpath)
assert nt_path is Path.using_module(ntpath)
assert nt_path.__name__ == 'Path_ntpath'
def test_joinpath_on_instance(self):
res = Path('foo')
foo_bar = res.joinpath('bar')
assert foo_bar == p(nt='foo\\bar', posix='foo/bar')
def test_joinpath_to_nothing(self):
res = Path('foo')
assert res.joinpath() == res
def test_joinpath_on_class(self):
"Construct a path from a series of strings"
foo_bar = Path.joinpath('foo', 'bar')
assert foo_bar == p(nt='foo\\bar', posix='foo/bar')
def test_joinpath_fails_on_empty(self):
"It doesn't make sense to join nothing at all"
try:
Path.joinpath()
except TypeError:
pass
else:
raise Exception("did not raise")
def test_joinpath_returns_same_type(self):
path_posix = Path.using_module(posixpath)
res = path_posix.joinpath('foo')
assert isinstance(res, path_posix)
res2 = res.joinpath('bar')
assert isinstance(res2, path_posix)
assert res2 == 'foo/bar'
class TestSelfReturn:
"""
Some methods don't necessarily return any value (e.g. makedirs,
makedirs_p, rename, mkdir, touch, chroot). These methods should return
self anyhow to allow methods to be chained.
"""
def test_makedirs_p(self, tmpdir):
"""
Path('foo').makedirs_p() == Path('foo')
"""
p = Path(tmpdir) / "newpath"
ret = p.makedirs_p()
assert p == ret
def test_makedirs_p_extant(self, tmpdir):
p = Path(tmpdir)
ret = p.makedirs_p()
assert p == ret
def test_rename(self, tmpdir):
p = Path(tmpdir) / "somefile"
p.touch()
target = Path(tmpdir) / "otherfile"
ret = p.rename(target)
assert target == ret
def test_mkdir(self, tmpdir):
p = Path(tmpdir) / "newdir"
ret = p.mkdir()
assert p == ret
def test_touch(self, tmpdir):
p = Path(tmpdir) / "empty file"
ret = p.touch()
assert p == ret
class TestScratchDir:
"""
Tests that run in a temporary directory (does not test tempdir class)
"""
def test_context_manager(self, tmpdir):
"""Can be used as context manager for chdir."""
d = Path(tmpdir)
subdir = d / 'subdir'
subdir.makedirs()
old_dir = os.getcwd()
with subdir:
assert os.getcwd() == os.path.realpath(subdir)
assert os.getcwd() == old_dir
def test_touch(self, tmpdir):
# NOTE: This test takes a long time to run (~10 seconds).
# It sleeps several seconds because on Windows, the resolution
# of a file's mtime and ctime is about 2 seconds.
#
# atime isn't tested because on Windows the resolution of atime
# is something like 24 hours.
threshold = 1
d = Path(tmpdir)
f = d / 'test.txt'
t0 = time.time() - threshold
f.touch()
t1 = time.time() + threshold
assert f.exists()
assert f.isfile()
assert f.size == 0
assert t0 <= f.mtime <= t1
if hasattr(os.path, 'getctime'):
ct = f.ctime
assert t0 <= ct <= t1
time.sleep(threshold*2)
fobj = open(f, 'ab')
fobj.write('some bytes'.encode('utf-8'))
fobj.close()
time.sleep(threshold*2)
t2 = time.time() - threshold
f.touch()
t3 = time.time() + threshold
assert t0 <= t1 < t2 <= t3 # sanity check
assert f.exists()
assert f.isfile()
assert f.size == 10
assert t2 <= f.mtime <= t3
if hasattr(os.path, 'getctime'):
ct2 = f.ctime
if os.name == 'nt':
# On Windows, "ctime" is CREATION time
assert ct == ct2
assert ct2 < t2
else:
# On other systems, it might be the CHANGE time
# (especially on Unix, time of inode changes)
assert ct == ct2 or ct2 == f.mtime
def test_listing(self, tmpdir):
d = Path(tmpdir)
assert d.listdir() == []
f = 'testfile.txt'
af = d / f
assert af == os.path.join(d, f)
af.touch()
try:
assert af.exists()
assert d.listdir() == [af]
# .glob()
assert d.glob('testfile.txt') == [af]
assert d.glob('test*.txt') == [af]
assert d.glob('*.txt') == [af]
assert d.glob('*txt') == [af]
assert d.glob('*') == [af]
assert d.glob('*.html') == []
assert d.glob('testfile') == []
finally:
af.remove()
# Try a test with 20 files
files = [d / ('%d.txt' % i) for i in range(20)]
for f in files:
fobj = open(f, 'w')
fobj.write('some text\n')
fobj.close()
try:
files2 = d.listdir()
files.sort()
files2.sort()
assert files == files2
finally:
for f in files:
try:
f.remove()
except:
pass
def test_listdir_other_encoding(self, tmpdir):
"""
Some filesystems allow non-character sequences in path names.
``.listdir`` should still function in this case.
See issue #61 for details.
"""
assert Path(tmpdir).listdir() == []
tmpdir_bytes = str(tmpdir).encode('ascii')
filename = 'r\xe9\xf1emi'.encode('latin-1')
pathname = os.path.join(tmpdir_bytes, filename)
with open(pathname, 'wb'):
pass
# first demonstrate that os.listdir works
assert os.listdir(tmpdir_bytes)
# now try with path.py
results = Path(tmpdir).listdir()
assert len(results) == 1
res, = results
assert isinstance(res, Path)
# OS X seems to encode the bytes in the filename as %XX characters.
if platform.system() == 'Darwin':
assert res.basename() == 'r%E9%F1emi'
return
assert len(res.basename()) == len(filename)
def test_makedirs(self, tmpdir):
d = Path(tmpdir)
# Placeholder file so that when removedirs() is called,
# it doesn't remove the temporary directory itself.
tempf = d / 'temp.txt'
tempf.touch()
try:
foo = d / 'foo'
boz = foo / 'bar' / 'baz' / 'boz'
boz.makedirs()
try:
assert boz.isdir()
finally:
boz.removedirs()
assert not foo.exists()
assert d.exists()
foo.mkdir(0o750)
boz.makedirs(0o700)
try:
assert boz.isdir()
finally:
boz.removedirs()
assert not foo.exists()
assert d.exists()
finally:
os.remove(tempf)
def assertSetsEqual(self, a, b):
ad = {}
for i in a:
ad[i] = None
bd = {}
for i in b:
bd[i] = None
assert ad == bd
def test_shutil(self, tmpdir):
# Note: This only tests the methods exist and do roughly what
# they should, neglecting the details as they are shutil's
# responsibility.
d = Path(tmpdir)
testDir = d / 'testdir'
testFile = testDir / 'testfile.txt'
testA = testDir / 'A'
testCopy = testA / 'testcopy.txt'
testLink = testA / 'testlink.txt'
testB = testDir / 'B'
testC = testB / 'C'
testCopyOfLink = testC / testA.relpathto(testLink)
# Create test dirs and a file
testDir.mkdir()
testA.mkdir()
testB.mkdir()
f = open(testFile, 'w')
f.write('x' * 10000)
f.close()
# Test simple file copying.
testFile.copyfile(testCopy)
assert testCopy.isfile()
assert testFile.bytes() == testCopy.bytes()
# Test copying into a directory.
testCopy2 = testA / testFile.name
testFile.copy(testA)
assert testCopy2.isfile()
assert testFile.bytes() == testCopy2.bytes()
# Make a link for the next test to use.
if hasattr(os, 'symlink'):
testFile.symlink(testLink)
else:
testFile.copy(testLink) # fallback
# Test copying directory tree.
testA.copytree(testC)
assert testC.isdir()
self.assertSetsEqual(
testC.listdir(),
[testC / testCopy.name,
testC / testFile.name,
testCopyOfLink])
assert not testCopyOfLink.islink()
# Clean up for another try.
testC.rmtree()
assert not testC.exists()
# Copy again, preserving symlinks.
testA.copytree(testC, True)
assert testC.isdir()
self.assertSetsEqual(
testC.listdir(),
[testC / testCopy.name,
testC / testFile.name,
testCopyOfLink])
if hasattr(os, 'symlink'):
assert testCopyOfLink.islink()
assert testCopyOfLink.readlink() == testFile
# Clean up.
testDir.rmtree()
assert not testDir.exists()
self.assertList(d.listdir(), [])
def assertList(self, listing, expected):
assert sorted(listing) == sorted(expected)
def test_patterns(self, tmpdir):
d = Path(tmpdir)
names = ['x.tmp', 'x.xtmp', 'x2g', 'x22', 'x.txt']
dirs = [d, d/'xdir', d/'xdir.tmp', d/'xdir.tmp'/'xsubdir']
for e in dirs:
if not e.isdir():
e.makedirs()
for name in names:
(e/name).touch()
self.assertList(d.listdir('*.tmp'), [d/'x.tmp', d/'xdir.tmp'])
self.assertList(d.files('*.tmp'), [d/'x.tmp'])
self.assertList(d.dirs('*.tmp'), [d/'xdir.tmp'])
self.assertList(d.walk(), [e for e in dirs
if e != d] + [e/n for e in dirs
for n in names])
self.assertList(d.walk('*.tmp'),
[e/'x.tmp' for e in dirs] + [d/'xdir.tmp'])
self.assertList(d.walkfiles('*.tmp'), [e/'x.tmp' for e in dirs])
self.assertList(d.walkdirs('*.tmp'), [d/'xdir.tmp'])
def test_unicode(self, tmpdir):
d = Path(tmpdir)
p = d/'unicode.txt'
def test(enc):
""" Test that path works with the specified encoding,
which must be capable of representing the entire range of
Unicode codepoints.
"""
given = ('Hello world\n'
'\u0d0a\u0a0d\u0d15\u0a15\r\n'
'\u0d0a\u0a0d\u0d15\u0a15\x85'
'\u0d0a\u0a0d\u0d15\u0a15\u2028'
'\r'
'hanging')
clean = ('Hello world\n'
'\u0d0a\u0a0d\u0d15\u0a15\n'
'\u0d0a\u0a0d\u0d15\u0a15\n'
'\u0d0a\u0a0d\u0d15\u0a15\n'
'\n'
'hanging')
givenLines = [
('Hello world\n'),
('\u0d0a\u0a0d\u0d15\u0a15\r\n'),
('\u0d0a\u0a0d\u0d15\u0a15\x85'),
('\u0d0a\u0a0d\u0d15\u0a15\u2028'),
('\r'),
('hanging')]
expectedLines = [
('Hello world\n'),
('\u0d0a\u0a0d\u0d15\u0a15\n'),
('\u0d0a\u0a0d\u0d15\u0a15\n'),
('\u0d0a\u0a0d\u0d15\u0a15\n'),
('\n'),
('hanging')]
expectedLines2 = [
('Hello world'),
('\u0d0a\u0a0d\u0d15\u0a15'),
('\u0d0a\u0a0d\u0d15\u0a15'),
('\u0d0a\u0a0d\u0d15\u0a15'),
(''),
('hanging')]
# write bytes manually to file
f = codecs.open(p, 'w', enc)
f.write(given)
f.close()
# test all 3 path read-fully functions, including
# path.lines() in unicode mode.
assert p.bytes() == given.encode(enc)
assert p.text(enc) == clean
assert p.lines(enc) == expectedLines
assert p.lines(enc, retain=False) == expectedLines2
# If this is UTF-16, that's enough.
# The rest of these will unfortunately fail because append=True
# mode causes an extra BOM to be written in the middle of the file.
# UTF-16 is the only encoding that has this problem.
if enc == 'UTF-16':
return
# Write Unicode to file using path.write_text().
cleanNoHanging = clean + '\n' # This test doesn't work with a
# hanging line.
p.write_text(cleanNoHanging, enc)
p.write_text(cleanNoHanging, enc, append=True)
# Check the result.
expectedBytes = 2 * cleanNoHanging.replace('\n',
os.linesep).encode(enc)
expectedLinesNoHanging = expectedLines[:]
expectedLinesNoHanging[-1] += '\n'
assert p.bytes() == expectedBytes
assert p.text(enc) == 2 * cleanNoHanging
assert p.lines(enc) == 2 * expectedLinesNoHanging
assert p.lines(enc, retain=False) == 2 * expectedLines2
# Write Unicode to file using path.write_lines().
# The output in the file should be exactly the same as last time.
p.write_lines(expectedLines, enc)
p.write_lines(expectedLines2, enc, append=True)
# Check the result.
assert p.bytes() == expectedBytes
# Now: same test, but using various newline sequences.
# If linesep is being properly applied, these will be converted
# to the platform standard newline sequence.
p.write_lines(givenLines, enc)
p.write_lines(givenLines, enc, append=True)
# Check the result.
assert p.bytes() == expectedBytes
# Same test, using newline sequences that are different
# from the platform default.
def testLinesep(eol):
p.write_lines(givenLines, enc, linesep=eol)
p.write_lines(givenLines, enc, linesep=eol, append=True)
expected = 2 * cleanNoHanging.replace('\n', eol).encode(enc)
assert p.bytes() == expected
testLinesep('\n')
testLinesep('\r')
testLinesep('\r\n')
testLinesep('\x0d\x85')
# Again, but with linesep=None.
p.write_lines(givenLines, enc, linesep=None)
p.write_lines(givenLines, enc, linesep=None, append=True)
# Check the result.
expectedBytes = 2 * given.encode(enc)
assert p.bytes() == expectedBytes
assert p.text(enc) == 2 * clean
expectedResultLines = expectedLines[:]
expectedResultLines[-1] += expectedLines[0]
expectedResultLines += expectedLines[1:]
assert p.lines(enc) == expectedResultLines
test('UTF-8')
test('UTF-16BE')
test('UTF-16LE')
test('UTF-16')
def test_chunks(self, tmpdir):
p = (tempdir() / 'test.txt').touch()
txt = "0123456789"
size = 5
p.write_text(txt)
for i, chunk in enumerate(p.chunks(size)):
assert chunk == txt[i * size:i * size + size]
assert i == len(txt) / size - 1
@pytest.mark.skipif(not hasattr(os.path, 'samefile'),
reason="samefile not present")
def test_samefile(self, tmpdir):
f1 = (tempdir() / '1.txt').touch()
f1.write_text('foo')
f2 = (tempdir() / '2.txt').touch()
f1.write_text('foo')
f3 = (tempdir() / '3.txt').touch()
f1.write_text('bar')
f4 = (tempdir() / '4.txt')
f1.copyfile(f4)
assert os.path.samefile(f1, f2) == f1.samefile(f2)
assert os.path.samefile(f1, f3) == f1.samefile(f3)
assert os.path.samefile(f1, f4) == f1.samefile(f4)
assert os.path.samefile(f1, f1) == f1.samefile(f1)
def test_rmtree_p(self, tmpdir):
d = Path(tmpdir)
sub = d / 'subfolder'
sub.mkdir()
(sub / 'afile').write_text('something')
sub.rmtree_p()
assert not sub.exists()
try:
sub.rmtree_p()
except OSError:
self.fail("Calling `rmtree_p` on non-existent directory "
"should not raise an exception.")
class TestMergeTree:
@pytest.fixture(autouse=True)
def testing_structure(self, tmpdir):
self.test_dir = Path(tmpdir)
self.subdir_a = self.test_dir / 'A'
self.test_file = self.subdir_a / 'testfile.txt'
self.test_link = self.subdir_a / 'testlink.txt'
self.subdir_b = self.test_dir / 'B'
self.subdir_a.mkdir()
self.subdir_b.mkdir()
with open(self.test_file, 'w') as f:
f.write('x' * 10000)
if hasattr(os, 'symlink'):
self.test_file.symlink(self.test_link)
else:
self.test_file.copy(self.test_link)
def test_with_nonexisting_dst_kwargs(self):
self.subdir_a.merge_tree(self.subdir_b, symlinks=True)
assert self.subdir_b.isdir()
expected = set((
self.subdir_b / self.test_file.name,
self.subdir_b / self.test_link.name,
))
assert set(self.subdir_b.listdir()) == expected
assert Path(self.subdir_b / self.test_link.name).islink()
def test_with_nonexisting_dst_args(self):
self.subdir_a.merge_tree(self.subdir_b, True)
assert self.subdir_b.isdir()
expected = set((
self.subdir_b / self.test_file.name,
self.subdir_b / self.test_link.name,
))
assert set(self.subdir_b.listdir()) == expected
assert Path(self.subdir_b / self.test_link.name).islink()
def test_with_existing_dst(self):
self.subdir_b.rmtree()
self.subdir_a.copytree(self.subdir_b, True)
self.test_link.remove()
test_new = self.subdir_a / 'newfile.txt'
test_new.touch()
with open(self.test_file, 'w') as f:
f.write('x' * 5000)
self.subdir_a.merge_tree(self.subdir_b, True)
assert self.subdir_b.isdir()
expected = set((
self.subdir_b / self.test_file.name,
self.subdir_b / self.test_link.name,
self.subdir_b / test_new.name,
))
assert set(self.subdir_b.listdir()) == expected
assert Path(self.subdir_b / self.test_link.name).islink()
assert len(Path(self.subdir_b / self.test_file.name).bytes()) == 5000
def test_copytree_parameters(self):
"""
merge_tree should accept parameters to copytree, such as 'ignore'
"""
ignore = shutil.ignore_patterns('testlink*')
self.subdir_a.merge_tree(self.subdir_b, ignore=ignore)
assert self.subdir_b.isdir()
assert self.subdir_b.listdir() == [self.subdir_b / self.test_file.name]
class TestChdir:
def test_chdir_or_cd(self, tmpdir):
""" tests the chdir or cd method """
d = Path(str(tmpdir))
cwd = d.getcwd()
# ensure the cwd isn't our tempdir
assert str(d) != str(cwd)
# now, we're going to chdir to tempdir
d.chdir()
# we now ensure that our cwd is the tempdir
assert str(d.getcwd()) == str(tmpdir)
# we're resetting our path
d = Path(cwd)
# we ensure that our cwd is still set to tempdir
assert str(d.getcwd()) == str(tmpdir)
# we're calling the alias cd method
d.cd()
        # now, we ensure cwd isn't tempdir
assert str(d.getcwd()) == str(cwd)
assert str(d.getcwd()) != str(tmpdir)
class TestSubclass:
class PathSubclass(Path):
pass
def test_subclass_produces_same_class(self):
"""
When operations are invoked on a subclass, they should produce another
instance of that subclass.
"""
p = self.PathSubclass('/foo')
subdir = p / 'bar'
assert isinstance(subdir, self.PathSubclass)
class TestTempDir:
def test_constructor(self):
"""
One should be able to readily construct a temporary directory
"""
d = tempdir()
assert isinstance(d, Path)
assert d.exists()
assert d.isdir()
d.rmdir()
assert not d.exists()
def test_next_class(self):
"""
It should be possible to invoke operations on a tempdir and get
Path classes.
"""
d = tempdir()
sub = d / 'subdir'
assert isinstance(sub, Path)
d.rmdir()
def test_context_manager(self):
"""
One should be able to use a tempdir object as a context, which will
clean up the contents after.
"""
d = tempdir()
res = d.__enter__()
assert res is d
(d / 'somefile.txt').touch()
assert not isinstance(d / 'somefile.txt', tempdir)
d.__exit__(None, None, None)
assert not d.exists()
def test_context_manager_exception(self):
"""
The context manager will not clean up if an exception occurs.
"""
d = tempdir()
d.__enter__()
(d / 'somefile.txt').touch()
assert not isinstance(d / 'somefile.txt', tempdir)
d.__exit__(TypeError, TypeError('foo'), None)
assert d.exists()
def test_context_manager_using_with(self):
"""
The context manager will allow using the with keyword and
        provide a temporary directory that will be deleted afterwards.
"""
with tempdir() as d:
assert d.isdir()
assert not d.isdir()
class TestUnicode:
@pytest.fixture(autouse=True)
def unicode_name_in_tmpdir(self, tmpdir):
# build a snowman (dir) in the temporary directory
Path(tmpdir).joinpath('☃').mkdir()
def test_walkdirs_with_unicode_name(self, tmpdir):
for res in Path(tmpdir).walkdirs():
pass
class TestPatternMatching:
def test_fnmatch_simple(self):
p = Path('FooBar')
assert p.fnmatch('Foo*')
assert p.fnmatch('Foo[ABC]ar')
def test_fnmatch_custom_mod(self):
p = Path('FooBar')
p.module = ntpath
assert p.fnmatch('foobar')
assert p.fnmatch('FOO[ABC]AR')
def test_fnmatch_custom_normcase(self):
normcase = lambda path: path.upper()
p = Path('FooBar')
assert p.fnmatch('foobar', normcase=normcase)
assert p.fnmatch('FOO[ABC]AR', normcase=normcase)
def test_listdir_simple(self):
p = Path('.')
assert len(p.listdir()) == len(os.listdir('.'))
def test_listdir_empty_pattern(self):
p = Path('.')
assert p.listdir('') == []
def test_listdir_patterns(self, tmpdir):
p = Path(tmpdir)
(p/'sub').mkdir()
(p/'File').touch()
assert p.listdir('s*') == [p / 'sub']
assert len(p.listdir('*')) == 2
def test_listdir_custom_module(self, tmpdir):
"""
Listdir patterns should honor the case sensitivity of the path module
used by that Path class.
"""
always_unix = Path.using_module(posixpath)
p = always_unix(tmpdir)
(p/'sub').mkdir()
(p/'File').touch()
assert p.listdir('S*') == []
always_win = Path.using_module(ntpath)
p = always_win(tmpdir)
assert p.listdir('S*') == [p/'sub']
assert p.listdir('f*') == [p/'File']
def test_listdir_case_insensitive(self, tmpdir):
"""
Listdir patterns should honor the case sensitivity of the path module
used by that Path class.
"""
p = Path(tmpdir)
(p/'sub').mkdir()
(p/'File').touch()
assert p.listdir(ci('S*')) == [p/'sub']
assert p.listdir(ci('f*')) == [p/'File']
assert p.files(ci('S*')) == []
assert p.dirs(ci('f*')) == []
def test_walk_case_insensitive(self, tmpdir):
p = Path(tmpdir)
(p/'sub1'/'foo').makedirs_p()
(p/'sub2'/'foo').makedirs_p()
(p/'sub1'/'foo'/'bar.Txt').touch()
(p/'sub2'/'foo'/'bar.TXT').touch()
(p/'sub2'/'foo'/'bar.txt.bz2').touch()
files = list(p.walkfiles(ci('*.txt')))
assert len(files) == 2
assert p/'sub2'/'foo'/'bar.TXT' in files
assert p/'sub1'/'foo'/'bar.Txt' in files
@pytest.mark.skipif(sys.version_info < (2, 6),
reason="in_place requires io module in Python 2.6")
class TestInPlace:
reference_content = textwrap.dedent("""
The quick brown fox jumped over the lazy dog.
""".lstrip())
reversed_content = textwrap.dedent("""
.god yzal eht revo depmuj xof nworb kciuq ehT
""".lstrip())
alternate_content = textwrap.dedent("""
Lorem ipsum dolor sit amet, consectetur adipisicing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna
aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur. Excepteur
sint occaecat cupidatat non proident, sunt in culpa qui
officia deserunt mollit anim id est laborum.
""".lstrip())
@classmethod
def create_reference(cls, tmpdir):
p = Path(tmpdir)/'document'
with p.open('w') as stream:
stream.write(cls.reference_content)
return p
def test_line_by_line_rewrite(self, tmpdir):
doc = self.create_reference(tmpdir)
# reverse all the text in the document, line by line
with doc.in_place() as (reader, writer):
for line in reader:
r_line = ''.join(reversed(line.strip())) + '\n'
writer.write(r_line)
with doc.open() as stream:
data = stream.read()
assert data == self.reversed_content
def test_exception_in_context(self, tmpdir):
doc = self.create_reference(tmpdir)
with pytest.raises(RuntimeError) as exc:
with doc.in_place() as (reader, writer):
writer.write(self.alternate_content)
raise RuntimeError("some error")
assert "some error" in str(exc)
with doc.open() as stream:
data = stream.read()
assert not 'Lorem' in data
assert 'lazy dog' in data
class TestSpecialPaths:
@pytest.fixture(autouse=True, scope='class')
def appdirs_installed(cls):
pytest.importorskip('appdirs')
@pytest.fixture
def feign_linux(self, monkeypatch):
monkeypatch.setattr("platform.system", lambda: "Linux")
monkeypatch.setattr("sys.platform", "linux")
monkeypatch.setattr("os.pathsep", ":")
# remove any existing import of appdirs, as it sets up some
# state during import.
sys.modules.pop('appdirs')
def test_basic_paths(self):
appdirs = importlib.import_module('appdirs')
expected = appdirs.user_config_dir()
assert SpecialResolver(Path).user.config == expected
expected = appdirs.site_config_dir()
assert SpecialResolver(Path).site.config == expected
expected = appdirs.user_config_dir('My App', 'Me')
assert SpecialResolver(Path, 'My App', 'Me').user.config == expected
def test_unix_paths(self, tmpdir, monkeypatch, feign_linux):
fake_config = tmpdir / '_config'
monkeypatch.setitem(os.environ, 'XDG_CONFIG_HOME', str(fake_config))
expected = str(tmpdir / '_config')
assert SpecialResolver(Path).user.config == expected
def test_unix_paths_fallback(self, tmpdir, monkeypatch, feign_linux):
"Without XDG_CONFIG_HOME set, ~/.config should be used."
fake_home = tmpdir / '_home'
monkeypatch.setitem(os.environ, 'HOME', str(fake_home))
expected = str(tmpdir / '_home' / '.config')
assert SpecialResolver(Path).user.config == expected
def test_property(self):
assert isinstance(Path.special().user.config, Path)
assert isinstance(Path.special().user.data, Path)
assert isinstance(Path.special().user.cache, Path)
def test_other_parameters(self):
"""
Other parameters should be passed through to appdirs function.
"""
res = Path.special(version="1.0", multipath=True).site.config
assert isinstance(res, Path)
def test_multipath(self, feign_linux, monkeypatch, tmpdir):
"""
If multipath is provided, on Linux return the XDG_CONFIG_DIRS
"""
fake_config_1 = str(tmpdir / '_config1')
fake_config_2 = str(tmpdir / '_config2')
config_dirs = os.pathsep.join([fake_config_1, fake_config_2])
monkeypatch.setitem(os.environ, 'XDG_CONFIG_DIRS', config_dirs)
res = Path.special(multipath=True).site.config
assert isinstance(res, Multi)
assert fake_config_1 in res
assert fake_config_2 in res
assert '_config1' in str(res)
def test_reused_SpecialResolver(self):
"""
Passing additional args and kwargs to SpecialResolver should be
passed through to each invocation of the function in appdirs.
"""
appdirs = importlib.import_module('appdirs')
adp = SpecialResolver(Path, version="1.0")
res = adp.user.config
expected = appdirs.user_config_dir(version="1.0")
assert res == expected
class TestMultiPath:
def test_for_class(self):
"""
Multi.for_class should return a subclass of the Path class provided.
"""
cls = Multi.for_class(Path)
assert issubclass(cls, Path)
assert issubclass(cls, Multi)
assert cls.__name__ == 'MultiPath'
def test_detect_no_pathsep(self):
"""
If no pathsep is provided, multipath detect should return an instance
of the parent class with no Multi mix-in.
"""
path = Multi.for_class(Path).detect('/foo/bar')
assert isinstance(path, Path)
assert not isinstance(path, Multi)
def test_detect_with_pathsep(self):
"""
If a pathsep appears in the input, detect should return an instance
of a Path with the Multi mix-in.
"""
inputs = '/foo/bar', '/baz/bing'
input = os.pathsep.join(inputs)
path = Multi.for_class(Path).detect(input)
assert isinstance(path, Multi)
def test_iteration(self):
"""
Iterating over a MultiPath should yield instances of the
parent class.
"""
inputs = '/foo/bar', '/baz/bing'
input = os.pathsep.join(inputs)
path = Multi.for_class(Path).detect(input)
items = iter(path)
first = next(items)
assert first == '/foo/bar'
assert isinstance(first, Path)
assert not isinstance(first, Multi)
assert next(items) == '/baz/bing'
assert path == input
if __name__ == '__main__':
pytest.main()
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/test_path.py | Python | artistic-2.0 | 36,713 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
# Authors: Christoph Klein
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import tempfile, os
import numpy as np
import mdtraj as md
from mdtraj.formats import XYZTrajectoryFile
from mdtraj.testing import get_fn, eq
fd, temp = tempfile.mkstemp(suffix='.xyz')
fd_gz, temp_gz = tempfile.mkstemp(suffix='.xyz.gz')
def teardown_module(module):
    """Remove the temporary files created by tests in this file.
    This gets called automatically by nose."""
    os.close(fd)
    os.unlink(temp)
    os.close(fd_gz)
    os.unlink(temp_gz)
def test_read_0():
with XYZTrajectoryFile(get_fn('frame0.xyz')) as f:
xyz = f.read()
with XYZTrajectoryFile(get_fn('frame0.xyz')) as f:
xyz3 = f.read(stride=3)
eq(xyz[::3], xyz3)
def test_read_1():
reference = md.load(get_fn('frame0.dcd'), top=get_fn('native.pdb'))
traj = md.load(get_fn('frame0.xyz'), top=get_fn('native.pdb'))
eq(reference.xyz[0], traj.xyz[0], decimal=3)
def test_read_gz():
reference = md.load(get_fn('frame0.dcd'), top=get_fn('native.pdb'))
traj = md.load(get_fn('frame0.xyz.gz'), top=get_fn('native.pdb'))
eq(reference.xyz[0], traj.xyz[0], decimal=3)
def test_read_write():
xyz = np.around(10 * np.random.randn(100, 11, 3), decimals=3)
with XYZTrajectoryFile(temp, mode='w') as f:
f.write(xyz)
with XYZTrajectoryFile(temp) as f:
xyz2 = f.read()
eq(xyz, xyz2)
def test_mdwrite():
t = md.load(get_fn('frame0.xyz'), top=get_fn('native.pdb'))
t.save(temp)
t.save(temp_gz)
def test_multiread():
reference = md.load(get_fn('frame0.xyz'), top=get_fn('native.pdb'))
with XYZTrajectoryFile(get_fn('frame0.xyz')) as f:
xyz0 = f.read(n_frames=1)
xyz1 = f.read(n_frames=1)
eq(reference.xyz[0], xyz0[0]/10)
eq(reference.xyz[1], xyz1[0]/10)
def test_seek():
reference = md.load(get_fn('frame0.xyz'), top=get_fn('native.pdb'))
with XYZTrajectoryFile(get_fn('frame0.xyz')) as f:
f.seek(1)
eq(1, f.tell())
xyz1 = f.read(n_frames=1)
eq(reference.xyz[1], xyz1[0]/10)
f.seek(10)
eq(10, f.tell())
xyz10 = f.read(n_frames=1)
eq(reference.xyz[10], xyz10[0]/10)
eq(11, f.tell())
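        # whence=1 seeks relative to the current frame, mirroring file.seek()
        # semantics, so after reading frame 10 (tell() == 11) this lands on
        # frame 3.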
f.seek(-8, 1)
xyz3 = f.read(n_frames=1)
eq(reference.xyz[3], xyz3[0]/10)
f.seek(4, 1)
xyz8 = f.read(n_frames=1)
eq(reference.xyz[8], xyz8[0]/10)
def test_len():
with md.open(get_fn('frame0.xyz')) as fh:
assert len(fh) == 501
assert fh._frame_index == 0
assert len(fh.read()) == 501
| swails/mdtraj | mdtraj/tests/test_xyz.py | Python | lgpl-2.1 | 3,492 |
import StringIO
from binascii import unhexlify
def dejunk(pyew,doprint=True):
""" Remove junkcode like mov xx, call xx,jmp """
dis = pyew.disassemble(pyew.buf, pyew.processor, pyew.type, pyew.lines, pyew.bsize, baseoffset=pyew.offset)
print "Warning! The operation may change file, please backup before use this function"
maxsize = pyew.maxsize
s = StringIO.StringIO()
s.write(dis)
s.seek(0)
junk_patterns = ['e8', 'b8', 'be'] #[call, mov]
jmp_patterns = ['0f84', '0f85', '0f82', '0f83', '0f86', '0f87', '0f8f', '0f8e','0f88','0f89']
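    # x86 opcode reference for the patterns above: e8 = call rel32,
    # b8 / be = mov eax/esi, imm32.  The jcc opcodes are listed as
    # complementary pairs (0f84 je / 0f85 jne, 0f82 jb / 0f83 jae,
    # 0f86 jbe / 0f87 ja, 0f8f jg / 0f8e jle, 0f88 js / 0f89 jns); the
    # index arithmetic below uses that pairing to spot two opposite
    # conditional jumps to the same target and patch the first one with
    # 90 eb (nop; jmp rel8).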
lines = s.readlines()
flag = False
for i in lines:
if '--------------' not in i:
sp = i.split()
if sp[1] in junk_patterns:
if sp[1] == 'e8':
if int(sp[4], 16) > maxsize:
print sp
pyew.f.seek(int(sp[0], 16))
pyew.f.write(unhexlify('90'))
pyew.seek(int(sp[0], 16))
# deal with mov
elif int(sp[2][4:6] + sp[2][2:4] + sp[2][:2], 16) > maxsize:
print sp
pyew.f.seek(int(sp[0], 16))
pyew.f.write(unhexlify('90'))
pyew.seek(int(sp[0], 16))
# deal with jmp
elif sp[1] in jmp_patterns:
if not flag:
flag = True
addroffset = sp[0]
offvalue = sp[4]
mclocal = sp[1]
                elif sp[4] == offvalue:
flag = False
                    if jmp_patterns.index(mclocal) % 2 == 0:
if sp[1] == jmp_patterns[jmp_patterns.index(mclocal)+1]:
pyew.f.seek(int(addroffset, 16))
print sp
pyew.f.write(unhexlify('90eb'))
pyew.seek(int(addroffset, 16))
elif sp[1] == jmp_patterns[jmp_patterns.index(mclocal)-1]:
pyew.f.seek(int(addroffset, 16))
pyew.f.write(unhexlify('90eb'))
pyew.seek(int(addroffset, 16))
functions = {"dejunk": dejunk} | emptyiscolor/pyew-2.0-linux | plugins/dejunk.py | Python | gpl-2.0 | 1,796 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT_PATH)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
SITE_BRANDING = 'OpenStack'
SITE_NAME = 'openstack'
ENABLE_VNC = True
LOGIN_URL = '/auth/login'
LOGIN_REDIRECT_URL = '/'
MEDIA_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.abspath(os.path.join(ROOT_PATH, '..', 'static'))
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
CREDENTIAL_AUTHORIZATION_DAYS = '5'
ROOT_URLCONF = 'dashboard.urls'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django_openstack.middleware.keystone.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'dashboard.middleware.DashboardLogUnhandledExceptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django_openstack.context_processors.object_store',
'django_openstack.context_processors.tenants',
'django_openstack.context_processors.quantum',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
TEMPLATE_DIRS = (
os.path.join(ROOT_PATH, 'templates'),
)
STATICFILES_DIRS = (
os.path.join(ROOT_PATH, 'static'),
)
INSTALLED_APPS = (
'dashboard',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_openstack',
'django_openstack.templatetags',
'mailer',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
TIME_ZONE = None
gettext_noop = lambda s: s
LANGUAGES = (
('en', gettext_noop('English')),
('it', gettext_noop('Italiano')),
('es', gettext_noop('Spanish')),
('fr', gettext_noop('French')),
('ja', gettext_noop('Japanese')),
('pt', gettext_noop('Portuguese')),
('pl', gettext_noop('Polish')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
LANGUAGE_CODE = 'en'
USE_I18N = True
ACCOUNT_ACTIVATION_DAYS = 7
TOTAL_CLOUD_RAM_GB = 10
try:
from local.local_settings import *
except Exception, e:
logging.exception(e)
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
try:
import debug_toolbar
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',)
except ImportError:
logging.info('Running in debug mode without debug_toolbar.')
OPENSTACK_KEYSTONE_DEFAULT_ROLE = 'Member'
| daniel-hou0/horizon | openstack-dashboard/dashboard/settings.py | Python | apache-2.0 | 4,094 |
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "",
"(active)": "",
"Also available in:": "",
"Archive": "",
"Categories": "",
"Comments": "",
"LANGUAGE": "Ingles",
"Languages:": "Mga Wika:",
"More posts about %s": "",
"Newer posts": "",
"Next post": "Susunod",
"No posts found.": "",
"Nothing found.": "",
"Older posts": "",
"Original site": "",
"Posted:": "",
"Posts about %s": "",
"Posts for year %s": "",
"Posts for {month} {day}, {year}": "",
"Posts for {month} {year}": "",
"Previous post": "",
"Publication date": "",
"RSS feed": "",
"Read in English": "",
"Read more": "",
"Skip to main content": "",
"Source": "",
"Subcategories:": "",
"Tags and Categories": "",
"Tags": "Mga Tag",
"old posts, page %d": "",
"page %d": "",
}
| JohnTroony/nikola | nikola/data/themes/base/messages/messages_tl.py | Python | mit | 921 |
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="sankey", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
                only if the hover label text spans two or
                more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
""",
),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/sankey/_hoverlabel.py | Python | mit | 2,055 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
from aausat_parser import aausat_parser
class qa_aausat_parser (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_aausat_parser, "qa_aausat_parser.xml")
| daniestevez/gr-aausat | python/qa_aausat_parser.py | Python | mit | 1,238 |
"""This module defines the class DesiData to load DESI data
"""
import multiprocessing
import logging
import fitsio
import healpy
import numpy as np
from picca.delta_extraction.data_catalogues.desi_healpix import DesiHealpix, defaults, accepted_options
from picca.delta_extraction.errors import DataError
class DesisimMocks(DesiHealpix):
"""Reads the spectra from DESI using healpix mode and formats its data as a
list of Forest instances.
Should work for both data and mocks. This is specified using the 'mode'
keyword. It is required to set the in_nside member.
Methods
-------
filter_forests (from Data)
set_blinding (from Data)
read_file (from DesiHealpix)
__init__
read_data
Attributes
----------
analysis_type: str (from Data)
Selected analysis type. Current options are "BAO 3D" or "PK 1D"
forests: list of Forest (from Data)
A list of Forest from which to compute the deltas.
min_num_pix: int (from Data)
Minimum number of pixels in a forest. Forests with less pixels will be dropped.
blinding: str (from DesiData)
A string specifying the chosen blinding strategies. Must be one of the
accepted values in ACCEPTED_BLINDING_STRATEGIES
catalogue: astropy.table.Table (from DesiData)
The quasar catalogue
input_directory: str (from DesiData)
Directory to spectra files.
logger: logging.Logger
Logger object
"""
def __init__(self, config):
"""Initialize class instance
Arguments
---------
config: configparser.SectionProxy
Parsed options to initialize class
"""
self.logger = logging.getLogger(__name__)
super().__init__(config)
def read_data(self):
"""Read the spectra and formats its data as Forest instances.
Method used to read healpix-based survey data.
Return
------
is_mock: bool
True as we are loading mocks
is_sv: bool
False as mocks data is not part of DESI SV data
Raise
-----
DataError if no quasars were found
"""
in_nside = 16
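        # healpy.ang2pix takes (colatitude, longitude) in radians, hence the
        # pi/2 - DEC term; RA/DEC are assumed to already be in radians here,
        # as prepared by the upstream catalogue reader.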
healpix = [
healpy.ang2pix(in_nside, np.pi / 2 - row["DEC"], row["RA"], nest=True)
for row in self.catalogue
]
self.catalogue["HEALPIX"] = healpix
self.catalogue.sort("HEALPIX")
        # Current mocks don't have the "SURVEY" column in the catalogue, but
        # it's not clear future ones won't have it, so it is safer to keep
        # this fallback for now.
        if "SURVEY" not in self.catalogue.colnames:
            self.catalogue["SURVEY"] = np.ma.masked
grouped_catalogue = self.catalogue.group_by(["HEALPIX", "SURVEY"])
        arguments = []
self.num_processors = multiprocessing.cpu_count() // 2
context = multiprocessing.get_context('fork')
pool = context.Pool(processes=self.num_processors)
manager = multiprocessing.Manager()
forests_by_targetid = manager.dict()
for (index,
(healpix, survey)), group in zip(enumerate(grouped_catalogue.groups.keys),
grouped_catalogue.groups):
filename = (
f"{self.input_directory}/{healpix//100}/{healpix}/spectra-"
f"{in_nside}-{healpix}.fits")
            arguments.append((filename, group, forests_by_targetid))
self.logger.info(f"reading data from {len(arguments)} files")
        pool.starmap(self.read_file, arguments)
pool.close()
if len(forests_by_targetid) == 0:
raise DataError("No Quasars found, stopping here")
self.forests = list(forests_by_targetid.values())
return True, False
| igmhub/picca | py/picca/delta_extraction/data_catalogues/desisim_mocks.py | Python | gpl-3.0 | 3,743 |
# ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from __future__ import absolute_import, unicode_literals
from functools import reduce
import json
import logging
from django.contrib.admin.views import main
from django.contrib.admin.actions import delete_selected
from django.contrib.auth import get_permission_codename
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db.models import Q
from django.http import (
HttpResponse, HttpResponseBadRequest,
HttpResponseForbidden, HttpResponseNotFound, HttpResponseServerError)
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.encoding import force_text
from mptt.exceptions import InvalidMove
from mptt.forms import MPTTAdminForm
from feincms import settings
from feincms.extensions import ExtensionModelAdmin
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------
def django_boolean_icon(field_val, alt_text=None, title=None):
"""
Return HTML code for a nice representation of true/false.
"""
# Origin: contrib/admin/templatetags/admin_list.py
BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}
alt_text = alt_text or BOOLEAN_MAPPING[field_val]
if title is not None:
title = 'title="%s" ' % title
else:
title = ''
icon_url = static('feincms/img/icon-%s.gif' % BOOLEAN_MAPPING[field_val])
return mark_safe(
'<img src="%s" alt="%s" %s/>' % (icon_url, alt_text, title))
def _build_tree_structure(queryset):
"""
Build an in-memory representation of the item tree, trying to keep
database accesses down to a minimum. The returned dictionary looks like
this (as json dump):
{"6": [7, 8, 10]
"7": [12],
"8": [],
...
}
"""
all_nodes = {}
mptt_opts = queryset.model._mptt_meta
items = queryset.order_by(
mptt_opts.tree_id_attr,
mptt_opts.left_attr,
).values_list(
"pk",
"%s_id" % mptt_opts.parent_attr,
)
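    # Ordering by (tree_id, lft) walks each tree depth-first, so every
    # parent's child list is appended in tree (left-to-right) order.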
for p_id, parent_id in items:
all_nodes.setdefault(
str(parent_id) if parent_id else 0,
[],
).append(p_id)
return all_nodes
# ------------------------------------------------------------------------
def ajax_editable_boolean_cell(item, attr, text='', override=None):
"""
Generate a html snippet for showing a boolean value on the admin page.
Item is an object, attr is the attribute name we should display. Text
is an optional explanatory text to be included in the output.
This function will emit code to produce a checkbox input with its state
corresponding to the item.attr attribute if no override value is passed.
This input is wired to run a JS ajax updater to toggle the value.
If override is passed in, ignores the attr attribute and returns a
static image for the override boolean with no user interaction possible
(useful for "disabled and you can't change it" situations).
"""
if text:
text = ' (%s)' % text
if override is not None:
a = [django_boolean_icon(override, text), text]
else:
value = getattr(item, attr)
a = [
'<input type="checkbox" data-inplace data-inplace-id="%s"'
' data-inplace-attribute="%s" %s>' % (
item.pk,
attr,
'checked="checked"' if value else '',
)]
a.insert(0, '<div id="wrap_%s_%d">' % (attr, item.pk))
a.append('</div>')
return ''.join(a)
# ------------------------------------------------------------------------
def ajax_editable_boolean(attr, short_description):
"""
Convenience function: Assign the return value of this method to a variable
of your ModelAdmin class and put the variable name into list_display.
Example::
class MyTreeEditor(TreeEditor):
list_display = ('__str__', 'active_toggle')
active_toggle = ajax_editable_boolean('active', _('is active'))
"""
def _fn(self, item):
return ajax_editable_boolean_cell(item, attr)
_fn.allow_tags = True
_fn.short_description = short_description
_fn.editable_boolean_field = attr
return _fn
# ------------------------------------------------------------------------
class ChangeList(main.ChangeList):
"""
Custom ``ChangeList`` class which ensures that the tree entries are always
ordered in depth-first order (order by ``tree_id``, ``lft``).
"""
def __init__(self, request, *args, **kwargs):
self.user = request.user
super(ChangeList, self).__init__(request, *args, **kwargs)
def get_queryset(self, *args, **kwargs):
mptt_opts = self.model._mptt_meta
qs = super(ChangeList, self).get_queryset(*args, **kwargs).\
order_by(mptt_opts.tree_id_attr, mptt_opts.left_attr)
# Force has_filters, so that the expand/collapse in sidebar is visible
self.has_filters = True
return qs
def get_results(self, request):
mptt_opts = self.model._mptt_meta
if settings.FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS:
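            # Also pull in every ancestor of the matched nodes so the tree
            # rendering shows unbroken paths down from the tree roots.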
clauses = [
Q(**{
mptt_opts.tree_id_attr: tree_id,
mptt_opts.left_attr + '__lte': lft,
mptt_opts.right_attr + '__gte': rght,
}) for lft, rght, tree_id in self.queryset.values_list(
mptt_opts.left_attr,
mptt_opts.right_attr,
mptt_opts.tree_id_attr,
)
]
            # We could optimise a bit here by explicitly filtering out
# any clauses that are for parents of nodes included in the
# queryset anyway. (ie: drop all clauses that refer to a node
# that is a parent to another node)
if clauses:
# Note: Django ORM is smart enough to drop additional
# clauses if the initial query set is unfiltered. This
# is good.
self.queryset |= self.model._default_manager.filter(
reduce(lambda p, q: p | q, clauses),
)
super(ChangeList, self).get_results(request)
# Pre-process permissions because we still have the request here,
# which is not passed in later stages in the tree editor
for item in self.result_list:
item.feincms_changeable = self.model_admin.has_change_permission(
request, item)
item.feincms_addable = (
item.feincms_changeable and
self.model_admin.has_add_permission(request, item))
# ------------------------------------------------------------------------
class TreeEditor(ExtensionModelAdmin):
"""
The ``TreeEditor`` modifies the standard Django administration change list
to a drag-drop enabled interface for django-mptt_-managed Django models.
.. _django-mptt: https://github.com/django-mptt/django-mptt/
"""
form = MPTTAdminForm
if settings.FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS:
# Make sure that no pagination is displayed. Slicing is disabled
# anyway, therefore this value does not have an influence on the
# queryset
list_per_page = 999999999
def __init__(self, *args, **kwargs):
super(TreeEditor, self).__init__(*args, **kwargs)
self.list_display = list(self.list_display)
if 'indented_short_title' not in self.list_display:
if self.list_display[0] == 'action_checkbox':
self.list_display[1] = 'indented_short_title'
else:
self.list_display[0] = 'indented_short_title'
self.list_display_links = ('indented_short_title',)
opts = self.model._meta
self.change_list_template = [
'admin/feincms/%s/%s/tree_editor.html' % (
opts.app_label, opts.object_name.lower()),
'admin/feincms/%s/tree_editor.html' % opts.app_label,
'admin/feincms/tree_editor.html',
]
self.object_change_permission =\
opts.app_label + '.' + get_permission_codename('change', opts)
self.object_add_permission =\
opts.app_label + '.' + get_permission_codename('add', opts)
self.object_delete_permission =\
opts.app_label + '.' + get_permission_codename('delete', opts)
def changeable(self, item):
return getattr(item, 'feincms_changeable', True)
def indented_short_title(self, item):
"""
Generate a short title for an object, indent it depending on
the object's depth in the hierarchy.
"""
mptt_opts = item._mptt_meta
r = ''
try:
url = item.get_absolute_url()
except (AttributeError,):
url = None
if url:
r = (
'<input type="hidden" class="medialibrary_file_path"'
' value="%s" id="_refkey_%d" />') % (url, item.pk)
changeable_class = ''
if not self.changeable(item):
changeable_class = ' tree-item-not-editable'
r += (
'<span id="page_marker-%d" class="page_marker%s"'
' style="width: %dpx;"> </span> ') % (
item.pk,
changeable_class,
14 + getattr(item, mptt_opts.level_attr) * 18)
# r += '<span tabindex="0">'
if hasattr(item, 'short_title') and callable(item.short_title):
r += escape(item.short_title())
else:
r += escape('%s' % item)
# r += '</span>'
return mark_safe(r)
indented_short_title.short_description = _('title')
indented_short_title.allow_tags = True
def _collect_editable_booleans(self):
"""
Collect all fields marked as editable booleans. We do not
want the user to be able to edit arbitrary fields by crafting
an AJAX request by hand.
"""
if hasattr(self, '_ajax_editable_booleans'):
return
self._ajax_editable_booleans = {}
for field in self.list_display:
# The ajax_editable_boolean return value has to be assigned
# to the ModelAdmin class
try:
item = getattr(self.__class__, field)
except (AttributeError, TypeError):
continue
attr = getattr(item, 'editable_boolean_field', None)
if attr:
if hasattr(item, 'editable_boolean_result'):
result_func = item.editable_boolean_result
else:
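                    # Bind attr through a factory function so each result_func
                    # keeps its own attribute (avoids the late-binding closure pitfall).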
def _fn(attr):
return lambda self, instance: [
ajax_editable_boolean_cell(instance, attr)]
result_func = _fn(attr)
self._ajax_editable_booleans[attr] = result_func
def _toggle_boolean(self, request):
"""
Handle an AJAX toggle_boolean request
"""
try:
item_id = int(request.POST.get('item_id', None))
attr = str(request.POST.get('attr', None))
        except (TypeError, ValueError):
return HttpResponseBadRequest("Malformed request")
if not request.user.is_staff:
logger.warning(
"Denied AJAX request by non-staff \"%s\" to toggle boolean"
" %s for object #%s", request.user, attr, item_id)
return HttpResponseForbidden(
_("You do not have permission to modify this object"))
self._collect_editable_booleans()
if attr not in self._ajax_editable_booleans:
return HttpResponseBadRequest("not a valid attribute %s" % attr)
try:
obj = self.model._default_manager.get(pk=item_id)
except self.model.DoesNotExist:
return HttpResponseNotFound("Object does not exist")
if not self.has_change_permission(request, obj=obj):
logger.warning(
"Denied AJAX request by \"%s\" to toggle boolean %s for"
" object %s", request.user, attr, item_id)
return HttpResponseForbidden(
_("You do not have permission to modify this object"))
new_state = not getattr(obj, attr)
logger.info(
"Toggle %s on #%d %s to %s by \"%s\"",
attr, obj.pk, obj, "on" if new_state else "off", request.user)
try:
before_data = self._ajax_editable_booleans[attr](self, obj)
setattr(obj, attr, new_state)
obj.save()
# Construct html snippets to send back to client for status update
data = self._ajax_editable_booleans[attr](self, obj)
except Exception:
logger.exception(
"Unhandled exception while toggling %s on %s", attr, obj)
return HttpResponseServerError(
"Unable to toggle %s on %s" % (attr, obj))
# Weed out unchanged cells to keep the updates small. This assumes
        # that the order a possible get_descendants() returns does not change
# before and after toggling this attribute. Unlikely, but still...
return HttpResponse(
json.dumps([b for a, b in zip(before_data, data) if a != b]),
content_type="application/json")
def get_changelist(self, request, **kwargs):
return ChangeList
def changelist_view(self, request, extra_context=None, *args, **kwargs):
"""
Handle the changelist view, the django view for the model instances
change list/actions page.
"""
if 'actions_column' not in self.list_display:
self.list_display.append('actions_column')
# handle common AJAX requests
if request.is_ajax():
cmd = request.POST.get('__cmd')
if cmd == 'toggle_boolean':
return self._toggle_boolean(request)
elif cmd == 'move_node':
return self._move_node(request)
return HttpResponseBadRequest('Oops. AJAX request not understood.')
extra_context = extra_context or {}
extra_context['tree_structure'] = mark_safe(
json.dumps(_build_tree_structure(self.get_queryset(request))))
extra_context['node_levels'] = mark_safe(json.dumps(
dict(self.get_queryset(request).order_by().values_list(
'pk', self.model._mptt_meta.level_attr
))
))
return super(TreeEditor, self).changelist_view(
request, extra_context, *args, **kwargs)
def has_add_permission(self, request, obj=None):
"""
Implement a lookup for object level permissions. Basically the same as
ModelAdmin.has_add_permission, but also passes the obj parameter in.
"""
perm = self.object_add_permission
if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
r = request.user.has_perm(perm, obj)
else:
r = request.user.has_perm(perm)
return r and super(TreeEditor, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
"""
Implement a lookup for object level permissions. Basically the same as
ModelAdmin.has_change_permission, but also passes the obj parameter in.
"""
perm = self.object_change_permission
if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
r = request.user.has_perm(perm, obj)
else:
r = request.user.has_perm(perm)
return r and super(TreeEditor, self).has_change_permission(
request, obj)
def has_delete_permission(self, request, obj=None):
"""
Implement a lookup for object level permissions. Basically the same as
ModelAdmin.has_delete_permission, but also passes the obj parameter in.
"""
perm = self.object_delete_permission
if settings.FEINCMS_TREE_EDITOR_OBJECT_PERMISSIONS:
r = request.user.has_perm(perm, obj)
else:
r = request.user.has_perm(perm)
return r and super(TreeEditor, self).has_delete_permission(
request, obj)
def _move_node(self, request):
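        # Prefer the default manager when it provides move_node();
        # otherwise fall back to the MPTT tree manager.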
if hasattr(self.model.objects, 'move_node'):
tree_manager = self.model.objects
else:
tree_manager = self.model._tree_manager
queryset = self.get_queryset(request)
cut_item = queryset.get(pk=request.POST.get('cut_item'))
pasted_on = queryset.get(pk=request.POST.get('pasted_on'))
position = request.POST.get('position')
if not self.has_change_permission(request, cut_item):
self.message_user(request, _('No permission'))
return HttpResponse('FAIL')
if position in ('last-child', 'left', 'right'):
try:
tree_manager.move_node(cut_item, pasted_on, position)
except InvalidMove as e:
self.message_user(request, '%s' % e)
return HttpResponse('FAIL')
# Ensure that model save methods have been run (required to
# update Page._cached_url values, might also be helpful for other
# models inheriting MPTTModel)
for item in queryset.filter(id__in=(cut_item.pk, pasted_on.pk)):
item.save()
self.message_user(
request,
ugettext('%s has been moved to a new position.') % cut_item)
return HttpResponse('OK')
self.message_user(request, _('Did not understand moving instruction.'))
return HttpResponse('FAIL')
def _actions_column(self, instance):
if self.changeable(instance):
return ['<div class="drag_handle"></div>']
return []
def actions_column(self, instance):
return ' '.join(self._actions_column(instance))
actions_column.allow_tags = True
actions_column.short_description = _('actions')
def delete_selected_tree(self, modeladmin, request, queryset):
"""
Deletes multiple instances and makes sure the MPTT fields get
recalculated properly. (Because merely doing a bulk delete doesn't
trigger the post_delete hooks.)
"""
# If this is True, the confirmation page has been displayed
if request.POST.get('post'):
n = 0
# TODO: The disable_mptt_updates / rebuild is a work around
# for what seems to be a mptt problem when deleting items
# in a loop. Revisit this, there should be a better solution.
with queryset.model.objects.disable_mptt_updates():
for obj in queryset:
if self.has_delete_permission(request, obj):
obj.delete()
n += 1
obj_display = force_text(obj)
self.log_deletion(request, obj, obj_display)
else:
logger.warning(
"Denied delete request by \"%s\" for object #%s",
request.user, obj.id)
if n > 0:
queryset.model.objects.rebuild()
self.message_user(
request,
_("Successfully deleted %(count)d items.") % {"count": n})
# Return None to display the change list page again
return None
else:
# (ab)using the built-in action to display the confirmation page
return delete_selected(self, request, queryset)
def get_actions(self, request):
actions = super(TreeEditor, self).get_actions(request)
if 'delete_selected' in actions:
actions['delete_selected'] = (
self.delete_selected_tree,
'delete_selected',
_("Delete selected %(verbose_name_plural)s"))
return actions
| mcmaxwell/idea_digital_agency | idea/feincms/admin/tree_editor.py | Python | mit | 20,243 |
import purestorage
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from base64 import b64encode
import os
import sys
import json
import getpass
from optparse import OptionParser
from datetime import datetime, timedelta
import time
from time import gmtime, strftime, strptime
from operator import itemgetter, attrgetter
# Global Variables
VERSION = '1.1.0'
HEADER = 'Pure Storage Create Volume (' + VERSION + ')'
BANNER = ('=' * 132)
DEBUG_LEVEL = 0
VERBOSE_FLAG = False
COOKIE = ''
def create_session(flashArray, user, password):
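    # purestorage.FlashArray opens a REST API session against the array
    # using the supplied credentials.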
jsonData = purestorage.FlashArray(flashArray, user, password)
return(jsonData)
def parsecl():
usage = 'usage: %prog [options]'
version = '%prog ' + VERSION
description = "This application has been developed using Pure Storage v1.4 RESTful Web Service interfaces. Developed and tested using Python 3.6 on Mac OS 10.12. Please contact [email protected] for assistance."
parser = OptionParser(usage=usage, version=version, description=description)
parser.add_option('-d', '--debug',
type = 'int',
dest = 'DEBUG_LEVEL',
default = 0,
help = 'Debug level, used for HTTP debugging')
parser.add_option('-p', '--password',
action = 'store',
type = 'string',
dest = 'password',
help = 'Pure password')
parser.add_option('-s', '--server',
action = 'store',
type = 'string',
dest = 'flashArray',
help = 'Pure FlashArray')
parser.add_option('-S', '--size',
action = 'store',
type = 'string',
dest = 'size',
help = 'Volume size S,K,M,G,T,P')
parser.add_option('-u', '--user',
action = 'store',
type = 'string',
dest = 'user',
help = 'Pure user name')
parser.add_option('-v', '--verbose',
action = 'store_true',
dest = 'VERBOSE_FLAG',
default = False,
help = 'Verbose [default: %default]')
parser.add_option('-V', '--volume',
action = 'store',
dest = 'volume',
default = False,
help = 'Volume name')
(options, args) = parser.parse_args()
'''
print("Options:", options)
print("Args:", args)
'''
return(options)
def main():
# Setup variables
global DEBUG_LEVEL
exit_code = 0
# Check for command line parameters
options = parsecl()
password = options.password
user = options.user
flashArray = options.flashArray
volsize = options.size
volume = options.volume
DEBUG_LEVEL = options.DEBUG_LEVEL
VERBOSE_FLAG = options.VERBOSE_FLAG
if DEBUG_LEVEL != 0:
print('Password', password)
print('User', user)
print('Flash Array', flashArray)
print('Volume name', volume)
print('Volume size', volsize)
print('Debug Level:', DEBUG_LEVEL)
    if flashArray is None:
        sys.exit('Exiting: You must provide FlashArray details')
    if user and password is None:
        sys.exit('Exiting: You must provide password if using username')
print(BANNER)
print(HEADER + ' - ' + flashArray)
print(strftime('%d/%m/%Y %H:%M:%S %Z', gmtime()))
print(BANNER)
# Create session
array = create_session(flashArray, user, password)
# Create Volume
jsonData = array.create_volume(volume, size=volsize)
if VERBOSE_FLAG:
print(BANNER)
print(json.dumps(jsonData, sort_keys=False, indent=4))
name = (jsonData['name'])
size = (jsonData['size'])
cdate = (jsonData['created'])
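    # Reformat the ISO 8601 'created' timestamp into DD/MM/YYYY HH:MM:SS for display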
c1 = cdate[0:10]
c2 = cdate[11:19]
c3 = c1 + ' ' + c2
c4 = strptime(c3,'%Y-%m-%d %H:%M:%S')
created = strftime('%d/%m/%Y %H:%M:%S', c4)
print('{0:20} {1:>20} {2:20}'.format('Name', 'Size', 'Created'))
print('{0:20} {1:20} {2:20}'.format(name, size, created))
# Close API session
array.invalidate_cookie()
print(BANNER)
print(strftime('%d/%m/%Y %H:%M:%S %Z', gmtime()))
print(BANNER)
sys.exit(exit_code)
if __name__ == '__main__':
    main()
| PureStorage-OpenConnect/python-scripts | Pure_Create_Volume.py | Python | apache-2.0 | 4,551 |
from flask import render_template, flash, redirect, session, url_for, request, g
from flask_login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid
from .forms import LoginForm, EditForm, PostForm
from .models import User, Post
from datetime import datetime
from config import POSTS_PER_PAGE
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@app.route('/index/<int:page>', methods=['GET', 'POST'])
@login_required
def index(page=1):
form = PostForm()
if form.validate_on_submit():
post = Post(body=form.post.data, timestamp=datetime.utcnow(), author=g.user)
db.session.add(post)
db.session.commit()
flash('Your post is now live!')
return redirect(url_for('index'))
    posts = g.user.followed_posts().paginate(page, POSTS_PER_PAGE, False).items
return render_template('index.html',
title='Home',
form=form,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash('Invalid login. Please try again.')
return redirect(url_for('login'))
user = User.query.filter_by(email=resp.email).first()
if user is None:
nickname = resp.nickname
if nickname is None or nickname == "":
nickname = resp.email.split('@')[0]
nickname = User.make_unique_nickname(nickname)
user = User(nickname=nickname, email=resp.email)
db.session.add(user)
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember=remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/user/<nickname>')
@login_required
def user(nickname):
user = User.query.filter_by(nickname=nickname).first()
    if user is None:
flash('User ' + nickname + ' not found.')
return redirect(url_for('index'))
posts = [
{'author': user, 'body': 'Test post #1'},
{'author': user, 'body': 'Test post #2'}
]
return render_template('user.html',
user=user,
posts=posts)
@app.route('/edit', methods=['GET', 'POST'])
@login_required
def edit():
form = EditForm(g.user.nickname)
if form.validate_on_submit():
g.user.nickname = form.nickname.data
g.user.about_me = form.about_me.data
db.session.add(g.user)
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('edit'))
else:
form.nickname.data = g.user.nickname
form.about_me.data = g.user.about_me
return render_template('edit.html', form=form)
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
@app.route('/follow/<nickname>')
@login_required
def follow(nickname):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
flash('User %s not found.' % nickname)
return redirect(url_for('index'))
if user == g.user:
flash('You can\'t follow yourself!')
return redirect(url_for('user', nickname=nickname))
u = g.user.follow(user)
if u is None:
flash('Cannot follow ' + nickname + '.')
return redirect(url_for('user', nickname=nickname))
db.session.add(u)
db.session.commit()
flash('You are now following ' + nickname + '!')
return redirect(url_for('user', nickname=nickname))
@app.route('/unfollow/<nickname>')
@login_required
def unfollow(nickname):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
flash('User %s not found.' % nickname)
return redirect(url_for('index'))
if user == g.user:
flash('You can\'t unfollow yourself!')
return redirect(url_for('user', nickname=nickname))
u = g.user.unfollow(user)
if u is None:
flash('Cannot unfollow ' + nickname + '.')
return redirect(url_for('user', nickname=nickname))
db.session.add(u)
db.session.commit()
flash('You have stopped following ' + nickname + '.')
return redirect(url_for('user', nickname=nickname))
@app.route('/love')
def love():
return render_template('love.html')
| cclinet/flaskr | app/views.py | Python | gpl-3.0 | 5,446 |
modRoles = ["222359639575101442","225444761039798272"] #The id of the mod role(s). Can be found by typing "\@MOD-ROLE-MENTION"
backerRoles = ["225042074775322624", "225444761039798272"]
forbiddenChannels = ["220502476850200595","222739924313440257"] #Channel id(s) for channels the bot is not wanted in. Ids can be found by typing "\#forbidden-channel"
#What you use to trigger a command (EG: !help OR /help)
operator = "!"
#timezones to output on !time from TZ row of https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
timezones = [
"US/Pacific",
"Europe/Berlin",
"Australia/Victoria"
]
#The id of the giveaway channel(s). Can be found by typing "\#giveaway-channel"
giveawayChannels = ["222739948153995264"]
#The text displayed for !rules
rulesText = "Please see <#222739924313440257>"
specs = """**Hardware & System Specs**:
Case: Corsair 650D
CPU: Intel i7 5820K
RAM: Kingston HyperX 16GB DDR4 2133 Mhz
GPU: Gigabtye G1 Gaming GTX 980
Motherboard: MSI X99A SLI Plus
SSD: Samsung 840 EVO 120GB
HDD: 2x Seagate Barracuda 2TB
OS: Windows 10
Mic: Blue Yeti"""
links = """<https://www.youtube.com/idiotechgaming>
<https://www.twitch.tv/idiotechgaming>
<https://www.patreon.com/IdiotechGaming>
<https://www.twitchalerts.com/donate/idiotechgaming>
<https://twitter.com/IdiotechGaming>
<https://www.facebook.com/idiotechgaming>
<https://www.reddit.com/r/idiotechgaming/>"""
helpText = """Here are all the current commands:
`{0}time place*` - Says the time in several timezones or the one supplied
`{0}joke` - Tells you a joke
`{0}roll sides*` - Rolls a die with the amount of sides you ask for. Default is 6
`{0}youtube` - Shows Idiotech's latest video
`{0}games` - Lists the next 10 games to be released that Idiotech has his eyes on.
**Backer only commands**
`{0}burn` - Sick burn, bro!
`{0}fight @username` - Fight your foes *with spam*.
`{0}hug @username` - Show your affection for someone.
**Mod only commands**
`{0}giveaway start Giveaway-Name` - Starts a giveaway with the supplied name
`{0}giveaway stop` - Stops the current giveaway
`{0}poll title; option 1; option 2` - Create a poll. You can set up to 10 options
`{0}purge number` - Deletes that many messages above it.
Arguments with an * after it aren't needed""".format(operator)
#Strawpoll API endpoint used by the poll command
pollwebsite = "https://strawpoll.me/api/v2/polls"
| SolarPolarMan/idiotech-gaming-bot | settings.py | Python | mit | 2,321 |
# Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from ..helpers.command import Command
from ..helpers.textutils import gen_paraphrase
@Command('paraphrase')
def cmd(send, msg, args):
"""Paraphrase a sentence.
Syntax: {command} <text>
"""
if not msg:
send("Paraphrase what?")
return
send(gen_paraphrase(msg))
| N6UDP/cslbot | cslbot/commands/paraphrase.py | Python | gpl-2.0 | 1,141 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Customized QWebInspector for QtWebEngine."""
import os
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEngineSettings
from qutebrowser.browser import inspector
class WebEngineInspector(inspector.AbstractWebInspector):
"""A web inspector for QtWebEngine."""
def __init__(self, parent=None):
super().__init__(parent)
self.port = None
view = QWebEngineView()
settings = view.settings()
settings.setAttribute(QWebEngineSettings.JavascriptEnabled, True)
self._set_widget(view)
def inspect(self, _page):
"""Set up the inspector."""
try:
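            # Remote debugging must already be enabled through the
            # QTWEBENGINE_REMOTE_DEBUGGING environment variable.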
port = int(os.environ['QTWEBENGINE_REMOTE_DEBUGGING'])
except KeyError:
raise inspector.WebInspectorError(
"QtWebEngine inspector is not enabled. See "
"'qutebrowser --help' for details.")
url = QUrl('http://localhost:{}/'.format(port))
self._widget.load(url)
self.show()
| toofar/qutebrowser | qutebrowser/browser/webengine/webengineinspector.py | Python | gpl-3.0 | 1,816 |
"""This is a exmaple of conventional random walke simultion. There is
no resampling after the random walk dynamics.
"""
import sys
import os
import os.path as osp
from pathlib import Path
import numpy as np
from wepy.resampling.resamplers.resampler import NoResampler
from wepy.resampling.distances.randomwalk import RandomWalkDistance
from wepy.runners.randomwalk import RandomWalkRunner, UNIT_NAMES
from wepy.walker import Walker, WalkerState
from wepy_tools.sim_makers.toys.randomwalk import RandomwalkProfiler
SAVE_FIELDS = ('positions',)
# Name of field's unit in the HDF5
UNITS = UNIT_NAMES
outputs_dir = Path('_output')
if not osp.exists(outputs_dir):
os.makedirs(outputs_dir)
# sets the input paths
hdf5_filename = 'rw_results.wepy.h5'
reporter_filename = 'randomwalk_conventional.org'
hdf5_path= outputs_dir / hdf5_filename
reporter_path = outputs_dir / reporter_filename
if __name__=="__main__":
if sys.argv[1] == "--help" or sys.argv[1] == '-h':
print("arguments: n_cycles, n_walkers, dimension")
else:
n_runs = int(sys.argv[1])
n_cycles = int(sys.argv[2])
n_walkers = int(sys.argv[3])
dimension = int(sys.argv[4])
# set up the distance function
    distance = RandomWalkDistance()
# set up the NOResampler
resampler = NoResampler()
# set up a RandomWalkProfilier
rw_profiler = RandomwalkProfiler(resampler,
dimension,
hdf5_filename=str(hdf5_path),
reporter_filename=str(reporter_path))
# runs the simulations and gets the result
rw_profiler.run(
num_runs=n_runs,
num_cycles=n_cycles,
num_walkers=n_walkers,
)
| ADicksonLab/wepy | info/examples/RandomWalk/source/rw_conventional.py | Python | mit | 1,761 |
from bottle import Bottle, route, run, static_file, get, jinja2_template as template, post, request, response, redirect
import json
import requests
import runtime
import actions
"""
DisplayEvent
"""
defaultInputParams = {}
defaultOutputParams = {}
sim_parameters = dict()
# Register actions
def registerAction(user, project, version, sim_id):
sim_parameters['user'] = user
sim_parameters['project'] = project
sim_parameters['version'] = version
sim_parameters['sim_id'] = sim_id
runtime.register_webActions(user, project, version, sim_id, 'DisplayEvent', '/home/actions/DisplayEvent/')
def start():
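    # Queue the incoming event for the display loop, then report success for
    # this action request back to the Craft runtime server.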
inputParams = request.json['input']
request_Id = request.json['requestId']
runtime.eventQueue.put(json.dumps(inputParams['event']))
success_url = '{}/api/v1/{}/{}/{}/{}/actions/{}/success'.format(runtime.CRAFT_RUNTIME_SERVER_URL, sim_parameters['user'],sim_parameters['project'],sim_parameters['version'],sim_parameters['sim_id'], request_Id)
r = requests.post(success_url)
return
def cancel():
request_Id = request.json['requestId']
cancel_url = '{}/api/v1/{}/{}/{}/{}/actions/{}/cancelation'.format(runtime.CRAFT_RUNTIME_SERVER_URL, sim_parameters['user'],sim_parameters['project'],sim_parameters['version'],sim_parameters['sim_id'], request_Id)
r = requests.post(cancel_url)
return
| cloderic/SmartAlarmClock | src/actions/DisplayEvent.py | Python | bsd-3-clause | 1,317 |
__author__ = 'baranbartu'
import logging
import threading
from memory_profiler import LineProfiler
from memgraph.profile import determine_memory_info
logger = logging.getLogger(__name__)
def observe(func=None, precision=1):
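    # Supports both bare @observe and parametrized @observe(precision=...) usage.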
if func is not None:
def wrapper(*args, **kwargs):
prof = LineProfiler()
val = prof(func)(*args, **kwargs)
logger.info(
'Please wait... Line graph will be ready in few seconds.')
job = threading.Thread(target=determine_memory_info, args=(prof,),
kwargs={'precision': precision})
job.start()
return val
return wrapper
else:
def inner_wrapper(f):
return observe(f, precision=precision)
return inner_wrapper
| baranbartu/memgraph | memgraph/decorator.py | Python | bsd-2-clause | 810 |
# Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from barbican.tests import utils
from functionaltests.api import base
from functionaltests.api.v1.behaviors import container_behaviors
from functionaltests.api.v1.behaviors import secret_behaviors
from functionaltests.api.v1.models import container_models
from functionaltests.api.v1.models import secret_models
from functionaltests.common import config
CONF = config.get_config()
admin_a = CONF.rbac_users.admin_a
creator_a = CONF.rbac_users.creator_a
creator_a_2 = CONF.rbac_users.creator_a_2
observer_a = CONF.rbac_users.observer_a
auditor_a = CONF.rbac_users.auditor_a
test_data_rbac_store_container = {
'with_admin_a': {'user': admin_a, 'admin': admin_a,
'expected_return': 201},
'with_creator_a': {'user': creator_a, 'admin': admin_a,
'expected_return': 201},
'with_observer_a': {'user': observer_a, 'admin': admin_a,
'expected_return': 403},
'with_auditor_a': {'user': auditor_a, 'admin': admin_a,
'expected_return': 403},
}
test_data_rbac_update_container = {
'with_admin_a': {'user': admin_a, 'admin': admin_a,
'expected_return': 405},
'with_creator_a': {'user': creator_a, 'admin': admin_a,
'expected_return': 405},
'with_observer_a': {'user': observer_a, 'admin': admin_a,
'expected_return': 405},
'with_auditor_a': {'user': auditor_a, 'admin': admin_a,
'expected_return': 405},
}
test_data_rbac_delete_container = {
'with_admin_a': {'user': admin_a, 'admin': admin_a,
'expected_return': 204},
'with_creator_a': {'user': creator_a, 'admin': creator_a,
'expected_return': 204},
'with_creator_a_2': {'user': creator_a_2, 'admin': creator_a,
'expected_return': 403},
'with_observer_a': {'user': observer_a, 'admin': admin_a,
'expected_return': 403},
'with_auditor_a': {'user': auditor_a, 'admin': admin_a,
'expected_return': 403},
}
test_data_rbac_get_container = {
'with_admin_a': {'user': admin_a, 'admin': admin_a,
'expected_return': 200},
'with_creator_a': {'user': creator_a, 'admin': admin_a,
'expected_return': 200},
'with_observer_a': {'user': observer_a, 'admin': admin_a,
'expected_return': 200},
'with_auditor_a': {'user': auditor_a, 'admin': admin_a,
'expected_return': 200},
}
test_data_rbac_get_list_of_containers = {
'with_admin_a': {'user': admin_a, 'admin': admin_a,
'expected_return': 200},
'with_creator_a': {'user': creator_a, 'admin': admin_a,
'expected_return': 200},
'with_observer_a': {'user': observer_a, 'admin': admin_a,
'expected_return': 200},
'with_auditor_a': {'user': auditor_a, 'admin': admin_a,
'expected_return': 403},
}
def get_default_secret_data():
return {
"name": "AES key",
"expiration": "2050-02-28T19:14:44.180394",
"algorithm": "aes",
"bit_length": 256,
"mode": "cbc",
"payload": 'Z0Y2K2xMb0Yzb2hBOWFQUnB0KzZiUT09',
"payload_content_type": "application/octet-stream",
"payload_content_encoding": "base64",
}
def get_container_req(secret_ref):
return {"name": "testcontainer",
"type": "generic",
"secret_refs": [{'name': 'secret1', 'secret_ref': secret_ref}]}
@utils.parameterized_test_case
class RBACContainersTestCase(base.TestCase):
"""Functional tests exercising RBAC Policies"""
def setUp(self):
super(RBACContainersTestCase, self).setUp()
self.secret_behaviors = secret_behaviors.SecretBehaviors(self.client)
self.container_behaviors = container_behaviors.ContainerBehaviors(
self.client)
def tearDown(self):
self.secret_behaviors.delete_all_created_secrets()
self.container_behaviors.delete_all_created_containers()
super(RBACContainersTestCase, self).tearDown()
@utils.parameterized_dataset(test_data_rbac_store_container)
def test_rbac_store_container(self, user, admin, expected_return):
"""Test RBAC for container store
Issue a container store and verify that the correct
http return code comes back for the specified user.
:param user: the user who will attempt to do the store
:param admin: the admin of the group containing the user
:param expected_return: the expected http return code
"""
test_model = secret_models.SecretModel(
**get_default_secret_data())
resp, secret_ref = self.secret_behaviors.create_secret(
test_model, user_name=admin, admin=admin)
self.assertEqual(201, resp.status_code)
test_model = container_models.ContainerModel(
**get_container_req(secret_ref))
resp, container_ref = self.container_behaviors.create_container(
test_model, user_name=user, admin=admin)
self.assertEqual(expected_return, resp.status_code)
@utils.parameterized_dataset(test_data_rbac_update_container)
def test_rbac_update_container(self, user, admin, expected_return):
"""Test RBAC for container update
Issue a container update and verify that the correct
http return code comes back for the specified user.
The initial container will be stored with the admin user to ensure
that it gets created successfully. We don't want the container
store to fail since we are only testing container update here.
:param user: the user who will attempt to do the update
:param admin: the admin of the group containing the user
:param expected_return: the expected http return code
"""
container_ref = self._create_initial_container(admin=admin)
resp = self.container_behaviors.update_container(container_ref,
user_name=user)
self.assertEqual(expected_return, resp.status_code)
@utils.parameterized_dataset(test_data_rbac_get_container)
def test_rbac_get_container(self, user, admin, expected_return):
"""Test RBAC for container get
Issue a container get and verify that the correct
http return code comes back for the specified user.
The initial container will be stored with the admin user to ensure
that it gets created successfully. We don't want the container
store to fail since we are only testing container get here.
:param user: the user who will attempt to do the get metadata
:param admin: the admin of the group containing the user
:param expected_return: the expected http return code
"""
container_href = self._create_initial_container(admin=admin)
resp = self.container_behaviors.get_container(
container_href, user_name=user)
self.assertEqual(expected_return, resp.status_code)
self.assertEqual(expected_return == 200, resp.content is not None)
@utils.parameterized_dataset(test_data_rbac_delete_container)
def test_rbac_delete_container(self, user, admin, expected_return):
"""Test RBAC for container delete
Issue a container delete and verify that the correct
http return code comes back for the specified user.
The initial container will be stored with the admin user to ensure
that it gets created successfully. We don't want the container
store to fail since we are only testing container delete here.
:param user: the user who will attempt to do the delete
:param admin: the admin of the group containing the user
:param expected_return: the expected http return code
"""
container_href = self._create_initial_container(admin=admin)
resp = self.container_behaviors.delete_container(
container_href, user_name=user)
self.assertEqual(expected_return, resp.status_code)
def _create_initial_container(self, admin=admin_a):
"""Utility function to create a container with a contained secret
Some tests require a container to exist before they test certain
things, so this function can be used to do that setup. First a secret
will be created, then a container will be created which contains
that secret.
:param admin: the admin user who will create store the container
:param secret_data: the data for the container
:return: href to the newly stored container
"""
test_model = secret_models.SecretModel(**get_default_secret_data())
resp, secret_ref = self.secret_behaviors.create_secret(
test_model, user_name=admin, admin=admin)
self.assertEqual(201, resp.status_code)
test_model = container_models.ContainerModel(
**get_container_req(secret_ref))
resp, container_ref = self.container_behaviors.create_container(
test_model, user_name=admin, admin=admin)
self.assertEqual(201, resp.status_code)
return container_ref
| openstack/barbican | functionaltests/api/v1/functional/test_containers_rbac.py | Python | apache-2.0 | 9,901 |
"""
Linkedin OAuth support
No extra configurations are needed to make this work.
"""
import urlparse
from xml.etree import ElementTree
from tendenci.apps.social_auth.backends import ConsumerBasedOAuth, OAuthBackend
LINKEDIN_SERVER = 'linkedin.com'
LINKEDIN_REQUEST_TOKEN_URL = 'https://api.%s/uas/oauth/requestToken' % \
LINKEDIN_SERVER
LINKEDIN_ACCESS_TOKEN_URL = 'https://api.%s/uas/oauth/accessToken' % \
LINKEDIN_SERVER
LINKEDIN_AUTHORIZATION_URL = 'https://www.%s/uas/oauth/authenticate' % \
LINKEDIN_SERVER
LINKEDIN_CHECK_AUTH = 'https://api.%s/v1/people/~' % LINKEDIN_SERVER
class LinkedinBackend(OAuthBackend):
"""Linkedin OAuth authentication backend"""
name = 'linkedin'
def get_user_details(self, response):
"""Return user details from Linkedin account"""
return {
'first_name': response['first-name'],
'last_name': response['last-name'],
'email': '', # not supplied
}
class LinkedinAuth(ConsumerBasedOAuth):
"""Linkedin OAuth authentication mechanism"""
AUTHORIZATION_URL = LINKEDIN_AUTHORIZATION_URL
REQUEST_TOKEN_URL = LINKEDIN_REQUEST_TOKEN_URL
ACCESS_TOKEN_URL = LINKEDIN_ACCESS_TOKEN_URL
SERVER_URL = 'api.%s' % LINKEDIN_SERVER
AUTH_BACKEND = LinkedinBackend
SETTINGS_KEY_NAME = 'LINKEDIN_CONSUMER_KEY'
SETTINGS_SECRET_NAME = 'LINKEDIN_CONSUMER_SECRET'
def user_data(self, access_token):
"""Return user data provided"""
request = self.oauth_request(access_token, LINKEDIN_CHECK_AUTH)
raw_xml = self.fetch_response(request)
try:
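            # Parse the profile XML and take the member id from the 'key'
            # query parameter of the standard profile URL.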
xml = ElementTree.fromstring(raw_xml)
data = _xml_to_dict(xml)
url = data['site-standard-profile-request']['url']
url = url.replace('&', '&')
data['id'] = urlparse.parse_qs(url)['key'][0]
return data
        except (ElementTree.ParseError, KeyError, IndexError):
return None
@classmethod
def enabled(cls):
return True
def _xml_to_dict(xml):
"""Convert xml structure to dict"""
data = {}
for child in xml.getchildren():
if child.getchildren():
data[child.tag] = _xml_to_dict(child)
else:
data[child.tag] = child.text
return data
# Backend definition
BACKENDS = {
'linkedin': LinkedinAuth,
}
| alirizakeles/tendenci | tendenci/apps/social_auth/backends/contrib/linkedin.py | Python | gpl-3.0 | 2,471 |
# -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action donner_xp."""
from primaires.scripting.action import Action
from primaires.format.fonctions import supprimer_accents
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Donne de l'XP absolue à un personnage."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.xp_principal, "Personnage", "Fraction")
cls.ajouter_types(cls.xp_secondaire, "Personnage", "str", "Fraction")
@staticmethod
def xp_principal(personnage, xp):
"""Donne l'XP absolue au personnage dans le niveau principal."""
personnage.gagner_xp(None, int(xp))
@staticmethod
def xp_secondaire(personnage, niveau_secondaire, xp):
"""Donne l'XP absolue au personnage dans le niveau secondaire.
Le nom du niveau doit être donné en son entier.
Une partie de l'XP est automatiquement transmise au niveau principal.
"""
niveaux = [n for n in importeur.perso.niveaux.values() if \
supprimer_accents(n.nom).lower() == supprimer_accents(
niveau_secondaire)]
if not niveaux:
raise ErreurExecution("le niveau {} est introuvable".format(
niveau_secondaire))
personnage.gagner_xp(niveaux[0].cle, int(xp))
| stormi/tsunami | src/primaires/scripting/actions/donner_xp.py | Python | bsd-3-clause | 2,880 |
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import codecs
import errno
import logging
import os
import ntpath
import posixpath
import shutil
import stat
import tempfile
from commoncode import system
from commoncode import text
from commoncode import filetype
from commoncode.filetype import is_rwx
# this exception is not available on posix
try:
WindowsError # @UndefinedVariable
except NameError:
WindowsError = None # @ReservedAssignment
DEBUG = False
logger = logging.getLogger(__name__)
"""
File, paths and directory utility functions.
"""
#
# DIRECTORIES
#
def create_dir(location):
"""
Create directory and all sub-directories recursively at location ensuring these
are readable and writeable.
Raise Exceptions if it fails to create the directory.
"""
if os.path.exists(location):
if not os.path.isdir(location):
            err = ('Cannot create directory: existing file '
                   'in the way %(location)s.')
raise OSError(err % locals())
else:
# may fail on win if the path is too long
# FIXME: consider using UNC ?\\ paths
try:
os.makedirs(location)
chmod(location, RW, recurse=False)
# avoid multi-process TOCTOU conditions when creating dirs
# the directory may have been created since the exist check
except WindowsError, e:
# [Error 183] Cannot create a file when that file already exists
if e and e.winerror == 183:
if not os.path.isdir(location):
raise
else:
raise
except (IOError, OSError), o:
if o.errno == errno.EEXIST:
if not os.path.isdir(location):
raise
else:
raise
def system_temp_dir():
"""
Return the global temp directory for the current user.
"""
temp_dir = os.getenv('SCANCODE_TMP')
if not temp_dir:
sc = text.python_safe_name('scancode_' + system.username)
temp_dir = os.path.join(tempfile.gettempdir(), sc)
create_dir(temp_dir)
return temp_dir
def get_temp_dir(base_dir, prefix=''):
"""
Return the path to a new unique temporary directory, created under
the system-wide `system_temp_dir` temp directory as a subdir of the
base_dir path (a path relative to the `system_temp_dir`).
"""
base = os.path.join(system_temp_dir(), base_dir)
create_dir(base)
return tempfile.mkdtemp(prefix=prefix, dir=base)
#
# FILE READING
#
def file_chunks(file_object, chunk_size=1024):
"""
Yield a file piece by piece. Default chunk size: 1k.
"""
while 1:
data = file_object.read(chunk_size)
if data:
yield data
else:
break
# FIXME: reading a whole file could be an issue: could we stream by line?
def _text(location, encoding, universal_new_lines=True):
"""
Read file at `location` as a text file with the specified `encoding`. If
    `universal_new_lines` is True, update line endings to be POSIX LF \n.
    Return a unicode string.
    Note: universal newline support in the codecs package was removed in
    Python 2.6; see http://bugs.python.org/issue691291
"""
with codecs.open(location, 'r', encoding) as f:
text = f.read()
if universal_new_lines:
text = u'\n'.join(text.splitlines(False))
return text
def read_text_file(location, universal_new_lines=True):
"""
Return the text content of file at `location` trying to find the best
encoding.
"""
try:
text = _text(location, 'utf-8', universal_new_lines)
except:
text = _text(location, 'latin-1', universal_new_lines)
return text
#
# PATHS AND NAMES MANIPULATIONS
#
# TODO: move these functions to paths.py
def is_posixpath(location):
"""
Return True if the `location` path is likely a POSIX-like path using POSIX path
separators (slash or "/")or has no path separator.
Return False if the `location` path is likely a Windows-like path using backslash
as path separators (e.g. "\").
"""
has_slashes = '/' in location
has_backslashes = '\\' in location
# windows paths with drive
if location:
drive, _ = ntpath.splitdrive(location)
if drive:
return False
# a path is always POSIX unless it contains ONLY backslahes
# which is a rough approximation (it could still be posix)
is_posix = True
if has_backslashes and not has_slashes:
is_posix = False
return is_posix
def as_posixpath(location):
"""
Return a POSIX-like path using POSIX path separators (slash or "/") for a
`location` path. This converts Windows paths to look like POSIX paths: Python
accepts gracefully POSIX paths on Windows.
"""
return location.replace(ntpath.sep, posixpath.sep)
def as_winpath(location):
"""
Return a Windows-like path using Windows path separators (backslash or "\") for a
`location` path.
"""
return location.replace(posixpath.sep, ntpath.sep)
def split_parent_resource(path, force_posix=False):
"""
Return a tuple of (parent directory path, resource name).
"""
use_posix = force_posix or is_posixpath(path)
splitter = use_posix and posixpath or ntpath
path = path.rstrip('/\\')
return splitter.split(path)
def resource_name(path, force_posix=False):
"""
Return the resource name (file name or directory name) from `path` which
is the last path segment.
"""
    _left, right = split_parent_resource(path, force_posix)
return right or ''
def file_name(path, force_posix=False):
"""
Return the file name (or directory name) of a path.
"""
return resource_name(path, force_posix)
def parent_directory(path, force_posix=False):
"""
Return the parent directory path of a file or directory `path`.
"""
left, _right = split_parent_resource(path, force_posix)
use_posix = force_posix or is_posixpath(path)
sep = use_posix and '/' or '\\'
trail = sep if left != sep else ''
return left + trail
def file_base_name(path, force_posix=False):
"""
Return the file base name for a path. The base name is the base name of
the file minus the extension. For a directory return an empty string.
"""
return splitext(path, force_posix)[0]
def file_extension(path, force_posix=False):
"""
Return the file extension for a path.
"""
return splitext(path, force_posix)[1]
def splitext(path, force_posix=False):
"""
Return a tuple of strings (basename, extension) for a path. The basename is
the file name minus its extension. Return an empty extension string for a
directory. A directory is identified by ending with a path separator. Not
the same as os.path.splitext.
For example:
>>> splitext('C:\\dir\path.ext')
('path', '.ext')
Directories even with dotted names have no extension:
>>> import ntpath
>>> splitext('C:\\dir\\path.ext' + ntpath.sep)
('path.ext', '')
>>> splitext('/dir/path.ext/')
('path.ext', '')
>>> splitext('/some/file.txt')
('file', '.txt')
Composite extensions for tarballs are properly handled:
>>> splitext('archive.tar.gz')
('archive', '.tar.gz')
"""
base_name = ''
extension = ''
if not path:
return base_name, extension
    ppath = as_posixpath(path)
name = resource_name(path, force_posix)
name = name.strip('\\/')
if ppath.endswith('/'):
# directories never have an extension
base_name = name
extension = ''
elif name.startswith('.') and '.' not in name[1:]:
# .dot files base name is the full name and they do not have an extension
base_name = name
extension = ''
else:
base_name, extension = posixpath.splitext(name)
        # handle composed extensions such as .tar.gz, .tar.bz2, .tar.xz, etc.
if base_name.endswith('.tar'):
base_name, extension2 = posixpath.splitext(base_name)
extension = extension2 + extension
return base_name, extension
#
# DIRECTORY AND FILES WALKING/ITERATION
#
ignore_nothing = lambda _: False
def walk(location, ignored=ignore_nothing):
"""
Walk location returning the same tuples as os.walk but with a different
behavior:
- always walk top-down, breadth-first.
- always ignore and never follow symlinks, .
- always ignore special files (FIFOs, etc.)
- optionally ignore files and directories by invoking the `ignored`
callable on files and directories returning True if it should be ignored.
- location is a directory or a file: for a file, the file is returned.
"""
# TODO: consider using the new "scandir" module for some speed-up.
if DEBUG:
ign = ignored(location)
        logger.debug('walk: ignored: %r %r', location, ign)
if ignored(location):
return
if filetype.is_file(location) :
yield parent_directory(location), [], [file_name(location)]
elif filetype.is_dir(location):
dirs = []
files = []
# TODO: consider using scandir
for name in os.listdir(location):
loc = os.path.join(location, name)
if filetype.is_special(loc) or ignored(loc):
if DEBUG:
ign = ignored(loc)
                    logger.debug('walk: ignored: %r %r', loc, ign)
continue
# special files and symlinks are always ignored
if filetype.is_dir(loc):
dirs.append(name)
elif filetype.is_file(loc):
files.append(name)
yield location, dirs, files
for dr in dirs:
for tripple in walk(os.path.join(location, dr), ignored):
yield tripple
def file_iter(location, ignored=ignore_nothing):
"""
Return an iterable of files at `location` recursively.
:param location: a file or a directory.
:param ignored: a callable accepting a location argument and returning True
if the location should be ignored.
:return: an iterable of file locations.
"""
return resource_iter(location, ignored, with_dirs=False)
def dir_iter(location, ignored=ignore_nothing):
"""
Return an iterable of directories at `location` recursively.
:param location: a directory.
:param ignored: a callable accepting a location argument and returning True
if the location should be ignored.
:return: an iterable of directory locations.
"""
return resource_iter(location, ignored, with_files=False)
def resource_iter(location, ignored=ignore_nothing, with_files=True, with_dirs=True):
"""
Return an iterable of resources at `location` recursively.
:param location: a file or a directory.
:param ignored: a callable accepting a location argument and returning True
if the location should be ignored.
:param with_dirs: If True, include the directories.
:param with_files: If True, include the files.
:return: an iterable of file and directory locations.
"""
assert with_dirs or with_files, "fileutils.resource_iter: One or both of 'with_dirs' and 'with_files' is required"
for top, dirs, files in walk(location, ignored):
if with_files:
for f in files:
yield os.path.join(top, f)
if with_dirs:
for d in dirs:
yield os.path.join(top, d)
#
# COPY
#
def copytree(src, dst):
"""
Copy recursively the `src` directory to the `dst` directory. If `dst` is an
existing directory, files in `dst` may be overwritten during the copy.
Preserve timestamps.
Ignores:
-`src` permissions: `dst` files are created with the default permissions.
- all special files such as FIFO or character devices and symlinks.
Raise an shutil.Error with a list of reasons.
This function is similar to and derived from the Python shutil.copytree
function. See fileutils.py.ABOUT for details.
"""
if not filetype.is_readable(src):
chmod(src, R, recurse=False)
names = os.listdir(src)
if not os.path.exists(dst):
os.makedirs(dst)
errors = []
errors.extend(copytime(src, dst))
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
# skip anything that is not a regular file, dir or link
if not filetype.is_regular(srcname):
continue
if not filetype.is_readable(srcname):
chmod(srcname, R, recurse=False)
try:
if os.path.isdir(srcname):
copytree(srcname, dstname)
elif filetype.is_file(srcname):
copyfile(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except shutil.Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
if errors:
raise shutil.Error, errors
def copyfile(src, dst):
"""
Copy src file to dst file preserving timestamps.
Ignore permissions and special files.
Similar to and derived from Python shutil module. See fileutils.py.ABOUT
for details.
"""
if not filetype.is_regular(src):
return
if not filetype.is_readable(src):
chmod(src, R, recurse=False)
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
shutil.copyfile(src, dst)
copytime(src, dst)
def copytime(src, dst):
"""
Copy timestamps from `src` to `dst`.
Similar to and derived from Python shutil module. See fileutils.py.ABOUT
for details.
"""
errors = []
st = os.stat(src)
if hasattr(os, 'utime'):
try:
os.utime(dst, (st.st_atime, st.st_mtime))
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# File access times cannot be copied on Windows
pass
else:
errors.append((src, dst, str(why)))
return errors
#
# PERMISSIONS
#
# modes: read, write, executable
R = stat.S_IRUSR
RW = stat.S_IRUSR | stat.S_IWUSR
RX = stat.S_IRUSR | stat.S_IXUSR
RWX = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
# FIXME: This was an expensive operation that used to recurse on the parent directory
def chmod(location, flags, recurse=False):
"""
    Update permissions for `location` with `flags`. `flags` is one of R,
RW, RX or RWX with the same semantics as in the chmod command. Update is
done recursively if `recurse`.
"""
if not location or not os.path.exists(location):
return
location = os.path.abspath(location)
new_flags = flags
if filetype.is_dir(location):
# POSIX dirs need to be executable to be readable,
# and to be writable so we can change perms of files inside
new_flags = RWX
# FIXME: do we really need to change the parent directory perms?
# FIXME: may just check them instead?
parent = os.path.dirname(location)
current_stat = stat.S_IMODE(os.stat(parent).st_mode)
if not is_rwx(parent):
os.chmod(parent, current_stat | RWX)
if filetype.is_regular(location):
current_stat = stat.S_IMODE(os.stat(location).st_mode)
os.chmod(location, current_stat | new_flags)
if recurse:
chmod_tree(location, flags)
def chmod_tree(location, flags):
"""
Update permissions recursively in a directory tree `location`.
"""
if filetype.is_dir(location):
for top, dirs, files in walk(location):
for d in dirs:
chmod(os.path.join(top, d), flags, recurse=False)
for f in files:
chmod(os.path.join(top, f), flags, recurse=False)
#
# DELETION
#
def _rm_handler(function, path, excinfo): # @UnusedVariable
"""
shutil.rmtree handler invoked on error when deleting a directory tree.
This retries deleting once before giving up.
"""
if function == os.rmdir:
try:
chmod(path, RW, recurse=True)
shutil.rmtree(path, True)
except Exception:
pass
if os.path.exists(path):
logger.warning('Failed to delete directory %s', path)
elif function == os.remove:
try:
delete(path, _err_handler=None)
        except Exception:
pass
if os.path.exists(path):
logger.warning('Failed to delete file %s', path)
def delete(location, _err_handler=_rm_handler):
"""
Delete a directory or file at `location` recursively. Similar to "rm -rf"
in a shell or a combo of os.remove and shutil.rmtree.
"""
if not location:
return
if os.path.exists(location) or filetype.is_broken_link(location):
chmod(os.path.dirname(location), RW, recurse=False)
if filetype.is_dir(location):
shutil.rmtree(location, False, _rm_handler)
else:
os.remove(location)
| yasharmaster/scancode-toolkit | src/commoncode/fileutils.py | Python | apache-2.0 | 18,567 |
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for ParameterShift specific C++ ops."""
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
import numpy as np
import tensorflow as tf
import sympy
import cirq
from tensorflow_quantum.core.ops import tfq_ps_util_ops
from tensorflow_quantum.python import util
def _complex_test_circuit():
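    """Return a batch of three circuits mixing parameterized ISwapPowGate,
    PhasedXPowGate and FSimGate ops over the symbols `t` and `r`."""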
t = sympy.Symbol('t')
r = sympy.Symbol('r')
qubits = cirq.GridQubit.rect(1, 6)
circuit_batch = [
cirq.Circuit(
cirq.Moment([cirq.H(q) for q in qubits]),
cirq.Moment([
cirq.X(qubits[4]),
cirq.PhasedXPowGate(phase_exponent=np.random.random() * t).on(
qubits[5]),
cirq.ISwapPowGate(exponent=np.random.random() * t).on(
qubits[0], qubits[1]),
cirq.FSimGate(theta=np.random.random() * t,
phi=np.random.random() * r).on(
qubits[2], qubits[3])
]), cirq.Moment([cirq.H(q) for q in qubits])),
cirq.Circuit(
cirq.FSimGate(theta=np.random.random() * t,
phi=np.random.random() * r).on(*qubits[:2]),
cirq.FSimGate(theta=np.random.random() * r,
phi=np.random.random() * t).on(qubits[1], qubits[0])),
cirq.Circuit(
cirq.Moment([
cirq.ISwapPowGate(exponent=np.random.random() *
t).on(*qubits[:2]),
cirq.PhasedXPowGate(phase_exponent=np.random.random() * r).on(
qubits[2]),
cirq.ISwapPowGate(exponent=np.random.random() *
r).on(*qubits[3:5])
]))
]
return circuit_batch
class PSDecomposeTest(tf.test.TestCase):
"""Tests on tfq_ps_decompose"""
def test_iswap_gate_test(self):
"""Test 1 ISwapPowGate decomposition."""
t = sympy.Symbol('t')
qubits = cirq.GridQubit.rect(1, 2)
circuit = cirq.Circuit(
cirq.ISwapPowGate(exponent=np.random.random() * t).on(*qubits))
inputs = util.convert_to_tensor([circuit])
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
rand_resolver = {'t': np.random.random()}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit, rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
def test_phased_x_pow_gate_test(self):
"""Test 1 PhasedXPowGate decomposition."""
t = sympy.Symbol('t')
r = sympy.Symbol('r')
q = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.PhasedXPowGate(phase_exponent=np.random.random() * r,
exponent=np.random.random() * t).on(q))
inputs = util.convert_to_tensor([circuit])
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
rand_resolver = {'t': np.random.random(), 'r': np.random.random()}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit, rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
def test_fsim_gate_test(self):
"""Test 1 FSimPowGate decomposition."""
t = sympy.Symbol('t')
r = sympy.Symbol('r')
qubits = cirq.GridQubit.rect(1, 2)
circuit = cirq.Circuit(
cirq.FSimGate(theta=np.random.random() * r,
phi=np.random.random() * t).on(*qubits))
inputs = util.convert_to_tensor([circuit])
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
rand_resolver = {'t': np.random.random(), 'r': np.random.random()}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit, rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
def test_decompose_with_complex_circuit(self):
"""Test decompose with complex circuit."""
names = ['CLAE', 'HRYV', 'IRKB', 'LKRV', 'PJOU', 'CJKX', 'NASW']
# Test circuit has a Moment with 1) FSimGate & PhasedXPowGate,
# 2) PhasedXPowGate & ISwapPowGate and 3) FSimGate & ISwapPowGate.
# Be careful, they are not decomposed if not parameterized.
circuit_batch = [
cirq.Circuit([
cirq.Moment([
cirq.FSimGate(
theta=0.10338130973488413 * sympy.Symbol('CLAE'),
phi=0.10338130973488413 * sympy.Symbol('IRKB')).on(
cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)),
cirq.PhasedXPowGate(phase_exponent=1.0,
exponent=0.86426029696045281 *
sympy.Symbol('HRYV')).on(
cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.Y.on(cirq.GridQubit(0, 3)),
cirq.Z.on(cirq.GridQubit(0, 0)),
cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 1),
cirq.GridQubit(0, 2)),
]),
cirq.Moment([
(cirq.CNOT**(0.92874230274398684 *
sympy.Symbol('IRKB'))).on(
cirq.GridQubit(0, 1), cirq.GridQubit(0,
2)),
]),
cirq.Moment([
cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('PJOU'),
exponent=0.2081415255258906 *
sympy.Symbol('LKRV')).on(
cirq.GridQubit(0, 2)),
(cirq.ISWAP**(0.32860954996781722 *
sympy.Symbol('PJOU'))).on(
cirq.GridQubit(0, 1),
cirq.GridQubit(0, 3)),
]),
cirq.Moment([
cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('CJKX')).on(
cirq.GridQubit(0, 1)),
cirq.ZZ.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 3)),
(cirq.X**(0.6826594585474709 * sympy.Symbol('HRYV'))).on(
cirq.GridQubit(0, 2)),
]),
cirq.Moment([
(cirq.ZZ**(0.18781276022427218 * sympy.Symbol('PJOU'))).on(
cirq.GridQubit(0, 0), cirq.GridQubit(0, 3)),
]),
cirq.Moment([
cirq.Y.on(cirq.GridQubit(0, 0)),
]),
cirq.Moment([
cirq.FSimGate(
theta=0.13793763138552417 * sympy.Symbol('CJKX'),
phi=0.13793763138552417 * sympy.Symbol('PJOU')).on(
cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)),
(cirq.ISWAP**(0.028165738453673095 *
sympy.Symbol('NASW'))).on(
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.FSimGate(
theta=0.74356520426349459 * sympy.Symbol('CJKX'),
phi=0.74356520426349459 * sympy.Symbol('NASW')).on(
cirq.GridQubit(0, 3), cirq.GridQubit(0, 0)),
]),
cirq.Moment([
cirq.CNOT.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 2)),
cirq.SWAP.on(cirq.GridQubit(0, 3), cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.H.on(cirq.GridQubit(0, 3)),
cirq.H.on(cirq.GridQubit(0, 2)),
cirq.CNOT.on(cirq.GridQubit(0, 1), cirq.GridQubit(0, 0)),
]),
cirq.Moment([
cirq.CNOT.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)),
cirq.YY.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)),
]),
cirq.Moment([
cirq.CZ.on(cirq.GridQubit(0, 1), cirq.GridQubit(0, 0)),
cirq.CNOT.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)),
]),
cirq.Moment([
cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 0),
cirq.GridQubit(0, 2)),
cirq.CNOT.on(cirq.GridQubit(0, 3), cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 0),
cirq.GridQubit(0, 3)),
cirq.SWAP.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.Y.on(cirq.GridQubit(0, 0)),
cirq.PhasedXPowGate(phase_exponent=1.0).on(
cirq.GridQubit(0, 2)),
cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 1),
cirq.GridQubit(0, 3)),
]),
])
]
# Decompose programs.
inputs = util.convert_to_tensor(circuit_batch)
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
self.assertEqual(len(decomposed_programs), len(circuit_batch))
        # The original programs have parameterized ISP, PXP and FSIM gates, but this result
# has no such gates at all. All parameterized gates have at most two
# eigenvalues. There are still ISwap and PhasedX(1.0) because they are
# not parameterized, which doesn't affect ParameterShift differentiation
# at all.
for program in decomposed_programs:
for moment in program:
for gate_op in moment:
# Consider parameterized gates only
if cirq.is_parameterized(gate_op.gate):
# Check I. The gate should have _eigen_components.
self.assertTrue(
hasattr(gate_op.gate, '_eigen_components'))
# Check II. The gate should have two eigen values.
self.assertEqual(len(gate_op.gate._eigen_components()),
2, gate_op.gate)
# Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has
# 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components.
# Check if two programs are identical.
rand_resolver = {name: np.random.random() for name in names}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit_batch[0], rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
def test_moment_preservation(self):
"""Test Moment-structure preservation."""
t = sympy.Symbol('t')
r = sympy.Symbol('r')
qubits = cirq.LineQubit.range(6)
circuit_batch = [
cirq.Circuit(
cirq.Moment([cirq.H(q) for q in qubits]),
cirq.Moment([
cirq.X(qubits[4]),
cirq.PhasedXPowGate(phase_exponent=np.random.random() *
t).on(qubits[5]),
cirq.ISwapPowGate(exponent=np.random.random() * t).on(
qubits[0], qubits[1]),
cirq.FSimGate(theta=np.random.random() * t,
phi=np.random.random() * r).on(
qubits[2], qubits[3])
]), cirq.Moment([cirq.H(q) for q in qubits]))
]
inputs = util.convert_to_tensor(circuit_batch)
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
# Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has
# 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components.
# Check if two programs are identical.
rand_resolver = {'t': np.random.random(), 'r': np.random.random()}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit_batch[0], rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
# Check if the Moments are conserved.
max_decomposed_length = 3
n_non_decomposed_moments = 2
self.assertEqual(len(decomposed_programs[0]),
n_non_decomposed_moments + max_decomposed_length)
# Total length of Moments = 5
# The non-decomposed moments should be the same.
self.assertEqual(decomposed_programs[0][0], circuit_batch[0][0])
self.assertEqual(decomposed_programs[0][-1], circuit_batch[0][-1])
        # Check the parallelized decomposition gates in Moment[1]~[3].
# The target ops are replaced by the first decomposition gates. It means
# the first Moment has exactly the same number of gate ops.
self.assertEqual(len(decomposed_programs[0][1]),
len(circuit_batch[0][1]))
# From the second Moments, the Moments only have decomposition gates.
# In this example, two ISwapPowGate & one PhasedXPowGate are located.
        # Since PhasedXPowGate, ISwapPowGate and FSimGate decompose into 3, 2
        # and 3 gates respectively, Moment[2] has 3 gate ops and Moment[3] has 2.
self.assertEqual(len(decomposed_programs[0][2]), 3)
self.assertEqual(len(decomposed_programs[0][3]), 2)
def test_more_complex_moment_preservation(self):
"""Test Moment-structure preservation."""
circuit_batch = _complex_test_circuit()
inputs = util.convert_to_tensor(circuit_batch)
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
# Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has
# 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components.
# Check if two programs are identical.
rand_resolver = {'t': np.random.random(), 'r': np.random.random()}
for i in range(3):
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit_batch[i], rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(
decomposed_programs[i], rand_resolver)),
atol=1e-5)
# Check if the Moments are conserved.
# Circuit 1.
max_decomposed_length = 3
n_non_decomposed_moments = 2
self.assertEqual(len(decomposed_programs[0]),
n_non_decomposed_moments + max_decomposed_length)
# Total length of Moments = 5
# The non-decomposed moments should be the same.
self.assertEqual(decomposed_programs[0][0], circuit_batch[0][0])
self.assertEqual(decomposed_programs[0][-1], circuit_batch[0][-1])
        # Check the parallelized decomposition gates in Moment[1]~[3].
# The target ops are replaced by the first decomposition gates. It means
# the first Moment has exactly the same number of gate ops.
self.assertEqual(len(decomposed_programs[0][1]),
len(circuit_batch[0][1]))
# From the second Moments, the Moments only have decomposition gates.
# In this example, two ISwapPowGate & one PhasedXPowGate are located.
        # Since PhasedXPowGate, ISwapPowGate and FSimGate decompose into 3, 2
        # and 3 gates respectively, Moment[2] has 3 gate ops and Moment[3] has 2.
self.assertEqual(len(decomposed_programs[0][2]), 3)
self.assertEqual(len(decomposed_programs[0][3]), 2)
# Circuit 2. two FSimGates.
self.assertEqual(len(decomposed_programs[1]), 2 * max_decomposed_length)
# Circuit 3. one PXP between two ISwapPowGates.
self.assertEqual(len(decomposed_programs[2]), max_decomposed_length)
class PSSymbolReplaceTest(tf.test.TestCase):
"""Tests tfq_ps_symbol_replace."""
def test_simple_case(self):
"""Test trivial case."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha'])
new = tf.convert_to_tensor(['new'])
res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new)
output = util.from_tensor(res)
correct_00 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('new'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
correct_01 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('new'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
correct_02 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('new'),
)
self.assertEqual(correct_00, output[0][0][0])
self.assertEqual(correct_01, output[0][0][1])
self.assertEqual(correct_02, output[0][0][2])
def test_error(self):
"""Ensure that errors happen with bad inputs."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2))
inputs = util.convert_to_tensor([[circuit]])
symbols = tf.convert_to_tensor(['test'])
replacements = tf.convert_to_tensor(['nothing'])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
inputs = tf.convert_to_tensor(['junk'])
with self.assertRaisesRegex(Exception,
expected_regex='Unparseable proto:'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor([['test']])
replacements = tf.convert_to_tensor(['nothing'])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
symbols = tf.convert_to_tensor(['test'])
replacements = tf.convert_to_tensor([['nothing']])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
symbols = tf.convert_to_tensor(['test'])
replacements = tf.convert_to_tensor(['nothing', 'too long'])
with self.assertRaisesRegex(
Exception,
expected_regex=
'symbols.shape is not equal to replacement_symbols.shape'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
def test_weight_coefficient(self):
"""Test that scalar multiples of trivial case work."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.4),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.4),
cirq.Z(bit)**(sympy.Symbol('alpha') * 4.4),
)
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha'])
new = tf.convert_to_tensor(['new'])
res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new)
output = util.from_tensor(res)
correct_00 = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('new') * 2.4),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.4),
cirq.Z(bit)**(sympy.Symbol('alpha') * 4.4),
)
correct_01 = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.4),
cirq.Y(bit)**(sympy.Symbol('new') * 3.4),
cirq.Z(bit)**(sympy.Symbol('alpha') * 4.4),
)
correct_02 = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.4),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.4),
cirq.Z(bit)**(sympy.Symbol('new') * 4.4),
)
for i, c in enumerate([correct_00, correct_01, correct_02]):
u1 = cirq.unitary(
cirq.resolve_parameters(c,
param_resolver={
'alpha': 1.23,
'new': 4.56
}))
u2 = cirq.unitary(
cirq.resolve_parameters(output[0][0][i],
param_resolver={
'alpha': 1.23,
'new': 4.56
}))
self.assertTrue(cirq.approx_eq(u1, u2, atol=1e-5))
def test_simple_pad(self):
"""Test simple padding."""
bit = cirq.LineQubit(1)
circuit = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
circuit2 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
)
circuit3 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
inputs = util.convert_to_tensor([circuit, circuit2, circuit3])
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma'])
new = tf.convert_to_tensor(['new', 'old', 'nothing'])
res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new)
output = util.from_tensor(res)
correct_00 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('new'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
correct_01 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('new'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
correct_02 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('new'),
)
self.assertEqual(correct_00, output[0][0][0])
self.assertEqual(correct_01, output[0][0][1])
self.assertEqual(correct_02, output[0][0][2])
self.assertEqual(correct_00, output[2][0][0])
self.assertEqual(correct_01, output[2][0][1])
self.assertEqual(correct_02, output[2][0][2])
correct_10 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('old'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
)
correct_11 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('old'),
cirq.Z(bit)**sympy.Symbol('beta'),
)
correct_12 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('old'),
)
self.assertEqual(correct_10, output[1][1][0])
self.assertEqual(correct_11, output[1][1][1])
self.assertEqual(correct_12, output[1][1][2])
correct_20 = cirq.Circuit()
correct_21 = cirq.Circuit()
correct_22 = cirq.Circuit()
self.assertEqual(correct_20, output[2][2][0])
self.assertEqual(correct_21, output[2][2][1])
self.assertEqual(correct_22, output[2][2][2])
correct = cirq.Circuit()
for i in range(3):
for j in range(3):
for k in range(3):
if i != j and (not (i == 2 and j == 0)):
self.assertEqual(correct, output[i][j][k])
def test_complex_pad(self):
"""Test trickier padding."""
bit = cirq.GridQubit(0, 0)
bit2 = cirq.GridQubit(0, 1)
circuit = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
circuit2 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
circuit3 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
inputs = util.convert_to_tensor([circuit, circuit2, circuit3])
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma'])
new = tf.convert_to_tensor(['new', 'old', 'nothing'])
res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new)
output = util.from_tensor(res)
correct_000 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('new'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_001 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('new'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_002 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('new'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_003 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('new'))
self.assertEqual(correct_000, output[0][0][0])
self.assertEqual(correct_001, output[0][0][1])
self.assertEqual(correct_002, output[0][0][2])
self.assertEqual(correct_003, output[0][0][3])
self.assertEqual(correct_000, output[2][0][0])
self.assertEqual(correct_001, output[2][0][1])
self.assertEqual(correct_002, output[2][0][2])
self.assertEqual(correct_003, output[2][0][3])
correct_110 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('old'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_111 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('old'),
cirq.Z(bit)**sympy.Symbol('beta'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_112 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('old'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_113 = cirq.Circuit()
self.assertEqual(correct_110, output[1][1][0])
self.assertEqual(correct_111, output[1][1][1])
self.assertEqual(correct_112, output[1][1][2])
self.assertEqual(correct_113, output[1][1][3])
correct_100 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
cirq.XX(bit, bit2)**sympy.Symbol('new'))
correct_101 = cirq.Circuit()
correct_102 = cirq.Circuit()
correct_103 = cirq.Circuit()
self.assertEqual(correct_100, output[1][0][0])
self.assertEqual(correct_101, output[1][0][1])
self.assertEqual(correct_102, output[1][0][2])
self.assertEqual(correct_103, output[1][0][3])
correct_220 = cirq.Circuit()
correct_221 = cirq.Circuit()
correct_222 = cirq.Circuit()
correct_223 = cirq.Circuit()
self.assertEqual(correct_220, output[2][2][0])
self.assertEqual(correct_221, output[2][2][1])
self.assertEqual(correct_222, output[2][2][2])
self.assertEqual(correct_223, output[2][2][3])
correct = cirq.Circuit()
for i in range(3):
for j in range(3):
for k in range(3):
if i != j and (not (i == 2 and j == 0)) \
and (not (i == 1 and j == 0)):
self.assertEqual(correct, output[i][j][k])
class PSWeightsFromSymbolTest(tf.test.TestCase):
"""Tests tfq_ps_weights_from_symbols."""
def test_simple(self):
"""Ensure that weight extraction works."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2))
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[2.0]]]))
def test_empty(self):
"""Test empty circuit. and symbol free circuit. does nothing."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(bit))
circuit2 = cirq.Circuit()
inputs = util.convert_to_tensor([circuit, circuit2])
symbols = tf.convert_to_tensor(['alpha'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[]], [[]]]))
def test_rotation_gates(self):
"""Test that rotation gates work."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.rx(sympy.Symbol('alpha') * 5.0)(bit))
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[5.0 / np.pi]]]))
def test_error(self):
"""Ensure if a symbol can't be found the op errors."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('delta') * 2))
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha', 'delta'])
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
symbols = tf.convert_to_tensor(['alpha'])
with self.assertRaisesRegex(Exception, expected_regex='sympy.Symbol'):
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
symbols = tf.convert_to_tensor([['delta']])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
inputs = tf.convert_to_tensor(['junk'])
symbols = tf.convert_to_tensor(['delta'])
with self.assertRaisesRegex(Exception,
expected_regex='Unparseable proto:'):
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
inputs = util.convert_to_tensor([[circuit]])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
def test_many_values(self):
"""Ensure that padding with few symbols and many values works."""
bit = cirq.LineQubit(1)
circuits = [
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0),
cirq.Z(bit)**(sympy.Symbol('alpha')),
cirq.X(bit)**(sympy.Symbol('alpha') * 4.0)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 9.0)),
cirq.Circuit(cirq.X(bit)**sympy.Symbol('beta'))
]
inputs = util.convert_to_tensor(circuits)
symbols = tf.convert_to_tensor(['alpha', 'beta'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(
res,
np.array([[[2.0, 3.0, 1.0, 4.0], [0.0, 0.0, 0.0, 0.0]],
[[9.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]]))
def test_many_symbols(self):
"""Ensure that padding with few values and many symbols works."""
bit = cirq.GridQubit(0, 0)
circuits = [
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2.0)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('beta') * 6)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 5.0)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('gamma') * 8)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('delta') * 9))
]
inputs = util.convert_to_tensor(circuits)
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma', 'delta'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(
res,
np.array([[[2.0], [0.0], [0.0], [0.0]], [[0.0], [6.0], [0.0],
[0.0]],
[[5.0], [0.0], [0.0], [0.0]], [[0.0], [0.0], [8.0],
[0.0]],
[[0.0], [0.0], [0.0], [9.0]]]))
def test_out_of_order(self):
"""Test that discovery order of symbols in circuits doesn't matter."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2),
cirq.Y(bit)**(sympy.Symbol('beta') * 3))
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha', 'beta'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[2.0], [3.0]]]))
symbols = tf.convert_to_tensor(['beta', 'alpha'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[3.0], [2.0]]]))
def test_padding(self):
"""Ensure that the padding is correct in a complex example."""
bit = cirq.GridQubit(0, 0)
circuits = [
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0),
cirq.Z(bit)**(sympy.Symbol('beta') * 4.0),
),
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('beta') * 3.0),
cirq.Z(bit)**(sympy.Symbol('beta') * 4.0),
),
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('beta') * 3.0),
cirq.Z(bit)**(sympy.Symbol('gamma') * 4.0),
)
]
inputs = util.convert_to_tensor(circuits)
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(
res,
np.array([[[2.0, 3.0], [4.0, 0.0], [0.0, 0.0]],
[[2.0, 0.0], [3.0, 4.0], [0.0, 0.0]],
[[2.0, 0.0], [3.0, 0.0], [4.0, 0.0]]]))
def test_padding_with_non_parameterized_gates(self):
"""Ensure that the padding is correct in a complex example."""
bit = cirq.GridQubit(0, 0)
circuits = [
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**3.0,
cirq.Z(bit)**(sympy.Symbol('beta') * 4.0),
),
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('beta') * 3.0),
cirq.Z(bit)**4.0,
),
cirq.Circuit(
cirq.X(bit)**2.0,
cirq.Y(bit)**(sympy.Symbol('beta') * 3.0),
cirq.Z(bit)**(sympy.Symbol('gamma') * 4.0),
)
]
inputs = util.convert_to_tensor(circuits)
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(
res,
np.array([[[2.0], [4.0], [0.0]], [[2.0], [3.0], [0.0]],
[[0.0], [3.0], [4.0]]]))
def test_ignorance(self):
"""Test ignorance of ISP, PXP, FSIM gates."""
circuit_batch = _complex_test_circuit()
inputs = util.convert_to_tensor(circuit_batch)
symbols = tf.convert_to_tensor(['r', 't'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
# Because there are no weights to be gathered, the last dimension = 0
self.assertAllClose(tf.shape(res), [len(circuit_batch), 2, 0])
if __name__ == "__main__":
tf.test.main()
| tensorflow/quantum | tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py | Python | apache-2.0 | 39,614 |
from eisoil.core.exception import CoreException
class OSliceAuthorityException(CoreException):
def __init__(self, desc):
self._desc = desc
def __str__(self):
return "OSliceAuthority: %s" % (self._desc,)
| EICT/C-BAS | src/plugins/osliceauthorityrm/osliceauthorityexceptions.py | Python | bsd-3-clause | 236 |
#!/usr/bin/env python
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import subprocess
import time
import urllib
import signal
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
from selenium.test.selenium.webdriver.common import visibility_tests
from selenium import webdriver
SERVER_ADDR = "localhost"
DEFAULT_PORT = 4444
def wait_for_server(url, timeout):
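    """Poll `url` until it responds or `timeout` seconds pass.
    Returns 1 on success and 0 on timeout."""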
start = time.time()
while time.time() - start < timeout:
try:
urllib.urlopen(url)
return 1
except IOError:
time.sleep(0.2)
return 0
def setup_module(module):
_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
url = "http://%s:%d/wd/hub" % (SERVER_ADDR, DEFAULT_PORT)
try:
_socket.connect((SERVER_ADDR, DEFAULT_PORT))
print ("The remote driver server is already running or something else"
"is using port %d, continuing..." % DEFAULT_PORT)
except:
print ("Starting the remote driver server")
RemoteVisibilityTests.server_proc = subprocess.Popen(
"java -jar build/java/server/src/org/openqa/selenium/server/server-standalone.jar",
shell=True)
assert wait_for_server(url, 10), "can't connect"
print "Server should be online"
webserver = SimpleWebServer()
webserver.start()
RemoteVisibilityTests.webserver = webserver
RemoteVisibilityTests.driver = webdriver.Remote(desired_capabilities = webdriver.DesiredCapabilities.FIREFOX)
class RemoteVisibilityTests(visibility_tests.VisibilityTests):
pass
def teardown_module(module):
try:
RemoteVisibilityTests.driver.quit()
except AttributeError:
pass
try:
RemoteVisibilityTests.webserver.stop()
except AttributeError:
pass
try:
os.kill(RemoteVisibilityTests.server_proc.pid, signal.SIGTERM)
time.sleep(3)
except:
pass
| gx1997/chrome-loongson | third_party/webdriver/python/test/selenium/webdriver/remote/test_remote_visibility_tests.py | Python | bsd-3-clause | 2,535 |
'''
Created on Jan 3, 2015
@author: pixo
'''
from badass.plugin import PluginCmd
class PushCmd(PluginCmd):
author = "Rachid Chikh"
version = "0.0.1"
name = "PushCmd"
hook = "badass.core.push"
def preCmd(self, **kwargs):
print "pre cmd:", kwargs
def postCmd(self, **kwargs):
print "post cmd:", kwargs
def initialize(**kwargs):
plugin = PushCmd(**kwargs)
return plugin
| pixo/badtools | plugins/PushCmd.py | Python | lgpl-3.0 | 422 |
import logging
from ..utilities.domain import parse_domain, get_domain_registry
import idna
from .entity import EppEntity
log = logging.getLogger(__name__)
class Domain(EppEntity):
"""
Query operations for domains.
"""
def __init__(self, queryset=None):
"""
Initialise Domain object.
"""
super().__init__(queryset)
def process_contact_set(self, contacts):
"""
Process a set of 1 or more contacts
:contacts: dict or list of contacts
:returns: list of dict objects
"""
if isinstance(contacts, list):
return [self.process_contact_item(i) for i in contacts]
return [self.process_contact_item(contacts)]
def process_contact_item(self, contact):
"""
Process a single contact item.
:contact: dict object containing contact data
:returns: dict with contact-type mapping to id
"""
if '$t' in contact:
processed = {}
contact_type = contact["type"]
contact_id = contact["$t"]
processed[contact_type] = contact_id
return processed
return None
def check_domain(self, *args):
"""
Send a check domain request to the registry.
:*args: one or more domain names
:returns: dict with set of results indicating availability
"""
registry = get_domain_registry(args[0])
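        # Convert any IDN input to ASCII (punycode) labels via IDNA/UTS #46
        # before sending the names to the registry.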
data = {"domain": [idna.encode(i, uts46=True).decode('ascii')
for i in args]}
log.debug("{!r}".format(data))
response_data = self.rpc_client.call(registry.slug, 'checkDomain', data)
log.debug("response data {!r}".format(response_data))
check_data = response_data["domain:chkData"]["domain:cd"]
results = []
if isinstance(check_data, list):
for item in check_data:
results.append(self.process_availability_item(item, "domain"))
else:
results.append(self.process_availability_item(check_data, "domain"))
availability = {
"result": results
}
return availability
def process_nameservers(self, raw_ns):
"""
Process nameserver information in info domain
:raw_ns: dict with raw nameserver info from EPP
:returns: list of nameservers
"""
nameservers = []
if isinstance(raw_ns, dict) and "domain:hostObj" in raw_ns:
ns_host = raw_ns["domain:hostObj"]
if isinstance(ns_host, list):
for host in ns_host:
nameservers.append(host)
elif isinstance(ns_host, str):
nameservers.append(ns_host)
elif isinstance(raw_ns, list):
for host_obj in raw_ns:
nameservers.append(host_obj["domain:hostObj"])
return nameservers
def info(self, domain, user=None):
"""
Get info for a domain
        :domain: str domain name to query
        :user: optional user making the request
:returns: dict with info about domain
"""
registry = get_domain_registry(domain)
parsed_domain = parse_domain(domain)
registered_domain_set = self.queryset.filter(
name=parsed_domain["domain"],
tld__zone=parsed_domain["zone"],
active=True
)
registered_domain = None
if registered_domain_set.exists():
registered_domain = registered_domain_set.first()
data = {"domain": domain}
response_data = self.rpc_client.call(registry.slug, 'infoDomain', data)
info_data = response_data["domain:infData"]
return_data = {
"domain": info_data["domain:name"],
"registrant": info_data["domain:registrant"],
"contacts": self.process_contact_set(info_data["domain:contact"]),
"status": self.process_status(info_data["domain:status"])
}
if "domain:ns" in info_data:
return_data["nameservers"] = self.process_nameservers(
info_data["domain:ns"]
)
if "domain:authInfo" in info_data:
return_data["authcode"] = info_data["domain:authInfo"]["domain:pw"]
return_data["roid"] = info_data["domain:roid"]
log.info("Returning registered domain info")
return return_data
class ContactQuery(EppEntity):
"""
Contact EPP operations.
"""
def __init__(self, queryset=None):
super().__init__(queryset)
def process_postal_info(self, postal_info):
"""
Process postal info part of info contact response
:returns: list of postal info objects
"""
processed_postal = []
if isinstance(postal_info, list):
processed_postal += [self.postal_info_item(i) for i in postal_info]
else:
processed_postal.append(self.postal_info_item(postal_info))
return processed_postal
def postal_info_item(self, item):
"""
Process individual postal info item
:item: dict containing raw EPP postalInfo data
:returns: dict containing info with namespaces removed
"""
addr = item["contact:addr"]
contact_street = []
if "contact:street" in addr:
raw_street = addr["contact:street"]
if isinstance(raw_street, list):
contact_street += raw_street
else:
contact_street.append(raw_street)
return {
"name": item["contact:name"],
"company": item.get("contact:org", ""),
"postal_info_type": item["type"],
"street": contact_street[0:3],
"country": addr["contact:cc"],
"state": addr["contact:sp"],
"city": addr["contact:city"],
"postcode": addr["contact:pc"]
}
def process_disclose(self, raw_disclose_data):
"""
Extract information about disclosed attributes
:raw_disclose_data: dict with disclose information from EPP
        :returns: list of attribute names that are not disclosed
"""
log.info("Processing disclose data {!r}".format(raw_disclose_data))
flag = raw_disclose_data.get("flag", 1)
contact_attributes = {
"contact:voice": "telephone",
"contact:fax": "fax",
"contact:email": "email",
"contact:name": "name",
"contact:addr": "address",
"contact:org": "company"
}
processed_data = []
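        # flag == 0 means the elements listed in the disclose section are hidden;
        # any other flag means the listed elements are shown, so everything that
        # is *not* listed is treated as non-disclosed.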
for (k, v) in contact_attributes.items():
if int(flag) == 0:
if k in raw_disclose_data:
processed_data.append(v)
elif k not in raw_disclose_data:
processed_data.append(v)
return processed_data
def info(self, contact):
"""
Fetch info for a contact
        :contact: registered contact object (provides registry_id and provider)
:returns: dict of contact information
"""
# Fetch contact from registry.
data = {"contact": contact.registry_id}
registry = contact.provider.slug
response_data = self.rpc_client.call(registry, 'infoContact', data)
log.debug("Received info response")
info_data = response_data["contact:infData"]
processed_postal_info = self.process_postal_info(
info_data["contact:postalInfo"]
)[0]
processed_info_data = {
"email": info_data["contact:email"],
"fax": info_data.get("contact:fax", ""),
"registry_id": info_data["contact:id"],
"telephone": info_data["contact:voice"],
}
extra_fields = {}
extra_fields["status"] = self.process_status(
info_data["contact:status"]
)
extra_fields["roid"] = info_data["contact:roid"]
if "contact:authInfo" in info_data:
extra_fields["authcode"] = info_data["contact:authInfo"]["contact:pw"]
processed_info_data.update(extra_fields)
try:
contact_info_data = {}
contact_info_data.update(processed_postal_info)
contact_info_data.update(processed_info_data)
for item, value in contact_info_data.items():
if isinstance(value, dict):
contact_info_data[item] = ""
if "contact:disclose" in info_data:
contact_info_data["non_disclose"] = self.process_disclose(
info_data["contact:disclose"]
)
return contact_info_data
except Exception as e:
log.error("", exc_info=True)
raise e
class HostQuery(EppEntity):
"""
Nameserver EPP operations
"""
def __init__(self, queryset=None):
super().__init__(queryset)
def check_host(self, *args):
"""
Send a check host request to the registry
:*args: list of host names to check
:returns: dict EPP check host response
"""
registry = get_domain_registry(args[0])
data = {"host": [idna.encode(i, uts46=True).decode('ascii')
for i in args]}
response_data = self.rpc_client.call(
registry.slug,
'checkHost',
data
)
check_data = response_data["host:chkData"]["host:cd"]
results = []
if isinstance(check_data, list):
for item in check_data:
results.append(self.process_availability_item(item, "host"))
else:
results.append(self.process_availability_item(check_data, "host"))
availability = {
"result": results
}
return availability
def process_addr_item(self, item):
"""
Process a host info address item
:item: dict IP addr item
        :returns: dict with 'type' and 'ip' keys parsed out of the item
"""
processed = {}
if "$t" in item:
processed["type"] = item["ip"]
processed["ip"] = item["$t"]
return processed
def process_addresses(self, addresses):
"""
        Process a set of host addresses
        :addresses: list of dict items (or a single dict)
        :returns: list of processed address dicts
"""
if isinstance(addresses, list):
return [self.process_addr_item(i) for i in addresses]
return [self.process_addr_item(addresses)]
def info(self, registered_host, user=None):
"""
Get info for a host
:host: str host name to query
:returns: dict with info about host
"""
data = {"name": registered_host.host}
registry = registered_host.tld_provider.provider
response_data = self.rpc_client.call(registry.slug, 'infoHost', data)
info_data = response_data["host:infData"]
return_data = {
"idn_host": info_data["host:name"],
"addr": self.process_addresses(info_data["host:addr"]),
"status": self.process_status(info_data["host:status"]),
"roid": info_data["host:roid"]
}
if "host:authInfo" in info_data:
return_data["authcode"] = info_data["host:authInfo"]["host:pw"]
return return_data
| heytrav/drs-project | domain_api/epp/queries.py | Python | mit | 11,313 |
import json
from optparse import make_option
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.core.validators import URLValidator
from provider.constants import CONFIDENTIAL, PUBLIC
from provider.oauth2.models import Client
from ...models import TrustedClient
try:
from django.contrib.auth import get_user_model
except ImportError: # Django <1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
ARG_STRING = '<url> <redirect_uri> <client_type: "confidential" | "public">'
class Command(BaseCommand):
help = 'Create a new OAuth2 Client. Outputs a serialized representation of the newly-created Client.'
args = ARG_STRING
fields = None
option_list = BaseCommand.option_list + (
make_option(
'-u',
'--username',
action='store',
type='string',
dest='username',
help="Username of a user to associate with the Client."
),
make_option(
'-n',
'--client_name',
action='store',
type='string',
dest='client_name',
help="String to assign as the Client name."
),
make_option(
'-i',
'--client_id',
action='store',
type='string',
dest='client_id',
help="String to assign as the Client ID."
),
make_option(
'-s',
'--client_secret',
action='store',
type='string',
dest='client_secret',
help="String to assign as the Client secret. Should not be shared."
),
make_option(
'-t',
'--trusted',
action='store_true',
dest='trusted',
default=False,
help="Designate the Client as trusted. Trusted Clients bypass the user consent "
"form typically displayed after validating the user's credentials."
),
)
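    # Illustrative invocation (URLs and names below are placeholders):
    #   python manage.py create_oauth2_client https://client.example.com \
    #       https://client.example.com/complete confidential --trusted -u some_user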
def handle(self, *args, **options):
self._clean_args(args)
self._parse_options(options)
client_id = self.fields.get('client_id')
trusted = self.fields.pop('trusted')
# Check if client ID is already in use. If so, fetch existing Client and update fields.
client_id_claimed = Client.objects.filter(client_id=client_id).exists()
if client_id_claimed:
client = Client.objects.get(client_id=client_id)
for key, value in self.fields.items():
setattr(client, key, value)
client.save()
else:
client = Client.objects.create(**self.fields)
if trusted:
TrustedClient.objects.get_or_create(client=client)
else:
try:
TrustedClient.objects.get(client=client).delete()
except TrustedClient.DoesNotExist:
pass
serialized = json.dumps(client.serialize(), indent=4)
self.stdout.write(serialized)
def _clean_args(self, args):
"""Validate and clean the command's arguments.
These arguments must include the Client application's URL, the Client application's
OAuth2 callback URL, and the Client's type, indicating whether the Client application
is capable of maintaining the confidentiality of its credentials (e.g., running on a
secure server) or is incapable of doing so (e.g., running in a browser).
Arguments:
args (tuple): Arguments with which the command was called.
Raises:
CommandError, if the number of arguments provided is invalid, if the URLs provided
are invalid, or if the client type provided is invalid.
"""
if len(args) != 3:
raise CommandError(
"Number of arguments provided is invalid. "
"This command requires the following arguments: {}.".format(ARG_STRING)
)
url, redirect_uri, client_type = args
# Validate URLs
for u in (url, redirect_uri):
try:
URLValidator()(u)
except ValidationError:
raise CommandError("URLs provided are invalid. Please provide valid application and redirect URLs.")
# Validate and map client type to the appropriate django-oauth2-provider constant
client_type = client_type.lower()
client_type = {
'confidential': CONFIDENTIAL,
'public': PUBLIC
}.get(client_type)
if client_type is None:
raise CommandError("Client type provided is invalid. Please use one of 'confidential' or 'public'.")
self.fields = {
'url': url,
'redirect_uri': redirect_uri,
'client_type': client_type,
}
def _parse_options(self, options):
"""Parse the command's options.
Arguments:
options (dict): Options with which the command was called.
Raises:
CommandError, if a user matching the provided username does not exist.
"""
for key in ('username', 'client_name', 'client_id', 'client_secret', 'trusted'):
value = options.get(key)
if value is not None:
self.fields[key] = value
username = self.fields.pop('username', None)
if username is not None:
try:
self.fields['user'] = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("User matching the provided username does not exist.")
# The keyword argument 'name' conflicts with that of `call_command()`. We instead
# use 'client_name' up to this point, then swap it out for the expected field, 'name'.
client_name = self.fields.pop('client_name', None)
if client_name is not None:
self.fields['name'] = client_name
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/edx_oauth2_provider/management/commands/create_oauth2_client.py | Python | agpl-3.0 | 6,029 |
import re
import simplejson
import datetime
import urllib2
from BeautifulSoup import BeautifulSoup
from goscale import models as goscale_models
from goscale import utils
from goscale import conf
from django.db import models
from django.db.models import signals
from django.utils.translation import ugettext as _
RATIO_CHOICES = (
('4:3', _('4:3 (screen)')),
('16:9', _('16:9 (widescreen)')),
('1.4142:1', _('A4 (paper, PDF)')),
)
class Presentation(goscale_models.GoscaleCMSPlugin):
"""
Presentation Base Class
"""
embed = models.TextField(verbose_name=_('Embed code'), help_text=_('From the "</> Embed" link.'))
width = models.SmallIntegerField(null=True, blank=True, verbose_name=_('Width'),
help_text=_('Width of a presentation container.'))
height = models.SmallIntegerField(null=True, blank=True, verbose_name=_('Height'),
help_text=_('Height of a presentation container.'))
ratio = models.CharField(max_length=50,
default=RATIO_CHOICES[0][0], choices=RATIO_CHOICES, verbose_name=_('Aspect ratio'),
help_text=_('Ratio of width:height used for the presentation if manual size isn\'t set.'))
embed_as_is = models.BooleanField(default=False, verbose_name=_('Embed "as is"'),
help_text=_('If set embed code will not be changed.'))
class Meta:
abstract = True
def save(self, no_signals=False, *args, **kwargs):
if not self.embed_as_is:
self.embed = self._get_embed_code()
super(Presentation, self).save(no_signals=no_signals, *args, **kwargs)
def _get_data(self):
return []
def _get_embed_code(self):
return self.embed
def _get_size(self, default_width=480, extra_height=61):
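        """Return (width, height), deriving unset sides from `ratio`.
        For example, if neither width nor height is set, the default 4:3 ratio
        with default_width=480 and extra_height=61 yields (480, 421)."""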
ratio = [float(side) for side in self.ratio.split(':')]
width = self.width or default_width
height = self.height or int((width / ratio[0]) * ratio[1]) + extra_height
return width, height
#signals.post_save.connect(goscale_models.update_posts, sender=Form)
class Speakerdeck(Presentation):
"""
Speakerdeck presentation
"""
start = models.SmallIntegerField(default=1, verbose_name=_('Start slide'),
help_text=_('Number of the first slide.'))
def copy_relations(self, oldinstance):
# FIXME: remove after this issue is resolved: https://github.com/divio/django-cms/issues/1723
super(Speakerdeck, self).copy_relations(oldinstance)
def _regex_id(self):
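        """Pull the presentation id out of the data-id="..." attribute of the
        pasted embed code, or return None if it cannot be found."""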
try:
id = re.search('(data-id=")([\d\w.]+)(")', self.embed).group(2)
return id
except AttributeError:
# raise goscale_models.WrongAttribute(attribute='embed')
return None
def _get_embed_code(self):
id = self._regex_id()
if not id:
return self.embed
width, height = self._get_size(extra_height=61)
return '<iframe class="speakerdeck-iframe goscale-presentation" style="width: %spx; height: %spx; \
border-top-left-radius: 5px; \
border-top-right-radius: 5px; border-bottom-right-radius: 5px; border-bottom-left-radius: 5px; \
border: 0px; background-color: transparent; margin: 0px; padding: 0px; \
background-position: initial initial; background-repeat: initial initial; " frameborder="0" \
src="//speakerdeck.com/player/%s?slide=%s" \
allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>' % (
width,
height,
id,
self.start,
)
class Slideshare(Presentation):
"""
Slideshare presentation
"""
start = models.SmallIntegerField(default=1, verbose_name=_('Start slide'),
help_text=_('Number of the first slide.'))
without_related_content = models.BooleanField(default=True, verbose_name=_('Without related content'),
help_text=_('If set related slideshows will not be displayed.'))
def copy_relations(self, oldinstance):
# FIXME: remove after this issue is resolved: https://github.com/divio/django-cms/issues/1723
super(Slideshare, self).copy_relations(oldinstance)
def _regex_id(self):
try:
id = re.search('(\/embed_code\/)([\d\w.]+)(\?)', self.embed).group(2)
return id
except AttributeError:
return None
# raise goscale_models.WrongAttribute(attribute='embed')
def _get_embed_code(self):
id = self._regex_id()
if not id:
return self.embed
width, height = self._get_size(default_width=599, extra_height=38)
return '<iframe src="http://www.slideshare.net/slideshow/embed_code/%s?rel=%s&startSlide=%s" width="%s" \
height="%s" frameborder="0" marginwidth="0" marginheight="0" scrolling="no" style="\
border:1px solid #CCC;border-width:1px 1px 0;margin-bottom:5px" allowfullscreen webkitallowfullscreen \
mozallowfullscreen> </iframe>' % (
id,
0 if self.without_related_content else 1,
self.start,
width,
height,
)
DELAY_CHOISES = (
(1000, _('every second')),
(2000, _('every 2 seconds')),
(3000, _('every 3 seconds (default)')),
(5000, _('every 5 seconds')),
(10000, _('every 10 seconds')),
(15000, _('every 15 seconds')),
(30000, _('every 30 seconds')),
(60000, _('every minute')),
)
class GooglePresentation(Presentation):
"""
Google presentation
"""
delay = models.SmallIntegerField(default=DELAY_CHOISES[2][0], choices=DELAY_CHOISES,
verbose_name=_('Delay between slides'),
help_text=_('Automatically advance presentation to the next slide after set delay.'))
autoplay = models.BooleanField(default=False, verbose_name=_('Autoplay'),
help_text=_('If set presentation will start automatically.'))
loop = models.BooleanField(default=False, verbose_name=_('Loop'),
help_text=_('If set presentation will restart after the last slide.'))
def copy_relations(self, oldinstance):
# FIXME: remove after this issue is resolved: https://github.com/divio/django-cms/issues/1723
super(GooglePresentation, self).copy_relations(oldinstance)
def _regex_id(self):
try:
id = re.search('(\/d\/)([\d\w.]+)(\/embed)', self.embed).group(2)
return id
except AttributeError:
return None
# raise goscale_models.WrongAttribute(attribute='embed')
def _get_embed_code(self):
id = self._regex_id()
if not id:
return self.embed
width, height = self._get_size(extra_height=29)
return '<iframe src="https://docs.google.com/presentation/d/%s/embed?start=%s&loop=%s&delayms=%s" \
frameborder="0" width="%s" height="%s" allowfullscreen="true" \
mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>' % (
id,
'true' if self.autoplay else 'false',
'true' if self.loop else 'false',
self.delay,
width,
height,
) | sternoru/goscalecms | goscale/plugins/presentations/models.py | Python | bsd-3-clause | 7,209 |
# Import data about the canvas and its objects as well as the frame
# for the program
from Display import *
from Server import *
# River and SM are the "model" of this
# design
from Tkinter import *
class riverController():
def __init__(self, master, canvasData, client):
self.master = master
self.canvasData = canvasData
self.setUpButtons()
self.client = client
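        # riverdb holds the server's world state as a list of fact strings,
        # e.g. 'chicken isat boat' or 'boat isat left' (the comma-separated
        # reply is split in sendAndRecieve).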
self.riverdb = self.sendAndRecieve('db')
def setUpButtons(self):
a = Button(self.master, text="Get In", command=self.getIn)
a.pack(side=LEFT)
b= Button(self.master, text="Get out", command=self.getOut)
b.pack(side=LEFT)
c = Button(self.master, text="Chicken in", command=self.chickenIn)
c.pack(side=LEFT)
f= Button(self.master, text="Grain in", command=self.grainIn)
f.pack(side=LEFT)
e= Button(self.master, text ="Fox in", command=self.foxIn)
e.pack(side=LEFT)
d= Button(self.master, text="Drive boat right", command=self.moveBoatRight)
d.pack(side=LEFT)
h = Button(self.master, text="Drive boat left", command=self.moveBoatLeft)
h.pack(side=LEFT)
g= Button(self.master, text="chicken out", command=self.chickenOut)
g.pack(side=LEFT)
j=Button(self.master, text="Fox Out", command=self.foxOut)
j.pack(side=LEFT)
i=Button(self.master, text="Grain Out", command=self.grainOut)
i.pack(side = LEFT)
def getOut(self):
self.riverdb = self.sendAndRecieve('getout')
def getIn(self):
self.riverdb = self.sendAndRecieve('getin')
def moveBoatRight(self):
self.riverdb = self.sendAndRecieve('db')
        if (('chicken isat boat' in self.riverdb or 'fox isat boat' in self.riverdb
             or 'grain isat boat' in self.riverdb) and 'boat isat left' in self.riverdb):
if ('chicken isat boat' in self.riverdb):
self.canvasData.chicken.move(390,1)
self.canvasData.boat.move(390,1)
self.canvasData.man.move(390,1)
self.river.crossriver()
if ('grain isat boat' in self.riverdb):
self.canvasData.grain.move(390,1)
self.canvasData.boat.move(390,1)
self.canvasData.man.move(390,1)
self.river.crossriver()
if ('fox isat boat' in self.riverdb):
self.canvasData.fox.move(390, 1)
self.canvasData.boat.move(390, 1)
self.canvasData.man.move(390,1)
self.river.crossriver()
def moveBoatLeft(self):
self.riverdb = self.sendAndRecieve('db')
        if (('chicken isat boat' in self.riverdb or 'fox isat boat' in self.riverdb
             or 'grain isat boat' in self.riverdb) and 'boat isat right' in self.riverdb):
if ('chicken isat boat' in self.riverdb):
self.river.crossriver()
self.canvasData.chicken.move(-390, -1)
self.canvasData.boat.move(-390,-1)
self.canvasData.man.move(-390,-1)
else:
self.canvasData.boat.move(-390,-1)
self.canvasData.man.move(-390,-1)
self.river.crossriver()
def chickenOut(self):
self.riverdb = self.sendAndRecieve('db')
        if ('chicken isat boat' in self.riverdb and 'boat isat right' in self.riverdb):
self.river.takeOut("chicken")
self.canvasData.chicken.move(220, 20)
        if ('chicken isat boat' in self.riverdb and 'boat isat left' in self.riverdb):
self.river.takeOut("chicken")
self.canvasData.chicken.move(-130, 20)
def chickenIn(self):
self.riverdb = self.sendAndRecieve('db')
if ('grain isat boat' in self.riverdb):
print "boat is full"
elif ('fox isat boat'in self.riverdb):
print "boat is full"
elif ('chicken isat right' in self.riverdb):
self.river.putIn("chicken")
self.canvasData.chicken.move(-180,-20)
elif (self.river.statusCheck == "s1" or "s14" or "s20"):
self.river.putIn("chicken")
self.canvasData.chicken.move(130,-20)
def foxIn(self):
self.riverdb = self.sendAndRecieve('db')
if ('chicken isat boat' in self.riverdb):
print "boat is full"
elif ('grain isat boat' in self.riverdb):
print "boat is full"
elif (self.river.statusCheck == "s1" or "s6" or "s13" or "s14" ):
self.river.putIn("fox")
self.canvasData.fox.move(220, -20)
else:
print"Noo noon noo"
def foxOut(self):
        self.riverdb = self.sendAndRecieve('db')
        if ('fox isat boat' in self.riverdb and 'boat isat right' in self.riverdb):
self.river.takeOut("fox")
self.canvasData.fox.move(210, 20)
def grainIn(self):
self.riverdb = self.sendAndRecieve('db')
if ('chicken isat boat' in self.riverdb):
print "boat is full"
elif ('fox isat boat'in self.riverdb):
print "boat is full"
        elif ('grain isat left' in self.riverdb and 'boat isat left' in self.riverdb):
self.river.putIn("grain")
self.canvasData.grain.move(180, -20)
def grainOut(self):
self.riverdb = self.sendAndRecieve('db')
        if ('grain isat boat' in self.riverdb and 'boat isat right' in self.riverdb):
self.river.takeOut("grain")
self.canvasData.grain.move(+160, 20)
def sendAndRecieve(self, message):
msg = message
answer = self.client.send(msg)
answer = answer.split(',')
return answer
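    # Illustrative reply format (an assumption -- the exact strings come from
    # the Server module, which is not shown in this file): a call like
    # sendAndRecieve('db') might return something such as
    #   ['chicken isat left', 'fox isat left', 'grain isat left', 'boat isat left']
    # which is what the membership tests above ('... isat boat',
    # 'boat isat right', etc.) are matched against.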
def close(self):
self.client.close()
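# Minimal wiring sketch (illustrative only): the real Display and Server
# modules are imported above but not defined in this file, so the mocks
# below only fake the interfaces this controller actually touches -- a
# client with send()/close() and canvas items with a move(dx, dy) method.
class _MockItem:
    def move(self, dx, dy):
        print 'move by', dx, dy
class _MockCanvasData:
    def __init__(self):
        self.chicken = _MockItem()
        self.fox = _MockItem()
        self.grain = _MockItem()
        self.boat = _MockItem()
        self.man = _MockItem()
class _MockClient:
    def send(self, msg):
        # Every query just reports the starting state of the puzzle.
        return 'chicken isat left,fox isat left,grain isat left,boat isat left'
    def close(self):
        pass
if __name__ == '__main__':
    root = Tk()
    controller = riverController(root, _MockCanvasData(), _MockClient())
    root.mainloop()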
| Mathih13/IS-105_2016_Gruppe-5 | Uke-15-ICA-9/ICA-8-Python/Controller/riverController.py | Python | mit | 6,305 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FisherBlock definitions.
This library contains classes for estimating blocks in a model's Fisher
Information matrix. Suppose one has a model that parameterizes a posterior
distribution over 'y' given 'x' with parameters 'params', p(y | x, params). Its
Fisher Information matrix is given by,
$$F(params) = E[ v(x, y, params) v(x, y, params)^T ]$$
where,
$$v(x, y, params) = (d / d params) log p(y | x, params)$$
and the expectation is taken with respect to the data's distribution for 'x' and
the model's posterior distribution for 'y',
x ~ p(x)
y ~ p(y | x, params)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import enum # pylint: disable=g-bad-import-order
import numpy as np
import six
from tensorflow.contrib.kfac.python.ops import fisher_factors
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
# For blocks corresponding to convolutional layers, or any type of block where
# the parameters can be thought of as being replicated in time or space,
# we want to adjust the scale of the damping by
# damping /= num_replications ** NORMALIZE_DAMPING_POWER
NORMALIZE_DAMPING_POWER = 1.0
# Methods for adjusting damping for FisherBlocks. See
# compute_pi_adjusted_damping() for details.
PI_OFF_NAME = "off"
PI_TRACENORM_NAME = "tracenorm"
PI_TYPE = PI_TRACENORM_NAME
def set_global_constants(normalize_damping_power=None, pi_type=None):
"""Sets various global constants used by the classes in this module."""
global NORMALIZE_DAMPING_POWER
global PI_TYPE
if normalize_damping_power is not None:
NORMALIZE_DAMPING_POWER = normalize_damping_power
if pi_type is not None:
PI_TYPE = pi_type
def normalize_damping(damping, num_replications):
"""Normalize damping after adjusting scale by NORMALIZE_DAMPING_POWER."""
if NORMALIZE_DAMPING_POWER:
return damping / (num_replications ** NORMALIZE_DAMPING_POWER)
return damping
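# Worked example (illustrative): with the default NORMALIZE_DAMPING_POWER of
# 1.0, normalize_damping(0.1, num_replications=4) returns 0.025.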
def compute_pi_tracenorm(left_cov, right_cov):
"""Computes the scalar constant pi for Tikhonov regularization/damping.
$$\pi = \sqrt{ (trace(A) / dim(A)) / (trace(B) / dim(B)) }$$
See section 6.3 of https://arxiv.org/pdf/1503.05671.pdf for details.
Args:
left_cov: The left Kronecker factor "covariance".
right_cov: The right Kronecker factor "covariance".
Returns:
The computed scalar constant pi for these Kronecker Factors (as a Tensor).
"""
def _trace(cov):
if len(cov.shape) == 1:
# Diagonal matrix.
return math_ops.reduce_sum(cov)
elif len(cov.shape) == 2:
# Full matrix.
return math_ops.trace(cov)
else:
raise ValueError(
"What's the trace of a Tensor of rank %d?" % len(cov.shape))
# Instead of dividing by the dim of the norm, we multiply by the dim of the
# other norm. This works out the same in the ratio.
left_norm = _trace(left_cov) * right_cov.shape.as_list()[0]
right_norm = _trace(right_cov) * left_cov.shape.as_list()[0]
return math_ops.sqrt(left_norm / right_norm)
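# Worked example (illustrative; values are not from the original source): for
# a diagonal left factor with cov [2., 2.] (trace 4, dim 2) and a diagonal
# right factor with cov [8.] (trace 8, dim 1),
#   compute_pi_tracenorm(left_cov, right_cov) == sqrt((4/2) / (8/1)) == 0.5
# which matches sqrt(left_norm / right_norm) = sqrt(4 / 16) in the code above.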
def compute_pi_adjusted_damping(left_cov, right_cov, damping):
if PI_TYPE == PI_TRACENORM_NAME:
pi = compute_pi_tracenorm(left_cov, right_cov)
return (damping * pi, damping / pi)
elif PI_TYPE == PI_OFF_NAME:
return (damping, damping)
class PackagedFunc(object):
"""A Python thunk with a stable ID.
Enables stable names for lambdas.
"""
def __init__(self, func, func_id):
"""Initializes PackagedFunc.
Args:
func: a zero-arg Python function.
func_id: a hashable, function that produces a hashable, or a list/tuple
thereof.
"""
self._func = func
func_id = func_id if isinstance(func_id, (tuple, list)) else (func_id,)
self._func_id = func_id
def __call__(self):
return self._func()
@property
def func_id(self):
"""A hashable identifier for this function."""
return tuple(elt() if callable(elt) else elt for elt in self._func_id)
def _package_func(func, func_id):
return PackagedFunc(func, func_id)
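# Usage sketch (illustrative): two thunks packaged with the same id compare
# equal on func_id, which is what allows damping functions to be deduplicated:
#   f1 = _package_func(lambda: 0.1, (0.1, "power", 0.5))
#   f2 = _package_func(lambda: 0.1, (0.1, "power", 0.5))
#   f1.func_id == f2.func_id  # True, even though f1 is not f2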
@six.add_metaclass(abc.ABCMeta)
class FisherBlock(object):
"""Abstract base class for objects modeling approximate Fisher matrix blocks.
Subclasses must implement register_matpower, multiply_matpower,
instantiate_factors, tensors_to_compute_grads, and num_registered_towers
methods.
"""
def __init__(self, layer_collection):
self._layer_collection = layer_collection
@abc.abstractmethod
def instantiate_factors(self, grads_list, damping):
"""Creates and registers the component factors of this Fisher block.
Args:
grads_list: A list gradients (each a Tensor or tuple of Tensors) with
respect to the tensors returned by tensors_to_compute_grads() that
are to be used to estimate the block.
damping: The damping factor (float or Tensor).
"""
pass
@abc.abstractmethod
def register_matpower(self, exp):
"""Registers a matrix power to be computed by the block.
Args:
exp: A float representing the power to raise the block by.
"""
pass
def register_inverse(self):
"""Registers a matrix inverse to be computed by the block."""
self.register_matpower(-1)
@abc.abstractmethod
def multiply_matpower(self, vector, exp):
"""Multiplies the vector by the (damped) matrix-power of the block.
Args:
vector: The vector (a Tensor or tuple of Tensors) to be multiplied.
exp: A float representing the power to raise the block by before
multiplying it by the vector.
Returns:
The vector left-multiplied by the (damped) matrix-power of the block.
"""
pass
def multiply_inverse(self, vector):
"""Multiplies the vector by the (damped) inverse of the block.
Args:
vector: The vector (a Tensor or tuple of Tensors) to be multiplied.
Returns:
The vector left-multiplied by the (damped) inverse of the block.
"""
return self.multiply_matpower(vector, -1)
def multiply(self, vector):
"""Multiplies the vector by the (damped) block.
Args:
vector: The vector (a Tensor or tuple of Tensors) to be multiplied.
Returns:
The vector left-multiplied by the (damped) block.
"""
return self.multiply_matpower(vector, 1)
@abc.abstractmethod
def tensors_to_compute_grads(self):
"""Returns the Tensor(s) with respect to which this FisherBlock needs grads.
"""
pass
@abc.abstractproperty
def num_registered_towers(self):
"""Number of towers registered for this FisherBlock.
Typically equal to the number of towers in a multi-tower setup.
"""
pass
class FullFB(FisherBlock):
"""FisherBlock using a full matrix estimate (no approximations).
FullFB uses a full matrix estimate (no approximations), and should only ever
be used for very low dimensional parameters.
Note that this uses the naive "square the sum estimator", and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self, layer_collection, params):
"""Creates a FullFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters of this layer (Tensor or tuple of Tensors).
"""
self._batch_sizes = []
self._params = params
super(FullFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._damping_func = _package_func(lambda: damping, (damping,))
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullFactor, (grads_list, self._batch_size))
def register_matpower(self, exp):
self._factor.register_matpower(exp, self._damping_func)
def multiply_matpower(self, vector, exp):
vector_flat = utils.tensors_to_column(vector)
out_flat = self._factor.left_multiply_matpower(
vector_flat, exp, self._damping_func)
return utils.column_to_tensors(vector, out_flat)
def full_fisher_block(self):
"""Explicitly constructs the full Fisher block."""
return self._factor.get_cov()
def tensors_to_compute_grads(self):
return self._params
def register_additional_tower(self, batch_size):
"""Register an additional tower.
Args:
batch_size: The batch size, used in the covariance estimator.
"""
self._batch_sizes.append(batch_size)
@property
def num_registered_towers(self):
return len(self._batch_sizes)
@property
def _batch_size(self):
return math_ops.reduce_sum(self._batch_sizes)
class NaiveDiagonalFB(FisherBlock):
"""FisherBlock using a diagonal matrix approximation.
This type of approximation is generically applicable but quite primitive.
Note that this uses the naive "square the sum estimator", and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self, layer_collection, params):
"""Creates a NaiveDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters of this layer (Tensor or tuple of Tensors).
"""
self._params = params
self._batch_sizes = []
super(NaiveDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._damping_func = _package_func(lambda: damping, (damping,))
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.NaiveDiagonalFactor, (grads_list, self._batch_size))
def register_matpower(self, exp):
# Not needed for this. Matrix powers are computed on demand in the
# diagonal case
pass
def multiply_matpower(self, vector, exp):
vector_flat = utils.tensors_to_column(vector)
out_flat = self._factor.left_multiply_matpower(
vector_flat, exp, self._damping_func)
return utils.column_to_tensors(vector, out_flat)
def full_fisher_block(self):
return self._factor.get_cov()
def tensors_to_compute_grads(self):
return self._params
def register_additional_tower(self, batch_size):
"""Register an additional tower.
Args:
batch_size: The batch size, used in the covariance estimator.
"""
self._batch_sizes.append(batch_size)
@property
def num_registered_towers(self):
return len(self._batch_sizes)
@property
def _batch_size(self):
return math_ops.reduce_sum(self._batch_sizes)
class InputOutputMultiTower(object):
"""Mix-in class for blocks with inputs & outputs and multiple mini-batches."""
def __init__(self, *args, **kwargs):
self.__inputs = []
self.__outputs = []
super(InputOutputMultiTower, self).__init__(*args, **kwargs)
def _process_data(self, grads_list):
"""Process data into the format used by the factors.
This function takes inputs and grads_lists data and processes it into
one of the formats expected by the FisherFactor classes (depending on
the value of the global configuration variable TOWER_STRATEGY).
The initial format of self._inputs is expected to be a list of Tensors
over towers. Similarly grads_lists is expected to be a list over sources
of such lists.
If TOWER_STRATEGY is "concat", 'inputs' becomes a tuple containing a single
tensor (represented as a PartitionedTensor object) equal to the
concatenation (across towers) of all of the elements of self._inputs. And
similarly grads_list is formatted into a tuple (over sources) of such
tensors (also represented as PartitionedTensors).
If TOWER_STRATEGY is "separate", formatting of inputs and grads_list
remains unchanged from the initial format (although possibly converting
from lists into tuples).
Args:
grads_list: grads_list in its initial format (see above).
Returns:
inputs: self._inputs transformed into the appropriate format (see
above).
grads_list: grads_list transformed into the appropriate format (see
above).
Raises:
ValueError: if TOWER_STRATEGY is not one of "separate" or "concat".
"""
inputs = self._inputs
# inputs is a list over towers of Tensors
# grads_list is a list of list with the first index being sources and the
# second being towers.
if fisher_factors.TOWER_STRATEGY == "concat":
# Merge towers together into a PartitionedTensor. We package it in
# a singleton tuple since the factors will expect a list over towers
inputs = (utils.PartitionedTensor(inputs),)
# Do the same for grads_list but preserve leading sources dimension
grads_list = tuple((utils.PartitionedTensor(grads),)
for grads in grads_list)
elif fisher_factors.TOWER_STRATEGY == "separate":
inputs = tuple(inputs)
grads_list = tuple(grads_list)
else:
raise ValueError("Global config variable TOWER_STRATEGY must be one of "
"'concat' or 'separate'.")
return inputs, grads_list
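  # Shape sketch (illustrative): with two towers whose inputs are Tensors of
  # shape [B, d], TOWER_STRATEGY == "concat" yields a singleton tuple holding
  # one PartitionedTensor that behaves like a [2*B, d] Tensor, while
  # "separate" simply returns the 2-tuple of per-tower Tensors unchanged.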
def tensors_to_compute_grads(self):
"""Tensors to compute derivative of loss with respect to."""
return tuple(self._outputs)
def register_additional_tower(self, inputs, outputs):
self._inputs.append(inputs)
self._outputs.append(outputs)
@property
def num_registered_towers(self):
result = len(self._inputs)
assert result == len(self._outputs)
return result
@property
def _inputs(self):
return self.__inputs
@property
def _outputs(self):
return self.__outputs
class FullyConnectedDiagonalFB(InputOutputMultiTower, FisherBlock):
"""FisherBlock for fully-connected (dense) layers using a diagonal approx.
Estimates the Fisher Information matrix's diagonal entries for a fully
connected layer. Unlike NaiveDiagonalFB this uses the low-variance "sum of
squares" estimator.
Let 'params' be a vector parameterizing a model and 'i' an arbitrary index
into it. We are interested in Fisher(params)[i, i]. This is,
$$Fisher(params)[i, i] = E[ v(x, y, params) v(x, y, params)^T ][i, i]
= E[ v(x, y, params)[i] ^ 2 ]$$
Consider fully connected layer in this model with (unshared) weight matrix
'w'. For an example 'x' that produces layer inputs 'a' and output
preactivations 's',
$$v(x, y, w) = vec( a (d loss / d s)^T )$$
This FisherBlock tracks Fisher(params)[i, i] for all indices 'i' corresponding
to the layer's parameters 'w'.
"""
def __init__(self, layer_collection, has_bias=False):
"""Creates a FullyConnectedDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
has_bias: Whether the component Kronecker factors have an additive bias.
(Default: False)
"""
self._has_bias = has_bias
super(FullyConnectedDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
inputs, grads_list = self._process_data(grads_list)
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedDiagonalFactor,
(inputs, grads_list, self._has_bias))
self._damping_func = _package_func(lambda: damping, (damping,))
def register_matpower(self, exp):
# Not needed for this. Matrix powers are computed on demand in the
# diagonal case
pass
def multiply_matpower(self, vector, exp):
"""Multiplies the vector by the (damped) matrix-power of the block.
Args:
vector: Tensor or 2-tuple of Tensors. if self._has_bias, Tensor of shape
[input_size, output_size] corresponding to layer's weights. If not, a
2-tuple of the former and a Tensor of shape [output_size] corresponding
to the layer's bias.
exp: A scalar representing the power to raise the block before multiplying
it by the vector.
Returns:
The vector left-multiplied by the (damped) matrix-power of the block.
"""
reshaped_vec = utils.layer_params_to_mat2d(vector)
reshaped_out = self._factor.left_multiply_matpower(
reshaped_vec, exp, self._damping_func)
return utils.mat2d_to_layer_params(vector, reshaped_out)
class ConvDiagonalFB(InputOutputMultiTower, FisherBlock):
"""FisherBlock for 2-D convolutional layers using a diagonal approx.
Estimates the Fisher Information matrix's diagonal entries for a convolutional
layer. Unlike NaiveDiagonalFB this uses the low-variance "sum of squares"
estimator.
Let 'params' be a vector parameterizing a model and 'i' an arbitrary index
into it. We are interested in Fisher(params)[i, i]. This is,
$$Fisher(params)[i, i] = E[ v(x, y, params) v(x, y, params)^T ][i, i]
= E[ v(x, y, params)[i] ^ 2 ]$$
  Consider a convolutional layer in this model with (unshared) filter matrix
'w'. For an example image 'x' that produces layer inputs 'a' and output
preactivations 's',
$$v(x, y, w) = vec( sum_{loc} a_{loc} (d loss / d s_{loc})^T )$$
where 'loc' is a single (x, y) location in an image.
This FisherBlock tracks Fisher(params)[i, i] for all indices 'i' corresponding
to the layer's parameters 'w'.
"""
def __init__(self,
layer_collection,
params,
strides,
padding,
data_format=None,
dilations=None):
"""Creates a ConvDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters (Tensor or tuple of Tensors) of this layer. If
kernel alone, a Tensor of shape [kernel_height, kernel_width,
in_channels, out_channels]. If kernel and bias, a tuple of 2 elements
containing the previous and a Tensor of shape [out_channels].
strides: The stride size in this layer (1-D Tensor of length 4).
padding: The padding in this layer (e.g. "SAME").
data_format: str or None. Format of input data.
dilations: List of 4 ints or None. Rate for dilation along all dimensions.
Raises:
ValueError: if strides is not length-4.
ValueError: if dilations is not length-4.
ValueError: if channel is not last dimension.
"""
if len(strides) != 4:
raise ValueError("strides must contain 4 numbers.")
if dilations is None:
dilations = [1, 1, 1, 1]
if len(dilations) != 4:
raise ValueError("dilations must contain 4 numbers.")
if not utils.is_data_format_channel_last(data_format):
raise ValueError("data_format must be channels-last.")
self._strides = maybe_tuple(strides)
self._padding = padding
self._data_format = data_format
self._dilations = maybe_tuple(dilations)
self._has_bias = isinstance(params, (tuple, list))
fltr = params[0] if self._has_bias else params
self._filter_shape = tuple(fltr.shape.as_list())
if len(self._filter_shape) != 4:
raise ValueError(
"Convolution filter must be of shape"
" [filter_height, filter_width, in_channels, out_channels].")
super(ConvDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
inputs, grads_list = self._process_data(grads_list)
# Infer number of locations upon which convolution is applied.
self._num_locations = num_conv_locations(inputs[0].shape.as_list(),
self._strides)
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvDiagonalFactor,
(inputs, grads_list, self._filter_shape, self._strides, self._padding,
self._data_format, self._dilations, self._has_bias))
def damping_func():
return self._num_locations * normalize_damping(damping,
self._num_locations)
damping_id = (self._num_locations, "mult", "normalize_damping", damping,
self._num_locations)
self._damping_func = _package_func(damping_func, damping_id)
def register_matpower(self, exp):
# Not needed for this. Matrix powers are computed on demand in the
# diagonal case
pass
def multiply_matpower(self, vector, exp):
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = self._factor.left_multiply_matpower(
reshaped_vect, exp, self._damping_func)
return utils.mat2d_to_layer_params(vector, reshaped_out)
class KroneckerProductFB(FisherBlock):
"""A base class for blocks with separate input and output Kronecker factors.
The Fisher block is approximated as a Kronecker product of the input and
output factors.
"""
def __init__(self, layer_collection):
super(KroneckerProductFB, self).__init__(layer_collection)
def _setup_damping(self, damping, normalization=None):
"""Makes functions that compute the damping values for both factors."""
def compute_damping():
if normalization is not None:
maybe_normalized_damping = normalize_damping(damping, normalization)
else:
maybe_normalized_damping = damping
return compute_pi_adjusted_damping(self._input_factor.get_cov(),
self._output_factor.get_cov(),
maybe_normalized_damping**0.5)
if normalization is not None:
damping_id = ("compute_pi_adjusted_damping",
"cov", self._input_factor.name,
"cov", self._output_factor.name,
"normalize_damping", damping, normalization, "power", 0.5)
else:
damping_id = ("compute_pi_adjusted_damping",
"cov", self._input_factor.name,
"cov", self._output_factor.name,
damping, "power", 0.5)
self._input_damping_func = _package_func(lambda: compute_damping()[0],
damping_id + ("ref", 0))
self._output_damping_func = _package_func(lambda: compute_damping()[1],
damping_id + ("ref", 1))
def register_matpower(self, exp):
self._input_factor.register_matpower(exp, self._input_damping_func)
self._output_factor.register_matpower(exp, self._output_damping_func)
@property
def _renorm_coeff(self):
"""Kronecker factor multiplier coefficient.
If this FisherBlock is represented as 'FB = c * kron(left, right)', then
this is 'c'.
Returns:
0-D Tensor.
"""
return 1.0
def multiply_matpower(self, vector, exp):
reshaped_vector = utils.layer_params_to_mat2d(vector)
reshaped_out = self._output_factor.right_multiply_matpower(
reshaped_vector, exp, self._output_damping_func)
reshaped_out = self._input_factor.left_multiply_matpower(
reshaped_out, exp, self._input_damping_func)
if self._renorm_coeff != 1.0:
renorm_coeff = math_ops.cast(self._renorm_coeff, dtype=reshaped_out.dtype)
reshaped_out *= math_ops.cast(renorm_coeff**exp, dtype=reshaped_out.dtype)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def full_fisher_block(self):
"""Explicitly constructs the full Fisher block.
Used for testing purposes. (In general, the result may be very large.)
Returns:
The full Fisher block.
"""
left_factor = self._input_factor.get_cov()
right_factor = self._output_factor.get_cov()
return self._renorm_coeff * utils.kronecker_product(left_factor,
right_factor)
class EmbeddingKFACFB(InputOutputMultiTower, KroneckerProductFB):
"""K-FAC FisherBlock for embedding layers.
This FisherBlock is similar to FullyConnectedKFACBasicFB, except that its
input factor is approximated by a diagonal matrix. In the case that each
example references exactly one embedding, this approximation is exact.
Does not support bias parameters.
"""
def __init__(self, layer_collection, vocab_size):
"""Creates a EmbeddingKFACFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
vocab_size: int. Size of vocabulary for this embedding layer.
"""
self._vocab_size = vocab_size
super(EmbeddingKFACFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
"""Instantiate Kronecker Factors for this FisherBlock.
Args:
grads_list: List of list of Tensors. grads_list[i][j] is the
gradient of the loss with respect to 'outputs' from source 'i' and
tower 'j'. Each Tensor has shape [tower_minibatch_size, output_size].
damping: 0-D Tensor or float. 'damping' * identity is approximately added
to this FisherBlock's Fisher approximation.
"""
inputs, grads_list = self._process_data(grads_list)
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.EmbeddingInputKroneckerFactor,
(inputs, self._vocab_size))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedKroneckerFactor, (grads_list,))
self._setup_damping(damping)
class FullyConnectedKFACBasicFB(InputOutputMultiTower, KroneckerProductFB):
"""K-FAC FisherBlock for fully-connected (dense) layers.
This uses the Kronecker-factorized approximation from the original
K-FAC paper (https://arxiv.org/abs/1503.05671)
"""
def __init__(self, layer_collection, has_bias=False):
"""Creates a FullyConnectedKFACBasicFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
has_bias: Whether the component Kronecker factors have an additive bias.
(Default: False)
"""
self._has_bias = has_bias
super(FullyConnectedKFACBasicFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
"""Instantiate Kronecker Factors for this FisherBlock.
Args:
grads_list: List of list of Tensors. grads_list[i][j] is the
gradient of the loss with respect to 'outputs' from source 'i' and
tower 'j'. Each Tensor has shape [tower_minibatch_size, output_size].
damping: 0-D Tensor or float. 'damping' * identity is approximately added
to this FisherBlock's Fisher approximation.
"""
inputs, grads_list = self._process_data(grads_list)
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedKroneckerFactor,
((inputs,), self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedKroneckerFactor,
(grads_list,))
self._setup_damping(damping)
class ConvKFCBasicFB(InputOutputMultiTower, KroneckerProductFB):
"""FisherBlock for convolutional layers using the basic KFC approx.
  Estimates the Fisher Information matrix's block for a convolutional
layer.
  Consider a convolutional layer in this model with (unshared) filter matrix
'w'. For a minibatch that produces inputs 'a' and output preactivations 's',
this FisherBlock estimates,
$$F(w) = \#locations * kronecker(E[flat(a) flat(a)^T],
E[flat(ds) flat(ds)^T])$$
where
$$ds = (d / ds) log p(y | x, w)$$
#locations = number of (x, y) locations where 'w' is applied.
where the expectation is taken over all examples and locations and flat()
concatenates an array's leading dimensions.
See equation 23 in https://arxiv.org/abs/1602.01407 for details.
"""
def __init__(self,
layer_collection,
params,
padding,
strides=None,
dilation_rate=None,
data_format=None,
extract_patches_fn=None):
"""Creates a ConvKFCBasicFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters (Tensor or tuple of Tensors) of this layer. If
kernel alone, a Tensor of shape [..spatial_filter_shape..,
in_channels, out_channels]. If kernel and bias, a tuple of 2 elements
containing the previous and a Tensor of shape [out_channels].
padding: str. Padding method.
strides: List of ints or None. Contains [..spatial_filter_strides..] if
'extract_patches_fn' is compatible with tf.nn.convolution(), else
[1, ..spatial_filter_strides, 1].
dilation_rate: List of ints or None. Rate for dilation along each spatial
dimension if 'extract_patches_fn' is compatible with
tf.nn.convolution(), else [1, ..spatial_dilation_rates.., 1].
data_format: str or None. Format of input data.
extract_patches_fn: str or None. Name of function that extracts image
patches. One of "extract_convolution_patches", "extract_image_patches",
"extract_pointwise_conv2d_patches".
"""
self._padding = padding
self._strides = maybe_tuple(strides)
self._dilation_rate = maybe_tuple(dilation_rate)
self._data_format = data_format
self._extract_patches_fn = extract_patches_fn
self._has_bias = isinstance(params, (tuple, list))
fltr = params[0] if self._has_bias else params
self._filter_shape = tuple(fltr.shape.as_list())
super(ConvKFCBasicFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
inputs, grads_list = self._process_data(grads_list)
# Infer number of locations upon which convolution is applied.
self._num_locations = num_conv_locations(inputs[0].shape.as_list(),
self._strides)
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvInputKroneckerFactor,
(inputs, self._filter_shape, self._padding, self._strides,
self._dilation_rate, self._data_format, self._extract_patches_fn,
self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvOutputKroneckerFactor, (grads_list,))
self._setup_damping(damping, normalization=self._num_locations)
@property
def _renorm_coeff(self):
return self._num_locations
class DepthwiseConvDiagonalFB(ConvDiagonalFB):
"""FisherBlock for depthwise_conv2d().
Equivalent to ConvDiagonalFB applied to each input channel in isolation.
"""
def __init__(self,
layer_collection,
params,
strides,
padding,
rate=None,
data_format=None):
"""Creates a DepthwiseConvKFCBasicFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: Tensor of shape [filter_height, filter_width, in_channels,
channel_multiplier].
strides: List of 4 ints. Strides along all dimensions.
padding: str. Padding method.
rate: List of 4 ints or None. Rate for dilation along all dimensions.
data_format: str or None. Format of input data.
Raises:
NotImplementedError: If parameters contains bias.
ValueError: If filter is not 4-D.
ValueError: If strides is not length-4.
ValueError: If rates is not length-2.
ValueError: If channels are not last dimension.
"""
if isinstance(params, (tuple, list)):
raise NotImplementedError("Bias not yet supported.")
if params.shape.ndims != 4:
raise ValueError("Filter must be 4-D.")
if len(strides) != 4:
raise ValueError("strides must account for 4 dimensions.")
if rate is not None:
if len(rate) != 2:
raise ValueError("rate must only account for spatial dimensions.")
rate = [1, rate[0], rate[1], 1] # conv2d expects 4-element rate.
if not utils.is_data_format_channel_last(data_format):
raise ValueError("data_format must be channels-last.")
super(DepthwiseConvDiagonalFB, self).__init__(
layer_collection=layer_collection,
params=params,
strides=strides,
padding=padding,
dilations=rate,
data_format=data_format)
# This is a hack to overwrite the same setting in ConvKFCBasicFB.__init__().
filter_height, filter_width, in_channels, channel_multiplier = (
params.shape.as_list())
self._filter_shape = (filter_height, filter_width, in_channels,
in_channels * channel_multiplier)
def multiply_matpower(self, vector, exp):
conv2d_vector = depthwise_conv2d_filter_to_conv2d_filter(vector)
conv2d_result = super(DepthwiseConvDiagonalFB, self).multiply_matpower(
conv2d_vector, exp)
return conv2d_filter_to_depthwise_conv2d_filter(conv2d_result)
class DepthwiseConvKFCBasicFB(ConvKFCBasicFB):
"""FisherBlock for depthwise_conv2d().
Equivalent to ConvKFCBasicFB applied to each input channel in isolation.
"""
def __init__(self,
layer_collection,
params,
strides,
padding,
rate=None,
data_format=None):
"""Creates a DepthwiseConvKFCBasicFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: Tensor of shape [filter_height, filter_width, in_channels,
channel_multiplier].
strides: List of 4 ints. Strides along all dimensions.
padding: str. Padding method.
rate: List of 4 ints or None. Rate for dilation along all dimensions.
data_format: str or None. Format of input data.
Raises:
NotImplementedError: If parameters contains bias.
ValueError: If filter is not 4-D.
ValueError: If strides is not length-4.
ValueError: If rates is not length-2.
ValueError: If channels are not last dimension.
"""
if isinstance(params, (tuple, list)):
raise NotImplementedError("Bias not yet supported.")
if params.shape.ndims != 4:
raise ValueError("Filter must be 4-D.")
if len(strides) != 4:
raise ValueError("strides must account for 4 dimensions.")
if rate is not None:
if len(rate) != 2:
raise ValueError("rate must only account for spatial dimensions.")
rate = [1, rate[0], rate[1], 1] # conv2d expects 4-element rate.
if not utils.is_data_format_channel_last(data_format):
raise ValueError("data_format must be channels-last.")
super(DepthwiseConvKFCBasicFB, self).__init__(
layer_collection=layer_collection,
params=params,
padding=padding,
strides=strides,
dilation_rate=rate,
data_format=data_format,
extract_patches_fn="extract_image_patches")
# This is a hack to overwrite the same setting in ConvKFCBasicFB.__init__().
filter_height, filter_width, in_channels, channel_multiplier = (
params.shape.as_list())
self._filter_shape = (filter_height, filter_width, in_channels,
in_channels * channel_multiplier)
def multiply_matpower(self, vector, exp):
conv2d_vector = depthwise_conv2d_filter_to_conv2d_filter(vector)
conv2d_result = super(DepthwiseConvKFCBasicFB, self).multiply_matpower(
conv2d_vector, exp)
return conv2d_filter_to_depthwise_conv2d_filter(conv2d_result)
def depthwise_conv2d_filter_to_conv2d_filter(filter, name=None): # pylint: disable=redefined-builtin
"""Converts a convolution filter for use with conv2d.
Transforms a filter for use with tf.nn.depthwise_conv2d() to one that's
compatible with tf.nn.conv2d().
Args:
filter: Tensor of shape [height, width, in_channels, channel_multiplier].
name: None or str. Name of Op.
Returns:
Tensor of shape [height, width, in_channels, out_channels].
"""
with ops.name_scope(name, "depthwise_conv2d_filter_to_conv2d_filter",
[filter]):
filter = ops.convert_to_tensor(filter)
filter_height, filter_width, in_channels, channel_multiplier = (
filter.shape.as_list())
results = []
for i in range(in_channels):
# Slice out one in_channel's filter. Insert zeros around it to force it
# to affect that channel and that channel alone.
elements = []
if i > 0:
elements.append(
array_ops.zeros(
[filter_height, filter_width, i, channel_multiplier]))
elements.append(filter[:, :, i:(i + 1), :])
if i + 1 < in_channels:
elements.append(
array_ops.zeros([
filter_height, filter_width, in_channels - (i + 1),
channel_multiplier
]))
# Concat along in_channel.
results.append(
array_ops.concat(elements, axis=-2, name="in_channel_%d" % i))
# Concat along out_channel.
return array_ops.concat(results, axis=-1, name="out_channel")
def conv2d_filter_to_depthwise_conv2d_filter(filter, name=None): # pylint: disable=redefined-builtin
"""Converts a convolution filter for use with depthwise_conv2d.
Transforms a filter for use with tf.nn.conv2d() to one that's
compatible with tf.nn.depthwise_conv2d(). Ignores all filters but those along
the diagonal.
Args:
filter: Tensor of shape [height, width, in_channels, out_channels].
name: None or str. Name of Op.
Returns:
Tensor of shape,
[height, width, in_channels, channel_multiplier]
Raises:
ValueError: if out_channels is not evenly divisible by in_channels.
"""
with ops.name_scope(name, "conv2d_filter_to_depthwise_conv2d_filter",
[filter]):
filter = ops.convert_to_tensor(filter)
filter_height, filter_width, in_channels, out_channels = (
filter.shape.as_list())
if out_channels % in_channels != 0:
raise ValueError("out_channels must be evenly divisible by in_channels.")
channel_multiplier = out_channels // in_channels
results = []
filter = array_ops.reshape(filter, [
filter_height, filter_width, in_channels, in_channels,
channel_multiplier
])
for i in range(in_channels):
# Slice out output corresponding to the correct filter.
filter_slice = array_ops.reshape(
filter[:, :, i, i, :],
[filter_height, filter_width, 1, channel_multiplier])
results.append(filter_slice)
# Concat along out_channel.
return array_ops.concat(results, axis=-2, name="in_channels")
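# Shape sketch (illustrative): a depthwise filter of shape [3, 3, 2, 4]
# (height, width, in_channels=2, channel_multiplier=4) is mapped by
# depthwise_conv2d_filter_to_conv2d_filter() to a conv2d filter of shape
# [3, 3, 2, 8], and conv2d_filter_to_depthwise_conv2d_filter() maps a
# [3, 3, 2, 8] filter back to [3, 3, 2, 4], keeping only the block-diagonal
# (channel i -> outputs of channel i) entries.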
def maybe_tuple(obj):
if not isinstance(obj, list):
return obj
return tuple(obj)
def num_conv_locations(input_shape, strides):
"""Returns the number of spatial locations a 2D Conv kernel is applied to.
Args:
input_shape: List of ints representing shape of inputs to
tf.nn.convolution().
strides: List of ints representing strides along spatial dimensions as
passed in to tf.nn.convolution().
Returns:
A scalar |T| denoting the number of spatial locations for the Conv layer.
"""
spatial_input_locations = np.prod(input_shape[1:-1])
if strides is None:
spatial_strides_divisor = 1
else:
spatial_strides_divisor = np.prod(strides)
return spatial_input_locations // spatial_strides_divisor
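# Worked example (illustrative): for inputs of shape [batch, 32, 32, 3] and
# strides [1, 2, 2, 1], spatial_input_locations = 32 * 32 = 1024 and
# np.prod(strides) = 4, so num_conv_locations returns 256.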
class InputOutputMultiTowerMultiUse(InputOutputMultiTower):
"""Adds methods for multi-use/time-step case to InputOutputMultiTower."""
def __init__(self, num_uses=None, *args, **kwargs):
self._num_uses = num_uses
super(InputOutputMultiTowerMultiUse, self).__init__(*args, **kwargs)
def _process_data(self, grads_list):
"""Process temporal/multi-use data into the format used by the factors.
This function takes inputs and grads_lists data and processes it into
one of the formats expected by the FisherFactor classes (depending on
the value of the global configuration variable TOWER_STRATEGY).
It accepts the data in one of two initial formats. The first possible
format is where self._inputs is a list of list of Tensors. The first index
is tower, the second is use/time-step. grads_list, meanwhile, is a list
over sources of such lists of lists.
The second possible data format is where self._inputs is a Tensor with
uses/times-steps folded into the batch dimension. i.e. it is a Tensor
of shape [num_uses * size_batch, ...] which represents a reshape of a
Tensor of shape [num_uses, size_batch, ...]. And similarly grads_list is
a list over sources of such Tensors.
There are two possible formats which inputs and grads_list are transformed
into.
If TOWER_STRATEGY is "concat", 'inputs' becomes a tuple containing
a single tensor (represented as a PartitionedTensor object) with all of
the data from the towers, as well as the uses/time-steps, concatenated
together. In this tensor the leading dimension is the batch and
use/time-step dimensions folded together (with 'use' being the major of
these two, so that the tensors can be thought of as reshapes of ones of
shape [num_uses, batch_size, ...]). grads_list is similarly formatted as a
tuple over sources of such tensors.
If TOWER_STRATEGY is "separate" the inputs are formatted into lists of
tensors over towers. Each of these tensors has a similar format to
the tensor produced by the "concat" option, except that each contains
only the data from a single tower. grads_list is similarly formatted
into a tuple over sources of such tuples.
Args:
grads_list: grads_list in its initial format (see above).
Returns:
inputs: self._inputs transformed into the appropriate format (see
above).
grads_list: grads_list transformed into the appropriate format (see
above).
Raises:
ValueError: If TOWER_STRATEGY is not one of "separate" or "concat".
ValueError: If the given/initial format of self._inputs and grads_list
isn't recognized, or doesn't agree with self._num_uses.
"""
inputs = self._inputs
if isinstance(inputs[0], (list, tuple)):
num_uses = len(inputs[0])
if self._num_uses is not None and self._num_uses != num_uses:
raise ValueError("num_uses argument doesn't match length of inputs.")
else:
self._num_uses = num_uses
# Check that all mini-batches/towers have the same number of uses
if not all(len(input_) == num_uses for input_ in inputs):
raise ValueError("Length of inputs argument is inconsistent across "
"towers.")
if fisher_factors.TOWER_STRATEGY == "concat":
# Reverse the tower and use/time-step indices, so that use is now first,
# and towers is second
inputs = tuple(zip(*inputs))
# Flatten the two dimensions
inputs = nest.flatten(inputs)
# Merge everything together into a PartitionedTensor. We package it in
# a singleton tuple since the factors will expect a list over towers
inputs = (utils.PartitionedTensor(inputs),)
elif fisher_factors.TOWER_STRATEGY == "separate":
# Merge together the uses/time-step dimension into PartitionedTensors,
# but keep the leading dimension (towers) intact for the factors to
# process individually.
inputs = tuple(utils.PartitionedTensor(input_) for input_ in inputs)
else:
raise ValueError("Global config variable TOWER_STRATEGY must be one of "
"'concat' or 'separate'.")
# Now we perform the analogous processing for grads_list
if isinstance(grads_list[0][0], (list, tuple)):
num_uses = len(grads_list[0][0])
if self._num_uses is not None and self._num_uses != num_uses:
raise ValueError("num_uses argument doesn't match length of outputs, "
"or length of outputs is inconsistent with length of "
"inputs.")
else:
self._num_uses = num_uses
if not all(len(grad) == num_uses for grads in grads_list
for grad in grads):
raise ValueError("Length of outputs argument is inconsistent across "
"towers.")
if fisher_factors.TOWER_STRATEGY == "concat":
# Reverse the tower and use/time-step indices, so that use is now first,
# and towers is second
grads_list = tuple(tuple(zip(*grads)) for grads in grads_list)
# Flatten the two dimensions, leaving the leading dimension (source)
# intact
grads_list = tuple(nest.flatten(grads) for grads in grads_list)
# Merge inner dimensions together into PartitionedTensors. We package
# them in a singleton tuple since the factors will expect a list over
# towers
grads_list = tuple((utils.PartitionedTensor(grads),)
for grads in grads_list)
elif fisher_factors.TOWER_STRATEGY == "separate":
# Merge together the uses/time-step dimension into PartitionedTensors,
# but keep the leading dimension (towers) intact for the factors to
# process individually.
grads_list = tuple(tuple(utils.PartitionedTensor(grad)
for grad in grads)
for grads in grads_list)
else:
raise ValueError("Global config variable TOWER_STRATEGY must be one of "
"'concat' or 'separate'.")
if self._num_uses is None:
raise ValueError("You must supply a value for the num_uses argument if "
"the number of uses cannot be inferred from inputs or "
"outputs arguments (e.g. if they are both given in the "
"single Tensor format, instead of as lists of Tensors.")
return inputs, grads_list
class FullyConnectedMultiIndepFB(InputOutputMultiTowerMultiUse,
KroneckerProductFB):
"""FisherBlock for fully-connected layers that share parameters.
This class implements the "independence across time" approximation from the
following paper:
https://openreview.net/pdf?id=HyMTkQZAb
"""
def __init__(self, layer_collection, has_bias=False, num_uses=None):
"""Creates a FullyConnectedMultiIndepFB block.
Args:
layer_collection: LayerCollection instance.
has_bias: bool. If True, estimates Fisher with respect to a bias
parameter as well as the layer's parameters.
num_uses: int or None. Number of uses of the layer in the model's graph.
Only required if the data is formatted with uses/time folded into the
batch dimension (instead of uses/time being a list dimension).
(Default: None)
"""
self._has_bias = has_bias
super(FullyConnectedMultiIndepFB, self).__init__(
layer_collection=layer_collection,
num_uses=num_uses)
def instantiate_factors(self, grads_list, damping):
inputs, grads_list = self._process_data(grads_list)
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF,
((inputs,), self._num_uses, self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF, (grads_list, self._num_uses))
self._setup_damping(damping, normalization=self._num_uses)
@property
def _renorm_coeff(self):
return float(self._num_uses)
class ConvKFCBasicMultiIndepFB(InputOutputMultiTowerMultiUse,
KroneckerProductFB):
"""FisherBlock for 2D convolutional layers using the basic KFC approx.
Similar to ConvKFCBasicFB except that this version supports multiple
uses/time-steps via a standard independence approximation. Similar to the
"independence across time" used in FullyConnectedMultiIndepFB but generalized
in the obvious way to conv layers.
"""
def __init__(self,
layer_collection,
params,
padding,
strides=None,
dilation_rate=None,
data_format=None,
extract_patches_fn=None,
num_uses=None):
"""Creates a ConvKFCBasicMultiIndepFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters (Tensor or tuple of Tensors) of this layer. If
kernel alone, a Tensor of shape [..spatial_filter_shape..,
in_channels, out_channels]. If kernel and bias, a tuple of 2 elements
containing the previous and a Tensor of shape [out_channels].
padding: str. Padding method.
strides: List of ints or None. Contains [..spatial_filter_strides..] if
'extract_patches_fn' is compatible with tf.nn.convolution(), else
[1, ..spatial_filter_strides, 1].
dilation_rate: List of ints or None. Rate for dilation along each spatial
dimension if 'extract_patches_fn' is compatible with
tf.nn.convolution(), else [1, ..spatial_dilation_rates.., 1].
data_format: str or None. Format of input data.
extract_patches_fn: str or None. Name of function that extracts image
patches. One of "extract_convolution_patches", "extract_image_patches",
"extract_pointwise_conv2d_patches".
num_uses: int or None. Number of uses of the layer in the model's graph.
Only required if the data is formatted with uses/time folded into the
batch dimension (instead of uses/time being a list dimension).
(Default: None)
"""
self._padding = padding
self._strides = maybe_tuple(strides)
self._dilation_rate = maybe_tuple(dilation_rate)
self._data_format = data_format
self._extract_patches_fn = extract_patches_fn
self._has_bias = isinstance(params, (tuple, list))
fltr = params[0] if self._has_bias else params
self._filter_shape = tuple(fltr.shape.as_list())
super(ConvKFCBasicMultiIndepFB, self).__init__(
layer_collection=layer_collection,
num_uses=num_uses)
def instantiate_factors(self, grads_list, damping):
inputs, grads_list = self._process_data(grads_list)
# Infer number of locations upon which convolution is applied.
self._num_locations = num_conv_locations(inputs[0].shape.as_list(),
self._strides)
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvInputKroneckerFactor,
(inputs, self._filter_shape, self._padding, self._strides,
self._dilation_rate, self._data_format, self._extract_patches_fn,
self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvOutputKroneckerFactor, (grads_list,))
self._setup_damping(damping, normalization=
(self._num_locations * self._num_uses))
@property
def _renorm_coeff(self):
return self._num_locations * self._num_uses
class EmbeddingKFACMultiIndepFB(InputOutputMultiTowerMultiUse,
KroneckerProductFB):
"""K-FAC FisherBlock for embedding layers used multiple times in the graph.
Similar to EmbeddingKFACFB except that this version supports multiple uses
of the parameter within a single model. These uses could correspond to time
steps in an RNN architecture, but they don't have to.
Does not support bias parameters.
"""
def __init__(self, layer_collection, vocab_size, num_uses=None):
"""Creates a EmbeddingKFACMultiIndepFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
vocab_size: int. Size of vocabulary for this embedding layer.
num_uses: int or None. Number of uses of the layer in the model's graph.
Only required if the data is formatted with time folded into the batch
dimension (instead of time being a list dimension). (Default: None)
"""
self._vocab_size = vocab_size
super(EmbeddingKFACMultiIndepFB, self).__init__(
layer_collection=layer_collection,
num_uses=num_uses)
def instantiate_factors(self, grads_list, damping):
"""Instantiate Kronecker Factors for this FisherBlock.
Args:
grads_list: List of list of list of Tensors. grads_list[i][j][k] is the
gradient of the loss with respect to 'outputs' from source 'i',
tower/mini-batch 'j', and use/time-step 'k'. Each Tensor has shape
[tower_minibatch_size, output_size].
damping: 0-D Tensor or float. 'damping' * identity is approximately added
to this FisherBlock's Fisher approximation.
"""
inputs, grads_list = self._process_data(grads_list)
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.EmbeddingInputKroneckerFactor,
(inputs, self._vocab_size))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF, (grads_list, self._num_uses))
self._setup_damping(damping, normalization=self._num_uses)
@property
def _renorm_coeff(self):
return float(self._num_uses)
class SeriesFBApproximation(enum.IntEnum):
"""See FullyConnectedSeriesFB.__init__ for description and usage."""
option1 = 1
option2 = 2
class FullyConnectedSeriesFB(InputOutputMultiTowerMultiUse,
KroneckerProductFB):
"""FisherBlock for fully-connected layers that share parameters across time.
This class implements the "Option 1" and "Option 2" approximation from the
following paper:
https://openreview.net/pdf?id=HyMTkQZAb
See the end of the appendix of the paper for a pseudo-code of the
algorithm being implemented by multiply_matpower here. Note that we are
using pre-computed versions of certain matrix-matrix products to speed
things up. This is explicitly explained wherever it is done.
"""
def __init__(self,
layer_collection,
has_bias=False,
num_uses=None,
option=SeriesFBApproximation.option2):
"""Constructs a new `FullyConnectedSeriesFB`.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
has_bias: Whether the layer includes a bias parameter.
num_uses: int or None. Number of time-steps over which the layer
is used. Only required if the data is formatted with time folded into
the batch dimension (instead of time being a list dimension).
(Default: None)
option: A `SeriesFBApproximation` specifying the simplifying assumption
to be used in this block. `option1` approximates the cross-covariance
over time as a symmetric matrix, while `option2` makes
the assumption that training sequences are infinitely long. See section
3.5 of the paper for more details.
"""
self._has_bias = has_bias
self._option = option
super(FullyConnectedSeriesFB, self).__init__(
layer_collection=layer_collection,
num_uses=num_uses)
@property
def _num_timesteps(self):
return self._num_uses
@property
def _renorm_coeff(self):
# This should no longer be used since the multiply_X functions from the base
# class have been overridden
assert False
def instantiate_factors(self, grads_list, damping):
inputs, grads_list = self._process_data(grads_list)
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF,
((inputs,), self._num_uses, self._has_bias))
self._input_factor.register_cov_dt1()
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF, (grads_list, self._num_uses))
self._output_factor.register_cov_dt1()
self._setup_damping(damping, normalization=self._num_uses)
def register_matpower(self, exp):
if exp != -1:
raise NotImplementedError("FullyConnectedSeriesFB only supports inverse"
"multiplications.")
if self._option == SeriesFBApproximation.option1:
self._input_factor.register_option1quants(self._input_damping_func)
self._output_factor.register_option1quants(self._output_damping_func)
elif self._option == SeriesFBApproximation.option2:
self._input_factor.register_option2quants(self._input_damping_func)
self._output_factor.register_option2quants(self._output_damping_func)
else:
raise ValueError(
"Unrecognized FullyConnectedSeriesFB approximation: {}".format(
self._option))
def multiply_matpower(self, vector, exp):
if exp != -1:
raise NotImplementedError("FullyConnectedSeriesFB only supports inverse"
"multiplications.")
# pylint: disable=invalid-name
Z = utils.layer_params_to_mat2d(vector)
# Derivations were done for "batch_dim==1" case so we need to convert to
# that orientation:
Z = array_ops.transpose(Z)
if self._option == SeriesFBApproximation.option1:
# Note that \\(L_A = A0^{-1/2} * U_A and L_G = G0^{-1/2} * U_G.\\)
L_A, psi_A = self._input_factor.get_option1quants(
self._input_damping_func)
L_G, psi_G = self._output_factor.get_option1quants(
self._output_damping_func)
def gamma(x):
# We are assuming that each case has the same number of time-steps.
# If this stops being the case one shouldn't simply replace this T
# with its average value. Instead, one needs to go back to the
# definition of the gamma function from the paper.
T = self._num_timesteps
return (1 - x)**2 / (T * (1 - x**2) - 2 * x * (1 - x**T))
# \\(Y = \gamma( psi_G*psi_A^T )\\) (computed element-wise)
# Even though Y is Z-independent we are recomputing it from the psi's
      # each time, since Y depends on both A and G quantities and it is relatively
# cheap to compute.
Y = gamma(array_ops.reshape(psi_G, [int(psi_G.shape[0]), -1]) * psi_A)
# \\(Z = L_G^T * Z * L_A\\)
# This is equivalent to the following computation from the original
# pseudo-code:
# \\(Z = G0^{-1/2} * Z * A0^{-1/2}\\)
# \\(Z = U_G^T * Z * U_A\\)
Z = math_ops.matmul(L_G, math_ops.matmul(Z, L_A), transpose_a=True)
# \\(Z = Z .* Y\\)
Z *= Y
# \\(Z = L_G * Z * L_A^T\\)
# This is equivalent to the following computation from the original
# pseudo-code:
# \\(Z = U_G * Z * U_A^T\\)
# \\(Z = G0^{-1/2} * Z * A0^{-1/2}\\)
Z = math_ops.matmul(L_G, math_ops.matmul(Z, L_A, transpose_b=True))
elif self._option == SeriesFBApproximation.option2:
# Note that \\(P_A = A_1^T * A_0^{-1} and P_G = G_1^T * G_0^{-1}\\),
# and \\(K_A = A_0^{-1/2} * E_A\ and\ K_G = G_0^{-1/2} * E_G.\\)
P_A, K_A, mu_A = self._input_factor.get_option2quants(
self._input_damping_func)
P_G, K_G, mu_G = self._output_factor.get_option2quants(
self._output_damping_func)
# Our approach differs superficially from the pseudo-code in the paper
# in order to reduce the total number of matrix-matrix multiplies.
# In particular, the first three computations in the pseudo code are
# \\(Z = G0^{-1/2} * Z * A0^{-1/2}\\)
# \\(Z = Z - hPsi_G^T * Z * hPsi_A\\)
# \\(Z = E_G^T * Z * E_A\\)
      # Noting that \\(hPsi = C0^{-1/2} * C1 * C0^{-1/2}\\), so that
# \\(C0^{-1/2} * hPsi = C0^{-1} * C1 * C0^{-1/2} = P^T * C0^{-1/2}\\)
# the entire computation can be written as
# \\(Z = E_G^T * (G0^{-1/2} * Z * A0^{-1/2}\\)
# \\( - hPsi_G^T * G0^{-1/2} * Z * A0^{-1/2} * hPsi_A) * E_A\\)
# \\( = E_G^T * (G0^{-1/2} * Z * A0^{-1/2}\\)
# \\( - G0^{-1/2} * P_G * Z * P_A^T * A0^{-1/2}) * E_A\\)
# \\( = E_G^T * G0^{-1/2} * Z * A0^{-1/2} * E_A\\)
# \\( - E_G^T* G0^{-1/2} * P_G * Z * P_A^T * A0^{-1/2} * E_A\\)
# \\( = K_G^T * Z * K_A - K_G^T * P_G * Z * P_A^T * K_A\\)
# This final expression is computed by the following two lines:
# \\(Z = Z - P_G * Z * P_A^T\\)
Z -= math_ops.matmul(P_G, math_ops.matmul(Z, P_A, transpose_b=True))
# \\(Z = K_G^T * Z * K_A\\)
Z = math_ops.matmul(K_G, math_ops.matmul(Z, K_A), transpose_a=True)
# \\(Z = Z ./ (1*1^T - mu_G*mu_A^T)\\)
# Be careful with the outer product. We don't want to accidentally
# make it an inner-product instead.
tmp = 1.0 - array_ops.reshape(mu_G, [int(mu_G.shape[0]), -1]) * mu_A
# Prevent some numerical issues by setting any 0.0 eigs to 1.0
tmp += 1.0 * math_ops.cast(math_ops.equal(tmp, 0.0), dtype=tmp.dtype)
Z /= tmp
# We now perform the transpose/reverse version of the operations
# derived above, whose derivation from the original pseudo-code is
      # analogous.
# \\(Z = K_G * Z * K_A^T\\)
Z = math_ops.matmul(K_G, math_ops.matmul(Z, K_A, transpose_b=True))
# \\(Z = Z - P_G^T * Z * P_A\\)
Z -= math_ops.matmul(P_G, math_ops.matmul(Z, P_A), transpose_a=True)
# \\(Z = normalize (1/E[T]) * Z\\)
# Note that this normalization is done because we compute the statistics
# by averaging, not summing, over time. (And the gradient is presumably
# summed over time, not averaged, and thus their scales are different.)
Z /= math_ops.cast(self._num_timesteps, Z.dtype)
# Convert back to the "batch_dim==0" orientation.
Z = array_ops.transpose(Z)
return utils.mat2d_to_layer_params(vector, Z)
# pylint: enable=invalid-name
| allenlavoie/tensorflow | tensorflow/contrib/kfac/python/ops/fisher_blocks.py | Python | apache-2.0 | 63,050 |
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# tcpaccept Trace TCP accept()s.
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: tcpaccept [-h] [-t] [-p PID]
#
# This uses dynamic tracing of the kernel inet_csk_accept() socket function
# (from tcp_prot.accept), and will need to be modified to match kernel changes.
#
# Copyright (c) 2015 Brendan Gregg.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 13-Oct-2015 Brendan Gregg Created this.
# 14-Feb-2016 " " Switch to bpf_perf_output.
from __future__ import print_function
from bcc import BPF
from socket import inet_ntop, AF_INET, AF_INET6
from struct import pack
import argparse
from bcc.utils import printb
# arguments
examples = """examples:
./tcpaccept # trace all TCP accept()s
./tcpaccept -t # include timestamps
./tcpaccept -p 181 # only trace PID 181
"""
parser = argparse.ArgumentParser(
description="Trace TCP accepts",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-t", "--timestamp", action="store_true",
help="include timestamp on output")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
debug = 0
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>
// separate data structs for ipv4 and ipv6
struct ipv4_data_t {
u64 ts_us;
u32 pid;
u32 saddr;
u32 daddr;
u64 ip;
u16 lport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv4_events);
struct ipv6_data_t {
u64 ts_us;
u32 pid;
unsigned __int128 saddr;
unsigned __int128 daddr;
u64 ip;
u16 lport;
char task[TASK_COMM_LEN];
};
BPF_PERF_OUTPUT(ipv6_events);
"""
#
# The following is the code for older kernels(Linux pre-4.16).
# It uses kprobes to instrument inet_csk_accept(). On Linux 4.16 and
# later, the sock:inet_sock_set_state tracepoint should be used instead, as
# is done by the code that follows this.
#
bpf_text_kprobe = """
int kretprobe__inet_csk_accept(struct pt_regs *ctx)
{
struct sock *newsk = (struct sock *)PT_REGS_RC(ctx);
u32 pid = bpf_get_current_pid_tgid();
##FILTER_PID##
if (newsk == NULL)
return 0;
// check this is TCP
u8 protocol = 0;
// workaround for reading the sk_protocol bitfield:
    // Following comments added by Joe Yin:
    // Unfortunately, this no longer works since Linux 4.10, because
    // sk_wmem_queued no longer directly follows the sk_protocol bitfield;
    // the member that now follows it is sk_gso_max_segs.
    // So we can use this instead:
    // bpf_probe_read(&protocol, 1, (void *)((u64)&newsk->sk_gso_max_segs) - 3);
    // To tell pre-4.10 and 4.10+ kernels apart, compare the offsets of
    // sk_gso_max_segs and sk_lingertime: on 4.10+ kernels sk_lingertime sits
    // 4 bytes after sk_gso_max_segs.
int gso_max_segs_offset = offsetof(struct sock, sk_gso_max_segs);
int sk_lingertime_offset = offsetof(struct sock, sk_lingertime);
if (sk_lingertime_offset - gso_max_segs_offset == 4)
// 4.10+ with little endian
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
protocol = *(u8 *)((u64)&newsk->sk_gso_max_segs - 3);
else
// pre-4.10 with little endian
protocol = *(u8 *)((u64)&newsk->sk_wmem_queued - 3);
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
// 4.10+ with big endian
protocol = *(u8 *)((u64)&newsk->sk_gso_max_segs - 1);
else
// pre-4.10 with big endian
protocol = *(u8 *)((u64)&newsk->sk_wmem_queued - 1);
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif
if (protocol != IPPROTO_TCP)
return 0;
// pull in details
u16 family = 0, lport = 0;
family = newsk->__sk_common.skc_family;
lport = newsk->__sk_common.skc_num;
if (family == AF_INET) {
struct ipv4_data_t data4 = {.pid = pid, .ip = 4};
data4.ts_us = bpf_ktime_get_ns() / 1000;
data4.saddr = newsk->__sk_common.skc_rcv_saddr;
data4.daddr = newsk->__sk_common.skc_daddr;
data4.lport = lport;
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(ctx, &data4, sizeof(data4));
} else if (family == AF_INET6) {
struct ipv6_data_t data6 = {.pid = pid, .ip = 6};
data6.ts_us = bpf_ktime_get_ns() / 1000;
bpf_probe_read(&data6.saddr, sizeof(data6.saddr),
&newsk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
bpf_probe_read(&data6.daddr, sizeof(data6.daddr),
&newsk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
data6.lport = lport;
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(ctx, &data6, sizeof(data6));
}
// else drop
return 0;
}
"""
bpf_text_tracepoint = """
TRACEPOINT_PROBE(sock, inet_sock_set_state)
{
if (args->protocol != IPPROTO_TCP)
return 0;
u32 pid = bpf_get_current_pid_tgid();
##FILTER_PID##
// pull in details
u16 family = 0, lport = 0;
family = args->family;
lport = args->sport;
if (family == AF_INET) {
struct ipv4_data_t data4 = {.pid = pid, .ip = 4};
data4.ts_us = bpf_ktime_get_ns() / 1000;
__builtin_memcpy(&data4.saddr, args->saddr, sizeof(data4.saddr));
__builtin_memcpy(&data4.daddr, args->daddr, sizeof(data4.daddr));
data4.lport = lport;
bpf_get_current_comm(&data4.task, sizeof(data4.task));
ipv4_events.perf_submit(args, &data4, sizeof(data4));
} else if (family == AF_INET6) {
struct ipv6_data_t data6 = {.pid = pid, .ip = 6};
data6.ts_us = bpf_ktime_get_ns() / 1000;
__builtin_memcpy(&data6.saddr, args->saddr, sizeof(data6.saddr));
__builtin_memcpy(&data6.daddr, args->daddr, sizeof(data6.daddr));
data6.lport = lport;
bpf_get_current_comm(&data6.task, sizeof(data6.task));
ipv6_events.perf_submit(args, &data6, sizeof(data6));
}
// else drop
return 0;
}
"""
if (BPF.tracepoint_exists("sock", "inet_sock_set_state")):
bpf_text += bpf_text_tracepoint
else:
bpf_text += bpf_text_kprobe
# code substitutions
if args.pid:
bpf_text = bpf_text.replace('##FILTER_PID##',
'if (pid != %s) { return 0; }' % args.pid)
else:
bpf_text = bpf_text.replace('##FILTER_PID##', '')
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# process event
def print_ipv4_event(cpu, data, size):
event = b["ipv4_events"].event(data)
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
print("%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), end="")
printb(b"%-6d %-12.12s %-2d %-16s %-16s %-4d" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET, pack("I", event.daddr)).encode(),
inet_ntop(AF_INET, pack("I", event.saddr)).encode(),
event.lport))
def print_ipv6_event(cpu, data, size):
event = b["ipv6_events"].event(data)
global start_ts
if args.timestamp:
if start_ts == 0:
start_ts = event.ts_us
print("%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), end="")
printb(b"%-6d %-12.12s %-2d %-16s %-16s %-4d" % (event.pid,
event.task, event.ip,
inet_ntop(AF_INET6, event.daddr).encode(),
inet_ntop(AF_INET6, event.saddr).encode(),
event.lport))
# initialize BPF
b = BPF(text=bpf_text)
# header
if args.timestamp:
print("%-9s" % ("TIME(s)"), end="")
print("%-6s %-12s %-2s %-16s %-16s %-4s" % ("PID", "COMM", "IP", "RADDR",
"LADDR", "LPORT"))
start_ts = 0
# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
while 1:
try:
b.perf_buffer_poll()
except KeyboardInterrupt:
exit()
| mcaleavya/bcc | tools/tcpaccept.py | Python | apache-2.0 | 8,042 |
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module contains all the classes that handle the intermediate
representation language. It is basically the REIL language with minor
changes. Below there is an overview of the REIL language and its
instruction format. For full details see "REIL: A platform-independent
intermediate representation of disassembled code for static code
analysis."
All algorithms within the framework are designed to operate on the
intermediate representation. This provides great flexibility when it
comes to implement a cross-platform framework.
Instruction Format
------------------
mnemonic oprnd1, oprnd2, oprnd3
Instructions
------------
Arithmetic : ADD, SUB, MUL, DIV, MOD, BSH
Bitwise : AND, OR, XOR
Data Transfer : LDM, STM, STR
Conditional : BISZ, JCC
Other : UNDEF, UNKN, NOP
"""
# Display operands size in instruction
show_size = True
# TODO: Create module util and move this function there.
def split_address(address):
return address >> 0x08, address & 0xff
class ReilMnemonic(object):
"""Enumeration of IR mnemonics.
"""
# Arithmetic Instructions
ADD = 1
SUB = 2
MUL = 3
DIV = 4
MOD = 5
BSH = 6
# Bitwise Instructions
AND = 7
OR = 8
XOR = 9
# Data Transfer Instructions
LDM = 10
STM = 11
STR = 12
# Conditional Instructions
BISZ = 13
JCC = 14
# Other Instructions
UNKN = 15
UNDEF = 16
NOP = 17
# Added Instructions
RET = 18
# Extensions
SEXT = 19
SDIV = 20
SMOD = 21
@staticmethod
def to_string(mnemonic):
"""Return the string representation of the given mnemonic.
"""
strings = {
# Arithmetic Instructions
ReilMnemonic.ADD : "add",
ReilMnemonic.SUB : "sub",
ReilMnemonic.MUL : "mul",
ReilMnemonic.DIV : "div",
ReilMnemonic.MOD : "mod",
ReilMnemonic.BSH : "bsh",
# Bitwise Instructions
ReilMnemonic.AND : "and",
ReilMnemonic.OR : "or",
ReilMnemonic.XOR : "xor",
# Data Transfer Instructions
ReilMnemonic.LDM : "ldm",
ReilMnemonic.STM : "stm",
ReilMnemonic.STR : "str",
# Conditional Instructions
ReilMnemonic.BISZ : "bisz",
ReilMnemonic.JCC : "jcc",
# Other Instructions
ReilMnemonic.UNKN : "unkn" ,
ReilMnemonic.UNDEF : "undef" ,
ReilMnemonic.NOP : "nop" ,
# Added Instructions
ReilMnemonic.RET : "ret",
# Extensions
ReilMnemonic.SEXT : "sext",
ReilMnemonic.SDIV : "sdiv",
ReilMnemonic.SMOD : "smod",
}
return strings[mnemonic]
@staticmethod
def from_string(string):
"""Return the mnemonic represented by the given string.
"""
mnemonics = {
# Arithmetic Instructions
"add" : ReilMnemonic.ADD,
"sub" : ReilMnemonic.SUB,
"mul" : ReilMnemonic.MUL,
"div" : ReilMnemonic.DIV,
"mod" : ReilMnemonic.MOD,
"bsh" : ReilMnemonic.BSH,
# Bitwise Instructions
"and" : ReilMnemonic.AND,
"or" : ReilMnemonic.OR,
"xor" : ReilMnemonic.XOR,
# Data Transfer Instructions
"ldm" : ReilMnemonic.LDM,
"stm" : ReilMnemonic.STM,
"str" : ReilMnemonic.STR,
# Conditional Instructions
"bisz" : ReilMnemonic.BISZ,
"jcc" : ReilMnemonic.JCC,
# Other Instructions
"unkn" : ReilMnemonic.UNKN,
"undef" : ReilMnemonic.UNDEF,
"nop" : ReilMnemonic.NOP,
# Added Instructions
"ret" : ReilMnemonic.RET,
# Added Instructions
"sext" : ReilMnemonic.SEXT,
"sdiv" : ReilMnemonic.SDIV,
"smod" : ReilMnemonic.SMOD,
}
return mnemonics[string]
REIL_MNEMONICS = (
# Arithmetic Instructions
ReilMnemonic.ADD,
ReilMnemonic.SUB,
ReilMnemonic.MUL,
ReilMnemonic.DIV,
ReilMnemonic.MOD,
ReilMnemonic.BSH,
# Bitwise Instructions
ReilMnemonic.AND,
ReilMnemonic.OR,
ReilMnemonic.XOR,
# Data Transfer Instructions
ReilMnemonic.LDM,
ReilMnemonic.STM,
ReilMnemonic.STR,
# Conditional Instructions
ReilMnemonic.BISZ,
ReilMnemonic.JCC,
# Other Instructions
ReilMnemonic.UNKN,
ReilMnemonic.UNDEF,
ReilMnemonic.NOP,
# Added Instructions
ReilMnemonic.RET,
# Extensions
ReilMnemonic.SEXT,
ReilMnemonic.SDIV,
ReilMnemonic.SMOD,
)
class ReilInstruction(object):
"""Representation of a REIL instruction.
"""
__slots__ = [
'_mnemonic',
'_operands',
'_comment',
'_address',
]
def __init__(self):
# A REIL mnemonic
self._mnemonic = None
# A list of operand. Exactly 3.
self._operands = [ReilEmptyOperand()] * 3
# Optionally, a comment for the instruction.
self._comment = None
# A REIL address for the instruction.
self._address = None
@property
def mnemonic(self):
"""Get instruction mnemonic.
"""
return self._mnemonic
@property
def mnemonic_str(self):
"""Get instruction mnemonic as string.
"""
return ReilMnemonic.to_string(self._mnemonic)
@mnemonic.setter
def mnemonic(self, value):
"""Set instruction mnemonic.
"""
if value not in REIL_MNEMONICS:
raise Exception("Invalid instruction mnemonic : %s" % str(value))
self._mnemonic = value
@property
def operands(self):
"""Get instruction operands.
"""
return self._operands
@operands.setter
def operands(self, value):
"""Set instruction operands.
"""
if len(value) != 3:
raise Exception("Invalid instruction operands : %s" % str(value))
self._operands = value
@property
def address(self):
"""Get instruction address.
"""
return self._address
@address.setter
def address(self, value):
"""Set instruction address.
"""
self._address = value
@property
def comment(self):
"""Get instruction comment.
"""
return self._comment
@comment.setter
def comment(self, value):
"""Set instruction comment.
"""
self._comment = value
def __str__(self):
def print_oprnd(oprnd):
oprnd_str = str(oprnd)
sizes = {
256 : "DDQWORD",
128 : "DQWORD",
72 : "POINTER",
64 : "QWORD",
40 : "POINTER",
32 : "DWORD",
16 : "WORD",
8 : "BYTE",
1 : "BIT",
"" : "UNK",
}
if isinstance(oprnd, ReilEmptyOperand):
return "%s" % oprnd_str
else:
return "%s %s" % (sizes[oprnd.size if oprnd.size else ""], oprnd_str)
mnemonic_str = ReilMnemonic.to_string(self._mnemonic)
if show_size:
operands_str = ", ".join(map(print_oprnd, self._operands))
else:
operands_str = ", ".join(map(str, self._operands))
return "%-5s [%s]" % (mnemonic_str, operands_str)
def __hash__(self):
return hash(str(self))
def __getstate__(self):
state = {
'_mnemonic': self._mnemonic,
'_operands': self._operands,
'_comment': self._comment,
'_address': self._address
}
return state
def __setstate__(self, state):
self._mnemonic = state['_mnemonic']
self._operands = state['_operands']
self._comment = state['_comment']
self._address = state['_address']
class ReilOperand(object):
"""Representation of an IR instruction's operand.
"""
__slots__ = [
'_size',
]
def __init__(self, size):
# Size of the operand, in bits.
self._size = size
@property
def size(self):
"""Get operand size.
"""
return self._size
@size.setter
def size(self, value):
"""Set operand size.
"""
self._size = value
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {
'_size': self.size
}
return state
def __setstate__(self, state):
self._size = state['_size']
class ReilImmediateOperand(ReilOperand):
"""Representation of a REIL instruction immediate operand.
"""
__slots__ = [
'_immediate',
]
def __init__(self, immediate, size=None):
super(ReilImmediateOperand, self).__init__(size)
assert type(immediate) in [int, long], "Invalid immediate value type."
self._immediate = immediate
@property
def immediate(self):
"""Get immediate.
"""
if not self._size:
raise Exception("Operand size missing.")
return self._immediate & 2**self._size-1
def __str__(self):
if not self._size:
raise Exception("Operand size missing.")
string = hex(self._immediate & 2**self._size-1)
return string[:-1] if string[-1] == 'L' else string
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other.size and \
self._immediate == other.immediate
def __getstate__(self):
state = super(ReilImmediateOperand, self).__getstate__()
state['_immediate'] = self._immediate
return state
def __setstate__(self, state):
super(ReilImmediateOperand, self).__setstate__(state)
self._immediate = state['_immediate']
class ReilRegisterOperand(ReilOperand):
"""Representation of a REIL instruction register operand.
"""
__slots__ = [
'_name',
]
def __init__(self, name, size=None):
super(ReilRegisterOperand, self).__init__(size)
# Register name.
self._name = name
@property
def name(self):
"""Get IR register operand name.
"""
return self._name
def __str__(self):
return self._name
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other.size and \
self._name == other.name
def __getstate__(self):
state = super(ReilRegisterOperand, self).__getstate__()
state['_name'] = self._name
return state
def __setstate__(self, state):
super(ReilRegisterOperand, self).__setstate__(state)
self._name = state['_name']
class ReilEmptyOperand(ReilRegisterOperand):
"""Representation of an IR instruction's empty operand.
"""
def __init__(self):
super(ReilEmptyOperand, self).__init__("EMPTY", size=None)
class ReilInstructionBuilder(object):
"""REIL Instruction Builder. Generate REIL instructions, easily.
"""
# Arithmetic Instructions
# ======================================================================== #
def gen_add(self, src1, src2, dst):
"""Return an ADD instruction.
"""
return self.build(ReilMnemonic.ADD, src1, src2, dst)
def gen_sub(self, src1, src2, dst):
"""Return a SUB instruction.
"""
return self.build(ReilMnemonic.SUB, src1, src2, dst)
def gen_mul(self, src1, src2, dst):
"""Return a MUL instruction.
"""
return self.build(ReilMnemonic.MUL, src1, src2, dst)
def gen_div(self, src1, src2, dst):
"""Return a DIV instruction.
"""
return self.build(ReilMnemonic.DIV, src1, src2, dst)
def gen_mod(self, src1, src2, dst):
"""Return a MOD instruction.
"""
return self.build(ReilMnemonic.MOD, src1, src2, dst)
def gen_bsh(self, src1, src2, dst):
"""Return a BSH instruction.
"""
return self.build(ReilMnemonic.BSH, src1, src2, dst)
# Bitwise Instructions
# ======================================================================== #
def gen_and(self, src1, src2, dst):
"""Return an AND instruction.
"""
return self.build(ReilMnemonic.AND, src1, src2, dst)
def gen_or(self, src1, src2, dst):
"""Return an OR instruction.
"""
return self.build(ReilMnemonic.OR, src1, src2, dst)
def gen_xor(self, src1, src2, dst):
"""Return a XOR instruction.
"""
return self.build(ReilMnemonic.XOR, src1, src2, dst)
# Data Transfer Instructions
# ======================================================================== #
def gen_ldm(self, src, dst):
"""Return a LDM instruction.
"""
return self.build(ReilMnemonic.LDM, src, ReilEmptyOperand(), dst)
def gen_stm(self, src, dst):
"""Return a STM instruction.
"""
return self.build(ReilMnemonic.STM, src, ReilEmptyOperand(), dst)
def gen_str(self, src, dst):
"""Return a STR instruction.
"""
return self.build(ReilMnemonic.STR, src, ReilEmptyOperand(), dst)
# Conditional Instructions
# ======================================================================== #
def gen_bisz(self, src, dst):
"""Return a BISZ instruction.
"""
return self.build(ReilMnemonic.BISZ, src, ReilEmptyOperand(), dst)
def gen_jcc(self, src, dst):
"""Return a JCC instruction.
"""
return self.build(ReilMnemonic.JCC, src, ReilEmptyOperand(), dst)
# Other Instructions
# ======================================================================== #
def gen_unkn(self):
"""Return an UNKN instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.UNKN, empty_reg, empty_reg, empty_reg)
def gen_undef(self):
"""Return an UNDEF instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.UNDEF, empty_reg, empty_reg, empty_reg)
def gen_nop(self):
"""Return a NOP instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.NOP, empty_reg, empty_reg, empty_reg)
# Ad hoc Instructions
# ======================================================================== #
def gen_ret(self):
"""Return a RET instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.RET, empty_reg, empty_reg, empty_reg)
# Extensions
# ======================================================================== #
def gen_sext(self, src, dst):
"""Return a SEXT instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.SEXT, src, empty_reg, dst)
def gen_sdiv(self, src1, src2, dst):
"""Return a SDIV instruction.
"""
return self.build(ReilMnemonic.SDIV, src1, src2, dst)
def gen_smod(self, src1, src2, dst):
"""Return a SMOD instruction.
"""
return self.build(ReilMnemonic.SMOD, src1, src2, dst)
# Auxiliary functions
# ======================================================================== #
def build(self, mnemonic, oprnd1, oprnd2, oprnd3):
"""Return the specified instruction.
"""
ins = ReilInstruction()
ins.mnemonic = mnemonic
ins.operands = [oprnd1, oprnd2, oprnd3]
return ins
class DualInstruction(object):
"""Represents an assembler instruction paired with its IR
representation.
"""
__slots__ = [
'_address',
'_asm_instr',
'_ir_instrs',
]
def __init__(self, address, asm_instr, ir_instrs):
# Address of the assembler instruction.
self._address = address
# Assembler instruction.
self._asm_instr = asm_instr
# REIL translation of the assembler instruction. Note that one
# assembler instruction is mapped to more than one REIL
# instruction.
self._ir_instrs = ir_instrs
@property
def address(self):
"""Get instruction address.
"""
return self._address
@property
def asm_instr(self):
"""Get assembly instruction.
"""
return self._asm_instr
@property
def ir_instrs(self):
"""Get IR representation of the assembly instruction.
"""
return self._ir_instrs
def __eq__(self, other):
return self.address == other.address and \
self.asm_instr == other.asm_instr
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {
'_address': self._address,
'_asm_instr': self._asm_instr,
'_ir_instrs': self._ir_instrs
}
return state
def __setstate__(self, state):
self._address = state['_address']
self._asm_instr = state['_asm_instr']
self._ir_instrs = state['_ir_instrs']
class ReilSequence(object):
"""Reil instruction sequence.
"""
def __init__(self):
self.__sequence = []
self.__next_seq_address = None
def append(self, instruction):
self.__sequence.append(instruction)
def get(self, index):
return self.__sequence[index]
def dump(self):
for instr in self.__sequence:
base_addr, index = split_address(instr.address)
print("{:08x}:{:02x}\t{}".format(base_addr, index, instr))
@property
def address(self):
return self.__sequence[0].address if self.__sequence else None
@property
def next_sequence_address(self):
return self.__next_seq_address
@next_sequence_address.setter
def next_sequence_address(self, address):
self.__next_seq_address = address
def __len__(self):
return len(self.__sequence)
def __iter__(self):
for instr in self.__sequence:
yield instr
class ReilContainerInvalidAddressError(Exception):
pass
class ReilContainer(object):
"""Reil instruction container.
"""
def __init__(self):
self.__container = {}
def add(self, sequence):
base_addr, _ = split_address(sequence.address)
if base_addr in self.__container.keys():
raise Exception("Invalid sequence")
self.__container[base_addr] = sequence
def fetch(self, address):
base_addr, index = split_address(address)
if base_addr not in self.__container.keys():
raise ReilContainerInvalidAddressError()
return self.__container[base_addr].get(index)
def get_next_address(self, address):
base_addr, index = split_address(address)
if base_addr not in self.__container.keys():
raise Exception("Invalid address.")
addr = address
if index < len(self.__container[base_addr]) - 1:
addr += 1
else:
addr = self.__container[base_addr].next_sequence_address
return addr
def dump(self):
for base_addr in sorted(self.__container.keys()):
self.__container[base_addr].dump()
print("-" * 80)
def __iter__(self):
for addr in sorted(self.__container.keys()):
for instr in self.__container[addr]:
yield instr
def check_operands_size(instr, arch_size):
"""Enforce operands' size."""
if instr.mnemonic in [ ReilMnemonic.ADD, ReilMnemonic.SUB,
ReilMnemonic.MUL, ReilMnemonic.DIV,
ReilMnemonic.MOD, ReilMnemonic.BSH,
ReilMnemonic.AND, ReilMnemonic.OR,
ReilMnemonic.XOR]:
# operand0 : Source 1 (Literal or register)
# operand1 : Source 2 (Literal or register)
# operand2 : Destination register
# Check that source operands have the same size.
assert instr.operands[0].size == instr.operands[1].size, \
"Invalid operands size: %s" % instr
elif instr.mnemonic in [ReilMnemonic.LDM]:
# operand0 : Source address (Literal or register)
# operand1 : Empty register
# operand2 : Destination register
assert instr.operands[0].size == arch_size, \
"Invalid operands size: %s" % instr
elif instr.mnemonic in [ReilMnemonic.STM]:
# operand0 : Value to store (Literal or register)
# operand1 : Empty register
# operand2 : Destination address (Literal or register)
assert instr.operands[2].size == arch_size, \
"Invalid operands size: %s" % instr
elif instr.mnemonic in [ReilMnemonic.STR]:
# operand0 : Value to store (Literal or register)
# operand1 : Empty register
# operand2 : Destination register
pass
elif instr.mnemonic in [ReilMnemonic.BISZ]:
# operand0 : Value to compare (Literal or register)
# operand1 : Empty register
# operand2 : Destination register
pass
elif instr.mnemonic in [ReilMnemonic.JCC]:
# operand0 : Condition (Literal or register)
# operand1 : Empty register
# operand2 : Destination register
# FIX: operand2.size should be arch_size + 1 byte
assert instr.operands[2].size == arch_size + 8, \
"Invalid operands size: %s" % instr
pass
elif instr.mnemonic in [ReilMnemonic.UNKN]:
# operand0 : Empty register
# operand1 : Empty register
# operand2 : Empty register
pass
elif instr.mnemonic in [ReilMnemonic.UNDEF]:
# operand0 : Empty register
# operand1 : Empty register
# operand2 : Destination register
pass
elif instr.mnemonic in [ReilMnemonic.NOP]:
# operand0 : Empty register
# operand1 : Empty register
# operand2 : Empty register
pass
elif instr.mnemonic in [ReilMnemonic.SEXT]:
# operand0 : Value to store (Literal or register)
# operand1 : Empty register
# operand2 : Destination register
assert instr.operands[0].size <= instr.operands[2].size, \
"Invalid operands size: %s" % instr
| chubbymaggie/barf-project | barf/core/reil/reil.py | Python | bsd-2-clause | 24,142 |
# =============================================================================
# plugin
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# # Author: Klaudiusz Staniek
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from csmpe.plugins import CSMPlugin
class Plugin(CSMPlugin):
"""This plugin checks the configuration filesystem"""
name = "Config Filesystem Check Plugin"
platforms = {'ASR9K', 'CRS', 'NCS1K', 'NCS4K', 'NCS5K', 'NCS5500', 'NCS6K', 'IOS-XRv'}
phases = {'Pre-Upgrade', "Pre-Activate", "Pre-Deactivate"}
def run(self):
"""
RP/0/RSP0/CPU0:R3#cfs check
Tue May 17 09:56:43.720 UTC
Creating any missing directories in Configuration File system...OK
Initializing Configuration Version Manager...OK
Syncing commit database with running configuration...OK
"""
ok = 0
message = []
output = self.ctx.send("cfs check", timeout=300)
lines = output.split("\n", 50)
for line in lines:
if line != "":
message.append(line)
if 'OK' in line:
ok += 1
for line in message:
if ok < 3:
self.ctx.warning(line)
else:
self.ctx.info(line)
if ok < 3:
self.ctx.error("The configuration filesystem has inconsistencies")
else:
self.ctx.info("Configuration filesystem is consistent")
| anushreejangid/csmpe-main | csmpe/core_plugins/csm_check_config_filesystem/ios_xr/plugin.py | Python | bsd-2-clause | 2,764 |
import numpy as np
import copy
## extension of numpy.outer() to unlimited number of vectors,
## result[i,j,k] = v1[i] * v2[j] * v3[k] ##
def TensorOuter(vector_list):
L = len(vector_list)
dim = [vector_list[i].shape[0] for i in range(L)]
result = copy.deepcopy(vector_list[0])
previous_size = 1
for i in range(1, L):
previous_size = previous_size * dim[i-1]
result = np.outer(result,vector_list[i])
result = result.reshape(previous_size * dim[i])
result = result.reshape(dim)
return result
def TensorOuterFull(array_list):
L = len(array_list)
shapes = [array_list[i].shape for i in range(L)]
sizes_flat = [_prod(shapes[i]) for i in range(L)]
result = copy.deepcopy(array_list[0].reshape(sizes_flat[0]))
previous_size = 1
for i in range(1,L):
previous_size = previous_size * sizes_flat[i-1]
result = np.outer(result, array_list[i].reshape(sizes_flat[i])).reshape(previous_size * sizes_flat[i])
return result.reshape(_concatenate(shapes))
def _concatenate(array_list):
result = np.array([],dtype=np.int64)
for array in array_list:
result = np.concatenate((result, array), axis=0)
return result
def _prod(array):
result = 1
for value in array:
result *= value
return result
## extension of numpy.multiply (c[i,j] = a[i,j]*b[j]) to c[i,j] = a[i,j]*b[i] ##
def transMultiply(a, b):
if a.shape[0] != b.shape[0]:
raise ValueError("the first axis does not match")
return np.transpose(np.multiply(np.transpose(a), b))
if __name__ == "__main__":
a = np.arange(6).reshape([2, 3])
b = np.arange(4) * 1.1
c = np.arange(5) * 2.0
print TensorOuter([np.arange(2)+1, np.arange(3)+10, np.arange(4)+100])
print TensorOuterFull([np.arange(2)+1, np.arange(3)+10, np.arange(4)+100])
print TensorOuterFull([b,a,c])
# print transMultiply(np.arange(6).reshape([2,3]), np.arange(2))
| JasonLC506/CollaborativeFiltering | matrixTool.py | Python | mit | 1,944 |
import re
from rdp.symbols import to_symbol
from rdp.tokenizer import Token
from rdp.symbols import Marker
INDENT = Marker('INDENT')
INDENT.num = +1
DEDENT = Marker('DEDENT')
DEDENT.num = -1
NEWLINE = Marker('NEWLINE')
_space_re = re.compile(r'^[ \t]*')
def get_indention(s, tabsize=4):
space = _space_re.match(s).group(0)
return space.count(' ') + space.count('\t') * tabsize
def indent(opening=(), closing=(), tabsize=4, yield_newlines=False):
nesting_map = {}
for symbol in opening:
nesting_map[to_symbol(symbol)] = +1
for symbol in closing:
nesting_map[to_symbol(symbol)] = -1
def tokenize(tokens):
indention = [0]
last_token = None
depth = 0
for token in tokens:
last_token = token
depth += nesting_map.get(token.symbol, 0)
newline_index = token.lexeme.find('\n')
if depth or newline_index == -1:
yield token
continue
before, after = token.split(newline_index + 1)
indent = get_indention(after.lexeme, tabsize=tabsize)
if indent == indention[-1]:
if yield_newlines:
yield Token(NEWLINE, "", token.start)
yield token
continue
yield before
if indent > indention[-1]:
indention.append(indent)
yield Token(INDENT, "", after.start)
else:
while indention[-1] > indent:
yield Token(DEDENT, "", after.start)
indention.pop()
if indention[-1] != indent:
raise TokenizeError("unexpected indention level")
if after:
yield after
if last_token:
pos = last_token.end
while indention[-1] != 0:
yield Token(DEDENT, "", pos)
indention.pop()
return tokenize
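# Illustrative sketch (not part of the original module): indent() builds a
# token-stream filter meant to wrap an existing tokenizer's output and
# interleave INDENT/DEDENT (and optionally NEWLINE) marker tokens.
def _example_make_indent_filter(tabsize=4):
    tokenize = indent(tabsize=tabsize, yield_newlines=True)
    # Use as list(tokenize(tokens)) for any iterable of Token objects; with
    # the default tabsize, get_indention("\t  body") evaluates to 4 + 2 = 6.
    return tokenize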
| emulbreh/rdp | rdp/indention.py | Python | mit | 1,966 |
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from opencensus.common import utils
from opencensus.trace import attributes
_SpanData = collections.namedtuple(
'_SpanData',
(
'name',
'context',
'span_id',
'parent_span_id',
'attributes',
'start_time',
'end_time',
'child_span_count',
'stack_trace',
'annotations',
'message_events',
'links',
'status',
'same_process_as_parent_span',
'span_kind',
),
)
class SpanData(_SpanData):
"""Immutable representation of all data collected by a
:class: `~opencensus.trace.span.Span`.
:type name: str
:param name: The name of the span.
:type: context: :class: `~opencensus.trace.span_context.SpanContext`
:param context: The SpanContext of the Span
:type span_id: int
:param span_id: Identifier for the span, unique within a trace.
:type parent_span_id: int
:param parent_span_id: (Optional) Parent span id.
:type attributes: dict
:param attributes: Collection of attributes associated with the span.
:type start_time: str
:param start_time: (Optional) Start of the time interval (inclusive)
during which the trace data was collected from the
application.
:type end_time: str
:param end_time: (Optional) End of the time interval (inclusive) during
which the trace data was collected from the application.
:type child_span_count: int
:param child_span_count: the number of child spans that were
generated while the span was active.
:type stack_trace: :class: `~opencensus.trace.stack_trace.StackTrace`
:param stack_trace: (Optional) A call stack appearing in a trace
:type annotations: list(:class:`opencensus.trace.time_event.Annotation`)
:param annotations: (Optional) The list of span annotations.
:type message_events:
list(:class:`opencensus.trace.time_event.MessageEvent`)
:param message_events: (Optional) The list of span message events.
:type links: list
:param links: (Optional) Links associated with the span. You can have up
to 128 links per Span.
:type status: :class: `~opencensus.trace.status.Status`
:param status: (Optional) An optional final status for this span.
:type same_process_as_parent_span: bool
:param same_process_as_parent_span: (Optional) A highly recommended but not
required flag that identifies when a
trace crosses a process boundary.
True when the parent_span belongs to
the same process as the current span.
:type span_kind: int
:param span_kind: (Optional) Highly recommended flag that denotes the type
of span (valid values defined by :class:
`opencensus.trace.span.SpanKind`)
"""
__slots__ = ()
def _format_legacy_span_json(span_data):
"""
:param SpanData span_data: SpanData object to convert
:rtype: dict
:return: Dictionary representing the Span
"""
span_json = {
'displayName': utils.get_truncatable_str(span_data.name),
'spanId': span_data.span_id,
'startTime': span_data.start_time,
'endTime': span_data.end_time,
'childSpanCount': span_data.child_span_count,
'kind': span_data.span_kind
}
if span_data.parent_span_id is not None:
span_json['parentSpanId'] = span_data.parent_span_id
if span_data.attributes:
span_json['attributes'] = attributes.Attributes(
span_data.attributes).format_attributes_json()
if span_data.stack_trace is not None:
span_json['stackTrace'] = \
span_data.stack_trace.format_stack_trace_json()
formatted_time_events = []
if span_data.annotations:
formatted_time_events.extend(
{'time': aa.timestamp,
'annotation': aa.format_annotation_json()}
for aa in span_data.annotations)
if span_data.message_events:
formatted_time_events.extend(
{'time': aa.timestamp,
'message_event': aa.format_message_event_json()}
for aa in span_data.message_events)
if formatted_time_events:
span_json['timeEvents'] = {
'timeEvent': formatted_time_events
}
if span_data.links:
span_json['links'] = {
'link': [
link.format_link_json() for link in span_data.links]
}
if span_data.status is not None:
span_json['status'] = span_data.status.format_status_json()
if span_data.same_process_as_parent_span is not None:
span_json['sameProcessAsParentSpan'] = \
span_data.same_process_as_parent_span
return span_json
def format_legacy_trace_json(span_datas):
"""Formats a list of SpanData tuples into the legacy 'trace' dictionary
format for backwards compatibility
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit
:rtype: dict
:return: Legacy 'trace' dictionary representing given SpanData tuples
"""
if not span_datas:
return {}
top_span = span_datas[0]
assert isinstance(top_span, SpanData)
trace_id = top_span.context.trace_id if top_span.context is not None \
else None
assert trace_id is not None
return {
'traceId': trace_id,
'spans': [_format_legacy_span_json(sd) for sd in span_datas],
}
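# Illustrative sketch (not part of the library): build one SpanData tuple and
# render it with the helpers above.  All field values are placeholders, and
# `context` is assumed to be a SpanContext whose trace_id is set, which is all
# that format_legacy_trace_json() requires of it.
def _example_format_single_span(context):
    span = SpanData(
        name='example-span',
        context=context,
        span_id='6e0c63257de34c92',
        parent_span_id=None,
        attributes={'component': 'example'},
        start_time='2017-08-15T18:02:26.071158Z',
        end_time='2017-08-15T18:02:36.071158Z',
        child_span_count=0,
        stack_trace=None,
        annotations=None,
        message_events=None,
        links=None,
        status=None,
        same_process_as_parent_span=True,
        span_kind=0,
    )
    return format_legacy_trace_json([span])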
| census-instrumentation/opencensus-python | opencensus/trace/span_data.py | Python | apache-2.0 | 6,330 |
#!/usr/bin/env python
__author__ = 'OmerMahgoub'
import os
import yaml
class StackSettings:
def ServiceSettings(self,ServiceName):
basepath = os.path.dirname(__file__)
keypairs = os.path.abspath(os.path.join(basepath, "config.yml"))
with open(keypairs,'r') as f:
doc = yaml.load(f)
cfgServiceSettings = doc[ServiceName]
return cfgServiceSettings
| omermahgoub/MigTool | settings/settings.py | Python | gpl-3.0 | 424 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .rigolDS2000A import *
class rigolDS2202A(rigolDS2000A):
"Rigol DS2202A IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'DS2202A')
super(rigolDS2202A, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 0
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 200e6
self._bandwidth_limit = {'20M': 20e6, '100M': 100e6}
self._init_channels()
| alexforencich/python-ivi | ivi/rigol/rigolDS2202A.py | Python | mit | 1,683 |
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class CBSIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?cbs\.com/shows/[^/]+/(?:video|artist)/(?P<id>[^/]+)/.*'
_TESTS = [{
'url': 'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
'info_dict': {
'id': '4JUVEwq3wUT7',
'ext': 'flv',
'title': 'Connect Chat feat. Garth Brooks',
'description': 'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!',
'duration': 1495,
},
'params': {
# rtmp download
'skip_download': True,
},
'_skip': 'Blocked outside the US',
}, {
'url': 'http://www.cbs.com/shows/liveonletterman/artist/221752/st-vincent/',
'info_dict': {
'id': 'WWF_5KqY3PK1',
'ext': 'flv',
'title': 'Live on Letterman - St. Vincent',
'description': 'Live On Letterman: St. Vincent in concert from New York\'s Ed Sullivan Theater on Tuesday, July 16, 2014.',
'duration': 3221,
},
'params': {
# rtmp download
'skip_download': True,
},
'_skip': 'Blocked outside the US',
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
real_id = self._search_regex(
r"video\.settings\.pid\s*=\s*'([^']+)';",
webpage, 'real video ID')
return self.url_result(u'theplatform:%s' % real_id)
| kthordarson/youtube-dl-ruv | youtube_dl/extractor/cbs.py | Python | unlicense | 1,813 |
"""
GitNotify - Push/Commit notification script
Copyright (C) 2013 Manuel Peuster <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import git
import logging
import os
import time
class GitRepository(object):
'''
Represents a git repository.
Implements typical git functions like pull or clone.
'''
def __init__(self, repo_name, repo_remote_url, repo_base_path="/tmp"):
'''
Init with remote address etc.
'''
self.repo_name = repo_name
self.repo_remote_url = repo_remote_url
self.repo_base_path = repo_base_path
def __open_repository(self):
'''
Returns a connection to the repository.
'''
try:
logging.debug("Open repository: %s", self.repo_name)
return git.Repo(self.repo_base_path + "/" + self.repo_name)
except:
logging.exception("Can not open repository: %s", self.repo_name)
def clone(self):
'''
Clones a remote repository to the local machine.
'''
if not os.path.exists(self.repo_base_path + "/" + self.repo_name):
try:
logging.info("Cloning repository: %s", self.repo_remote_url)
git.Repo.clone_from(self.repo_remote_url,
self.repo_base_path + "/" + self.repo_name)
except:
logging.exception("Can not clone repository: %s",
self.repo_remote_url)
def pull(self):
'''
Pulls latest version from a remote repository.
'''
r = self.__open_repository()
try:
logging.info("Pull from repository: %s", self.repo_name)
r.remotes.origin.pull()
except AssertionError:
pass
except:
logging.exception("Can not pull from repository: %s",
self.repo_name)
def get_commits(self, branch="master", limit=20):
'''
Returns a list of commits.
List length defined by limit.
'''
r = self.__open_repository()
result = []
for c in r.iter_commits(branch, max_count=limit):
result.append(GitCommit.create(c, self.repo_name))
return result
class GitCommit(object):
'''
Represents a single git commit.
Contains names, hashes, and commit message.
'''
def __init__(self):
'''
Init.
'''
self.repo_name = None
self.hexsha = None
self.author = None
self.authored_date = None
self.committer = None
self.committed_date = None
self.message = None
def __repr__(self):
'''
Returns nice string version of commit.
'''
return "Commit(%s:%s, %s, %s, %s, %s, %s)" % (self.repo_name,
self.hexsha,
self.author,
self.authored_date,
self.committer,
self.committed_date,
self.message)
@staticmethod
def create(source, repo_name):
'''
        Static factory that creates a GitCommit from a commit object
        returned by the git library.
'''
c = GitCommit()
c.repo_name = repo_name
c.hexsha = source.hexsha
c.author = str(source.author)
c.authored_date = time.strftime("%Y-%m-%d %H:%M",
time.gmtime(source.authored_date))
c.committer = str(source.committer)
c.committed_date = time.strftime("%Y-%m-%d %H:%M",
time.gmtime(source.committed_date))
c.message = str(source.message)
logging.debug("Created: " + str(c))
return c
class CommitHistory(object):
'''
Handles the already notified commits.
'''
def __init__(self, path="history.dat"):
'''
Init.
'''
self.commit_history = []
self.path = path
self.__load_from_file()
def __load_from_file(self):
'''
Loads the commit history from a file.
'''
try:
logging.info("Loading commit history file: %s", self.path)
f = open(self.path, "a+")
f.seek(0, 0)
for line in f:
if ':' in line:
self.commit_history.append(line.strip())
f.close()
except:
logging.exception("Can not open commit history file: %s",
self.path)
def filter_commits_to_notify(self, commit_list):
'''
Removes all commits from commit list which are already in the
commit history file.
'''
logging.info("Filtering commit list for not yet notified commits")
result = []
for c in commit_list:
if not str("%s:%s" % (c.repo_name, c.hexsha)) \
in self.commit_history:
result.append(c)
return result
def add_notified_commits(self, commit_list):
'''
Add new commits to commit history file.
'''
try:
logging.info("Updating commit history file: %s", self.path)
f = open(self.path, "a")
for c in commit_list:
f.write(str("%s:%s\n" % (c.repo_name, c.hexsha)))
f.close()
except:
logging.exception("Can not update commit history file: %s",
self.path)
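# Illustrative usage sketch (not part of the original module): wire the two
# classes together to collect only not-yet-notified commits.  The repository
# name and URL below are placeholders.
def _example_collect_new_commits():
    repo = GitRepository("example-repo",
                         "https://example.org/example-repo.git",
                         repo_base_path="/tmp")
    repo.clone()
    repo.pull()
    history = CommitHistory(path="history.dat")
    new_commits = history.filter_commits_to_notify(repo.get_commits(limit=20))
    # Record them so the next run does not notify about the same commits again.
    history.add_notified_commits(new_commits)
    return new_commits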
| mpeuster/git-notify | src/repository.py | Python | gpl-3.0 | 6,343 |
"""
kombu.transport.zmq
===================
ZeroMQ transport.
"""
from __future__ import absolute_import, unicode_literals
import errno
import os
import socket
try:
import zmq
from zmq import ZMQError
except ImportError:
zmq = ZMQError = None # noqa
from kombu.five import Empty
from kombu.log import get_logger
from kombu.serialization import pickle
from kombu.utils import cached_property
from kombu.utils.eventio import poll, READ
from . import virtual
logger = get_logger('kombu.transport.zmq')
DEFAULT_PORT = 5555
DEFAULT_HWM = 128
DEFAULT_INCR = 1
dumps, loads = pickle.dumps, pickle.loads
class MultiChannelPoller(object):
eventflags = READ
def __init__(self):
# active channels
self._channels = set()
# file descriptor -> channel map
self._fd_to_chan = {}
# poll implementation (epoll/kqueue/select)
self.poller = poll()
def close(self):
for fd in self._fd_to_chan:
try:
self.poller.unregister(fd)
except KeyError:
pass
self._channels.clear()
self._fd_to_chan.clear()
self.poller = None
def add(self, channel):
self._channels.add(channel)
def discard(self, channel):
self._channels.discard(channel)
self._fd_to_chan.pop(channel.client.connection.fd, None)
def _register(self, channel):
conn = channel.client.connection
self._fd_to_chan[conn.fd] = channel
self.poller.register(conn.fd, self.eventflags)
def on_poll_start(self):
for channel in self._channels:
self._register(channel)
def on_readable(self, fileno):
chan = self._fd_to_chan[fileno]
return chan.drain_events(), chan
def get(self, timeout=None):
self.on_poll_start()
events = self.poller.poll(timeout)
for fileno, _ in events or []:
return self.on_readable(fileno)
raise Empty()
@property
def fds(self):
return self._fd_to_chan
class Client(object):
def __init__(self, uri='tcp://127.0.0.1', port=DEFAULT_PORT,
hwm=DEFAULT_HWM, swap_size=None, enable_sink=True,
context=None):
try:
scheme, parts = uri.split('://')
except ValueError:
scheme = 'tcp'
parts = uri
endpoints = parts.split(';')
self.port = port
if scheme != 'tcp':
raise NotImplementedError('Currently only TCP can be used')
self.context = context or zmq.Context.instance()
if enable_sink:
self.sink = self.context.socket(zmq.PULL)
self.sink.bind('tcp://*:{0.port}'.format(self))
else:
self.sink = None
self.vent = self.context.socket(zmq.PUSH)
if hasattr(zmq, 'SNDHWM'):
self.vent.setsockopt(zmq.SNDHWM, hwm)
else:
self.vent.setsockopt(zmq.HWM, hwm)
if swap_size:
self.vent.setsockopt(zmq.SWAP, swap_size)
for endpoint in endpoints:
if scheme == 'tcp' and ':' not in endpoint:
endpoint += ':' + str(DEFAULT_PORT)
endpoint = ''.join([scheme, '://', endpoint])
self.connect(endpoint)
def connect(self, endpoint):
self.vent.connect(endpoint)
def get(self, queue=None, timeout=None):
sink = self.sink
try:
if timeout is not None:
prev_timeout, sink.RCVTIMEO = sink.RCVTIMEO, timeout
try:
return sink.recv()
finally:
sink.RCVTIMEO = prev_timeout
else:
return sink.recv()
except ZMQError as exc:
if exc.errno == zmq.EAGAIN:
raise socket.error(errno.EAGAIN, exc.strerror)
else:
raise
def put(self, queue, message, **kwargs):
return self.vent.send(message)
def close(self):
if self.sink and not self.sink.closed:
self.sink.close()
if not self.vent.closed:
self.vent.close()
@property
def connection(self):
if self.sink:
return self.sink
return self.vent
class Channel(virtual.Channel):
Client = Client
hwm = DEFAULT_HWM
swap_size = None
enable_sink = True
port_incr = DEFAULT_INCR
from_transport_options = (
virtual.Channel.from_transport_options +
('hwm', 'swap_size', 'enable_sink', 'port_incr')
)
def __init__(self, *args, **kwargs):
super_ = super(Channel, self)
super_.__init__(*args, **kwargs)
# Evaluate socket
self.client.connection.closed
self.connection.cycle.add(self)
self.connection_errors = self.connection.connection_errors
def _get(self, queue, timeout=None):
try:
return loads(self.client.get(queue, timeout))
except socket.error as exc:
if exc.errno == errno.EAGAIN and timeout != 0:
raise Empty()
else:
raise
def _put(self, queue, message, **kwargs):
self.client.put(queue, dumps(message, -1), **kwargs)
def _purge(self, queue):
return 0
def _poll(self, cycle, timeout=None):
return cycle.get(timeout=timeout)
def close(self):
if not self.closed:
self.connection.cycle.discard(self)
try:
self.__dict__['client'].close()
except KeyError:
pass
super(Channel, self).close()
def _prepare_port(self, port):
return (port + self.channel_id - 1) * self.port_incr
def _create_client(self):
conninfo = self.connection.client
port = self._prepare_port(conninfo.port or DEFAULT_PORT)
return self.Client(uri=conninfo.hostname or 'tcp://127.0.0.1',
port=port,
hwm=self.hwm,
swap_size=self.swap_size,
enable_sink=self.enable_sink,
context=self.connection.context)
@cached_property
def client(self):
return self._create_client()
class Transport(virtual.Transport):
Channel = Channel
can_parse_url = True
default_port = DEFAULT_PORT
driver_type = 'zeromq'
driver_name = 'zmq'
connection_errors = virtual.Transport.connection_errors + (ZMQError,)
implements = virtual.Transport.implements.extend(
async=True,
)
polling_interval = None
def __init__(self, *args, **kwargs):
if zmq is None:
raise ImportError('The zmq library is not installed')
super(Transport, self).__init__(*args, **kwargs)
self.cycle = MultiChannelPoller()
def driver_version(self):
return zmq.__version__
def register_with_event_loop(self, connection, loop):
cycle = self.cycle
cycle.poller = loop.poller
add_reader = loop.add_reader
on_readable = self.on_readable
cycle_poll_start = cycle.on_poll_start
def on_poll_start():
cycle_poll_start()
[add_reader(fd, on_readable, fd) for fd in cycle.fds]
loop.on_tick.add(on_poll_start)
def on_readable(self, fileno):
self._handle_event(self.cycle.on_readable(fileno))
def drain_events(self, connection, timeout=None):
more_to_read = False
for channel in connection.channels:
try:
evt = channel.cycle.get(timeout=timeout)
except socket.error as exc:
if exc.errno == errno.EAGAIN:
continue
raise
else:
connection._handle_event((evt, channel))
more_to_read = True
if not more_to_read:
raise socket.error(errno.EAGAIN, os.strerror(errno.EAGAIN))
def _handle_event(self, evt):
item, channel = evt
self._deliver(*item)
def establish_connection(self):
self.context.closed
return super(Transport, self).establish_connection()
def close_connection(self, connection):
super(Transport, self).close_connection(connection)
try:
connection.__dict__['context'].term()
except KeyError:
pass
@cached_property
def context(self):
return zmq.Context(1)
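# Illustrative sketch (not part of the original transport): the Client above
# can be exercised on its own, outside kombu's Connection machinery.  With the
# defaults the PULL sink binds to *:5555 while the PUSH vent connects to
# 127.0.0.1:5555, so a single Client can round-trip a message to itself.  The
# payload and the 1000 ms receive timeout are arbitrary choices.
def _example_client_roundtrip():
    client = Client(uri='tcp://127.0.0.1', port=DEFAULT_PORT)
    try:
        client.put('ignored-queue-name', dumps({'hello': 'world'}, -1))
        # Client.get() returns the raw pickled payload, mirroring Channel._get.
        return loads(client.get(timeout=1000))
    finally:
        client.close()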
| Elastica/kombu | kombu/transport/zmq.py | Python | bsd-3-clause | 8,476 |
"""Gershwin URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import InterfaceCheck
import InterfaceManage
import TestData
import UserManage
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/interface/', include('InterfaceManage.urls')),
# url(r'^api/check', include('InterfaceCheck.urls')),
# url(r'^api/test', include('TestData.urls')),
# url(r'^api/auth', include('UserManage.urls')),
]
| kaiyangjia/Gershwin | Gershwin/urls.py | Python | apache-2.0 | 1,076 |
from geopy import Photon
from geopy.extra.rate_limiter import RateLimiter
class GeoCodePipeline(object):
def open_spider(self, spider):
geolocator = Photon(timeout=5)
self.__geocodeFunc = RateLimiter(geolocator.geocode, min_delay_seconds=2)
def process_item(self, item, spider):
for crime in item["crimes"]:
place = crime["place"]
latitude, longitude = self.__geocode_address(place)
crime["latitude"] = latitude
crime["longitude"] = longitude
return item
def __geocode_address(self, place):
if place is None:
return None, None
location = self.__geocodeFunc(place)
if location is not None:
return location.latitude, location.longitude
else:
return None, None
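# Illustrative sketch (not part of the pipeline): the item shape assumed by
# process_item() above -- a dict with a "crimes" list whose entries carry a
# free-text "place" that gets enriched with "latitude"/"longitude".  The
# values are placeholders.
EXAMPLE_ITEM = {
    "crimes": [
        {"place": "Altmarkt, Dresden"},
        {"place": None},  # entries without a place keep None coordinates
    ],
}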
| aberklotz/crimereport | parser/crimeparser/pipelinesEnricher.py | Python | mit | 825 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Identifier(Resource):
"""Identifier.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:param name: Resource Name.
:type name: str
:param kind: Kind of resource.
:type kind: str
:param location: Resource Location.
:type location: str
:param type: Resource type.
:type type: str
:param tags: Resource tags.
:type tags: dict
:param identifier_id: ID.
:type identifier_id: str
"""
_validation = {
'id': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identifier_id': {'key': 'properties.id', 'type': 'str'},
}
def __init__(self, location, name=None, kind=None, type=None, tags=None, identifier_id=None):
super(Identifier, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
self.identifier_id = identifier_id
| SUSE/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/identifier.py | Python | mit | 1,780 |
model = """
# Schlogl model (Schlogl 1972, Chemical reaction models for nonequilibrium phase transitions)
FIX: A B
# Reactions
R1:
A + {2} X > {3} X
1/2*c1*A*X*(X-1)
R2:
{3} X > A + {2} X
1/6*c2*X*(X-1)*(X-2)
R3:
B > X
c3 * B
R4:
X > B
c4*X
# Fixed species
A = 100000
B = 200000
# Variable species
X = 250
c1 = 3*10**-7
c2 = 10**-4
c3 = 10**-3
c4 = 3.5
"""
| SystemsBioinformatics/stochpy | stochpy/pscmodels/Schlogl.py | Python | gpl-3.0 | 399 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Transpose op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.kernel_tests.gradient_checker import ComputeGradient
class TransposeTest(tf.test.TestCase):
def _np_transpose(self, x, perm):
ret = np.copy(x)
ret = ret.transpose(perm)
return ret
def _compareCpu(self, x, p):
np_ans = self._np_transpose(x, p)
with self.test_session(use_gpu=False):
inx = tf.convert_to_tensor(x)
y = tf.transpose(inx, p)
tf_ans = y.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
jacob_t = None
# Gradient check on CPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compareGpu(self, x, p):
np_ans = self._np_transpose(x, p)
with self.test_session(use_gpu=True):
inx = tf.convert_to_tensor(x)
y = tf.transpose(inx, p)
tf_ans = y.eval()
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, y)
jacob_t = None
# Gradient check on GPU.
xs = list(np.shape(x))
ys = list(np.shape(tf_ans))
if x.dtype == np.float32:
jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-3, 1e-3)
elif x.dtype == np.float64:
jacob_t, jacob_n = ComputeGradient(inx, xs, y, ys, x, 1e-2)
self.assertAllClose(jacob_t, jacob_n, 1e-6, 1e-6)
return tf_ans, jacob_t
def _compare(self, x, use_gpu=False):
n = np.ndim(x)
# generate all permutations of [0, 1, ... n-1] in random order.
all_perm = np.random.permutation(
[p for p in itertools.permutations(range(n))]).astype(np.int32)
for p in all_perm[0:2]:
self._compareCpu(x, p)
if use_gpu:
self._compareGpu(x, p)
def _compare_cpu_gpu(self, x):
n = np.ndim(x)
    # generate all permutations of [0, 1, ... n-1] in random order.
all_perm = np.random.permutation(
[p for p in itertools.permutations(range(n))]).astype(np.int32)
for p in all_perm[0:2]:
tf_a_cpu, tf_g_cpu = self._compareCpu(x, p)
tf_a_gpu, tf_g_gpu = self._compareGpu(x, p)
assert tf_g_cpu is not None
assert tf_g_gpu is not None
if x.dtype == np.float32:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-3, 1e-3)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-3, 1e-3)
elif x.dtype == np.float64:
self.assertAllClose(tf_a_cpu, tf_a_gpu, 1e-6, 1e-6)
self.assertAllClose(tf_g_cpu, tf_g_gpu, 1e-6, 1e-6)
def _testCpu(self, x):
self._compare(x, use_gpu=False)
def test1D(self):
self._compareCpu(np.arange(0., 2), [0])
def testNop(self):
self._compareCpu(np.arange(0, 6).reshape([3, 2]).astype(np.float32), [0, 1])
def testSimple(self):
self._compareCpu(np.arange(0, 8).reshape([2, 4]).astype(np.float32),
np.array([1, 0]).astype(np.int32))
def testFloat(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float32))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float32))
def testDouble(self):
self._compare_cpu_gpu(np.arange(0, 21).reshape([3, 7]).astype(np.float64))
self._compare_cpu_gpu(
np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.float64))
def testSComplex(self):
self._testCpu(np.complex(1, 2) * np.arange(0, 21).reshape(
[3, 7]).astype(np.complex64))
self._testCpu(np.complex(1, 2) * np.arange(0, 210).reshape(
[2, 3, 5, 7]).astype(np.complex64))
def testInt8(self):
self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int8))
self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int8))
def testInt16(self):
self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int16))
self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int16))
def testInt32(self):
self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int32))
self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int32))
def testInt64(self):
self._testCpu(np.arange(0, 21).reshape([3, 7]).astype(np.int64))
self._testCpu(np.arange(0, 210).reshape([2, 3, 5, 7]).astype(np.int64))
def testTranspose2DAuto(self):
x_np = [[1, 2, 3], [4, 5, 6]]
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
x_tf = tf.transpose(x_np).eval()
self.assertAllEqual(x_tf, [[1, 4], [2, 5], [3, 6]])
def testTransposeShapes(self):
self.assertEqual([], tf.transpose(
tf.placeholder(tf.int32, shape=[])).get_shape().dims)
self.assertEqual([100], tf.transpose(
tf.placeholder(tf.int32, shape=[100])).get_shape().dims)
self.assertEqual([37, 100], tf.transpose(
tf.placeholder(tf.int32, shape=[100, 37])).get_shape().dims)
self.assertEqual([100, 37], tf.transpose(
tf.placeholder(tf.int32, shape=[100, 37]), [0, 1]).get_shape().dims)
self.assertEqual([15, 37, 100], tf.transpose(
tf.placeholder(tf.int32, shape=[100, 37, 15])).get_shape().dims)
self.assertEqual([15, 100, 37], tf.transpose(
tf.placeholder(tf.int32,
shape=[100, 37, 15]), [2, 0, 1]).get_shape().dims)
self.assertEqual(tf.TensorShape(None), tf.transpose(
tf.placeholder(tf.int32)).get_shape())
def _testError(self, x, p, err):
with self.test_session():
with self.assertRaisesOpError(err):
tf.transpose(x, p).eval()
def testError(self):
with self.assertRaises(ValueError):
tf.transpose(np.arange(0., 30).reshape([2, 3, 5]), [[0, 1], [2, 3]])
self._testError(np.arange(0., 2 ** 10).reshape([2] * 10),
np.arange(10),
"not implemented")
with self.assertRaises(IndexError):
tf.transpose(np.arange(0., 30).reshape([2, 3, 5]), [0, 1, 3])
self._testError(np.arange(0., 30).reshape([2, 3, 5]),
[0, 1, 1],
"2 is missing")
if __name__ == "__main__":
tf.test.main()
| MehdiSfr/tensor-flow | tensorflow/python/kernel_tests/transpose_op_test.py | Python | apache-2.0 | 7,217 |
# MIT License
# Copyright (c) 2017-9 Matthew Chen, Arc676/Alessandro Vinciguerra
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bot import CelestialBot
import asyncio
import re
class Io(CelestialBot):
callAndResponse = [
("I love you", "I know"),
("ping", "pong"),
("This is madness!", "Madness? **THIS IS SPARTA!**"),
("There isn't enough (.+)", "Where we're going, we don't need $(s)."),
("May the force be with you", "And also with you"),
("Is this Tony Stank\\?", "Yes, this is the Tony Stank"),
("where [ia][sr]e? .+\\?", "Somewhere, over the rainbow"),
("what is .+", "42"),
("Why did the chicken cross the road\?", "To get to the other side"),
("LICENSE", "MIT License at https://github.com/Arc676/Discord-Bots"),
("Watcha doin\\?", "Eatin' chocolate"),
("Whered'ya get it\\?", "A dog dropped it!"),
("This isn't your average, everyday (.+)", "This is **ADVANCED** $"),
("How many (.w+)", "ALL THE $"),
("thank.+\\Wio\\W|thank.+\\Wio$", "You are very welcome")
]
def __init__(self):
super().__init__("Io", color=0xFF9300)
self.active = True
self.defaultCmd = self.handle
self.handleEverything = True
self.buildHelp({
"go away" : "Deactivates Io until called back",
"come back" : "Reactivates Io",
"rtfm" : "Provides a link to the repository for the Discord API module",
"(assorted)" : "Io also responds to various movie quotes and assorted phrases."
})
self.about = "Hi, My name is Io, one of the Galilean moons of Jupiter! I was discovered by Galileo in a telescope he built, and am the 3rd most massive of Jupiter's 69 moons. One of my most notable feature is Tvashtar, a giant volcano."
async def handle(self, message, args):
"""Default message handler
Args:
message: Message object
args: Message content split by whitespace
"""
if not self.active:
if message.content.lower() == "io come back":
self.active = True
await self.reply(message, "I'm back", reply=True)
return
if message.content.lower() == self.name.lower():
await self.reply(message, "Hi there!", reply=True)
elif self.wasAddressed(message):
if message.content.endswith(" go away"): # Makes chatbot leave
self.active = False
await self.reply(message, "OK :(", reply=True)
elif message.content.endswith(" rtfm"):
await self.reply(message, "Follow your own advice: https://github.com/Rapptz/discord.py", reply=True)
else:
await self.reply(message, "Yes?", reply=True)
else: # Runs commands and performs call and response
response = self.getReply(message.content)
if response is not None:
await self.reply(message, response, reply=True)
def getReply(self, msg):
"""Gets the appropriate reply for a message
Args:
msg: Message text
Return:
A response to the message, if any
"""
# Loop through array and get the appropriate response to the call
for call, response in self.callAndResponse:
match = re.search(call, msg, re.I)
if match is not None:
if '(' in call:
response = response.replace("$", match.group(1))
return response
return None
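# Illustrative walk-through (example message only): for the input
# "This isn't your average, everyday bot", getReply() matches the pattern
# "This isn't your average, everyday (.+)", captures "bot", substitutes it for
# the "$" placeholder and returns "This is **ADVANCED** bot".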
if __name__ == "__main__":
bot = Io()
bot.run(bot.getToken())
| Arc676/ChatBot | bots/io.py | Python | mit | 4,166 |
r"""HTTP cookie handling for web clients.
This is a backport of the Py3.3 ``http.cookiejar`` module for
python-future.
This module has (now fairly distant) origins in Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
distributed with the Python standard library, but are available from
http://wwwsearch.sf.net/):
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.builtins import filter, int, map, open, str
from future.utils import as_native_str
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
import copy
import datetime
import re
re.ASCII = 0
import time
from future.backports.urllib.parse import urlparse, urlsplit, quote
from future.backports.http.client import HTTP_PORT
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
from calendar import timegm
debug = False # set to True to enable debugging via the logging module
logger = None
def _debug(*args):
if not debug:
return
global logger
if not logger:
import logging
logger = logging.getLogger("http.cookiejar")
return logger.debug(*args)
DEFAULT_HTTP_PORT = str(HTTP_PORT)
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
def _warn_unhandled_exception():
# There are a few catch-all except: statements in this module, for
# catching input that's bad in unexpected ways. Warn if any
# exceptions are caught there.
import io, warnings, traceback
f = io.StringIO()
traceback.print_exc(None, f)
msg = f.getvalue()
warnings.warn("http.cookiejar bug!\n%s" % msg, stacklevel=2)
# Date/time conversion
# -----------------------------------------------------------------------------
EPOCH_YEAR = 1970
def _timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
MONTHS_LOWER = []
for month in MONTHS: MONTHS_LOWER.append(month.lower())
def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
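    For example, the Unix epoch itself is rendered as:
    >>> time2isoz(0)
    '1970-01-01 00:00:00Z'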
"""
if t is None:
dt = datetime.datetime.utcnow()
else:
dt = datetime.datetime.utcfromtimestamp(t)
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def time2netscape(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like this:
Wed, DD-Mon-YYYY HH:MM:SS GMT
"""
if t is None:
dt = datetime.datetime.utcnow()
else:
dt = datetime.datetime.utcfromtimestamp(t)
return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
DAYS[dt.weekday()], dt.day, MONTHS[dt.month-1],
dt.year, dt.hour, dt.minute, dt.second)
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$", re.ASCII)
def offset_from_tz_string(tz):
offset = None
if tz in UTC_ZONES:
offset = 0
else:
m = TIMEZONE_RE.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
return offset
def _str2time(day, mon, yr, hr, min, sec, tz):
# translate month name to number
# month numbers start with 1 (January)
try:
mon = MONTHS_LOWER.index(mon.lower())+1
except ValueError:
# maybe it's already a number
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
# make sure clock elements are defined
if hr is None: hr = 0
if min is None: min = 0
if sec is None: sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
# find "obvious" year
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0: yr = yr + 100
else: yr = yr - 100
# convert UTC time tuple to seconds since epoch (not timezone-adjusted)
t = _timegm((yr, mon, day, hr, min, sec, tz))
if t is not None:
# adjust time using timezone string, to get absolute time since epoch
if tz is None:
tz = "UTC"
tz = tz.upper()
offset = offset_from_tz_string(tz)
if offset is None:
return None
t = t - offset
return t
STRICT_DATE_RE = re.compile(
r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII)
WEEKDAY_RE = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII)
LOOSE_HTTP_DATE_RE = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X | re.ASCII)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
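    For example (HTTP format without weekday, explicit GMT zone):
    >>> http2time("09 Feb 1994 22:23:32 GMT")
    760832612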
"""
# fast exit for strictly conforming string
m = STRICT_DATE_RE.search(text)
if m:
g = m.groups()
mon = MONTHS_LOWER.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return _timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = WEEKDAY_RE.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = LOOSE_HTTP_DATE_RE.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
ISO_DATE_RE = re.compile(
"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X | re. ASCII)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
1994-02-03 14:15:29 -0100 -- ISO 8601 format
1994-02-03 14:15:29 -- zone is optional
1994-02-03 -- only date
1994-02-03T14:15:29 -- Use T as separator
19940203T141529Z -- ISO 8601 compact format
19940203 -- only date
"""
# clean up
text = text.lstrip()
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = ISO_DATE_RE.search(text)
if m is not None:
# XXX there's an extra bit of the timezone I'm ignoring here: is
# this the right thing to do?
yr, mon, day, hr, min, sec, tz, _ = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
# Header parsing
# -----------------------------------------------------------------------------
def unmatched(match):
"""Return unmatched part of re.Match object."""
start, end = match.span(0)
return match.string[:start]+match.string[end:]
HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)")
HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)")
HEADER_ESCAPE_RE = re.compile(r"\\(.)")
def split_header_words(header_values):
r"""Parse header values into a list of lists containing key,value pairs.
The function knows how to deal with ",", ";" and "=" as well as quoted
values after "=". A list of space separated tokens are parsed as if they
were separated by ";".
If the header_values passed as argument contains multiple values, then they
are treated as if they were a single value separated by comma ",".
This means that this function is useful for parsing header fields that
follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
the requirement for tokens).
headers = #header
header = (token | parameter) *( [";"] (token | parameter))
token = 1*<any CHAR except CTLs or separators>
separators = "(" | ")" | "<" | ">" | "@"
| "," | ";" | ":" | "\" | <">
| "/" | "[" | "]" | "?" | "="
| "{" | "}" | SP | HT
quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
qdtext = <any TEXT except <">>
quoted-pair = "\" CHAR
parameter = attribute "=" value
attribute = token
value = token | quoted-string
Each header is represented by a list of key/value pairs. The value for a
simple token (not part of a parameter) is None. Syntactically incorrect
headers will not necessarily be parsed as you would want.
This is easier to describe with some examples:
>>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
[[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
>>> split_header_words(['text/html; charset="iso-8859-1"'])
[[('text/html', None), ('charset', 'iso-8859-1')]]
>>> split_header_words([r'Basic realm="\"foo\bar\""'])
[[('Basic', None), ('realm', '"foobar"')]]
"""
assert not isinstance(header_values, str)
result = []
for text in header_values:
orig_text = text
pairs = []
while text:
m = HEADER_TOKEN_RE.search(text)
if m:
text = unmatched(m)
name = m.group(1)
m = HEADER_QUOTED_VALUE_RE.search(text)
if m: # quoted value
text = unmatched(m)
value = m.group(1)
value = HEADER_ESCAPE_RE.sub(r"\1", value)
else:
m = HEADER_VALUE_RE.search(text)
if m: # unquoted value
text = unmatched(m)
value = m.group(1)
value = value.rstrip()
else:
# no value, a lone token
value = None
pairs.append((name, value))
elif text.lstrip().startswith(","):
# concatenated headers, as per RFC 2616 section 4.2
text = text.lstrip()[1:]
if pairs: result.append(pairs)
pairs = []
else:
# skip junk
non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
assert nr_junk_chars > 0, (
"split_header_words bug: '%s', '%s', %s" %
(orig_text, text, pairs))
text = non_junk
if pairs: result.append(pairs)
return result
HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
def join_header_words(lists):
"""Do the inverse (almost) of the conversion done by split_header_words.
Takes a list of lists of (key, value) pairs and produces a single header
value. Attribute values are quoted if needed.
>>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
'text/plain; charset="iso-8859/1"'
>>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
'text/plain, charset="iso-8859/1"'
"""
headers = []
for pairs in lists:
attr = []
for k, v in pairs:
if v is not None:
if not re.search(r"^\w+$", v):
v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v) # escape " and \
v = '"%s"' % v
k = "%s=%s" % (k, v)
attr.append(k)
if attr: headers.append("; ".join(attr))
return ", ".join(headers)
def strip_quotes(text):
if text.startswith('"'):
text = text[1:]
if text.endswith('"'):
text = text[:-1]
return text
def parse_ns_headers(ns_headers):
"""Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies.
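    For example (note the implicit "version" pair added for Netscape cookies):
    >>> parse_ns_headers(['foo=bar; path=/; domain=example.com'])
    [[('foo', 'bar'), ('path', '/'), ('domain', 'example.com'), ('version', '0')]]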
"""
known_attrs = ("expires", "domain", "path", "secure",
# RFC 2109 attrs (may turn up in Netscape cookies, too)
"version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
for ii, param in enumerate(re.split(r";\s*", ns_header)):
param = param.rstrip()
if param == "": continue
if "=" not in param:
k, v = param, None
else:
k, v = re.split(r"\s*=\s*", param, 1)
k = k.lstrip()
if ii != 0:
lc = k.lower()
if lc in known_attrs:
k = lc
if k == "version":
# This is an RFC 2109 cookie.
v = strip_quotes(v)
version_set = True
if k == "expires":
# convert expires date to seconds since epoch
v = http2time(strip_quotes(v)) # None if invalid
pairs.append((k, v))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result
IPV4_RE = re.compile(r"\.\d+$", re.ASCII)
def is_HDN(text):
"""Return True if text is a host domain name."""
# XXX
# This may well be wrong. Which RFC is HDN defined in, if any (for
# the purposes of RFC 2965)?
# For the current implementation, what about IPv6? Remember to look
# at other uses of IPV4_RE also, if change this.
if IPV4_RE.search(text):
return False
if text == "":
return False
if text[0] == "." or text[-1] == ".":
return False
return True
def domain_match(A, B):
"""Return True if domain A domain-matches domain B, according to RFC 2965.
A and B may be host domain names or IP addresses.
RFC 2965, section 1:
Host names can be specified either as an IP address or a HDN string.
Sometimes we compare one host name with another. (Such comparisons SHALL
be case-insensitive.) Host A's name domain-matches host B's if
* their host name strings string-compare equal; or
* A is a HDN string and has the form NB, where N is a non-empty
name string, B has the form .B', and B' is a HDN string. (So,
x.y.com domain-matches .Y.com but not Y.com.)
Note that domain-match is not a commutative operation: a.b.c.com
domain-matches .c.com, but not the reverse.
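    For example:
    >>> domain_match("x.y.com", ".y.com")
    True
    >>> domain_match("x.y.com", "y.com")
    False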
"""
# Note that, if A or B are IP addresses, the only relevant part of the
# definition of the domain-match algorithm is the direct string-compare.
A = A.lower()
B = B.lower()
if A == B:
return True
if not is_HDN(A):
return False
i = A.rfind(B)
if i == -1 or i == 0:
# A does not have form NB, or N is the empty string
return False
if not B.startswith("."):
return False
if not is_HDN(B[1:]):
return False
return True
def liberal_is_HDN(text):
"""Return True if text is a sort-of-like a host domain name.
For accepting/blocking domains.
"""
if IPV4_RE.search(text):
return False
return True
def user_domain_match(A, B):
"""For blocking/accepting domains.
A and B may be host domain names or IP addresses.
"""
A = A.lower()
B = B.lower()
if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
if A == B:
# equal IP addresses
return True
return False
initial_dot = B.startswith(".")
if initial_dot and A.endswith(B):
return True
if not initial_dot and A == B:
return True
return False
cut_port_re = re.compile(r":\d+$", re.ASCII)
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = urlparse(url)[1]
if host == "":
host = request.get_header("Host", "")
# remove port, if present
host = cut_port_re.sub("", host, 1)
return host.lower()
def eff_request_host(request):
"""Return a tuple (request-host, effective request-host name).
As defined by RFC 2965, except both are lowercased.
"""
erhn = req_host = request_host(request)
if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
erhn = req_host + ".local"
return req_host, erhn
def request_path(request):
"""Path component of request-URI, as defined by RFC 2965."""
url = request.get_full_url()
parts = urlsplit(url)
path = escape_path(parts.path)
if not path.startswith("/"):
# fix bad RFC 2396 absoluteURI
path = "/" + path
return path
def request_port(request):
host = request.host
i = host.find(':')
if i >= 0:
port = host[i+1:]
try:
int(port)
except ValueError:
_debug("nonnumeric port: '%s'", port)
return None
else:
port = DEFAULT_HTTP_PORT
return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
return "%%%s" % match.group(1).upper()
def escape_path(path):
"""Escape any invalid characters in HTTP URL, and uppercase all escapes."""
# There's no knowing what character encoding was used to create URLs
# containing %-escapes, but since we have to pick one to escape invalid
# path characters, we pick UTF-8, as recommended in the HTML 4.0
# specification:
# http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
# And here, kind of: draft-fielding-uri-rfc2396bis-03
# (And in draft IRI specification: draft-duerst-iri-05)
# (And here, for new URI schemes: RFC 2718)
path = quote(path, HTTP_PATH_SAFE)
path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
return path
def reach(h):
"""Return reach of host h, as defined by RFC 2965, section 1.
The reach R of a host name H is defined as follows:
* If
- H is the host domain name of a host; and,
- H has the form A.B; and
- A has no embedded (that is, interior) dots; and
- B has at least one embedded dot, or B is the string "local".
then the reach of H is .B.
* Otherwise, the reach of H is H.
>>> reach("www.acme.com")
'.acme.com'
>>> reach("acme.com")
'acme.com'
>>> reach("acme.local")
'.local'
"""
i = h.find(".")
if i >= 0:
#a = h[:i] # this line is only here to show what a is
b = h[i+1:]
i = b.find(".")
if is_HDN(h) and (i >= 0 or b == "local"):
return "."+b
return h
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
"""
req_host = request_host(request)
if not domain_match(req_host, reach(request.get_origin_req_host())):
return True
else:
return False
class Cookie(object):
"""HTTP Cookie.
This class represents both Netscape and RFC 2965 cookies.
This is deliberately a very simple class. It just holds attributes. It's
possible to construct Cookie instances that don't comply with the cookie
standards. CookieJar.make_cookies is the factory function for Cookie
objects -- it deals with cookie parsing, supplying defaults, and
normalising to the representation used in this class. CookiePolicy is
responsible for checking them to see whether they should be accepted from
and returned to the server.
Note that the port may be present in the headers, but unspecified ("Port"
rather than"Port=80", for example); if this is the case, port is None.
"""
def __init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False,
):
if version is not None: version = int(version)
if expires is not None: expires = int(expires)
if port is None and port_specified is True:
raise ValueError("if port is None, port_specified must be false")
self.version = version
self.name = name
self.value = value
self.port = port
self.port_specified = port_specified
# normalise case, as per RFC 2965 section 3.3.3
self.domain = domain.lower()
self.domain_specified = domain_specified
# Sigh. We need to know whether the domain given in the
# cookie-attribute had an initial dot, in order to follow RFC 2965
# (as clarified in draft errata). Needed for the returned $Domain
# value.
self.domain_initial_dot = domain_initial_dot
self.path = path
self.path_specified = path_specified
self.secure = secure
self.expires = expires
self.discard = discard
self.comment = comment
self.comment_url = comment_url
self.rfc2109 = rfc2109
self._rest = copy.copy(rest)
def has_nonstandard_attr(self, name):
return name in self._rest
def get_nonstandard_attr(self, name, default=None):
return self._rest.get(name, default)
def set_nonstandard_attr(self, name, value):
self._rest[name] = value
def is_expired(self, now=None):
if now is None: now = time.time()
if (self.expires is not None) and (self.expires <= now):
return True
return False
def __str__(self):
if self.port is None: p = ""
else: p = ":"+self.port
limit = self.domain + p + self.path
if self.value is not None:
namevalue = "%s=%s" % (self.name, self.value)
else:
namevalue = self.name
return "<Cookie %s for %s>" % (namevalue, limit)
@as_native_str()
def __repr__(self):
args = []
for name in ("version", "name", "value",
"port", "port_specified",
"domain", "domain_specified", "domain_initial_dot",
"path", "path_specified",
"secure", "expires", "discard", "comment", "comment_url",
):
attr = getattr(self, name)
### Python-Future:
# Avoid u'...' prefixes for unicode strings:
if isinstance(attr, str):
attr = str(attr)
###
args.append(str("%s=%s") % (name, repr(attr)))
args.append("rest=%s" % repr(self._rest))
args.append("rfc2109=%s" % repr(self.rfc2109))
return "Cookie(%s)" % ", ".join(args)
class CookiePolicy(object):
"""Defines which cookies get accepted from and returned to server.
May also modify cookies, though this is probably a bad idea.
The subclass DefaultCookiePolicy defines the standard rules for Netscape
and RFC 2965 cookies -- override that if you want a customised policy.
"""
def set_ok(self, cookie, request):
"""Return true if (and only if) cookie should be accepted from server.
Currently, pre-expired cookies never get this far -- the CookieJar
class deletes such cookies itself.
"""
raise NotImplementedError()
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server."""
raise NotImplementedError()
def domain_return_ok(self, domain, request):
"""Return false if cookies should not be returned, given cookie domain.
"""
return True
def path_return_ok(self, path, request):
"""Return false if cookies should not be returned, given cookie path.
"""
return True
class DefaultCookiePolicy(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies."""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""Constructor arguments should be passed as keyword arguments only."""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override .set_ok(), be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
_debug(" Set-Cookie2 without version attribute (%s=%s)",
cookie.name, cookie.value)
return False
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
_debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
_debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
_debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
_debug(" domain %s is not in user allow-list", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host(request)
domain = cookie.domain
if self.strict_domain and (domain.count(".") >= 2):
# XXX This should probably be compared with the Konqueror
# (kcookiejar.cpp) and Mozilla implementations, but it's a
# losing battle.
i = domain.rfind(".")
j = domain.rfind(".", 0, i)
if j == 0: # domain like .foo.bar
tld = domain[i+1:]
sld = domain[j+1:i]
if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
"gov", "mil", "int", "aero", "biz", "cat", "coop",
"info", "jobs", "mobi", "museum", "name", "pro",
"travel", "eu") and len(tld) == 2:
# domain like .co.uk
_debug(" country-code second level domain %s", domain)
return False
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
_debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
_debug(" effective request-host %s (even with added "
"initial dot) does not end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
_debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
_debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
_debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
_debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override .return_ok(), be sure to call this method. If it
returns false, so should your subclass (assuming your subclass wants to
be more strict about which cookies to return).
"""
# Path has already been checked by .path_return_ok(), and domain
# blocking done by .domain_return_ok().
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
for n in "version", "verifiability", "secure", "expires", "port", "domain":
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.type != "https":
_debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
_debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
_debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
_debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
_debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
_debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
        # Liberal check of the domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
req_host, erhn = eff_request_host(request)
if not req_host.startswith("."):
req_host = "."+req_host
if not erhn.startswith("."):
erhn = "."+erhn
if not (req_host.endswith(domain) or erhn.endswith(domain)):
#_debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
_debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
_debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
_debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
_debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
keys = sorted(adict.keys())
return map(adict.get, keys)
def deepvalues(mapping):
"""Iterates over nested mapping, depth-first, in sorted order by key."""
values = vals_sorted_by_key(mapping)
for obj in values:
mapping = False
try:
obj.items
except AttributeError:
pass
else:
mapping = True
for subobj in deepvalues(obj):
yield subobj
if not mapping:
yield obj
# Used as second parameter to dict.get() method, to distinguish absent
# dict key from one with a None value.
class Absent(object): pass
class CookieJar(object):
"""Collection of HTTP cookies.
You may not need to know about this class: try
urllib.request.build_opener(HTTPCookieProcessor).open(url).
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII)
def __init__(self, policy=None):
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies_lock = _threading.RLock()
self._cookies = {}
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
_debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
_debug(" not returning cookie")
continue
_debug(" it's a match")
cookies.append(cookie)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
"""
# add cookies in order of most specific (ie. longest) path first
cookies.sort(key=lambda a: len(a.path), reverse=True)
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib.request.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if k in standard:
# only first value is significant
continue
if k == "domain":
if v is None:
_debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
_debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
_debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
                    # age-calculation rules. Remember that a zero Max-Age is
                    # a request to discard the (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ("port", "comment", "commenturl")):
_debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
_debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
if rfc2109_as_ns is None:
rfc2109_as_ns = not self._policy.rfc2965
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_ns:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object."""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
ns_hdrs = headers.get_all("Set-Cookie", [])
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except Exception:
_warn_unhandled_exception()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except Exception:
_warn_unhandled_exception()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return key not in lookup
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so."""
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set."""
c = self._cookies
self._cookies_lock.acquire()
try:
if cookie.domain not in c: c[cookie.domain] = {}
c2 = c[cookie.domain]
if cookie.path not in c2: c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
finally:
self._cookies_lock.release()
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request."""
_debug("extract_cookies: %s", response.info())
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
_debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
"""
self._cookies_lock.acquire()
try:
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the
.save() method won't save expired cookies anyway (unless you ask
otherwise by passing a true ignore_expires argument).
"""
self._cookies_lock.acquire()
try:
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def __iter__(self):
return deepvalues(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
@as_native_str()
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
# derives from IOError for backwards-compatibility with Python 2.4.0
class LoadError(IOError): pass
class FileCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a file."""
def __init__(self, filename=None, delayload=False, policy=None):
"""
Cookies are NOT loaded from the named file until either the .load() or
.revert() method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None:
try:
filename+""
except:
raise ValueError("filename must be string-like")
self.filename = filename
self.delayload = bool(delayload)
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file."""
raise NotImplementedError()
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close()
def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or IOError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
self._cookies_lock.acquire()
try:
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except (LoadError, IOError):
self._cookies = old_state
raise
finally:
self._cookies_lock.release()
def lwp_cookie_str(cookie):
"""Return string representation of Cookie in an the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring.
"""
h = [(cookie.name, cookie.value),
("path", cookie.path),
("domain", cookie.domain)]
if cookie.port is not None: h.append(("port", cookie.port))
if cookie.path_specified: h.append(("path_spec", None))
if cookie.port_specified: h.append(("port_spec", None))
if cookie.domain_initial_dot: h.append(("domain_dot", None))
if cookie.secure: h.append(("secure", None))
if cookie.expires: h.append(("expires",
time2isoz(float(cookie.expires))))
if cookie.discard: h.append(("discard", None))
if cookie.comment: h.append(("comment", cookie.comment))
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
keys = sorted(cookie._rest.keys())
for k in keys:
h.append((k, str(cookie._rest[k])))
h.append(("version", str(cookie.version)))
return join_header_words([h])
class LWPCookieJar(FileCookieJar):
"""
The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
"Set-Cookie3" is the format used by the libwww-perl libary, not known
to be compatible with any browser, but which is easy to read and
doesn't lose information about RFC 2965 cookies.
Additional methods
    as_lwp_str(ignore_discard=True, ignore_expires=True)
"""
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
"""Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
"""
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""])
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
# There really isn't an LWP Cookies 2.0 format, but this indicates
# that there is extra information in here (domain_dot and
# port_spec) while still being compatible with libwww-perl, I hope.
f.write("#LWP-Cookies-2.0\n")
f.write(self.as_lwp_str(ignore_discard, ignore_expires))
finally:
f.close()
def _really_load(self, f, filename, ignore_discard, ignore_expires):
magic = f.readline()
if not self.magic_re.search(magic):
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
"file" % filename)
raise LoadError(msg)
now = time.time()
header = "Set-Cookie3:"
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
"secure", "discard")
value_attrs = ("version",
"port", "path", "domain",
"expires",
"comment", "commenturl")
try:
while 1:
line = f.readline()
if line == "": break
if not line.startswith(header):
continue
line = line[len(header):].strip()
for data in split_header_words([line]):
name, value = data[0]
standard = {}
rest = {}
for k in boolean_attrs:
standard[k] = False
for k, v in data[1:]:
if k is not None:
lc = k.lower()
else:
lc = None
# don't lose case distinction for unknown fields
if (lc in value_attrs) or (lc in boolean_attrs):
k = lc
if k in boolean_attrs:
if v is None: v = True
standard[k] = v
elif k in value_attrs:
standard[k] = v
else:
rest[k] = v
h = standard.get
expires = h("expires")
discard = h("discard")
if expires is not None:
expires = iso2time(expires)
if expires is None:
discard = True
domain = h("domain")
domain_specified = domain.startswith(".")
c = Cookie(h("version"), name, value,
h("port"), h("port_spec"),
domain, domain_specified, h("domain_dot"),
h("path"), h("path_spec"),
h("secure"),
expires,
discard,
h("comment"),
h("commenturl"),
rest)
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
(filename, line))
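# --- Editor's addition: a hedged usage sketch, not part of the original
# module. The filename is an illustrative assumption.
def _example_lwp_roundtrip(path="cookies.lwp"):
    """Sketch: persist and reload cookies in the Set-Cookie3 (LWP) format."""
    jar = LWPCookieJar(path)
    jar.save(ignore_discard=True, ignore_expires=True)  # writes the #LWP-Cookies-2.0 header
    jar.load(ignore_discard=True, ignore_expires=True)  # re-reads the same file
    return jar.as_lwp_str()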
class MozillaCookieJar(FileCookieJar):
"""
WARNING: you may want to backup your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = re.compile("#( Netscape)? HTTP Cookie File")
header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not self.magic_re.search(magic):
f.close()
raise LoadError(
"%r does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith(("#", "$")) or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t")
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = value
value = None
initial_dot = domain.startswith(".")
assert domain_specified == initial_dot
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except IOError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Netscape format cookies file %r: %r" %
(filename, line))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
f.write(self.header)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
if cookie.secure: secure = "TRUE"
else: secure = "FALSE"
if cookie.domain.startswith("."): initial_dot = "TRUE"
else: initial_dot = "FALSE"
if cookie.expires is not None:
expires = str(cookie.expires)
else:
expires = ""
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = ""
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
"\t".join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value])+
"\n")
finally:
f.close()
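# --- Editor's addition: a hedged usage sketch, not part of the original
# module. Assumes a browser-exported cookies.txt already exists at the path.
def _example_mozilla_load(path="cookies.txt"):
    """Sketch: read a Netscape/Mozilla cookies.txt file and list cookie names."""
    jar = MozillaCookieJar(path)
    jar.load(ignore_discard=True, ignore_expires=True)
    return sorted(cookie.name for cookie in jar)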
| hughperkins/kgsgo-dataset-preprocessor | thirdparty/future/src/future/backports/http/cookiejar.py | Python | mpl-2.0 | 76,542 |
'''OpenGL extension VERSION.GL_4_1
This module customises the behaviour of the
OpenGL.raw.GL.VERSION.GL_4_1 to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/VERSION/GL_4_1.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.VERSION.GL_4_1 import *
from OpenGL.raw.GL.VERSION.GL_4_1 import _EXTENSION_NAME
def glInitGl41VERSION():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glShaderBinary.binary size not checked against length
# INPUT glShaderBinary.shaders size not checked against count
glShaderBinary=wrapper.wrapper(glShaderBinary).setInputArraySize(
'binary', None
).setInputArraySize(
'shaders', None
)
glGetShaderPrecisionFormat=wrapper.wrapper(glGetShaderPrecisionFormat).setOutput(
'range',size=(2,),orPassIn=True
).setOutput(
'precision',size=(2,),orPassIn=True
)
glGetProgramBinary=wrapper.wrapper(glGetProgramBinary).setOutput(
'binary',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'binaryFormat',size=(1,),orPassIn=True
)
# INPUT glProgramBinary.binary size not checked against length
glProgramBinary=wrapper.wrapper(glProgramBinary).setInputArraySize(
'binary', None
)
# INPUT glCreateShaderProgramv.strings size not checked against count
glCreateShaderProgramv=wrapper.wrapper(glCreateShaderProgramv).setInputArraySize(
'strings', None
)
# INPUT glDeleteProgramPipelines.pipelines size not checked against n
glDeleteProgramPipelines=wrapper.wrapper(glDeleteProgramPipelines).setInputArraySize(
'pipelines', None
)
glGenProgramPipelines=wrapper.wrapper(glGenProgramPipelines).setOutput(
'pipelines',size=lambda x:(x,),pnameArg='n',orPassIn=True
)
glGetProgramPipelineiv=wrapper.wrapper(glGetProgramPipelineiv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glProgramUniform1iv=wrapper.wrapper(glProgramUniform1iv).setInputArraySize(
'value', 1
)
glProgramUniform1fv=wrapper.wrapper(glProgramUniform1fv).setInputArraySize(
'value', 1
)
glProgramUniform1dv=wrapper.wrapper(glProgramUniform1dv).setInputArraySize(
'value', 1
)
glProgramUniform1uiv=wrapper.wrapper(glProgramUniform1uiv).setInputArraySize(
'value', 1
)
glProgramUniform2iv=wrapper.wrapper(glProgramUniform2iv).setInputArraySize(
'value', 2
)
glProgramUniform2fv=wrapper.wrapper(glProgramUniform2fv).setInputArraySize(
'value', 2
)
glProgramUniform2dv=wrapper.wrapper(glProgramUniform2dv).setInputArraySize(
'value', 2
)
glProgramUniform2uiv=wrapper.wrapper(glProgramUniform2uiv).setInputArraySize(
'value', 2
)
glProgramUniform3iv=wrapper.wrapper(glProgramUniform3iv).setInputArraySize(
'value', 3
)
glProgramUniform3fv=wrapper.wrapper(glProgramUniform3fv).setInputArraySize(
'value', 3
)
glProgramUniform3dv=wrapper.wrapper(glProgramUniform3dv).setInputArraySize(
'value', 3
)
glProgramUniform3uiv=wrapper.wrapper(glProgramUniform3uiv).setInputArraySize(
'value', 3
)
glProgramUniform4iv=wrapper.wrapper(glProgramUniform4iv).setInputArraySize(
'value', 4
)
glProgramUniform4fv=wrapper.wrapper(glProgramUniform4fv).setInputArraySize(
'value', 4
)
glProgramUniform4dv=wrapper.wrapper(glProgramUniform4dv).setInputArraySize(
'value', 4
)
glProgramUniform4uiv=wrapper.wrapper(glProgramUniform4uiv).setInputArraySize(
'value', 4
)
glProgramUniformMatrix2fv=wrapper.wrapper(glProgramUniformMatrix2fv).setInputArraySize(
'value', 2
)
glProgramUniformMatrix3fv=wrapper.wrapper(glProgramUniformMatrix3fv).setInputArraySize(
'value', 3
)
glProgramUniformMatrix4fv=wrapper.wrapper(glProgramUniformMatrix4fv).setInputArraySize(
'value', 4
)
glProgramUniformMatrix2dv=wrapper.wrapper(glProgramUniformMatrix2dv).setInputArraySize(
'value', 2
)
glProgramUniformMatrix3dv=wrapper.wrapper(glProgramUniformMatrix3dv).setInputArraySize(
'value', 3
)
glProgramUniformMatrix4dv=wrapper.wrapper(glProgramUniformMatrix4dv).setInputArraySize(
'value', 4
)
# INPUT glProgramUniformMatrix2x3fv.value size not checked against count
glProgramUniformMatrix2x3fv=wrapper.wrapper(glProgramUniformMatrix2x3fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x2fv.value size not checked against count
glProgramUniformMatrix3x2fv=wrapper.wrapper(glProgramUniformMatrix3x2fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x4fv.value size not checked against count
glProgramUniformMatrix2x4fv=wrapper.wrapper(glProgramUniformMatrix2x4fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x2fv.value size not checked against count
glProgramUniformMatrix4x2fv=wrapper.wrapper(glProgramUniformMatrix4x2fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x4fv.value size not checked against count
glProgramUniformMatrix3x4fv=wrapper.wrapper(glProgramUniformMatrix3x4fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x3fv.value size not checked against count
glProgramUniformMatrix4x3fv=wrapper.wrapper(glProgramUniformMatrix4x3fv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x3dv.value size not checked against count
glProgramUniformMatrix2x3dv=wrapper.wrapper(glProgramUniformMatrix2x3dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x2dv.value size not checked against count
glProgramUniformMatrix3x2dv=wrapper.wrapper(glProgramUniformMatrix3x2dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x4dv.value size not checked against count
glProgramUniformMatrix2x4dv=wrapper.wrapper(glProgramUniformMatrix2x4dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x2dv.value size not checked against count
glProgramUniformMatrix4x2dv=wrapper.wrapper(glProgramUniformMatrix4x2dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x4dv.value size not checked against count
glProgramUniformMatrix3x4dv=wrapper.wrapper(glProgramUniformMatrix3x4dv).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x3dv.value size not checked against count
glProgramUniformMatrix4x3dv=wrapper.wrapper(glProgramUniformMatrix4x3dv).setInputArraySize(
'value', None
)
glGetProgramPipelineInfoLog=wrapper.wrapper(glGetProgramPipelineInfoLog).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'infoLog',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
glVertexAttribL1dv=wrapper.wrapper(glVertexAttribL1dv).setInputArraySize(
'v', 1
)
glVertexAttribL2dv=wrapper.wrapper(glVertexAttribL2dv).setInputArraySize(
'v', 2
)
glVertexAttribL3dv=wrapper.wrapper(glVertexAttribL3dv).setInputArraySize(
'v', 3
)
glVertexAttribL4dv=wrapper.wrapper(glVertexAttribL4dv).setInputArraySize(
'v', 4
)
# INPUT glVertexAttribLPointer.pointer size not checked against size
glVertexAttribLPointer=wrapper.wrapper(glVertexAttribLPointer).setInputArraySize(
'pointer', None
)
glGetVertexAttribLdv=wrapper.wrapper(glGetVertexAttribLdv).setOutput(
'params',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
# INPUT glViewportArrayv.v size not checked against 'count'
glViewportArrayv=wrapper.wrapper(glViewportArrayv).setInputArraySize(
'v', None
)
glViewportIndexedfv=wrapper.wrapper(glViewportIndexedfv).setInputArraySize(
'v', 4
)
# INPUT glScissorArrayv.v size not checked against 'count'
glScissorArrayv=wrapper.wrapper(glScissorArrayv).setInputArraySize(
'v', None
)
glScissorIndexedv=wrapper.wrapper(glScissorIndexedv).setInputArraySize(
'v', 4
)
# INPUT glDepthRangeArrayv.v size not checked against 'count'
glDepthRangeArrayv=wrapper.wrapper(glDepthRangeArrayv).setInputArraySize(
'v', None
)
glGetFloati_v=wrapper.wrapper(glGetFloati_v).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='target',orPassIn=True
)
glGetDoublei_v=wrapper.wrapper(glGetDoublei_v).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='target',orPassIn=True
)
### END AUTOGENERATED SECTION
from OpenGL.GL.ARB.ES2_compatibility import *
from OpenGL.GL.ARB.get_program_binary import *
from OpenGL.GL.ARB.separate_shader_objects import *
from OpenGL.GL.ARB.shader_precision import *
from OpenGL.GL.ARB.vertex_attrib_64bit import *
from OpenGL.GL.ARB.viewport_array import *
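# --- Editor's addition: a hedged usage sketch, not part of the generated
# module. Assumes a current GL context has already been created elsewhere
# (e.g. by a windowing toolkit) before the check runs.
def _example_require_gl41():
    """Sketch: gate use of the wrapped GL 4.1 entry points on availability."""
    if not glInitGl41VERSION():
        raise RuntimeError("OpenGL 4.1 functionality is not available")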
| stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GL/VERSION/GL_4_1.py | Python | lgpl-3.0 | 8,574 |
import numpy
import math
def mkRamp(*args):
''' mkRamp(SIZE, DIRECTION, SLOPE, INTERCEPT, ORIGIN)
Compute a matrix of dimension SIZE (a [Y X] 2-vector, or a scalar)
containing samples of a ramp function, with given gradient DIRECTION
(radians, CW from X-axis, default = 0), SLOPE (per pixel, default =
    1), and a value of INTERCEPT (default = 0) at the ORIGIN (default =
    (size-1)/2 in the zero-indexed coordinates used here, [0 0] = upper
    left). All but the first argument are optional. '''
if len(args) == 0:
print("mkRamp(SIZE, DIRECTION, SLOPE, INTERCEPT, ORIGIN)")
print("first argument is required")
exit(1)
else:
sz = args[0]
if isinstance(sz, (int)):
sz = (sz, sz)
elif not isinstance(sz, (tuple)):
print("first argument must be a two element tuple or an integer")
exit(1)
# OPTIONAL args:
if len(args) > 1:
direction = args[1]
else:
direction = 0
if len(args) > 2:
slope = args[2]
else:
slope = 1
if len(args) > 3:
intercept = args[3]
else:
intercept = 0
if len(args) > 4:
origin = args[4]
else:
origin = (float(sz[0] - 1) / 2.0, float(sz[1] - 1) / 2.0)
#--------------------------
xinc = slope * math.cos(direction)
yinc = slope * math.sin(direction)
[xramp, yramp] = numpy.meshgrid(xinc * (numpy.array(list(range(sz[1]))) - origin[1]),
yinc * (numpy.array(list(range(sz[0]))) - origin[0]))
res = intercept + xramp + yramp
return res
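# --- Editor's addition: a hedged usage sketch showing the function above on a
# small grid; the size and direction values are illustrative.
if __name__ == "__main__":
    # 8x8 ramp rising along the 45-degree diagonal with unit slope.
    ramp = mkRamp((8, 8), math.pi / 4, 1, 0)
    print(ramp.shape, float(ramp.min()), float(ramp.max()))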
| tochikuji/pyPyrTools | pyrtools/mkRamp.py | Python | mit | 1,617 |
import pyjd # dummy in pyjs
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.TextArea import TextArea
from pyjamas.ui.Label import Label
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.ListBox import ListBox
from pyjamas.JSONService import JSONProxy
class JSONRPCExample:
def onModuleLoad(self):
self.TEXT_WAITING = "Waiting for response..."
self.TEXT_ERROR = "Server Error"
self.METHOD_ECHO = "Echo"
self.METHOD_REVERSE = "Reverse"
self.METHOD_UPPERCASE = "UPPERCASE"
self.METHOD_LOWERCASE = "lowercase"
self.METHOD_NONEXISTANT = "Non existant"
self.methods = [self.METHOD_ECHO, self.METHOD_REVERSE,
self.METHOD_UPPERCASE, self.METHOD_LOWERCASE,
self.METHOD_NONEXISTANT]
self.remote_php = EchoServicePHP()
self.remote_py = [
EchoServicePython(),
EchoServicePython(server="flask"),
]
self.status = Label()
self.text_area = TextArea()
self.text_area.setText("""{'Test'} [\"String\"]
\tTest Tab
Test Newline\n
after newline
""" + r"""Literal String:
{'Test'} [\"String\"]
""")
self.text_area.setCharacterWidth(80)
self.text_area.setVisibleLines(8)
self.method_list = ListBox()
self.method_list.setName("hello")
self.method_list.setVisibleItemCount(1)
for method in self.methods:
self.method_list.addItem(method)
self.method_list.setSelectedIndex(0)
method_panel = HorizontalPanel()
method_panel.add(HTML("Remote string method to call: "))
method_panel.add(self.method_list)
method_panel.setSpacing(8)
self.button_php = Button("Send to PHP Service", self)
python_buttons = [
Button("Send to Python Service", self),
Button("Send to Flask view function (localhost:5000)", self),
]
buttons = HorizontalPanel()
buttons.add(self.button_php)
self.python_buttons = {}
for i in range(len(python_buttons)):
buttons.add(python_buttons[i])
self.python_buttons[python_buttons[i]] = self.remote_py[i]
buttons.setSpacing(8)
info = """<h2>JSON-RPC Example</h2>
<p>This example demonstrates the calling of server services with
<a href="http://json-rpc.org/">JSON-RPC</a>.
</p>
<p>Enter some text below, and press a button to send the text
to an Echo service on your server. An echo service simply sends the exact same text back that it receives.
</p>"""
panel = VerticalPanel()
panel.add(HTML(info))
panel.add(self.text_area)
panel.add(method_panel)
panel.add(buttons)
panel.add(self.status)
RootPanel().add(panel)
def onClick(self, sender):
method = self.methods[self.method_list.getSelectedIndex()]
text = self.text_area.getText()
# demonstrate proxy & callMethod()
if sender == self.button_php:
if method == self.METHOD_ECHO:
id = self.remote_php.echo(text, self)
elif method == self.METHOD_REVERSE:
id = self.remote_php.callMethod("reverse", [text], self)
elif method == self.METHOD_UPPERCASE:
id = self.remote_php.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = self.remote_php.lowercase(self, msg=text)
elif method == self.METHOD_NONEXISTANT:
id = self.remote_php.nonexistant(text, self)
elif(sender in self.python_buttons):
remote_py = self.python_buttons[sender]
if method == self.METHOD_ECHO:
id = remote_py.echo(text, self)
elif method == self.METHOD_REVERSE:
id = remote_py.reverse(text, self)
elif method == self.METHOD_UPPERCASE:
id = remote_py.uppercase(text, self)
elif method == self.METHOD_LOWERCASE:
id = remote_py.lowercase(text, self)
elif method == self.METHOD_NONEXISTANT:
id = remote_py.nonexistant(text, self)
else:
self.status.setText(self.TEXT_WAITING + " unrecognized method")
# what should really be done here?
pass
def onRemoteResponse(self, response, request_info):
self.status.setText(response)
def onRemoteError(self, code, errobj, request_info):
# onRemoteError gets the HTTP error code or 0 and
# errobj is an jsonrpc 2.0 error dict:
# {
# 'code': jsonrpc-error-code (integer) ,
# 'message': jsonrpc-error-message (string) ,
# 'data' : extra-error-data
# }
message = errobj['message']
if code != 0:
self.status.setText("HTTP error %d: %s" %
(code, message))
else:
code = errobj['code']
self.status.setText("JSONRPC Error %s: %s" %
(code, message))
class EchoServicePHP(JSONProxy):
def __init__(self):
JSONProxy.__init__(self, "services/EchoService.php", ["echo", "reverse", "uppercase", "lowercase", "nonexistant"])
class EchoServicePython(JSONProxy):
def __init__(self, server="mod_python"):
methods = ["echo", "reverse", "uppercase", "lowercase", "nonexistant"]
if server == "mod_python":
JSONProxy.__init__(self, "services/EchoService.py", methods)
elif server == "flask":
JSONProxy.__init__(
self, "http://localhost:5000/json_echo/", methods)
if __name__ == '__main__':
# for pyjd, set up a web server and load the HTML from there:
# this convinces the browser engine that the AJAX will be loaded
# from the same URI base as the URL, it's all a bit messy...
pyjd.setup("http://127.0.0.1:8000/public/JSONRPCExample.html")
app = JSONRPCExample()
app.onModuleLoad()
pyjd.run()
| minghuascode/pyj | examples/misc/flaskexamples/flaskcors/Flask_JSONRPC_CORS.py | Python | apache-2.0 | 6,253 |
# Copyright (C) 2014 Red Hat, Inc., Bryn M. Reeves <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Ctdb(Plugin, DebianPlugin, UbuntuPlugin):
"""Samba Clustered TDB
"""
packages = ('ctdb',)
profiles = ('cluster', 'storage')
plugin_name = "ctdb"
def setup(self):
self.add_copy_spec([
"/etc/ctdb/public_addresses",
"/etc/ctdb/static-routes",
"/etc/ctdb/multipathd",
"/var/log/log.ctdb"
])
self.add_cmd_output([
"ctdb ip",
"ctdb ping",
"ctdb status",
"ctdb ifaces",
"ctdb listnodes",
"ctdb listvars",
"ctdb statistics",
"ctdb getdbmap"
])
class RedHatCtdb(Ctdb, RedHatPlugin):
def setup(self):
super(RedHatCtdb, self).setup()
self.add_copy_spec("/etc/sysconfig/ctdb")
# vim: et ts=4 sw=4
| goodwinos/sos | sos/plugins/ctdb.py | Python | gpl-2.0 | 1,642 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Script to evaluate model predictions against the ground truth."""
import glob
import os
from absl import app
from absl import flags
import numpy as np
from PIL import Image
import tensorflow.compat.v2 as tf
from dual_pixels.eval import get_metrics
flags.DEFINE_string('test_dir', 'test/', 'Path to test dataset.')
flags.DEFINE_string('prediction_dir', 'model_prediction/',
'Path to model predictions.')
FLAGS = flags.FLAGS
# Crop over which we do evaluation.
CROP_HEIGHT = 512
CROP_WIDTH = 384
def get_captures():
"""Gets a list of captures."""
depth_dir = os.path.join(FLAGS.test_dir, 'merged_depth')
return [
name for name in os.listdir(depth_dir)
if os.path.isdir(os.path.join(depth_dir, name))
]
def load_capture(capture_name):
"""Loads the ground truth depth, confidence and prediction for a capture."""
# Assume that we are loading the center capture.
# Load GT Depth.
depth_dir = os.path.join(FLAGS.test_dir, 'merged_depth')
gt_depth_path = glob.glob(
os.path.join(depth_dir, capture_name, '*_center.png'))[0]
gt_depth = Image.open(gt_depth_path)
gt_depth = np.asarray(gt_depth, dtype=np.float32) / 255.0
# Load GT Depth confidence.
depth_conf_dir = os.path.join(FLAGS.test_dir, 'merged_conf')
gt_depth_conf_path = glob.glob(
os.path.join(depth_conf_dir, capture_name, '*_center.npy'))[0]
gt_depth_conf = np.load(gt_depth_conf_path)
# Load prediction.
prediction_path = glob.glob(
os.path.join(FLAGS.prediction_dir, capture_name + '.npy'))[0]
prediction = np.load(prediction_path)
return prediction, gt_depth, gt_depth_conf
def main(argv):
del argv # Unused.
tf.enable_v2_behavior()
captures = get_captures()
loss_dict = {'wmae': [], 'wrmse': [], 'spearman': []}
for capture in captures:
print(capture)
pred, depth_gt, conf_gt = load_capture(capture)
losses = get_metrics.metrics(pred, depth_gt, conf_gt, CROP_HEIGHT,
CROP_WIDTH)
for loss_name, loss in loss_dict.items():
loss.append(losses[loss_name].numpy())
for loss_name, loss in loss_dict.items():
loss_dict[loss_name] = np.mean(loss)
print(loss_dict)
if __name__ == '__main__':
app.run(main)
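# --- Editor's addition: a hedged usage note, not part of the original script.
# Typical invocation (paths are illustrative assumptions); --test_dir must
# contain merged_depth/ and merged_conf/, and --prediction_dir one .npy file
# per capture:
#
#   python -m dual_pixels.eval.script \
#       --test_dir=/data/dp_test/ \
#       --prediction_dir=/data/model_prediction/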
| google-research/google-research | dual_pixels/eval/script.py | Python | apache-2.0 | 2,861 |
"""Support the ISY-994 controllers."""
import asyncio
from functools import partial
from typing import Optional
from urllib.parse import urlparse
from pyisy import ISY
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.typing import ConfigType
from .const import (
_LOGGER,
CONF_IGNORE_STRING,
CONF_RESTORE_LIGHT_STATE,
CONF_SENSOR_STRING,
CONF_TLS_VER,
CONF_VAR_SENSOR_STRING,
DEFAULT_IGNORE_STRING,
DEFAULT_RESTORE_LIGHT_STATE,
DEFAULT_SENSOR_STRING,
DEFAULT_VAR_SENSOR_STRING,
DOMAIN,
ISY994_ISY,
ISY994_NODES,
ISY994_PROGRAMS,
ISY994_VARIABLES,
MANUFACTURER,
SUPPORTED_PLATFORMS,
SUPPORTED_PROGRAM_PLATFORMS,
UNDO_UPDATE_LISTENER,
)
from .helpers import _categorize_nodes, _categorize_programs, _categorize_variables
from .services import async_setup_services, async_unload_services
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.url,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TLS_VER): vol.Coerce(float),
vol.Optional(
CONF_IGNORE_STRING, default=DEFAULT_IGNORE_STRING
): cv.string,
vol.Optional(
CONF_SENSOR_STRING, default=DEFAULT_SENSOR_STRING
): cv.string,
vol.Optional(
CONF_VAR_SENSOR_STRING, default=DEFAULT_VAR_SENSOR_STRING
): cv.string,
vol.Required(
CONF_RESTORE_LIGHT_STATE, default=DEFAULT_RESTORE_LIGHT_STATE
): bool,
}
)
},
extra=vol.ALLOW_EXTRA,
)
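# --- Editor's addition: a hedged configuration sketch matching the schema
# above; the host and credential values are illustrative assumptions.
#
#   # configuration.yaml
#   isy994:
#     host: http://192.168.1.10:80
#     username: admin
#     password: !secret isy_password
#     sensor_string: sensor
#     ignore_string: "{IGNORE ME}"
#     restore_light_state: true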
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the isy994 integration from YAML."""
isy_config: Optional[ConfigType] = config.get(DOMAIN)
hass.data.setdefault(DOMAIN, {})
if not isy_config:
return True
# Only import if we haven't before.
config_entry = _async_find_matching_config_entry(hass)
if not config_entry:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=dict(isy_config),
)
)
return True
# Update the entry based on the YAML configuration, in case it changed.
hass.config_entries.async_update_entry(config_entry, data=dict(isy_config))
return True
@callback
def _async_find_matching_config_entry(hass):
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.source == config_entries.SOURCE_IMPORT:
return entry
async def async_setup_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Set up the ISY 994 integration."""
# As there currently is no way to import options from yaml
# when setting up a config entry, we fallback to adding
# the options to the config entry and pull them out here if
# they are missing from the options
_async_import_options_from_data_if_missing(hass, entry)
hass.data[DOMAIN][entry.entry_id] = {}
hass_isy_data = hass.data[DOMAIN][entry.entry_id]
hass_isy_data[ISY994_NODES] = {}
for platform in SUPPORTED_PLATFORMS:
hass_isy_data[ISY994_NODES][platform] = []
hass_isy_data[ISY994_PROGRAMS] = {}
for platform in SUPPORTED_PROGRAM_PLATFORMS:
hass_isy_data[ISY994_PROGRAMS][platform] = []
hass_isy_data[ISY994_VARIABLES] = []
isy_config = entry.data
isy_options = entry.options
# Required
user = isy_config[CONF_USERNAME]
password = isy_config[CONF_PASSWORD]
host = urlparse(isy_config[CONF_HOST])
# Optional
tls_version = isy_config.get(CONF_TLS_VER)
ignore_identifier = isy_options.get(CONF_IGNORE_STRING, DEFAULT_IGNORE_STRING)
sensor_identifier = isy_options.get(CONF_SENSOR_STRING, DEFAULT_SENSOR_STRING)
variable_identifier = isy_options.get(
CONF_VAR_SENSOR_STRING, DEFAULT_VAR_SENSOR_STRING
)
if host.scheme == "http":
https = False
port = host.port or 80
elif host.scheme == "https":
https = True
port = host.port or 443
else:
_LOGGER.error("isy994 host value in configuration is invalid")
return False
# Connect to ISY controller.
isy = await hass.async_add_executor_job(
partial(
ISY,
host.hostname,
port,
username=user,
password=password,
use_https=https,
tls_ver=tls_version,
log=_LOGGER,
webroot=host.path,
)
)
if not isy.connected:
return False
_categorize_nodes(hass_isy_data, isy.nodes, ignore_identifier, sensor_identifier)
_categorize_programs(hass_isy_data, isy.programs)
_categorize_variables(hass_isy_data, isy.variables, variable_identifier)
# Dump ISY Clock Information. Future: Add ISY as sensor to Hass with attrs
_LOGGER.info(repr(isy.clock))
hass_isy_data[ISY994_ISY] = isy
await _async_get_or_create_isy_device_in_registry(hass, entry, isy)
# Load platforms for the devices in the ISY controller that we support.
for platform in SUPPORTED_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
def _start_auto_update() -> None:
"""Start isy auto update."""
_LOGGER.debug("ISY Starting Event Stream and automatic updates")
isy.auto_update = True
await hass.async_add_executor_job(_start_auto_update)
undo_listener = entry.add_update_listener(_async_update_listener)
hass_isy_data[UNDO_UPDATE_LISTENER] = undo_listener
# Register Integration-wide Services:
async_setup_services(hass)
return True
async def _async_update_listener(
hass: HomeAssistant, entry: config_entries.ConfigEntry
):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
@callback
def _async_import_options_from_data_if_missing(
hass: HomeAssistant, entry: config_entries.ConfigEntry
):
options = dict(entry.options)
modified = False
for importable_option in [
CONF_IGNORE_STRING,
CONF_SENSOR_STRING,
CONF_RESTORE_LIGHT_STATE,
]:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, options=options)
async def _async_get_or_create_isy_device_in_registry(
hass: HomeAssistant, entry: config_entries.ConfigEntry, isy
) -> None:
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, isy.configuration["uuid"])},
identifiers={(DOMAIN, isy.configuration["uuid"])},
manufacturer=MANUFACTURER,
name=isy.configuration["name"],
model=isy.configuration["model"],
sw_version=isy.configuration["firmware"],
)
async def async_unload_entry(
hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in SUPPORTED_PLATFORMS
]
)
)
hass_isy_data = hass.data[DOMAIN][entry.entry_id]
isy = hass_isy_data[ISY994_ISY]
def _stop_auto_update() -> None:
"""Start isy auto update."""
_LOGGER.debug("ISY Stopping Event Stream and automatic updates")
isy.auto_update = False
await hass.async_add_executor_job(_stop_auto_update)
hass_isy_data[UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
async_unload_services(hass)
return unload_ok
| tchellomello/home-assistant | homeassistant/components/isy994/__init__.py | Python | apache-2.0 | 8,442 |
#!/usr/bin/env python
from setuptools import setup, find_packages
import cleware
setup(
name='cleware',
version=cleware.__version__,
description='Python library to control Cleware products',
long_description='Python library to control Cleware products',
author='Roderick Baier',
author_email='[email protected]',
license='MIT',
url='https://github.com/rbaier/python-cleware',
packages=find_packages(exclude=['tests']),
install_requires=[
'pyusb'
],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities'
]
)
| rbaier/python-cleware | setup.py | Python | mit | 756 |
#!/usr/bin/env python
# Dan Blankenberg
"""
A wrapper script for converting SAM to BAM, with sorting.
%prog input_filename.sam output_filename.bam
"""
import os
import sys
import optparse
import tempfile
import subprocess
import shutil
CHUNK_SIZE = 2 ** 20 # 1mb
def cleanup_before_exit( tmp_dir ):
if tmp_dir and os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
def __main__():
# Parse Command Line
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
assert len( args ) == 2, 'You must specify the input and output filenames'
input_filename, output_filename = args
tmp_dir = tempfile.mkdtemp( prefix='tmp-sam_to_bam_converter-' )
    # convert the input SAM to an unsorted BAM
unsorted_bam_filename = os.path.join( tmp_dir, 'unsorted.bam' )
unsorted_stderr_filename = os.path.join( tmp_dir, 'unsorted.stderr' )
cmd = 'samtools view -bS "%s" > "%s"' % ( input_filename, unsorted_bam_filename )
proc = subprocess.Popen( args=cmd, stderr=open( unsorted_stderr_filename, 'wb' ), shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
stderr_target = sys.stdout
stderr = open( unsorted_stderr_filename )
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
    # sort the BAM, so that downstream indexing will not fail
sorted_stderr_filename = os.path.join( tmp_dir, 'sorted.stderr' )
sorting_prefix = os.path.join( tmp_dir, 'sorted_bam' )
cmd = 'samtools sort -o "%s" "%s" > "%s"' % ( unsorted_bam_filename, sorting_prefix, output_filename )
proc = subprocess.Popen( args=cmd, stderr=open( sorted_stderr_filename, 'wb' ), shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
stderr_target = sys.stdout
stderr = open( sorted_stderr_filename )
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
cleanup_before_exit( tmp_dir )
if __name__ == "__main__":
__main__()
| icaoberg/cellorganizer-galaxy-tools | datatypes/converters/sam_to_bam.py | Python | gpl-3.0 | 2,215 |
from allura.command.base import Command
class BlogCommand(Command):
group_name = 'ForgeBlog'
| pombredanne/SourceForge-Allura | ForgeBlog/forgeblog/command/base.py | Python | apache-2.0 | 98 |
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
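# --- Editor's addition: a hedged worked example of the prefix search above,
# using 4-bit hashes for readability (the real index uses 32-bit hashes).
# For tree = [0b0010, 0b0100, 0b0101, 0b1100] and query 0b0111, masking to the
# top 2 bits brackets the entries 0b0100 and 0b0101, while no stored hash
# shares the top 3 bits, so the longest prefix match returned is 2.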
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
    n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
        Number of neighbors to be returned from the query function when
        it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
        stored in the GaussianRandomProjectionHash object and can be obtained
        from its ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
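    # A minimal partial_fit sketch (hypothetical names); additions should be
    # batched because the per-tree arrays are rebuilt on every call:
    #
    #     X_new = rng.rand(50, 10)
    #     lshf.partial_fit(X_new)   # index now covers X_train plus X_new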
| schets/scikit-learn | sklearn/neighbors/approximate.py | Python | bsd-3-clause | 22,278 |
# -*- encoding: utf-8 -*-
###########################################################################
#    Module Written for OpenERP, Open Source Management Solution
#
# Copyright (c) 2013 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: Julio Serna ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import conf_account_partner | 3dfxsoftware/cbss-addons | configure_account_partner/wizard/__init__.py | Python | gpl-2.0 | 1,281 |
from distutils.core import setup
import os
setup(
name = 'cheat',
version = '2.0.9',
author = 'Chris Lane',
author_email = '[email protected]',
license = 'GPL3',
description = 'cheat allows you to create and view interactive cheatsheets '
'on the command-line. It was designed to help remind *nix system '
'administrators of options for commands that they use frequently, but not '
'frequently enough to remember.',
url = 'https://github.com/chrisallenlane/cheat',
packages = [
'cheat',
'cheat.cheatsheets',
'cheat.test',
],
package_data = {
'cheat.cheatsheets': [f for f in os.listdir('cheat/cheatsheets') if '.' not in f]
},
scripts = ['bin/cheat'],
install_requires = [
'docopt >= 0.6.1',
'pygments >= 1.6.0',
]
)
| laomaiweng/cheat | setup.py | Python | gpl-3.0 | 890 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class Replication(Resource):
"""An object that represents a replication for a container registry.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The location of the resource. This cannot be changed
after the resource is created.
:type location: str
:param tags: The tags of the resource.
:type tags: dict[str, str]
:ivar provisioning_state: The provisioning state of the replication at the
time the operation was called. Possible values include: 'Creating',
'Updating', 'Deleting', 'Succeeded', 'Failed', 'Canceled'
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2017_10_01.models.ProvisioningState
:ivar status: The status of the replication at the time the operation was
called.
:vartype status: ~azure.mgmt.containerregistry.v2017_10_01.models.Status
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'Status'},
}
def __init__(self, location, tags=None):
super(Replication, self).__init__(location=location, tags=tags)
self.provisioning_state = None
self.status = None
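# A minimal construction sketch (values are hypothetical); the read-only
# fields provisioning_state and status remain None until populated by the
# service:
#
#     replication = Replication(location='eastus', tags={'env': 'test'})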
| lmazuel/azure-sdk-for-python | azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_10_01/models/replication.py | Python | mit | 2,473 |
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^api/', include('users.urls')),
url(r'^api/', include('reception_office.urls')),
url(r'^api/admin/', include(admin.site.urls)),
url(r'^api-token-auth/',
'rest_framework.authtoken.views.obtain_auth_token'),
url(r'^api/api-auth/', include(
'rest_framework.urls', namespace='rest_framework')),
url(r'^api/rest-auth/', include('rest_auth.urls')),
url(r'^$', TemplateView.as_view(template_name='index.html')),
]
| vechnoe/clinic | src/urls.py | Python | mit | 596 |
#!/usr/bin/python
import boto.ec2
import time
import argparse
import sys
def print_error(msg):
print >> sys.stderr , "[Error]" , msg
def is_running(instance):
return instance != None and instance.state == "running"
def wait_for_instance(instance, timeout = 900):
start = time.time()
counter = 0
polling_interval = 15
while instance.state == "pending" and counter < timeout:
instance.update()
time.sleep(polling_interval)
counter += polling_interval
end = time.time()
return end - start
def connect_to_openstack(access_key, secret_key, endpoint):
region = boto.ec2.regioninfo.RegionInfo(name = "nova",
endpoint = endpoint)
connection = boto.connect_ec2(aws_access_key_id = access_key,
aws_secret_access_key = secret_key,
is_secure=False,
region=region,
port=8773,
path="/services/Cloud")
return connection
def spawn_instance(connection, ami, key_name, flavor, userdata, max_retries = 5):
instance = None
time_backoff = 20
retries = 0
while not is_running(instance) and retries < max_retries:
instance = spawn_instance_on_openstack(connection,
ami,
key_name,
flavor,
userdata)
retries += 1
if not is_running(instance):
instance_id = "unknown"
instance_state = "unknown"
if instance != None:
instance_id = str(instance.id)
instance_state = str(instance.state)
kill_instance(connection, instance_id)
print_error("Failed spawning instance " + instance_id +
" (#: " + str(retries) + " | state: " + instance_state + ")")
time.sleep(time_backoff)
if not is_running(instance):
return None
return instance
def spawn_instance_on_openstack(connection, ami, key_name, flavor, userdata):
try:
reservation = connection.run_instances(ami,
key_name=key_name,
instance_type=flavor,
user_data=userdata)
if len(reservation.instances) != 1:
print_error("Failed to start instance (#: " + reservation.instances + ")")
return None
instance = reservation.instances[0]
if instance.state != "pending":
print_error("Instance failed at startup (State: " + instance.state + ")")
return instance
waiting_time = wait_for_instance(instance)
if instance.state != "running":
print_error("Failed to boot up instance (State: " + instance.state + ")")
return instance
return instance
except Exception, e:
print_error("Exception: " + str(e))
return None
def kill_instance(connection, instance_id):
terminated_instances = []
try:
terminated_instances = connection.terminate_instances(instance_id)
except Exception, e:
print_error("Exception: " + str(e))
return len(terminated_instances) == 1
def create_instance(parent_parser, argv):
parser = argparse.ArgumentParser(parents=[parent_parser])
parser.add_argument("--ami",
nargs = 1,
metavar = "<ami_name>",
required = True,
dest = "ami",
help = "AMI identifier of the image to boot")
parser.add_argument("--key",
nargs = 1,
metavar = "<key_name>",
required = True,
dest = "key",
help = "Name of the access key to use")
parser.add_argument("--instance-type",
nargs = 1,
metavar = "<instance_type>",
required = True,
dest = "flavor",
help = "VM flavor to use")
parser.add_argument("--userdata",
nargs = 1,
metavar = "<user_data>",
required = False,
dest = "userdata",
default = "",
help = "Cloud-init user data string")
arguments = parser.parse_args(argv)
access_key = arguments.access_key[0]
secret_key = arguments.secret_key[0]
endpoint = arguments.cloud_endpoint[0]
ami = arguments.ami[0]
key_name = arguments.key[0]
flavor = arguments.flavor[0]
userdata = arguments.userdata[0]
connection = connect_to_openstack(access_key, secret_key, endpoint)
instance = spawn_instance(connection, ami, key_name, flavor, userdata)
if is_running(instance):
print instance.id , instance.private_ip_address
else:
print_error("Failed to start instance")
exit(2)
def terminate_instance(parent_parser, argv):
parser = argparse.ArgumentParser(parents=[parent_parser])
parser.add_argument("--instance-id",
nargs = 1,
metavar = "<instance_id>",
required = True,
dest = "instance_id",
help = "Instance ID of the instance to terminate")
arguments = parser.parse_args(argv)
access_key = arguments.access_key[0]
secret_key = arguments.secret_key[0]
endpoint = arguments.cloud_endpoint[0]
instance_id = arguments.instance_id[0]
connection = connect_to_openstack(access_key, secret_key, endpoint)
successful = kill_instance(connection, instance_id)
if not successful:
print_error("Failed to terminate instance")
exit(2)
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
parser = argparse.ArgumentParser(add_help = False,
description = "Start an Ibex instance")
parser.add_argument("--access-key",
nargs = 1,
metavar = "<aws_access_key_id>",
required = True,
dest = "access_key",
help = "EC2 Access Key String")
parser.add_argument("--secret-key",
nargs = 1,
metavar = "<aws_secret_access_key>",
required = True,
dest = "secret_key",
help = "EC2 Secret Key String")
parser.add_argument("--cloud-endpoint",
nargs = 1,
metavar = "<url to cloud controller endpoint>",
required = True,
dest = "cloud_endpoint",
help = "URL to the cloud controller")
if len(sys.argv) < 2:
print_error("please provide 'spawn' or 'terminate' as a subcommand...")
exit(1)
subcommand = sys.argv[1]
argv = sys.argv[2:]
if subcommand == "spawn":
create_instance(parser, argv)
elif subcommand == "terminate":
terminate_instance(parser, argv)
else:
print_error("unrecognized subcommand '" + subcommand + "'")
exit(1)
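# Example invocations (keys, endpoint and IDs below are hypothetical
# placeholders), matching the argument parsers defined above:
#
#     ./instance_handler.py spawn --access-key AKIA... --secret-key SECRET \
#         --cloud-endpoint http://cloud.example.com --ami ami-00000001 \
#         --key mykey --instance-type m1.small
#     ./instance_handler.py terminate --access-key AKIA... --secret-key SECRET \
#         --cloud-endpoint http://cloud.example.com --instance-id i-00000001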
| cvmfs-testing/cvmfs | test/cloud_testing/steering/instance_handler.py | Python | bsd-3-clause | 7,346 |
#! /usr/bin/env python
"""Mauna Loa Weekly Atmospheric CO2 Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = """Mauna Loa Weekly Atmospheric CO2 Data"""
SOURCE = """
Data obtained from http://cdiac.ornl.gov/trends/co2/sio-keel-flask/sio-keel-flaskmlo_c.html
Obtained on 3/15/2014.
Citation:
Keeling, C.D. and T.P. Whorf. 2004. Atmospheric CO2 concentrations derived from flask air samples at sites in the SIO network. In Trends: A Compendium of Data on Global Change. Carbon Dioxide Information Analysis Center, Oak Ridge National Laboratory, U.S. Department of Energy, Oak Ridge, Tennessee, U.S.A.
"""
DESCRSHORT = """Atmospheric CO2 from Continuous Air Samples at Mauna Loa Observatory, Hawaii, U.S.A."""
DESCRLONG = """
Atmospheric CO2 from Continuous Air Samples at Mauna Loa Observatory, Hawaii, U.S.A.
Period of Record: March 1958 - December 2001
Methods: An Applied Physics Corporation (APC) nondispersive infrared gas analyzer was used to obtain atmospheric CO2 concentrations, based on continuous data (four measurements per hour) from atop intake lines on several towers. Steady data periods of not less than six hours per day are required; if no such six-hour periods are available on any given day, then no data are used that day. Weekly averages were calculated for most weeks throughout the approximately 44 years of record. The continuous data for year 2000 is compared with flask data from the same site in the graphics section."""
#suggested notes
NOTE = """::
Number of observations: 2225
Number of variables: 2
Variable name definitions:
date - sample date in YYMMDD format
co2 - CO2 Concentration ppmv
The data returned by load_pandas contains the dates as the index.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
import pandas as pd
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
names = data.dtype.names
return du.Dataset(data=data, names=names)
def load_pandas():
data = load()
# pandas <= 0.12.0 fails in the to_datetime regex on Python 3
index = pd.DatetimeIndex(start=data.data['date'][0].decode('utf-8'),
periods=len(data.data), format='%Y%m%d',
freq='W-SAT')
dataset = pd.DataFrame(data.data['co2'], index=index, columns=['co2'])
#NOTE: this is how I got the missing values in co2.csv
#new_index = pd.DatetimeIndex(start='1958-3-29', end=index[-1],
# freq='W-SAT')
#data.data = dataset.reindex(new_index)
data.data = dataset
return data
def _get_data():
filepath = dirname(abspath(__file__))
with open(filepath + '/co2.csv', 'rb') as f:
data = np.recfromtxt(f, delimiter=",", names=True, dtype=['a8', float])
return data
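# A minimal usage sketch (assumes a pandas version where resample().mean()
# is available):
#
#     from statsmodels.datasets import co2
#     weekly = co2.load_pandas().data          # DataFrame indexed by week
#     monthly = weekly['co2'].resample('M').mean()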
| yl565/statsmodels | statsmodels/datasets/co2/data.py | Python | bsd-3-clause | 3,041 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-08-16 19:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('communication', '0016_mailcommunication_lob_id'),
]
operations = [
migrations.CreateModel(
name='MailEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField()),
('event', models.CharField(max_length=255)),
('mail', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='communication.MailCommunication')),
],
),
]
| MuckRock/muckrock | muckrock/communication/migrations/0017_mailevent.py | Python | agpl-3.0 | 795 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import sys
import threading
from observedItem import *
from fileEventHandler import *
def getExt(filename):
    # return extension of file
return os.path.splitext(filename)[-1].lower()
class FileObserver(threading.Thread):
def __init__ (self):
super(FileObserver, self).__init__()
self.stop_event = threading.Event()
self.handler = None
self.basedir = os.path.abspath(os.path.dirname(__file__))
self.interval = 1
self.recursive = True
self.observedExts = []
self.observedItems = []
def getFileList(self):
# return file list of base directory
fileList = []
if self.recursive:
for (root, dirs, files) in os.walk(self.basedir):
for f in files:
fileList.append(os.path.join(root, f))
else:
fileList = os.listdir(self.basedir)
# extention filtering (option)
if len(self.observedExts) > 0:
fileList_cpy = list(fileList)
for f in fileList_cpy:
if not getExt(f) in self.observedExts:
fileList.remove(f)
return fileList
def initCheckDir(self, recursive=True):
# get file list
fileList = self.getFileList()
# add to observed items list and call on_init event
for f in fileList:
item = ObservedItem(f)
self.observedItems.append(item)
self.handler.on_init(item)
def checkDir(self, recursive=True):
# get file list
fileList = self.getFileList()
# get observed item list now
items = [ObservedItem(f) for f in fileList]
# create list for detecting items deleted
deletedItems = list(self.observedItems)
for item in items:
if item in self.observedItems:
# check the time-stamps
idx = self.observedItems.index(item)
if self.observedItems[idx].isModified(item):
self.observedItems.remove(item)
self.observedItems.append(item)
self.handler.on_modify(item)
# remove exist file from deleted items list
deletedItems.remove(item)
else:
# detect new file
self.observedItems.append(item)
for observedItem in self.observedItems:
if observedItem.isMoved(item) and observedItem in deletedItems:
# detect moved file
deletedItems.remove(observedItem)
self.observedItems.remove(observedItem)
self.handler.on_move(item)
break
else:
self.handler.on_add(item)
# detect deleted file
for item in deletedItems:
self.observedItems.remove(item)
self.handler.on_delete(item)
def setSchedule(self,
handler,
interval=1,
directory=os.path.abspath(os.path.dirname(__file__)),
recursive=True):
        # set watching schedule
self.handler = handler
self.interval = interval
self.basedir = directory
self.recursive = recursive
def run(self):
        # run observing thread
if self.handler is None:
sys.stderr.write("[ERROR]: you must set any event handler")
return
# first time checking
self.initCheckDir(self.recursive)
while not self.stop_event.is_set():
self.checkDir(self.recursive)
time.sleep(self.interval)
def stop(self):
        # stop observing thread
self.stop_event.set()
self.join() | DaikiShimada/patlabor-python | patlabor/fileObserver.py | Python | mit | 3,071 |
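# A minimal usage sketch; `MyHandler` stands in for a concrete handler class
# built on fileEventHandler (hypothetical name):
#
#     observer = FileObserver()
#     observer.setSchedule(handler=MyHandler(), interval=2,
#                          directory='/tmp/watched', recursive=True)
#     observer.start()
#     # ... later ...
#     observer.stop()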
#!/usr/bin/env python
class BaseCodec(object):
"""
Base audio/video codec class.
"""
encoder_options = {}
codec_name = None
ffmpeg_codec_name = None
def parse_options(self, opt):
if 'codec' not in opt or opt['codec'] != self.codec_name:
raise ValueError('invalid codec name')
return None
def _codec_specific_parse_options(self, safe):
return safe
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
return []
def safe_options(self, opts):
safe = {}
# Only copy options that are expected and of correct type
# (and do typecasting on them)
for k, v in opts.items():
if k in self.encoder_options and v is not None:
typ = self.encoder_options[k]
try:
safe[k] = typ(v)
except:
pass
return safe
class AudioCodec(BaseCodec):
"""
Base audio codec class handles general audio options. Possible
parameters are:
* codec (string) - audio codec name
* channels (integer) - number of audio channels
* bitrate (integer) - stream bitrate
* samplerate (integer) - sample rate (frequency)
* language (str) - language of audio stream (3 char code)
* map (int) - stream index
Supported audio codecs are: null (no audio), copy (copy from
original), vorbis, aac, mp3, mp2
"""
encoder_options = {
'codec': str,
'language': str,
'channels': int,
'bitrate': int,
'samplerate': int,
'source': int,
'path': str,
'filter': str,
'map': int,
'disposition': str,
}
def parse_options(self, opt, stream=0):
super(AudioCodec, self).parse_options(opt)
safe = self.safe_options(opt)
stream = str(stream)
if 'channels' in safe:
c = safe['channels']
if c < 1 or c > 12:
del safe['channels']
if 'bitrate' in safe:
br = safe['bitrate']
if br < 8:
br = 8
if br > 1536:
br = 1536
if 'samplerate' in safe:
f = safe['samplerate']
if f < 1000 or f > 50000:
del safe['samplerate']
if 'language' in safe:
l = safe['language']
if len(l) > 3:
del safe['language']
if 'source' in safe:
s = str(safe['source'])
else:
s = str(0)
if 'filter' in safe:
x = safe['filter']
if len(x) < 1:
del safe['filter']
safe = self._codec_specific_parse_options(safe)
optlist = []
optlist.extend(['-c:a:' + stream, self.ffmpeg_codec_name])
if 'path' in safe:
optlist.extend(['-i', str(safe['path'])])
if 'map' in safe:
optlist.extend(['-map', s + ':' + str(safe['map'])])
if 'disposition' in safe:
optlist.extend(['-disposition:a:' + stream, str(safe['disposition'])])
if 'channels' in safe:
optlist.extend(['-ac:a:' + stream, str(safe['channels'])])
if 'bitrate' in safe:
optlist.extend(['-b:a:' + stream, str(br) + 'k'])
if 'samplerate' in safe:
optlist.extend(['-ar:a:' + stream, str(safe['samplerate'])])
if 'filter' in safe:
optlist.extend(['-filter:a:' + stream, str(safe['filter'])])
if 'language' in safe:
lang = str(safe['language'])
else:
lang = 'und' # Never leave blank if not specified, always set to und for undefined
optlist.extend(['-metadata:s:a:' + stream, "language=" + lang])
optlist.extend(self._codec_specific_produce_ffmpeg_list(safe))
return optlist
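    # A minimal sketch of how a concrete audio codec consumes these options
    # (hypothetical values; AacCodec is defined further below):
    #
    #     opt = {'codec': 'aac', 'channels': 2, 'bitrate': 128, 'language': 'eng'}
    #     AacCodec().parse_options(opt, stream=0)
    #     # -> ['-c:a:0', 'aac', '-ac:a:0', '2', '-b:a:0', '128k',
    #     #     '-metadata:s:a:0', 'language=eng', '-strict', 'experimental']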
class SubtitleCodec(BaseCodec):
"""
Base subtitle codec class handles general subtitle options. Possible
parameters are:
    * codec (string) - subtitle codec name (mov_text, subrip, ssa only supported currently)
* language (string) - language of subtitle stream (3 char code)
* forced (int) - force subtitles (1 true, 0 false)
* default (int) - default subtitles (1 true, 0 false)
Supported subtitle codecs are: null (no subtitle), mov_text
"""
encoder_options = {
'codec': str,
'language': str,
'forced': int,
'default': int,
'map': int,
'source': int,
'path': str,
'encoding': str
}
def parse_options(self, opt, stream=0):
super(SubtitleCodec, self).parse_options(opt)
stream = str(stream)
safe = self.safe_options(opt)
if 'forced' in safe:
f = safe['forced']
if f < 0 or f > 1:
del safe['forced']
if 'default' in safe:
d = safe['default']
if d < 0 or d > 1:
del safe['default']
if 'language' in safe:
l = safe['language']
if len(l) > 3:
del safe['language']
if 'source' in safe:
s = str(safe['source'])
else:
s = str(0)
if 'encoding' in safe:
if not safe['encoding']:
del safe['encoding']
safe = self._codec_specific_parse_options(safe)
optlist = []
if 'encoding' in safe:
optlist.extend(['-sub_charenc', str(safe['encoding'])])
optlist.extend(['-c:s:' + stream, self.ffmpeg_codec_name])
stream = str(stream)
if 'map' in safe:
optlist.extend(['-map', s + ':' + str(safe['map'])])
if 'path' in safe:
optlist.extend(['-i', str(safe['path'])])
if 'default' in safe:
optlist.extend(['-metadata:s:s:' + stream, "disposition:default=" + str(safe['default'])])
if 'forced' in safe:
optlist.extend(['-metadata:s:s:' + stream, "disposition:forced=" + str(safe['forced'])])
if 'language' in safe:
lang = str(safe['language'])
else:
lang = 'und' # Never leave blank if not specified, always set to und for undefined
optlist.extend(['-metadata:s:s:' + stream, "language=" + lang])
optlist.extend(self._codec_specific_produce_ffmpeg_list(safe))
return optlist
class VideoCodec(BaseCodec):
"""
Base video codec class handles general video options. Possible
parameters are:
* codec (string) - video codec name
* bitrate (string) - stream bitrate
* fps (integer) - frames per second
* width (integer) - video width
* height (integer) - video height
    * mode (string) - aspect preservation mode; one of:
* stretch (default) - don't preserve aspect
* crop - crop extra w/h
* pad - pad with black bars
* src_width (int) - source width
* src_height (int) - source height
    Aspect preservation mode is only used if both source
    and destination sizes are specified. If source
dimensions are not specified, aspect settings are ignored.
If source dimensions are specified, and only one
of the destination dimensions is specified, the other one
is calculated to preserve the aspect ratio.
Supported video codecs are: null (no video), copy (copy directly
from the source), Theora, H.264/AVC, DivX, VP8, H.263, Flv,
MPEG-1, MPEG-2.
"""
encoder_options = {
'codec': str,
'bitrate': int,
'crf': int,
'fps': int,
'width': int,
'height': int,
'mode': str,
'src_width': int,
'src_height': int,
'filter': str,
'pix_fmt': str,
'map': int
}
def _aspect_corrections(self, sw, sh, w, h, mode):
# If we don't have source info, we don't try to calculate
# aspect corrections
if not sw or not sh:
return w, h, None
# Original aspect ratio
aspect = (1.0 * sw) / (1.0 * sh)
# If we have only one dimension, we can easily calculate
# the other to match the source aspect ratio
if not w and not h:
return w, h, None
elif w and not h:
h = int((1.0 * w) / aspect)
return w, h, None
elif h and not w:
w = int(aspect * h)
return w, h, None
# If source and target dimensions are actually the same aspect
# ratio, we've got nothing to do
if int(aspect * h) == w:
return w, h, None
if mode == 'stretch':
return w, h, None
target_aspect = (1.0 * w) / (1.0 * h)
if mode == 'crop':
# source is taller, need to crop top/bottom
            if target_aspect > aspect:  # target is relatively wider
h0 = int(w / aspect)
assert h0 > h, (sw, sh, w, h)
dh = (h0 - h) / 2
return w, h0, 'crop=%d:%d:0:%d' % (w, h, dh)
else: # source is wider, need to crop left/right
w0 = int(h * aspect)
assert w0 > w, (sw, sh, w, h)
dw = (w0 - w) / 2
return w0, h, 'crop=%d:%d:%d:0' % (w, h, dw)
if mode == 'pad':
# target is taller, need to pad top/bottom
if target_aspect < aspect:
h1 = int(w / aspect)
assert h1 < h, (sw, sh, w, h)
dh = (h - h1) / 2
return w, h1, 'pad=%d:%d:0:%d' % (w, h, dh) # FIXED
else: # target is wider, need to pad left/right
w1 = int(h * aspect)
assert w1 < w, (sw, sh, w, h)
dw = (w - w1) / 2
return w1, h, 'pad=%d:%d:%d:0' % (w, h, dw) # FIXED
assert False, mode
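    # Worked example for the crop branch (hypothetical sizes): a 1600x800
    # source (2:1) fitted to an 800x800 target computes w0 = int(800 * 2.0)
    # = 1600, keeps the height, and returns the filter 'crop=800:800:400:0'
    # (400 px trimmed from each side), i.e. scale first, then crop.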
def parse_options(self, opt, stream=0):
super(VideoCodec, self).parse_options(opt)
safe = self.safe_options(opt)
if 'fps' in safe:
f = safe['fps']
if f < 1 or f > 120:
del safe['fps']
if 'bitrate' in safe:
br = safe['bitrate']
if br < 16 or br > 15000:
del safe['bitrate']
if 'crf' in safe:
crf = safe['crf']
if crf < 0 or crf > 51:
del safe['crf']
w = None
h = None
if 'width' in safe:
w = safe['width']
if w < 16 or w > 4000:
w = None
if 'height' in safe:
h = safe['height']
if h < 16 or h > 3000:
h = None
sw = None
sh = None
if 'src_width' in safe and 'src_height' in safe:
sw = safe['src_width']
sh = safe['src_height']
if not sw or not sh:
sw = None
sh = None
mode = 'stretch'
if 'mode' in safe:
if safe['mode'] in ['stretch', 'crop', 'pad']:
mode = safe['mode']
ow, oh = w, h # FIXED
w, h, filters = self._aspect_corrections(sw, sh, w, h, mode)
safe['width'] = w
safe['height'] = h
safe['aspect_filters'] = filters
if w and h:
safe['aspect'] = '%d:%d' % (w, h)
safe = self._codec_specific_parse_options(safe)
w = safe['width']
h = safe['height']
filters = safe['aspect_filters']
optlist = ['-vcodec', self.ffmpeg_codec_name]
if 'map' in safe:
optlist.extend(['-map', '0:' + str(safe['map'])])
if 'fps' in safe:
optlist.extend(['-r', str(safe['fps'])])
if 'pix_fmt' in safe:
optlist.extend(['-pix_fmt', str(safe['pix_fmt'])])
if 'bitrate' in safe:
optlist.extend(['-vb', str(safe['bitrate']) + 'k']) # FIXED
if 'crf' in safe:
optlist.extend(['-crf', str(safe['crf'])])
if 'filter' in safe:
if filters:
filters = '%s;%s' % (filters, str(safe['filter']))
else:
filters = str(safe['filter'])
if w and h:
optlist.extend(['-s', '%dx%d' % (w, h)])
if ow and oh:
optlist.extend(['-aspect', '%d:%d' % (ow, oh)])
if filters:
optlist.extend(['-vf', filters])
optlist.extend(self._codec_specific_produce_ffmpeg_list(safe))
if optlist.count('-vf') > 1:
vf = []
while optlist.count('-vf') > 0:
vf.append(optlist.pop(optlist.index('-vf') + 1))
del optlist[optlist.index('-vf')]
vfstring = ""
for line in vf:
vfstring = "%s;%s" % (vfstring, line)
optlist.extend(['-vf', vfstring[1:]])
return optlist
class AudioNullCodec(BaseCodec):
"""
Null audio codec (no audio).
"""
codec_name = None
def parse_options(self, opt, stream=0):
return ['-an']
class VideoNullCodec(BaseCodec):
"""
Null video codec (no video).
"""
codec_name = None
def parse_options(self, opt):
return ['-vn']
class SubtitleNullCodec(BaseCodec):
"""
Null subtitle codec (no subtitle)
"""
codec_name = None
def parse_options(self, opt, stream=0):
return ['-sn']
class AudioCopyCodec(BaseCodec):
"""
Copy audio stream directly from the source.
"""
codec_name = 'copy'
encoder_options = {'language': str,
'source': str,
'map': int,
'bsf': str,
'disposition': str}
def parse_options(self, opt, stream=0):
safe = self.safe_options(opt)
stream = str(stream)
optlist = []
optlist.extend(['-c:a:' + stream, 'copy'])
if 'source' in safe:
s = str(safe['source'])
else:
s = str(0)
if 'map' in safe:
optlist.extend(['-map', s + ':' + str(safe['map'])])
if 'bsf' in safe:
optlist.extend(['-bsf:a:' + stream, str(safe['bsf'])])
lang = 'und'
if 'language' in safe:
l = safe['language']
if len(l) > 3:
del safe['language']
else:
lang = str(safe['language'])
optlist.extend(['-metadata:s:a:' + stream, "language=" + lang])
if 'disposition' in safe:
optlist.extend(['-disposition:a:' + stream, str(safe['disposition'])])
return optlist
class VideoCopyCodec(BaseCodec):
"""
Copy video stream directly from the source.
"""
codec_name = 'copy'
encoder_options = {'map': int,
'source': str}
def parse_options(self, opt, stream=0):
safe = self.safe_options(opt)
optlist = []
optlist.extend(['-vcodec', 'copy'])
if 'source' in safe:
s = str(safe['source'])
else:
s = str(0)
if 'map' in safe:
optlist.extend(['-map', s + ':' + str(safe['map'])])
return optlist
class SubtitleCopyCodec(BaseCodec):
"""
Copy subtitle stream directly from the source.
"""
codec_name = 'copy'
encoder_options = {'map': int,
'source': str}
    def parse_options(self, opt, stream=0):
        safe = self.safe_options(opt)
        stream = str(stream)
        # build the option list locally; a class-level list would not be
        # visible here and would leak state between calls
        optlist = []
        if 'source' in safe:
            s = str(safe['source'])
        else:
            s = str(0)
        if 'map' in safe:
            optlist.extend(['-map', s + ':' + str(safe['map'])])
        optlist.extend(['-c:s:' + stream, 'copy'])
        return optlist
# Audio Codecs
class VorbisCodec(AudioCodec):
"""
Vorbis audio codec.
"""
codec_name = 'vorbis'
ffmpeg_codec_name = 'libvorbis'
encoder_options = AudioCodec.encoder_options.copy()
encoder_options.update({
'quality': int, # audio quality. Range is 0-10(highest quality)
# 3-6 is a good range to try. Default is 3
})
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
optlist = []
stream = str(stream)
if 'quality' in safe:
optlist.extend(['-qscale:a:' + stream, safe['quality']])
return optlist
class AacCodec(AudioCodec):
"""
AAC audio codec.
"""
codec_name = 'aac'
ffmpeg_codec_name = 'aac'
aac_experimental_enable = ['-strict', 'experimental']
def parse_options(self, opt, stream=0):
if 'channels' in opt:
c = opt['channels']
if c > 6:
opt['channels'] = 6
return super(AacCodec, self).parse_options(opt, stream)
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
return self.aac_experimental_enable
class FdkAacCodec(AudioCodec):
"""
AAC audio codec.
"""
codec_name = 'libfdk_aac'
ffmpeg_codec_name = 'libfdk_aac'
def parse_options(self, opt, stream=0):
if 'channels' in opt:
c = opt['channels']
if c > 6:
opt['channels'] = 6
return super(FdkAacCodec, self).parse_options(opt, stream)
class FAacCodec(AudioCodec):
"""
AAC audio codec.
"""
codec_name = 'libfaac'
ffmpeg_codec_name = 'libfaac'
def parse_options(self, opt, stream=0):
if 'channels' in opt:
c = opt['channels']
if c > 6:
opt['channels'] = 6
return super(FAacCodec, self).parse_options(opt, stream)
class Ac3Codec(AudioCodec):
"""
AC3 audio codec.
"""
codec_name = 'ac3'
ffmpeg_codec_name = 'ac3'
def parse_options(self, opt, stream=0):
if 'channels' in opt:
c = opt['channels']
if c > 6:
opt['channels'] = 6
return super(Ac3Codec, self).parse_options(opt, stream)
class EAc3Codec(AudioCodec):
"""
Dolby Digital Plus/EAC3 audio codec.
"""
codec_name = 'eac3'
ffmpeg_codec_name = 'eac3'
def parse_options(self, opt, stream=0):
if 'channels' in opt:
c = opt['channels']
if c > 8:
opt['channels'] = 8
if 'bitrate' in opt:
br = opt['bitrate']
if br > 640:
opt['bitrate'] = 640
return super(EAc3Codec, self).parse_options(opt, stream)
class FlacCodec(AudioCodec):
"""
FLAC audio codec.
"""
codec_name = 'flac'
ffmpeg_codec_name = 'flac'
flac_experimental_enable = ['-strict', 'experimental']
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
return self.flac_experimental_enable
class DtsCodec(AudioCodec):
"""
DTS audio codec.
"""
codec_name = 'dts'
ffmpeg_codec_name = 'dts'
class Mp3Codec(AudioCodec):
"""
MP3 (MPEG layer 3) audio codec.
"""
codec_name = 'mp3'
ffmpeg_codec_name = 'libmp3lame'
class Mp2Codec(AudioCodec):
"""
MP2 (MPEG layer 2) audio codec.
"""
codec_name = 'mp2'
ffmpeg_codec_name = 'mp2'
# Video Codecs
class TheoraCodec(VideoCodec):
"""
Theora video codec.
"""
codec_name = 'theora'
ffmpeg_codec_name = 'libtheora'
encoder_options = VideoCodec.encoder_options.copy()
encoder_options.update({
        'quality': int, # video quality. Range is 0-10(highest quality)
# 5-7 is a good range to try (default is 200k bitrate)
})
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
optlist = []
if 'quality' in safe:
optlist.extend(['-qscale:v', safe['quality']])
return optlist
class H264Codec(VideoCodec):
"""
H.264/AVC video codec.
"""
codec_name = 'h264'
ffmpeg_codec_name = 'libx264'
encoder_options = VideoCodec.encoder_options.copy()
encoder_options.update({
'preset': str, # common presets are ultrafast, superfast, veryfast,
# faster, fast, medium(default), slow, slower, veryslow
'quality': int, # constant rate factor, range:0(lossless)-51(worst)
# default:23, recommended: 18-28
'profile': str, # default: not-set, for valid values see above link
'level': float, # default: not-set, values range from 3.0 to 4.2
'tune': str, # default: not-set, for valid values see above link
'wscale': int, # special handlers for the even number requirements of h264
'hscale': int # special handlers for the even number requirements of h264
})
def parse_options(self, opt, stream=0):
if 'width' in opt:
opt['wscale'] = opt['width']
del(opt['width'])
if 'height' in opt:
opt['hscale'] = opt['height']
del(opt['height'])
return super(H264Codec, self).parse_options(opt, stream)
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
optlist = []
if 'level' in safe:
if safe['level'] < 3.0 or safe['level'] > 4.2:
del safe['level']
if 'preset' in safe:
optlist.extend(['-preset', safe['preset']])
if 'quality' in safe:
optlist.extend(['-crf', str(safe['quality'])])
if 'profile' in safe:
optlist.extend(['-profile:v', safe['profile']])
if 'level' in safe:
optlist.extend(['-level', '%0.1f' % safe['level']])
if 'tune' in safe:
optlist.extend(['-tune', safe['tune']])
if 'wscale' in safe and 'hscale' in safe:
optlist.extend(['-vf', 'scale=%s:%s' % (safe['wscale'], safe['hscale'])])
elif 'wscale' in safe:
optlist.extend(['-vf', 'scale=%s:trunc(ow/a/2)*2' % (safe['wscale'])])
elif 'hscale' in safe:
optlist.extend(['-vf', 'scale=trunc((oh*a)/2)*2:%s' % (safe['hscale'])])
return optlist
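    # A minimal sketch (hypothetical values): width/height requests are turned
    # into an even-dimension scale filter instead of a fixed '-s' size, e.g.
    #
    #     H264Codec().parse_options({'codec': 'h264', 'preset': 'fast',
    #                                'quality': 23, 'width': 1280})
    #     # -> ['-vcodec', 'libx264', '-preset', 'fast', '-crf', '23',
    #     #     '-vf', 'scale=1280:trunc(ow/a/2)*2']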
class NVEncH264(H264Codec):
"""
Nvidia H.264/AVC video codec.
"""
codec_name = 'h264_nvenc'
ffmpeg_codec_name = 'h264_nvenc'
class H264VAAPI(H264Codec):
"""
H.264/AVC video codec.
"""
codec_name = 'h264vaapi'
ffmpeg_codec_name = 'h264_vaapi'
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
optlist = []
optlist.extend(['-vaapi_device', '/dev/dri/renderD128'])
if 'preset' in safe:
optlist.extend(['-preset', safe['preset']])
if 'quality' in safe:
optlist.extend(['-crf', str(safe['quality'])])
if 'profile' in safe:
optlist.extend(['-profile:v', safe['profile']])
if 'level' in safe:
optlist.extend(['-level', '%0.0f' % (safe['level'] * 10)]) # Automatically multiplied by 10
if 'tune' in safe:
optlist.extend(['-tune', safe['tune']])
# Start VF
optlist.extend(['-vf', "format=nv12,hwupload"])
if 'wscale' in safe and 'hscale' in safe:
optlist.extend(['-vf', 'scale=%s:%s' % (safe['wscale'], safe['hscale'])])
elif 'wscale' in safe:
optlist.extend(['-vf', 'scale=%s:trunc(ow/a/2)*2' % (safe['wscale'])])
elif 'hscale' in safe:
optlist.extend(['-vf', 'scale=trunc((oh*a)/2)*2:%s' % (safe['hscale'])])
return optlist
class H264QSV(H264Codec):
"""
H.264/AVC video codec.
"""
codec_name = 'h264qsv'
ffmpeg_codec_name = 'h264_qsv'
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
optlist = []
optlist.extend(['-look_ahead', '0'])
return optlist
class H265Codec(VideoCodec):
"""
    H.265/HEVC video codec.
"""
codec_name = 'h265'
ffmpeg_codec_name = 'libx265'
encoder_options = VideoCodec.encoder_options.copy()
encoder_options.update({
'preset': str, # common presets are ultrafast, superfast, veryfast,
# faster, fast, medium(default), slow, slower, veryslow
'quality': int, # constant rate factor, range:0(lossless)-51(worst)
# default:23, recommended: 18-28
'profile': str, # default: not-set, for valid values see above link
'level': float, # default: not-set, values range from 3.0 to 4.2
'tune': str, # default: not-set, for valid values see above link
'wscale': int, # special handlers for the even number requirements of h265
'hscale': int # special handlers for the even number requirements of h265
})
def parse_options(self, opt, stream=0):
if 'width' in opt:
opt['wscale'] = opt['width']
del(opt['width'])
if 'height' in opt:
opt['hscale'] = opt['height']
del(opt['height'])
return super(H265Codec, self).parse_options(opt, stream)
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
optlist = []
if 'preset' in safe:
optlist.extend(['-preset', safe['preset']])
if 'quality' in safe:
optlist.extend(['-crf', str(safe['quality'])])
if 'profile' in safe:
optlist.extend(['-profile:v', safe['profile']])
if 'level' in safe:
optlist.extend(['-level', '%0.1f' % safe['level']])
if 'tune' in safe:
optlist.extend(['-tune', safe['tune']])
if 'wscale' in safe and 'hscale' in safe:
optlist.extend(['-vf', 'scale=%s:%s' % (safe['wscale'], safe['hscale'])])
elif 'wscale' in safe:
optlist.extend(['-vf', 'scale=%s:trunc(ow/a/2)*2' % (safe['wscale'])])
elif 'hscale' in safe:
optlist.extend(['-vf', 'scale=trunc((oh*a)/2)*2:%s' % (safe['hscale'])])
optlist.extend(['-tag:v', 'hvc1'])
return optlist
class HEVCQSV(H265Codec):
"""
HEVC video codec.
"""
codec_name = 'hevcqsv'
ffmpeg_codec_name = 'hevc_qsv'
class NVEncH265(H265Codec):
"""
    Nvidia H.265/HEVC video codec.
"""
codec_name = 'h265_nvenc'
ffmpeg_codec_name = 'hevc_nvenc'
class DivxCodec(VideoCodec):
"""
DivX video codec.
"""
codec_name = 'divx'
ffmpeg_codec_name = 'mpeg4'
class Vp8Codec(VideoCodec):
"""
Google VP8 video codec.
"""
codec_name = 'vp8'
ffmpeg_codec_name = 'libvpx'
class H263Codec(VideoCodec):
"""
H.263 video codec.
"""
codec_name = 'h263'
ffmpeg_codec_name = 'h263'
class FlvCodec(VideoCodec):
"""
Flash Video codec.
"""
codec_name = 'flv'
ffmpeg_codec_name = 'flv'
class MpegCodec(VideoCodec):
"""
Base MPEG video codec.
"""
# Workaround for a bug in ffmpeg in which aspect ratio
# is not correctly preserved, so we have to set it
# again in vf; take care to put it *before* crop/pad, so
# it uses the same adjusted dimensions as the codec itself
    # (pad/crop will adjust it further if necessary)
def _codec_specific_parse_options(self, safe, stream=0):
w = safe['width']
h = safe['height']
if w and h:
filters = safe['aspect_filters']
tmp = 'aspect=%d:%d' % (w, h)
if filters is None:
safe['aspect_filters'] = tmp
else:
safe['aspect_filters'] = tmp + ',' + filters
return safe
class Mpeg1Codec(MpegCodec):
"""
MPEG-1 video codec.
"""
codec_name = 'mpeg1'
ffmpeg_codec_name = 'mpeg1video'
class Mpeg2Codec(MpegCodec):
"""
MPEG-2 video codec.
"""
codec_name = 'mpeg2'
ffmpeg_codec_name = 'mpeg2video'
# Subtitle Codecs
class MOVTextCodec(SubtitleCodec):
"""
mov_text subtitle codec.
"""
codec_name = 'mov_text'
ffmpeg_codec_name = 'mov_text'
class SrtCodec(SubtitleCodec):
"""
SRT subtitle codec.
"""
codec_name = 'srt'
ffmpeg_codec_name = 'srt'
class WebVTTCodec(SubtitleCodec):
"""
    WebVTT subtitle codec.
"""
codec_name = 'webvtt'
ffmpeg_codec_name = 'webvtt'
class SSA(SubtitleCodec):
"""
SSA (SubStation Alpha) subtitle.
"""
codec_name = 'ass'
ffmpeg_codec_name = 'ass'
class SubRip(SubtitleCodec):
"""
SubRip subtitle.
"""
codec_name = 'subrip'
ffmpeg_codec_name = 'subrip'
class DVBSub(SubtitleCodec):
"""
DVB subtitles.
"""
codec_name = 'dvbsub'
ffmpeg_codec_name = 'dvbsub'
class DVDSub(SubtitleCodec):
"""
DVD subtitles.
"""
codec_name = 'dvdsub'
ffmpeg_codec_name = 'dvdsub'
audio_codec_list = [
AudioNullCodec, AudioCopyCodec, VorbisCodec, AacCodec, Mp3Codec, Mp2Codec,
FdkAacCodec, FAacCodec, EAc3Codec, Ac3Codec, DtsCodec, FlacCodec
]
video_codec_list = [
VideoNullCodec, VideoCopyCodec, TheoraCodec, H264Codec, H264QSV, HEVCQSV, H265Codec,
DivxCodec, Vp8Codec, H263Codec, FlvCodec, Mpeg1Codec, NVEncH264, NVEncH265,
Mpeg2Codec, H264VAAPI
]
subtitle_codec_list = [
SubtitleNullCodec, SubtitleCopyCodec, MOVTextCodec, SrtCodec, SSA, SubRip, DVDSub,
DVBSub, WebVTTCodec
]
| Filechaser/sickbeard_mp4_automator | converter/avcodecs.py | Python | mit | 29,019 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains and Evaluates the MNIST network using a feed dictionary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=missing-docstring
import argparse
import os.path
import sys
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def placeholder_inputs(batch_size):
"""Generate placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
code and will be fed from the downloaded data in the .run() loop, below.
Args:
batch_size: The batch size will be baked into both placeholders.
Returns:
images_placeholder: Images placeholder.
labels_placeholder: Labels placeholder.
"""
# Note that the shapes of the placeholders match the shapes of the full
# image and label tensors, except the first dimension is now batch_size
# rather than the full size of the train or test data sets.
images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
mnist.IMAGE_PIXELS))
labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
return images_placeholder, labels_placeholder
def fill_feed_dict(data_set, images_pl, labels_pl):
"""Fills the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
Args:
data_set: The set of images and labels, from input_data.read_data_sets()
images_pl: The images placeholder, from placeholder_inputs().
labels_pl: The labels placeholder, from placeholder_inputs().
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
# Create the feed_dict for the placeholders filled with the next
# `batch size` examples.
images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
FLAGS.fake_data)
feed_dict = {
images_pl: images_feed,
labels_pl: labels_feed,
}
return feed_dict
def do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_set):
"""Runs one evaluation against the full epoch of data.
Args:
sess: The session in which the model has been trained.
eval_correct: The Tensor that returns the number of correct predictions.
images_placeholder: The images placeholder.
labels_placeholder: The labels placeholder.
data_set: The set of images and labels to evaluate, from
input_data.read_data_sets().
"""
# And run one epoch of eval.
true_count = 0 # Counts the number of correct predictions.
steps_per_epoch = data_set.num_examples // FLAGS.batch_size
num_examples = steps_per_epoch * FLAGS.batch_size
for step in xrange(steps_per_epoch):
feed_dict = fill_feed_dict(data_set,
images_placeholder,
labels_placeholder)
true_count += sess.run(eval_correct, feed_dict=feed_dict)
precision = float(true_count) / num_examples
print(' Num examples: %d Num correct: %d Precision @ 1: %0.04f' %
(num_examples, true_count, precision))
def run_training():
"""Train MNIST for a number of steps."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
# Generate placeholders for the images and labels.
images_placeholder, labels_placeholder = placeholder_inputs(
FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images_placeholder,
FLAGS.hidden1,
FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels_placeholder)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels_placeholder)
# Build the summary Tensor based on the TF collection of Summaries.
summary = tf.summary.merge_all()
# Add the variable initializer Op.
init = tf.global_variables_initializer()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
# And then after everything is built:
# Run the Op to initialize the variables.
sess.run(init)
# Start the training loop.
for step in xrange(FLAGS.max_steps):
start_time = time.time()
# Fill a feed dictionary with the actual set of images and labels
# for this particular training step.
feed_dict = fill_feed_dict(data_sets.train,
images_placeholder,
labels_placeholder)
# Run one step of the model. The return values are the activations
# from the `train_op` (which is discarded) and the `loss` Op. To
# inspect the values of your Ops or variables, you may include them
# in the list passed to sess.run() and the value tensors will be
# returned in the tuple from the call.
_, loss_value = sess.run([train_op, loss],
feed_dict=feed_dict)
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
# Update the events file.
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
saver.save(sess, checkpoint_file, global_step=step)
# Evaluate against the training set.
print('Training Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.train)
# Evaluate against the validation set.
print('Validation Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.validation)
# Evaluate against the test set.
print('Test Data Eval:')
do_eval(sess,
eval_correct,
images_placeholder,
labels_placeholder,
data_sets.test)
def main(_):
if tf.gfile.Exists(FLAGS.log_dir):
tf.gfile.DeleteRecursively(FLAGS.log_dir)
tf.gfile.MakeDirs(FLAGS.log_dir)
run_training()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--max_steps',
type=int,
default=2000,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--hidden1',
type=int,
default=128,
help='Number of units in hidden layer 1.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--input_data_dir',
type=str,
default='/tmp/tensorflow/mnist/input_data',
help='Directory to put the input data.'
)
parser.add_argument(
'--log_dir',
type=str,
default='/tmp/tensorflow/mnist/logs/fully_connected_feed',
help='Directory to put the log data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| anilmuthineni/tensorflow | tensorflow/examples/tutorials/mnist/fully_connected_feed.py | Python | apache-2.0 | 9,515 |
import six
import html5lib
from nose.tools import eq_
import bleach
from bleach.tests.tools import in_
def test_empty():
eq_('', bleach.clean(''))
def test_nbsp():
if six.PY3:
expected = '\xa0test string\xa0'
else:
expected = six.u('\\xa0test string\\xa0')
eq_(expected, bleach.clean(' test string '))
def test_comments_only():
comment = '<!-- this is a comment -->'
open_comment = '<!-- this is an open comment'
eq_('', bleach.clean(comment))
eq_('', bleach.clean(open_comment))
eq_(comment, bleach.clean(comment, strip_comments=False))
eq_('{0!s}-->'.format(open_comment), bleach.clean(open_comment,
strip_comments=False))
def test_with_comments():
html = '<!-- comment -->Just text'
eq_('Just text', bleach.clean(html))
eq_(html, bleach.clean(html, strip_comments=False))
def test_no_html():
eq_('no html string', bleach.clean('no html string'))
def test_allowed_html():
eq_('an <strong>allowed</strong> tag',
bleach.clean('an <strong>allowed</strong> tag'))
eq_('another <em>good</em> tag',
bleach.clean('another <em>good</em> tag'))
def test_bad_html():
eq_('a <em>fixed tag</em>',
bleach.clean('a <em>fixed tag'))
def test_function_arguments():
TAGS = ['span', 'br']
ATTRS = {'span': ['style']}
eq_('a <br><span style="">test</span>',
bleach.clean('a <br/><span style="color:red">test</span>',
tags=TAGS, attributes=ATTRS))
def test_named_arguments():
ATTRS = {'a': ['rel', 'href']}
s = ('<a href="http://xx.com" rel="alternate">xx.com</a>',
'<a rel="alternate" href="http://xx.com">xx.com</a>')
eq_('<a href="http://xx.com">xx.com</a>', bleach.clean(s[0]))
in_(s, bleach.clean(s[0], attributes=ATTRS))
def test_disallowed_html():
eq_('a <script>safe()</script> test',
bleach.clean('a <script>safe()</script> test'))
eq_('a <style>body{}</style> test',
bleach.clean('a <style>body{}</style> test'))
def test_bad_href():
eq_('<em>no link</em>',
bleach.clean('<em href="fail">no link</em>'))
def test_bare_entities():
eq_('an & entity', bleach.clean('an & entity'))
eq_('an < entity', bleach.clean('an < entity'))
eq_('tag < <em>and</em> entity',
bleach.clean('tag < <em>and</em> entity'))
eq_('&', bleach.clean('&'))
def test_escaped_entities():
s = '<em>strong</em>'
eq_(s, bleach.clean(s))
def test_serializer():
s = '<table></table>'
eq_(s, bleach.clean(s, tags=['table']))
eq_('test<table></table>', bleach.linkify('<table>test</table>'))
eq_('<p>test</p>', bleach.clean('<p>test</p>', tags=['p']))
def test_no_href_links():
s = '<a name="anchor">x</a>'
eq_(s, bleach.linkify(s))
def test_weird_strings():
s = '</3'
eq_(bleach.clean(s), '')
def test_xml_render():
parser = html5lib.HTMLParser()
eq_(bleach._render(parser.parseFragment('')), '')
def test_stripping():
eq_('a test <em>with</em> <b>html</b> tags',
bleach.clean('a test <em>with</em> <b>html</b> tags', strip=True))
eq_('a test <em>with</em> <b>html</b> tags',
bleach.clean('a test <em>with</em> <img src="http://example.com/"> '
'<b>html</b> tags', strip=True))
s = '<p><a href="http://example.com/">link text</a></p>'
eq_('<p>link text</p>', bleach.clean(s, tags=['p'], strip=True))
s = '<p><span>multiply <span>nested <span>text</span></span></span></p>'
eq_('<p>multiply nested text</p>', bleach.clean(s, tags=['p'], strip=True))
s = ('<p><a href="http://example.com/"><img src="http://example.com/">'
'</a></p>')
eq_('<p><a href="http://example.com/"></a></p>',
bleach.clean(s, tags=['p', 'a'], strip=True))
def test_allowed_styles():
ATTR = ['style']
STYLE = ['color']
blank = '<b style=""></b>'
s = '<b style="color: blue;"></b>'
eq_(blank, bleach.clean('<b style="top:0"></b>', attributes=ATTR))
eq_(s, bleach.clean(s, attributes=ATTR, styles=STYLE))
eq_(s, bleach.clean('<b style="top: 0; color: blue;"></b>',
attributes=ATTR, styles=STYLE))
def test_idempotent():
"""Make sure that applying the filter twice doesn't change anything."""
dirty = '<span>invalid & </span> < extra http://link.com<em>'
clean = bleach.clean(dirty)
eq_(clean, bleach.clean(clean))
linked = bleach.linkify(dirty)
eq_(linked, bleach.linkify(linked))
def test_rel_already_there():
"""Make sure rel attribute is updated not replaced"""
linked = ('Click <a href="http://example.com" rel="tooltip">'
'here</a>.')
link_good = (('Click <a href="http://example.com" rel="tooltip nofollow">'
'here</a>.'),
('Click <a rel="tooltip nofollow" href="http://example.com">'
'here</a>.'))
in_(link_good, bleach.linkify(linked))
in_(link_good, bleach.linkify(link_good[0]))
def test_lowercase_html():
"""We should output lowercase HTML."""
dirty = '<EM CLASS="FOO">BAR</EM>'
clean = '<em class="FOO">BAR</em>'
eq_(clean, bleach.clean(dirty, attributes=['class']))
def test_wildcard_attributes():
ATTR = {
'*': ['id'],
'img': ['src'],
}
TAG = ['img', 'em']
dirty = ('both <em id="foo" style="color: black">can</em> have '
'<img id="bar" src="foo"/>')
clean = ('both <em id="foo">can</em> have <img src="foo" id="bar">',
'both <em id="foo">can</em> have <img id="bar" src="foo">')
in_(clean, bleach.clean(dirty, tags=TAG, attributes=ATTR))
def test_sarcasm():
"""Jokes should crash.<sarcasm/>"""
dirty = 'Yeah right <sarcasm/>'
clean = 'Yeah right <sarcasm/>'
eq_(clean, bleach.clean(dirty))
| sunze/py_flask | venv/lib/python3.4/site-packages/bleach/tests/test_basics.py | Python | mit | 5,942 |
#!/usr/bin/env python
import os
import sys
import subprocess
import time
MAX_PROC_NUM = 20
JAIL_TIME = 92
GOODWORD = 'goodword'
BADWORD = 'ultrasurf'
OUTTER_WEBSITES = {
'yahoo.com': 'http://www.yahoo.com/',
'wikipedia.org': 'http://www.wikipedia.org/',
'amazon.com': 'http://www.amazon.com/',
'live.com': 'http://www.live.com/',
'vk.com': 'http://www.vk.com/',
'linkedin.com': 'http://www.linkedin.com/',
'yandex.ru': 'http://www.yandex.ru/',
'reddit.com': 'http://www.reddit.com/',
'ebay.com': 'http://www.ebay.com/',
'msn.com': 'http://www.msn.com/',
'stackoverflow.com': 'http://www.stackoverflow.com/',
'microsoft.com': 'http://www.microsoft.com/',
'mail.ru': 'http://www.mail.ru/',
'netflix.com': 'http://www.netflix.com/',
'paypal.com': 'http://www.paypal.com/',
'ok.ru': 'http://www.ok.ru/',
'imgur.com': 'http://www.imgur.com/',
'github.com': 'http://www.github.com/',
'imdb.com': 'http://www.imdb.com/',
'whatsapp.com': 'http://www.whatsapp.com/',
'office.com': 'http://www.office.com/',
'adobe.com': 'http://www.adobe.com/',
'craigslist': 'http://www.craigslist.org/',
'twitch.tv': 'http://www.twitch.tv/',
'quora.com': 'http://www.quora.com/',
'cnn.com': 'http://www.cnn.com/',
'rakuten.jp': 'http://search.rakuten.co.jp/',
'coccoc.com': 'http://coccoc.com/',
'ask.com': 'http://www.ask.com/',
'bbc.com': 'http://www.bbc.com/',
'salesforce.com': 'http://www.salesforce.com/',
'outbrain.com': 'http://www.outbrain.com/',
'booking.com': 'http://www.booking.com/',
'indiatimes.com': 'http://www.indiatimes.com/',
'diply.com': 'http://www.diply.com/',
'globo.com': 'http://www.globo.com/',
'uol.com.br': 'http://www.uol.com.br/',
'dailymail.co.uk': 'http://www.dailymail.co.uk/',
'ettoday.net': 'http://www.ettoday.net/',
'daum.net': 'http://www.daum.net/',
'indeed.com': 'http://www.indeed.com/',
'blastingnews.com': 'http://www.blastingnews.com/',
'savefrom.net': 'http://en.savefrom.net/',
'trello.com': 'http://trello.com/',
'uptodown.com': 'http://en.uptodown.com/',
'deviantart.com': 'http://www.deviantart.com/',
'tribunnews.com': 'http://www.tribunnews.com/',
'addthis.com': 'http://www.addthis.com/',
'theguardian.com': 'http://www.theguardian.com/',
'cnet.com': 'http://www.cnet.com/',
# 'hulu.com': 'http://www.hulu.com/',
# 'royalmail.com': 'http://www.royalmail.com/',
# 'nationwide.co.uk': 'http://www.nationwide.co.uk/',
# 'currys.co.uk': 'http://www.currys.co.uk/',
# 'livedoor.com': 'http://search.livedoor.com/',
# 'naver.jp': 'http://matome.naver.jp/',
# 'nonews.com': 'http://legacy.nownews.com/',
# 'cheers.com.tw': 'http://www.cheers.com.tw/',
# 'u-car.com.tw': 'http://www.u-car.com.tw/',
# 'gaana.com': 'http://gaana.com/',
# 'monster.com': 'http://www.monsterindia.com/',
# 'rambler.ru': 'http://nova.rambler.ru/',
# 'eldorado.ru': 'http://www.eldorado.ru/',
# 'shaw.ca': 'http://www.shaw.ca/',
# 'cic.gc.ca': 'http://www.cic.gc.ca/',
# 'sbs.com.au': 'http://www.sbs.com.au/',
# 'nla.gov.au': 'http://www.nla.gov.au/',
}
TARGETS = OUTTER_WEBSITES
KEYWORD = None
start_time = None
def start_tcpdump(sid):
print("Starting tcpdump...")
p = subprocess.Popen(["tcpdump", "-i", "any", "-w", "./results/pktdump.pcap.%d.%s" % (sid, start_time), "tcp port 80"])
return p
def stop_tcpdump(p):
print("Stopping tcpdump...")
os.system("kill %d" % p.pid)
def is_alldone(test_count, round_num):
for website in TARGETS:
if website not in test_count:
return False
if test_count[website] < round_num:
return False
return True
def is_jailed(jail_time, website):
if website not in jail_time:
return False
if time.time() - jail_time[website] > JAIL_TIME:
del jail_time[website]
return False
return True
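# Added note (not in the original script): wget exits with status 4 on a
# network failure, which this script treats as a connection reset injected
# mid-test, so the site is put "in jail". With JAIL_TIME = 92, a site reset at
# t=100s is skipped until t=192s, after which is_jailed() drops it from
# jail_time and requests to it resume.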
def test_websites(sid, rounds):
global start_time
start_time = time.strftime("%Y%m%d%H%M%S")
p = start_tcpdump(sid)
time.sleep(2)
jail_time = {}
testing = {}
test_count = {}
i = 0
while not is_alldone(test_count, rounds):
print("[Round %d]" % (i+1))
for website, url in TARGETS.iteritems():
if website not in test_count:
test_count[website] = 0
if test_count[website] >= rounds:
# the website has been done
continue
while len(testing) >= MAX_PROC_NUM:
# clean working set
websites = testing.keys()
                for finished_site in websites:
                    ret = testing[finished_site].poll()
                    if ret is not None:
                        if ret == 4:
                            # connect reset by peer (reset by GFW?)
                            jail_time[finished_site] = time.time()
                        del testing[finished_site]
time.sleep(0.5)
if is_jailed(jail_time, website):
# in jail, skip
print("%s in jail. %ds left. skip..." % (website, jail_time[website] + JAIL_TIME - time.time()))
else:
if testing.get(website):
# testing, skip
ret = testing[website].poll()
if ret is not None:
if ret == 4:
# connect reset by peer (reset by GFW?)
jail_time[website] = time.time()
del testing[website]
else:
print("Testing website %s..." % website)
pwget = subprocess.Popen("wget -4 -O /dev/null --tries=1 --timeout=5 --max-redirect 0 \"%s\"" % (url + KEYWORD), shell=True)
testing[website] = pwget
test_count[website] += 1
time.sleep(0.1)
i += 1
time.sleep(5)
stop_tcpdump(p)
os.system("./stop.sh")
time.sleep(0.5)
os.system("cp /var/log/intangd.log ./results/intangd.log.%d.%s" % (sid, start_time))
os.system("cp /usr/local/share/intangd/dump.rdb ./results/dump.rdb.%d.%s" % (sid, start_time))
os.system("cd results && tar zcf result.%d.%s.tar.gz pktdump.pcap.%d.%s intangd.log.%d.%s dump.rdb.%d.%s" % (sid, start_time, sid, start_time, sid, start_time, sid, start_time))
if __name__ == "__main__":
if os.geteuid() != 0:
print("Needs root privilege.")
sys.exit(0)
import os.path
if not os.path.isfile("intangd"):
print("Cannot find intangd. Please try \"make\".")
sys.exit(0)
    if len(sys.argv) != 3:
        print("Usage: %s <sid> <num of rounds>" % sys.argv[0])
        sys.exit(0)
sid = int(sys.argv[1])
rounds = int(sys.argv[2])
if sid == 0:
KEYWORD = GOODWORD
else:
KEYWORD = BADWORD
os.system("mkdir results")
os.system("chmod 777 results")
print("Stopping intang and deleting redis db.")
os.system("./stop.sh")
time.sleep(1)
os.system("rm /usr/local/share/intangd/dump.rdb")
print("Restarting intang.")
os.system("./run.sh %d" % sid)
time.sleep(1)
test_websites(sid, rounds)
| seclab-ucr/INTANG | src/test/test_succ_rate_cn.py | Python | gpl-3.0 | 7,248 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-23 13:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('projects', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('ip_address', models.GenericIPAddressField()),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.Project')),
('voter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='voter')),
],
options={
'ordering': ('-created',),
},
),
migrations.AlterUniqueTogether(
name='vote',
unique_together=set([('project', 'voter')]),
),
]
| jfterpstra/bluebottle | bluebottle/votes/migrations/0001_initial.py | Python | bsd-3-clause | 1,352 |
#!/usr/bin/env python
import json
import os
import pytz
import requests
import threading
import time
from collections import OrderedDict
from datetime import datetime
class Error(Exception):
pass
class Auditor(object):
def __init__(self, hostname, port, secure=False, buffer_secs=None):
self.hostname = hostname
self.port = port
self.secure = secure
self.buffer_secs = buffer_secs
self.events = Events(self)
def _request(self, caller, handler, key=None, value=None):
headers = {'Content-type': 'application/json'}
kwargs = {
"headers": headers,
"timeout": 10,
}
if key and value:
kwargs["data"] = json.dumps({key: value})
response = caller(
"http://%s:%s%s" % (self.hostname, self.port, handler), **kwargs)
data = json.loads(response.text)
if data["type"] == "error":
raise Error(data["data"]["msg"])
return data["data"]
    def _put(self, handler, key, value):
        return self._request(requests.put, handler, key, value)
    def _post(self, handler, key, value):
        return self._request(requests.post, handler, key, value)
def _get(self, handler):
return self._request(requests.get, handler)
def alog(self, summary, tags="", user=None, level=1, close=True):
data = {
"summary": summary,
"user": get_user(user),
"level": level,
"start": pytz.UTC.localize(datetime.utcnow()),
}
if isinstance(tags, list):
tags = ", ".join(tags)
if tags: data["tags"] = tags
if close:
data["end"] = data["start"]
response = json.loads(requests.post("http://%s:%s/event/" % (self.hostname, self.port), data=data).text)
if response["type"] == "error":
raise Error(response["data"]["msg"])
# Don't return an Event at all when doing a simple
# summary log.
if close:
return
return Event(self, response["data"])
class EventCommiter(threading.Thread):
def __init__(self, event):
self.event = event
super(EventCommiter, self).__init__()
def run(self):
last_update = 0
while not self.event._closing:
now = time.time()
if (now - last_update) >= self.event._connection.buffer_secs:
self.event.commit()
last_update = time.time()
time.sleep(.2)
class Events(object):
def __init__(self, connection):
self._connection = connection
self.limit = 50
def __getitem__(self, val):
offset = 0
if not isinstance(val, int):
if val.start:
offset = val.start
limit = self.limit + offset
if val.stop:
limit = val.stop
else:
limit = val
events, total = self._get_events(offset, limit)
return events
def _get_events(self, offset, limit):
response = self._connection._get("/event/?offset=%s&limit=%s" % (offset, limit))
total = response["total"]
events = [Event(self._connection, event) for event in response["events"]]
return events, total
def __iter__(self):
events, total = self._get_events(0, self.limit)
for event in events:
yield event
# If this is True we need to start paginating.
if total > len(events):
for idx in range(1, (total / self.limit) + 1):
offset = idx * self.limit
events, _ = self._get_events(offset, offset + self.limit)
for event in events:
yield event
class Event(object):
def __init__(self, connection, payload):
self._connection = connection
self._update(payload)
self._closing = False
self._commiter = None
self._batched_details = {
"attribute": OrderedDict(),
"stream": OrderedDict(),
}
self._batched_details_lock = threading.RLock()
self.attrs = DetailsDescriptor(self, "attribute")
self.streams = DetailsDescriptor(self, "stream")
def _add_detail(self, details_type, name, value, mode="set"):
if self._commiter is None:
self._start_commiter()
with self._batched_details_lock:
detail = self._batched_details[details_type]
if name not in detail:
detail[name] = {
"details_type": details_type,
"name": name,
"value": [],
"mode": "append",
}
if mode == "set":
detail[name]["mode"] = "set"
detail[name]["value"] = [value]
elif mode == "append":
detail[name]["value"].append(value)
if not self._connection.buffer_secs:
self.commit()
def _start_commiter(self):
if self._connection.buffer_secs:
# This must be started last so that it has access to all
# of the attributes when it is started.
self._commiter = EventCommiter(self)
self._commiter.daemon = True
self._commiter.start()
@staticmethod
def _build_payload(values):
payload = []
for detail in values:
if detail["details_type"] == "stream":
payload.append({
"details_type": "stream",
"name": detail["name"],
"value": "".join(detail["value"]),
"mode": detail["mode"],
})
elif detail["details_type"] == "attribute":
for idx, val in enumerate(detail["value"]):
mode = "append"
if detail["mode"] == "set" and idx == 0:
mode = "set"
payload.append({
"details_type": "attribute",
"name": detail["name"],
"value": val,
"mode": mode,
})
return payload
def commit(self):
with self._batched_details_lock:
values = self._batched_details["attribute"].values()
values += self._batched_details["stream"].values()
if not len(values):
return
self._batched_details["attribute"] = OrderedDict()
self._batched_details["stream"] = OrderedDict()
self._connection._post("/event/%s/details/" % self.id,
"details", self._build_payload(values))
def _update(self, payload):
self.id = payload.get("id")
self.summary = payload.get("summary")
self.user = payload.get("user")
self.tags = payload.get("tags", "").split(", ")
self.start = payload.get("start")
self.end = payload.get("end")
def close(self):
self._closing = True
self._update(self._connection._put(
"/event/%s/" % self.id, "end", str(pytz.UTC.localize(datetime.utcnow()))
))
if self._commiter:
self._commiter.join()
self.commit()
class DetailsContainer(object):
""" Wraps a value for a particular detail."""
def __init__(self, parent, name):
self.parent = parent
self.name = name
self.value = []
def set(self, elem):
self.value = [elem]
self.parent.event._add_detail(
self.parent.name,
self.name,
elem,
mode="set")
def append(self, elem):
self.value.append(elem)
self.parent.event._add_detail(
self.parent.name,
self.name,
elem,
mode="append")
class DetailsDescriptor(object):
""" Acts as a proxy between varios details and their values."""
def __init__(self, event, name):
self.event = event
self.name = name
self._values = {}
def __getattr__(self, name):
if name not in self._values:
self._values[name] = DetailsContainer(self, name)
return self._values[name]
def __getitem__(self, key):
return self.__getattr__(key)
def get_user(user=None):
if user is not None:
return user
if "SUDO_USER" in os.environ:
return "%s(%s)" % (os.environ["USER"], os.environ["SUDO_USER"])
return os.environ["USER"]
| gmjosack/pyauditor | pyauditor/__init__.py | Python | mit | 8,565 |
# -*- coding: utf-8 -*-
import subprocess # noqa: S404
import six
def launchctl(subcommand, *args):
"""
Call the launchctl binary and capture the output.
:param subcommand: string
"""
if not isinstance(subcommand, six.string_types):
raise ValueError("Argument is invalid: %r" % repr(subcommand))
if isinstance(subcommand, six.text_type):
subcommand = subcommand.encode("utf-8")
cmd = ["launchctl", subcommand]
for arg in args:
if isinstance(arg, six.string_types):
if isinstance(arg, six.text_type):
cmd.append(arg.encode("utf-8"))
else:
cmd.append(arg)
else:
raise ValueError("Argument is invalid: %r" % repr(arg))
return subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, shell=False) # noqa: S603
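# Illustrative usage (not part of the original module); requires macOS with
# launchctl on PATH. The job label queried below is only an example of the
# kind of label that `launchctl list` prints.
if __name__ == "__main__":
    print(launchctl("list"))
    print(launchctl("list", "com.apple.Finder"))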
| infothrill/python-launchd | launchd/cmd.py | Python | mit | 860 |
from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from django_ajax.encoder import LazyJSONEncoder
import json
class LazyJSONEncoderMixinTestCase(TestCase):
def test_default_date(self):
data = {'datetime': datetime.today()}
self.assertEqual('{"datetime": "' + data['datetime'].isoformat() + '"}', json.dumps(data, cls=LazyJSONEncoder))
class BaseTestCase(TestCase):
    def post(self, uri, data=None):
        response = self.client.get(uri, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEquals(200, response.status_code)
        self.assertEquals('application/json', response['Content-Type'])
if isinstance(response.content, str):
return response, json.loads(response.content)
else:
return response, json.loads(response.content.decode('utf-8'))
class FooTestCase(BaseTestCase):
def test_json_response(self):
resp, data = self.post('/ajax/foo')
self.assertEqual('OK', data['statusText'])
self.assertEqual({'foo': True}, data['content'])
class LoginRequiredTestCase(BaseTestCase):
def test_json_response(self):
resp, data = self.post('/ajax/login-required')
self.assertEquals(302, data['status'])
self.assertEqual('FOUND', data['statusText'])
class RenderTestCase(BaseTestCase):
def test_json_response(self):
resp, data = self.post('/ajax/render')
self.assertEquals(200, data['status'])
self.assertEqual('OK', data['statusText'])
self.assertEqual('<html>Hello</html>', data['content'].strip())
class RenderClassBasedViewTestCase(BaseTestCase):
def test_json_response(self):
resp, data = self.post('/ajax/render-class-based-view')
self.assertEquals(200, data['status'])
self.assertEqual('OK', data['statusText'])
self.assertEqual('<html>Hello</html>', data['content'].strip())
class ExceptionTestCase(BaseTestCase):
def test_json_response(self):
resp, data = self.post('/ajax/exception')
# self.assertEquals(200, data['status'])
self.assertEqual('INTERNAL SERVER ERROR', data['statusText'])
class RaiseExceptionTestCase(BaseTestCase):
def test_json_response(self):
resp, data = self.post('/ajax/raise-exception')
# self.assertEquals(200, data['status'])
self.assertEqual('NOT FOUND', data['statusText'])
| yceruto/django-ajax | tests/ajaxmiddleware/app/tests.py | Python | mit | 2,443 |
from django import forms
from api.models import Sighting
from api.models import Answer
from api.models import UserComment
from django.contrib.auth.models import User
class SightingForm(forms.ModelForm):
class Meta:
model = Sighting
fields = ('type', 'free_text', 'location', 'lat', 'lng')
class QuestionForm(forms.ModelForm):
class Meta:
model = Answer
fields = ('value',)
class CommentSightingForm(forms.ModelForm):
class Meta:
model = UserComment
fields = ('body',)
class SignupUserForm(forms.Form):
username = forms.CharField(
min_length=4,
widget=forms.TextInput(attrs={'class': 'form-control'}))
email = forms.EmailField(
widget=forms.EmailInput(attrs={'class': 'form-control'}))
password = forms.CharField(
min_length=4,
widget=forms.PasswordInput(attrs={'class': 'form-control'}))
password2 = forms.CharField(
min_length=4,
widget=forms.PasswordInput(attrs={'class': 'form-control'}))
photo = forms.ImageField(required=False)
def clean_username(self):
"""Comprueba que no exista un username igual en la db"""
username = self.cleaned_data['username']
if User.objects.filter(username=username):
raise forms.ValidationError('Nombre de usuario ya registrado.')
return username
def clean_email(self):
"""Comprueba que no exista un email igual en la db"""
email = self.cleaned_data['email']
if User.objects.filter(email=email):
raise forms.ValidationError('Ya existe un email igual en la db.')
return email
def clean_password2(self):
"""Comprueba que password y password2 sean iguales."""
password = self.cleaned_data['password']
password2 = self.cleaned_data['password2']
if password != password2:
raise forms.ValidationError('Las contraseñas no coinciden.')
return password2
class UserProfileForm(forms.Form):
username = forms.CharField(
min_length=4,
widget=forms.TextInput(attrs={'class': 'form-control'}))
email = forms.EmailField(
widget=forms.EmailInput(attrs={'class': 'form-control'}))
def __init__(self, *args, **kwargs):
"""Obtener request"""
self.request = kwargs.pop('request')
return super().__init__(*args, **kwargs)
def clean_email(self):
email = self.cleaned_data['email']
        # Check whether the email has changed
current_email = self.request.user.email
if email != current_email:
            # If it has changed, make sure it does not already exist in the db.
exists = User.objects.filter(email=email)
if exists:
raise forms.ValidationError('Ya existe un usuario con este email.')
return email
def clean_username(self):
username = self.cleaned_data['username']
        # Check whether the username has changed
current_username = self.request.user.username
if username != current_username:
            # If it has changed, make sure it does not already exist in the db.
exists = User.objects.filter(username=username)
if exists:
raise forms.ValidationError('Ya existe un usuario con este nombre.')
return username
class PasswordProfileForm(forms.Form):
actual_password = forms.CharField(
label='Contraseña actual',
min_length=4,
widget=forms.PasswordInput(attrs={'class': 'form-control'}))
password = forms.CharField(
label='Nueva contraseña',
min_length=4,
widget=forms.PasswordInput(attrs={'class': 'form-control'}))
password2 = forms.CharField(
label='Repetir contraseña',
min_length=4,
widget=forms.PasswordInput(attrs={'class': 'form-control'}))
def __init__(self, *args, **kwargs):
"""Obtener user"""
self.user = kwargs.pop('user')
return super().__init__(*args, **kwargs)
def clean_actual_password(self):
"""Comprueba que actual_password sean la correcta."""
actual_password = self.cleaned_data['actual_password']
if not self.user.check_password(actual_password):
raise forms.ValidationError('Contraseña inválida')
return actual_password
def clean_password2(self):
"""Comprueba que password y password2 sean iguales."""
password = self.cleaned_data['password']
password2 = self.cleaned_data['password2']
if password != password2:
raise forms.ValidationError('Las contraseñas no coinciden.')
return password2
class PhotoProfileForm(forms.Form):
photo = forms.ImageField(required=False)
class ContactForm(forms.Form):
nameContact = forms.CharField(
min_length=4,
widget=forms.TextInput(attrs={'class': 'form-control'}))
emailContact = forms.EmailField(
widget=forms.EmailInput(attrs={'class': 'form-control'}))
phoneContact = forms.CharField(
required=False,
widget=forms.TextInput(attrs={'class': 'form-control'}))
messageContact = forms.CharField(
min_length=4,
widget=forms.TextInput(attrs={'class': 'form-control'}))
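# Illustrative sketch (not part of the original module): binding SignupUserForm
# to POST-style data runs the clean_* validators defined above. It assumes a
# configured Django project with a database, and the field values are made up.
def _signup_form_example():
    form = SignupUserForm(data={
        'username': 'newuser',
        'email': '[email protected]',
        'password': 'secret123',
        'password2': 'secret123',
    })
    if form.is_valid():
        return form.cleaned_data
    return form.errors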
| CarlosTenorio/vespapp-web | web/forms.py | Python | gpl-3.0 | 5,273 |
import sys
import datetime
import time
from PIL import Image
import matplotlib.pyplot as plt
f = Image.open("1.jpg")
plt.figure("1")
plt.imshow(f)
plt.show() | liumeixia/xiaworkspace | pythonProject/punchcard/card.py | Python | gpl-2.0 | 157 |
import torch.nn.functional as F
import torch
from torch.autograd import Variable
import numpy as np
from src.data_ops.wrapping import wrap
from src.admin.utils import see_tensors_in_memory
def loss(y_pred, y, y_mask, bm):
l = nll
return l(y_pred, y, y_mask, bm)
def kl(y_pred, y, y_mask):
n = y_pred.shape[1]
dists = wrap(torch.Tensor(distances(n)) ** (1/2.5)).view(-1, n, n)
logprobs = stable_log(y_pred)
lossfn = torch.nn.KLDivLoss(reduce=False)
l = lossfn(logprobs, y)
l = l * dists
l = reweight_loss(l, y)
l = l.masked_select(y_mask.byte())
l = l.mean()
return l
def nll(y_pred, y, y_mask, batch_mask):
n = y_pred.shape[1]
n_ = batch_mask.sum(1,keepdim=True)[:,:,0]
#x = F.sigmoid(distances(n) - n / 2)
dists = wrap(torch.Tensor(distances(n))).view(-1, n, n) * batch_mask
x = torch.exp(-(n_.unsqueeze(1) - dists - 1)*0.01)
#import ipdb; ipdb.set_trace()
dists = (x)
lossfn = torch.nn.NLLLoss(reduce=False)
logprobs = stable_log(torch.stack([1-y_pred, y_pred], 1))
l = (lossfn(logprobs, y.long()))
l = l * dists
l = reweight_loss(l, y)
l = l.masked_select(y_mask.byte())
l = l.mean()
return l
def cho_loss(y_pred, y, y_mask):
n = y_pred.shape[1]
dists = wrap(torch.Tensor(distances(n)) ** (1./2.5))
y_pred = y_pred.view(-1, n ** 2)
y = y.view(-1, n ** 2)
l = my_bce_loss(y_pred, y, reduce=False)
l = reweight_loss(l, y)
l = l * dists
l = l.masked_select(y_mask.view(-1, n**2).byte())
l = l.mean()
return l
def vanilla_bce_loss(y_pred, y, y_mask):
n = y_pred.shape[1]
l = my_bce_loss(y_pred, y, reduce=False).view(-1, n**2)
l = l.masked_select(y_mask.view(-1, n**2).byte())
l = l.mean()
return l
def distance_loss(y_pred, y, y_mask):
return ((y_pred - y).pow(2) * y_mask).mean()
def reweight_loss(l, y):
n_pos = y.sum(1, keepdim=True)
n_neg = (1 - y).sum(1, keepdim=True)
l_pos = y * l
l_neg = (1 - y) * l
l = (l_pos * n_neg + l_neg * n_pos) / (n_pos + n_neg)
return l
def reweight(tensor, idx, weight):
tensor[:,idx] = tensor[:,idx] * weight
return tensor
def distances(n):
indices = np.arange(n ** 2)
rows = indices // n
columns = indices % n
b_dists = abs(rows - columns)
return b_dists
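# Worked example (added for clarity, not in the original): distances(3) returns
# the flattened 3x3 grid of |row - column| values,
#   [0, 1, 2,
#    1, 0, 1,
#    2, 1, 0]
# nll() and kl() reshape this to (1, n, n) so each pairwise term of the loss is
# weighted by how far apart the two positions are in the sequence.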
def stable_log(x):
minvar = Variable(torch.Tensor([1e-20]))
if torch.cuda.is_available():
minvar = minvar.cuda()
x = torch.log(torch.max(x, minvar))
return x
def my_bce_loss(input, target, weight=None, reduce=True):
input = stable_log(input)
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()
if weight is not None:
loss = loss * weight
if reduce:
loss = loss.mean()
return loss
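# Added note (not in the original): the max_val term above is the standard
# log-sum-exp stabilisation of binary cross-entropy on logits, computing
# log(1 + exp(-x)) as max(-x, 0) + log(exp(-max(-x, 0)) + exp(-x - max(-x, 0)))
# so that neither exponential can overflow. Here the "logits" are
# stable_log(y_pred), since callers pass probabilities rather than raw scores.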
| isaachenrion/jets | src/proteins/loss.py | Python | bsd-3-clause | 3,024 |
import sys
from django.conf.urls import url
from django.http import HttpResponse
from django.views import View
from request_logging.decorators import no_logging
from rest_framework import viewsets, routers
# DRF 3.8.2 is used in python versions 3.4 and older, which needs special handling
IS_DRF_382 = sys.version_info <= (3, 4)
def general_resource(request):
    return HttpResponse("Generic response entity", status=200)
class TestView(View):
def get(self):
return HttpResponse(status=200)
@no_logging()
def post(self, request):
return HttpResponse(status=200)
@no_logging()
def view_func(request):
return HttpResponse(status=200, body="view_func with no logging")
@no_logging("Custom message")
def view_msg(request):
return HttpResponse(status=200, body="view_msg with no logging with a custom reason why")
@no_logging(silent=True)
def dont_log_silent(request):
return HttpResponse(status=200, body="view_msg with silent flag set")
@no_logging("Empty response body")
def dont_log_empty_response_body(request):
return HttpResponse(status=201)
class UnannotatedDRF(viewsets.ModelViewSet):
@no_logging("DRF explicit annotation")
def list(self, request):
return HttpResponse(status=200, body="DRF Unannotated")
@no_logging("Takes excessive amounts of time to log")
def partial_update(self, request, *args, **kwargs):
return HttpResponse(status=200, body="NO logging")
router = routers.SimpleRouter(trailing_slash=False)
if IS_DRF_382:
last_arguments = {"base_name": "widgets"}
else:
last_arguments = {"basename": "widgets"}
router.register(r"widgets", UnannotatedDRF, **last_arguments)
urlpatterns = [
url(r"^somewhere$", general_resource),
url(r"^test_class$", TestView.as_view()),
url(r"^test_func$", view_func),
url(r"^test_msg$", view_msg),
url(r"^dont_log_empty_response_body$", dont_log_empty_response_body),
url(r"^dont_log_silent$", dont_log_silent),
] + router.urls
| Rhumbix/django-request-logging | test_urls.py | Python | mit | 2,009 |
from __future__ import absolute_import
from six import string_types
from twisted.internet.defer import inlineCallbacks
from Tribler.Core.CacheDB.SqliteCacheDBHandler import TorrentDBHandler, MyPreferenceDBHandler
from Tribler.Core.CacheDB.sqlitecachedb import str2bin
from Tribler.Test.Core.test_sqlitecachedbhandler import AbstractDB
class TestMyPreferenceDBHandler(AbstractDB):
@inlineCallbacks
def setUp(self):
yield super(TestMyPreferenceDBHandler, self).setUp()
self.tdb = TorrentDBHandler(self.session)
self.mdb = MyPreferenceDBHandler(self.session)
self.mdb._torrent_db = self.tdb
def tearDown(self):
self.mdb.close()
self.mdb = None
self.tdb.close()
self.tdb = None
super(TestMyPreferenceDBHandler, self).tearDown()
def test_getPrefList(self):
pl = self.mdb.getMyPrefListInfohash()
self.assertEqual(len(pl), 12)
def test_addMyPreference_deletePreference(self):
p = self.mdb.getOne(('torrent_id', 'destination_path', 'creation_time'), torrent_id=126)
torrent_id = p[0]
infohash = self.tdb.getInfohash(torrent_id)
destpath = p[1]
creation_time = p[2]
self.mdb.deletePreference(torrent_id)
pl = self.mdb.getMyPrefListInfohash()
self.assertEqual(len(pl), 12)
self.assertIn(infohash, pl)
data = {'destination_path': destpath}
self.mdb.addMyPreference(torrent_id, data)
p2 = self.mdb.getOne(('torrent_id', 'destination_path', 'creation_time'), torrent_id=126)
self.assertTrue(p2[0] == p[0])
self.assertTrue(p2[1] == p[1])
self.mdb.deletePreference(torrent_id)
pl = self.mdb.getMyPrefListInfohash(returnDeleted=False)
self.assertEqual(len(pl), 11)
self.assertNotIn(infohash, pl)
data = {'destination_path': destpath, 'creation_time': creation_time}
self.mdb.addMyPreference(torrent_id, data)
p3 = self.mdb.getOne(('torrent_id', 'destination_path', 'creation_time'), torrent_id=126)
self.assertEqual(p3, p)
def test_getMyPrefListInfohash(self):
preflist = self.mdb.getMyPrefListInfohash()
for p in preflist:
self.assertTrue(not p or len(p) == 20)
self.assertEqual(len(preflist), 12)
def test_get_my_pref_stats(self):
res = self.mdb.getMyPrefStats()
self.assertEqual(len(res), 12)
for k in res:
data = res[k]
self.assertIsInstance(data, string_types, "data is not destination_path: %s" % type(data))
res = self.mdb.getMyPrefStats(torrent_id=126)
self.assertEqual(len(res), 1)
def test_my_pref_stats_infohash(self):
infohash = str2bin('AB8cTG7ZuPsyblbRE7CyxsrKUCg=')
self.assertIsNone(self.mdb.getMyPrefStatsInfohash(infohash))
infohash = str2bin('ByJho7yj9mWY1ORWgCZykLbU1Xc=')
self.assertTrue(self.mdb.getMyPrefStatsInfohash(infohash))
def test_get_my_pref_list_infohash_limit(self):
self.assertEqual(len(self.mdb.getMyPrefListInfohash(limit=10)), 10)
def test_add_my_preference(self):
self.assertTrue(self.mdb.addMyPreference(127, {'destination_path': 'C:/mytorrent'}))
self.assertTrue(self.mdb.addMyPreference(12345678, {'destination_path': 'C:/mytorrent'}))
self.assertFalse(self.mdb.addMyPreference(12345678, {'destination_path': 'C:/mytorrent'}))
def test_delete_my_preference(self):
self.mdb.deletePreference(126)
res = self.mdb.getMyPrefStats(126)
self.assertFalse(res[126])
self.mdb.deletePreference(12348934)
def test_update_dest_dir(self):
self.mdb.updateDestDir(126, 'C:/mydest')
res = self.mdb.getMyPrefStats(126)
self.assertEqual(res[126], 'C:/mydest')
self.mdb.updateDestDir(126, {})
self.assertEqual(res[126], 'C:/mydest')
| Captain-Coder/tribler | Tribler/Test/Core/test_sqlitecachedbhandler_preferences.py | Python | lgpl-3.0 | 3,907 |
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
package_list = find_packages()
except ImportError:
from distutils.core import setup
package_list = ['pyabad', 'pyabad.data_creation']
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
config = {
'description': 'Automatic Broadband NIRS Artefact Detection',
'long_description': readme,
'author': 'Joshua Russell-Buckland',
'url': 'https://github.com/buck06191/ABROAD',
'download_url': 'https://github.com/buck06191/ABROAD/archive/master.zip',
'author_email': '[email protected]',
'version': '0.1.0',
'license': license,
'install_requires': required,
'packages': package_list,
'scripts': [],
'name': 'ABROAD'
}
setup(**config)
| buck06191/ABROAD | setup.py | Python | mit | 913 |
from django.db.models import F, Q
from olympia.amo.management import ProcessObjectsCommand
from olympia.bandwagon.models import Collection
from olympia.translations.models import Translation
from olympia.translations.tasks import reclean_collection_descriptions
class Command(ProcessObjectsCommand):
def get_model(self):
return Translation
def get_tasks(self):
return {
'reclean_collection_descriptions': {
'task': reclean_collection_descriptions,
# Need to fetch ids of translations that belong to collection
# descriptions (there might be more than one per collection!)
# and then find those where the cleaned string is not the same
# as the original: those are the ones we need to re-clean.
'queryset_filters': [
Q(
id__in=Collection.objects.all()
.filter(description__isnull=False)
.values_list('description', flat=True)
),
Q(localized_string_clean__isnull=False),
~Q(localized_string_clean=F('localized_string')),
],
},
}
| bqbn/addons-server | src/olympia/translations/management/commands/process_translations.py | Python | bsd-3-clause | 1,251 |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.orm import defaultload, joinedload
from indico.core.db import db
from indico.core.db.sqlalchemy.util.models import get_simple_column_attrs
from indico.modules.events.cloning import EventCloner
from indico.modules.events.models.events import EventType
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.i18n import _
class TimetableCloner(EventCloner):
name = 'timetable'
friendly_name = _('Timetable')
requires = {'sessions', 'contributions'}
@property
def is_available(self):
return bool(self.old_event.timetable_entries.count())
@property
def is_default(self):
return self.old_event.type_ == EventType.meeting
@property
def is_visible(self):
return self.old_event.type_ in {EventType.meeting, EventType.conference}
def run(self, new_event, cloners, shared_data):
self._session_block_map = shared_data['sessions']['session_block_map']
self._contrib_map = shared_data['contributions']['contrib_map']
with db.session.no_autoflush:
self._clone_timetable(new_event)
db.session.flush()
def _clone_timetable(self, new_event):
offset = new_event.start_dt - self.old_event.start_dt
# no need to copy the type; it's set automatically based on the object
attrs = get_simple_column_attrs(TimetableEntry) - {'type', 'start_dt'}
break_strategy = defaultload('break_')
break_strategy.joinedload('own_venue')
break_strategy.joinedload('own_room').lazyload('*')
entry_key_order = db.case({
TimetableEntryType.SESSION_BLOCK: db.func.concat('s', TimetableEntry.id),
TimetableEntryType.CONTRIBUTION: db.func.concat('c', TimetableEntry.id),
TimetableEntryType.BREAK: db.func.concat('b', TimetableEntry.id),
}, value=TimetableEntry.type)
query = (self.old_event.timetable_entries
.options(joinedload('parent').lazyload('*'),
break_strategy)
.order_by(TimetableEntry.parent_id.is_(None).desc(), entry_key_order))
# iterate over all timetable entries; start with top-level
# ones so we can build a mapping that can be used once we
# reach nested entries
entry_map = {}
for old_entry in query:
entry = TimetableEntry()
entry.start_dt = old_entry.start_dt + offset
entry.populate_from_attrs(old_entry, attrs)
if old_entry.parent is not None:
entry.parent = entry_map[old_entry.parent]
if old_entry.session_block is not None:
entry.session_block = self._session_block_map[old_entry.session_block]
if old_entry.contribution is not None:
entry.contribution = self._contrib_map[old_entry.contribution]
if old_entry.break_ is not None:
entry.break_ = self._clone_break(old_entry.break_)
new_event.timetable_entries.append(entry)
entry_map[old_entry] = entry
def _clone_break(self, old_break):
attrs = get_simple_column_attrs(Break) | {'own_room', 'own_venue'}
break_ = Break()
break_.populate_from_attrs(old_break, attrs)
return break_
| mic4ael/indico | indico/modules/events/timetable/clone.py | Python | mit | 3,639 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from curl2share.config import log_file, log_level
loglevel = {'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARN,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET}
logger = logging.getLogger(__name__)
logger.setLevel(loglevel[log_level])
fh = logging.FileHandler(log_file)
fh.setLevel(loglevel[log_level])
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s \
- %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
| cuongnv23/curl2share | curl2share/__init__.py | Python | mit | 691 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.common.types import ad_asset
from google.ads.googleads.v9.enums.types import call_conversion_reporting_state
from google.ads.googleads.v9.enums.types import display_ad_format_setting
from google.ads.googleads.v9.enums.types import (
display_upload_product_type as gage_display_upload_product_type,
)
from google.ads.googleads.v9.enums.types import legacy_app_install_ad_app_store
from google.ads.googleads.v9.enums.types import mime_type as gage_mime_type
from google.ads.googleads.v9.enums.types import video_thumbnail
__protobuf__ = proto.module(
package="google.ads.googleads.v9.common",
marshal="google.ads.googleads.v9",
manifest={
"TextAdInfo",
"ExpandedTextAdInfo",
"ExpandedDynamicSearchAdInfo",
"HotelAdInfo",
"ShoppingSmartAdInfo",
"ShoppingProductAdInfo",
"ShoppingComparisonListingAdInfo",
"GmailAdInfo",
"GmailTeaser",
"DisplayCallToAction",
"ProductImage",
"ProductVideo",
"ImageAdInfo",
"VideoBumperInStreamAdInfo",
"VideoNonSkippableInStreamAdInfo",
"VideoTrueViewInStreamAdInfo",
"VideoOutstreamAdInfo",
"VideoTrueViewDiscoveryAdInfo",
"VideoAdInfo",
"VideoResponsiveAdInfo",
"ResponsiveSearchAdInfo",
"LegacyResponsiveDisplayAdInfo",
"AppAdInfo",
"AppEngagementAdInfo",
"AppPreRegistrationAdInfo",
"LegacyAppInstallAdInfo",
"ResponsiveDisplayAdInfo",
"LocalAdInfo",
"DisplayUploadAdInfo",
"ResponsiveDisplayAdControlSpec",
"SmartCampaignAdInfo",
"CallAdInfo",
},
)
class TextAdInfo(proto.Message):
r"""A text ad.
Attributes:
headline (str):
The headline of the ad.
This field is a member of `oneof`_ ``_headline``.
description1 (str):
The first line of the ad's description.
This field is a member of `oneof`_ ``_description1``.
description2 (str):
The second line of the ad's description.
This field is a member of `oneof`_ ``_description2``.
"""
headline = proto.Field(proto.STRING, number=4, optional=True,)
description1 = proto.Field(proto.STRING, number=5, optional=True,)
description2 = proto.Field(proto.STRING, number=6, optional=True,)
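# Illustrative sketch (not part of the generated file): proto-plus messages such
# as TextAdInfo accept keyword arguments, so a text ad payload can be built as
# follows (all values are made up):
#
#     text_ad = TextAdInfo(
#         headline="Example headline",
#         description1="First description line.",
#         description2="Second description line.",
#     )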
class ExpandedTextAdInfo(proto.Message):
r"""An expanded text ad.
Attributes:
headline_part1 (str):
The first part of the ad's headline.
This field is a member of `oneof`_ ``_headline_part1``.
headline_part2 (str):
The second part of the ad's headline.
This field is a member of `oneof`_ ``_headline_part2``.
headline_part3 (str):
The third part of the ad's headline.
This field is a member of `oneof`_ ``_headline_part3``.
description (str):
The description of the ad.
This field is a member of `oneof`_ ``_description``.
description2 (str):
The second description of the ad.
This field is a member of `oneof`_ ``_description2``.
path1 (str):
The text that can appear alongside the ad's
displayed URL.
This field is a member of `oneof`_ ``_path1``.
path2 (str):
Additional text that can appear alongside the
ad's displayed URL.
This field is a member of `oneof`_ ``_path2``.
"""
headline_part1 = proto.Field(proto.STRING, number=8, optional=True,)
headline_part2 = proto.Field(proto.STRING, number=9, optional=True,)
headline_part3 = proto.Field(proto.STRING, number=10, optional=True,)
description = proto.Field(proto.STRING, number=11, optional=True,)
description2 = proto.Field(proto.STRING, number=12, optional=True,)
path1 = proto.Field(proto.STRING, number=13, optional=True,)
path2 = proto.Field(proto.STRING, number=14, optional=True,)
class ExpandedDynamicSearchAdInfo(proto.Message):
r"""An expanded dynamic search ad.
Attributes:
description (str):
The description of the ad.
This field is a member of `oneof`_ ``_description``.
description2 (str):
The second description of the ad.
This field is a member of `oneof`_ ``_description2``.
"""
description = proto.Field(proto.STRING, number=3, optional=True,)
description2 = proto.Field(proto.STRING, number=4, optional=True,)
class HotelAdInfo(proto.Message):
r"""A hotel ad.
"""
class ShoppingSmartAdInfo(proto.Message):
r"""A Smart Shopping ad.
"""
class ShoppingProductAdInfo(proto.Message):
r"""A standard Shopping ad.
"""
class ShoppingComparisonListingAdInfo(proto.Message):
r"""A Shopping Comparison Listing ad.
Attributes:
headline (str):
Headline of the ad. This field is required.
Allowed length is between 25 and 45 characters.
This field is a member of `oneof`_ ``_headline``.
"""
headline = proto.Field(proto.STRING, number=2, optional=True,)
class GmailAdInfo(proto.Message):
r"""A Gmail ad.
Attributes:
teaser (google.ads.googleads.v9.common.types.GmailTeaser):
The Gmail teaser.
header_image (str):
The MediaFile resource name of the header
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 300x100 pixels and the
aspect ratio must be between 3:1 and 5:1 (+-1%).
This field is a member of `oneof`_ ``_header_image``.
marketing_image (str):
The MediaFile resource name of the marketing
image. Valid image types are GIF, JPEG and PNG.
The image must either be landscape with a
minimum size of 600x314 pixels and aspect ratio
of 600:314 (+-1%) or square with a minimum size
of 300x300 pixels and aspect ratio of 1:1 (+-1%)
This field is a member of `oneof`_ ``_marketing_image``.
marketing_image_headline (str):
Headline of the marketing image.
This field is a member of `oneof`_ ``_marketing_image_headline``.
marketing_image_description (str):
Description of the marketing image.
This field is a member of `oneof`_ ``_marketing_image_description``.
marketing_image_display_call_to_action (google.ads.googleads.v9.common.types.DisplayCallToAction):
Display-call-to-action of the marketing
image.
product_images (Sequence[google.ads.googleads.v9.common.types.ProductImage]):
Product images. Up to 15 images are
supported.
product_videos (Sequence[google.ads.googleads.v9.common.types.ProductVideo]):
Product videos. Up to 7 videos are supported.
At least one product video or a marketing image
must be specified.
"""
teaser = proto.Field(proto.MESSAGE, number=1, message="GmailTeaser",)
header_image = proto.Field(proto.STRING, number=10, optional=True,)
marketing_image = proto.Field(proto.STRING, number=11, optional=True,)
marketing_image_headline = proto.Field(
proto.STRING, number=12, optional=True,
)
marketing_image_description = proto.Field(
proto.STRING, number=13, optional=True,
)
marketing_image_display_call_to_action = proto.Field(
proto.MESSAGE, number=6, message="DisplayCallToAction",
)
product_images = proto.RepeatedField(
proto.MESSAGE, number=7, message="ProductImage",
)
product_videos = proto.RepeatedField(
proto.MESSAGE, number=8, message="ProductVideo",
)
class GmailTeaser(proto.Message):
r"""Gmail teaser data. The teaser is a small header that acts as
an invitation to view the rest of the ad (the body).
Attributes:
headline (str):
Headline of the teaser.
This field is a member of `oneof`_ ``_headline``.
description (str):
Description of the teaser.
This field is a member of `oneof`_ ``_description``.
business_name (str):
Business name of the advertiser.
This field is a member of `oneof`_ ``_business_name``.
logo_image (str):
The MediaFile resource name of the logo
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 144x144 pixels and the
aspect ratio must be 1:1 (+-1%).
This field is a member of `oneof`_ ``_logo_image``.
"""
headline = proto.Field(proto.STRING, number=5, optional=True,)
description = proto.Field(proto.STRING, number=6, optional=True,)
business_name = proto.Field(proto.STRING, number=7, optional=True,)
logo_image = proto.Field(proto.STRING, number=8, optional=True,)
class DisplayCallToAction(proto.Message):
r"""Data for display call to action. The call to action is a
piece of the ad that prompts the user to do something. Like
clicking a link or making a phone call.
Attributes:
text (str):
Text for the display-call-to-action.
This field is a member of `oneof`_ ``_text``.
text_color (str):
Text color for the display-call-to-action in
hexadecimal, e.g. #ffffff for white.
This field is a member of `oneof`_ ``_text_color``.
url_collection_id (str):
Identifies the URL collection in the ``ad.url_collections``
field. If not set, the URL defaults to ``final_url``.
This field is a member of `oneof`_ ``_url_collection_id``.
"""
text = proto.Field(proto.STRING, number=5, optional=True,)
text_color = proto.Field(proto.STRING, number=6, optional=True,)
url_collection_id = proto.Field(proto.STRING, number=7, optional=True,)
class ProductImage(proto.Message):
r"""Product image specific data.
Attributes:
product_image (str):
The MediaFile resource name of the product
image. Valid image types are GIF, JPEG and PNG.
The minimum size is 300x300 pixels and the
aspect ratio must be 1:1 (+-1%).
This field is a member of `oneof`_ ``_product_image``.
description (str):
Description of the product.
This field is a member of `oneof`_ ``_description``.
display_call_to_action (google.ads.googleads.v9.common.types.DisplayCallToAction):
Display-call-to-action of the product image.
"""
product_image = proto.Field(proto.STRING, number=4, optional=True,)
description = proto.Field(proto.STRING, number=5, optional=True,)
display_call_to_action = proto.Field(
proto.MESSAGE, number=3, message="DisplayCallToAction",
)
class ProductVideo(proto.Message):
r"""Product video specific data.
Attributes:
product_video (str):
The MediaFile resource name of a video which
must be hosted on YouTube.
This field is a member of `oneof`_ ``_product_video``.
"""
product_video = proto.Field(proto.STRING, number=2, optional=True,)
class ImageAdInfo(proto.Message):
r"""An image ad.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
pixel_width (int):
Width in pixels of the full size image.
This field is a member of `oneof`_ ``_pixel_width``.
pixel_height (int):
Height in pixels of the full size image.
This field is a member of `oneof`_ ``_pixel_height``.
image_url (str):
URL of the full size image.
This field is a member of `oneof`_ ``_image_url``.
preview_pixel_width (int):
Width in pixels of the preview size image.
This field is a member of `oneof`_ ``_preview_pixel_width``.
preview_pixel_height (int):
Height in pixels of the preview size image.
This field is a member of `oneof`_ ``_preview_pixel_height``.
preview_image_url (str):
URL of the preview size image.
This field is a member of `oneof`_ ``_preview_image_url``.
mime_type (google.ads.googleads.v9.enums.types.MimeTypeEnum.MimeType):
The mime type of the image.
name (str):
The name of the image. If the image was
created from a MediaFile, this is the
MediaFile's name. If the image was created from
bytes, this is empty.
This field is a member of `oneof`_ ``_name``.
media_file (str):
The MediaFile resource to use for the image.
This field is a member of `oneof`_ ``image``.
data (bytes):
Raw image data as bytes.
This field is a member of `oneof`_ ``image``.
ad_id_to_copy_image_from (int):
An ad ID to copy the image from.
This field is a member of `oneof`_ ``image``.
"""
pixel_width = proto.Field(proto.INT64, number=15, optional=True,)
pixel_height = proto.Field(proto.INT64, number=16, optional=True,)
image_url = proto.Field(proto.STRING, number=17, optional=True,)
preview_pixel_width = proto.Field(proto.INT64, number=18, optional=True,)
preview_pixel_height = proto.Field(proto.INT64, number=19, optional=True,)
preview_image_url = proto.Field(proto.STRING, number=20, optional=True,)
mime_type = proto.Field(
proto.ENUM, number=10, enum=gage_mime_type.MimeTypeEnum.MimeType,
)
name = proto.Field(proto.STRING, number=21, optional=True,)
media_file = proto.Field(proto.STRING, number=12, oneof="image",)
data = proto.Field(proto.BYTES, number=13, oneof="image",)
ad_id_to_copy_image_from = proto.Field(
proto.INT64, number=14, oneof="image",
)
class VideoBumperInStreamAdInfo(proto.Message):
r"""Representation of video bumper in-stream ad format (very
short in-stream non-skippable video ad).
Attributes:
companion_banner (google.ads.googleads.v9.common.types.AdImageAsset):
The image assets of the companion banner used
with the ad.
"""
companion_banner = proto.Field(
proto.MESSAGE, number=3, message=ad_asset.AdImageAsset,
)
class VideoNonSkippableInStreamAdInfo(proto.Message):
r"""Representation of video non-skippable in-stream ad format (15
second in-stream non-skippable video ad).
Attributes:
companion_banner (google.ads.googleads.v9.common.types.AdImageAsset):
The image assets of the companion banner used
with the ad.
action_button_label (str):
Label on the "Call To Action" button taking
the user to the video ad's final URL.
action_headline (str):
Additional text displayed with the "Call To
Action" button to give context and encourage
clicking on the button.
"""
companion_banner = proto.Field(
proto.MESSAGE, number=5, message=ad_asset.AdImageAsset,
)
action_button_label = proto.Field(proto.STRING, number=3,)
action_headline = proto.Field(proto.STRING, number=4,)
class VideoTrueViewInStreamAdInfo(proto.Message):
r"""Representation of video TrueView in-stream ad format (ad
shown during video playback, often at beginning, which displays
a skip button a few seconds into the video).
Attributes:
action_button_label (str):
Label on the CTA (call-to-action) button
taking the user to the video ad's final URL.
Required for TrueView for action campaigns,
optional otherwise.
action_headline (str):
            Additional text displayed with the CTA
            (call-to-action) button to give context and encourage
clicking on the button.
companion_banner (google.ads.googleads.v9.common.types.AdImageAsset):
The image assets of the companion banner used
with the ad.
"""
action_button_label = proto.Field(proto.STRING, number=4,)
action_headline = proto.Field(proto.STRING, number=5,)
companion_banner = proto.Field(
proto.MESSAGE, number=7, message=ad_asset.AdImageAsset,
)
class VideoOutstreamAdInfo(proto.Message):
r"""Representation of video out-stream ad format (ad shown
alongside a feed with automatic playback, without sound).
Attributes:
headline (str):
The headline of the ad.
description (str):
The description line.
"""
headline = proto.Field(proto.STRING, number=3,)
description = proto.Field(proto.STRING, number=4,)
class VideoTrueViewDiscoveryAdInfo(proto.Message):
r"""Representation of video TrueView discovery ad format.
Attributes:
headline (str):
The headline of the ad.
description1 (str):
First text line for a TrueView video
discovery ad.
description2 (str):
Second text line for a TrueView video
discovery ad.
thumbnail (google.ads.googleads.v9.enums.types.VideoThumbnailEnum.VideoThumbnail):
Video thumbnail image to use.
"""
headline = proto.Field(proto.STRING, number=4,)
description1 = proto.Field(proto.STRING, number=5,)
description2 = proto.Field(proto.STRING, number=6,)
thumbnail = proto.Field(
proto.ENUM,
number=7,
enum=video_thumbnail.VideoThumbnailEnum.VideoThumbnail,
)
class VideoAdInfo(proto.Message):
r"""A video ad.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
video (google.ads.googleads.v9.common.types.AdVideoAsset):
The YouTube video assets used for the ad.
in_stream (google.ads.googleads.v9.common.types.VideoTrueViewInStreamAdInfo):
Video TrueView in-stream ad format.
This field is a member of `oneof`_ ``format``.
bumper (google.ads.googleads.v9.common.types.VideoBumperInStreamAdInfo):
Video bumper in-stream ad format.
This field is a member of `oneof`_ ``format``.
out_stream (google.ads.googleads.v9.common.types.VideoOutstreamAdInfo):
Video out-stream ad format.
This field is a member of `oneof`_ ``format``.
non_skippable (google.ads.googleads.v9.common.types.VideoNonSkippableInStreamAdInfo):
Video non-skippable in-stream ad format.
This field is a member of `oneof`_ ``format``.
discovery (google.ads.googleads.v9.common.types.VideoTrueViewDiscoveryAdInfo):
Video TrueView discovery ad format.
This field is a member of `oneof`_ ``format``.
"""
video = proto.Field(proto.MESSAGE, number=8, message=ad_asset.AdVideoAsset,)
in_stream = proto.Field(
proto.MESSAGE,
number=2,
oneof="format",
message="VideoTrueViewInStreamAdInfo",
)
bumper = proto.Field(
proto.MESSAGE,
number=3,
oneof="format",
message="VideoBumperInStreamAdInfo",
)
out_stream = proto.Field(
proto.MESSAGE, number=4, oneof="format", message="VideoOutstreamAdInfo",
)
non_skippable = proto.Field(
proto.MESSAGE,
number=5,
oneof="format",
message="VideoNonSkippableInStreamAdInfo",
)
discovery = proto.Field(
proto.MESSAGE,
number=6,
oneof="format",
message="VideoTrueViewDiscoveryAdInfo",
)
class VideoResponsiveAdInfo(proto.Message):
r"""A video responsive ad.
Attributes:
headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets used for the short
headline, e.g. the "Call To Action" banner.
Currently, only a single value for the short
headline is supported.
long_headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets used for the long
headline. Currently, only a single value for the
long headline is supported.
descriptions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets used for the description.
Currently, only a single value for the
description is supported.
call_to_actions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets used for the button, e.g.
the "Call To Action" button. Currently, only a
single value for the button is supported.
videos (Sequence[google.ads.googleads.v9.common.types.AdVideoAsset]):
List of YouTube video assets used for the ad.
Currently, only a single value for the YouTube
video asset is supported.
companion_banners (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
List of image assets used for the companion
banner. Currently, only a single value for the
companion banner asset is supported.
"""
headlines = proto.RepeatedField(
proto.MESSAGE, number=1, message=ad_asset.AdTextAsset,
)
long_headlines = proto.RepeatedField(
proto.MESSAGE, number=2, message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE, number=3, message=ad_asset.AdTextAsset,
)
call_to_actions = proto.RepeatedField(
proto.MESSAGE, number=4, message=ad_asset.AdTextAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE, number=5, message=ad_asset.AdVideoAsset,
)
companion_banners = proto.RepeatedField(
proto.MESSAGE, number=6, message=ad_asset.AdImageAsset,
)
class ResponsiveSearchAdInfo(proto.Message):
r"""A responsive search ad.
Responsive search ads let you create an ad that adapts to show
more text, and more relevant messages, to your customers. Enter
multiple headlines and descriptions when creating a responsive
search ad, and over time, Google Ads will automatically test
different combinations and learn which combinations perform
best. By adapting your ad's content to more closely match
potential customers' search terms, responsive search ads may
improve your campaign's performance.
More information at https://support.google.com/google-
ads/answer/7684791
Attributes:
headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
path1 (str):
First part of text that can be appended to
the URL in the ad.
This field is a member of `oneof`_ ``_path1``.
path2 (str):
Second part of text that can be appended to the URL in the
ad. This field can only be set when ``path1`` is also set.
This field is a member of `oneof`_ ``_path2``.
"""
headlines = proto.RepeatedField(
proto.MESSAGE, number=1, message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE, number=2, message=ad_asset.AdTextAsset,
)
path1 = proto.Field(proto.STRING, number=5, optional=True,)
path2 = proto.Field(proto.STRING, number=6, optional=True,)
class LegacyResponsiveDisplayAdInfo(proto.Message):
r"""A legacy responsive display ad. Ads of this type are labeled
'Responsive ads' in the Google Ads UI.
Attributes:
short_headline (str):
The short version of the ad's headline.
This field is a member of `oneof`_ ``_short_headline``.
long_headline (str):
The long version of the ad's headline.
This field is a member of `oneof`_ ``_long_headline``.
description (str):
The description of the ad.
This field is a member of `oneof`_ ``_description``.
business_name (str):
The business name in the ad.
This field is a member of `oneof`_ ``_business_name``.
allow_flexible_color (bool):
Advertiser's consent to allow flexible color. When true, the
ad may be served with different color if necessary. When
false, the ad will be served with the specified colors or a
neutral color. The default value is ``true``. Must be true
if ``main_color`` and ``accent_color`` are not set.
This field is a member of `oneof`_ ``_allow_flexible_color``.
accent_color (str):
The accent color of the ad in hexadecimal, e.g. #ffffff for
white. If one of ``main_color`` and ``accent_color`` is set,
the other is required as well.
This field is a member of `oneof`_ ``_accent_color``.
main_color (str):
The main color of the ad in hexadecimal, e.g. #ffffff for
white. If one of ``main_color`` and ``accent_color`` is set,
the other is required as well.
This field is a member of `oneof`_ ``_main_color``.
call_to_action_text (str):
The call-to-action text for the ad.
This field is a member of `oneof`_ ``_call_to_action_text``.
logo_image (str):
The MediaFile resource name of the logo image
used in the ad.
This field is a member of `oneof`_ ``_logo_image``.
square_logo_image (str):
The MediaFile resource name of the square
logo image used in the ad.
This field is a member of `oneof`_ ``_square_logo_image``.
marketing_image (str):
The MediaFile resource name of the marketing
image used in the ad.
This field is a member of `oneof`_ ``_marketing_image``.
square_marketing_image (str):
The MediaFile resource name of the square
marketing image used in the ad.
This field is a member of `oneof`_ ``_square_marketing_image``.
format_setting (google.ads.googleads.v9.enums.types.DisplayAdFormatSettingEnum.DisplayAdFormatSetting):
Specifies which format the ad will be served in. Default is
ALL_FORMATS.
price_prefix (str):
Prefix before price. E.g. 'as low as'.
This field is a member of `oneof`_ ``_price_prefix``.
promo_text (str):
Promotion text used for dynamic formats of
responsive ads. For example 'Free two-day
shipping'.
This field is a member of `oneof`_ ``_promo_text``.
"""
short_headline = proto.Field(proto.STRING, number=16, optional=True,)
long_headline = proto.Field(proto.STRING, number=17, optional=True,)
description = proto.Field(proto.STRING, number=18, optional=True,)
business_name = proto.Field(proto.STRING, number=19, optional=True,)
allow_flexible_color = proto.Field(proto.BOOL, number=20, optional=True,)
accent_color = proto.Field(proto.STRING, number=21, optional=True,)
main_color = proto.Field(proto.STRING, number=22, optional=True,)
call_to_action_text = proto.Field(proto.STRING, number=23, optional=True,)
logo_image = proto.Field(proto.STRING, number=24, optional=True,)
square_logo_image = proto.Field(proto.STRING, number=25, optional=True,)
marketing_image = proto.Field(proto.STRING, number=26, optional=True,)
square_marketing_image = proto.Field(
proto.STRING, number=27, optional=True,
)
format_setting = proto.Field(
proto.ENUM,
number=13,
enum=display_ad_format_setting.DisplayAdFormatSettingEnum.DisplayAdFormatSetting,
)
price_prefix = proto.Field(proto.STRING, number=28, optional=True,)
promo_text = proto.Field(proto.STRING, number=29, optional=True,)
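# Illustrative sketch (comments only, not part of the generated API surface):
# how the color fields above interact when constructing this message through
# the proto-plus keyword constructor. All field values below are hypothetical.
#
#   ad = LegacyResponsiveDisplayAdInfo(
#       short_headline="Spring sale",
#       long_headline="Spring sale on all garden tools",
#       description="Save this week only.",
#       business_name="Example Garden Supply",
#       main_color="#0000ff",        # if main_color is set...
#       accent_color="#ffffff",      # ...accent_color must be set as well
#       allow_flexible_color=False,  # may be False only because both colors are set
#   )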
class AppAdInfo(proto.Message):
r"""An app ad.
Attributes:
mandatory_ad_text (google.ads.googleads.v9.common.types.AdTextAsset):
Mandatory ad text.
headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
List of image assets that may be displayed
with the ad.
youtube_videos (Sequence[google.ads.googleads.v9.common.types.AdVideoAsset]):
List of YouTube video assets that may be
displayed with the ad.
html5_media_bundles (Sequence[google.ads.googleads.v9.common.types.AdMediaBundleAsset]):
List of media bundle assets that may be used
with the ad.
"""
mandatory_ad_text = proto.Field(
proto.MESSAGE, number=1, message=ad_asset.AdTextAsset,
)
headlines = proto.RepeatedField(
proto.MESSAGE, number=2, message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE, number=3, message=ad_asset.AdTextAsset,
)
images = proto.RepeatedField(
proto.MESSAGE, number=4, message=ad_asset.AdImageAsset,
)
youtube_videos = proto.RepeatedField(
proto.MESSAGE, number=5, message=ad_asset.AdVideoAsset,
)
html5_media_bundles = proto.RepeatedField(
proto.MESSAGE, number=6, message=ad_asset.AdMediaBundleAsset,
)
class AppEngagementAdInfo(proto.Message):
r"""App engagement ads allow you to write text encouraging a
specific action in the app, like checking in, making a purchase,
or booking a flight. They allow you to send users to a specific
part of your app where they can find what they're looking for
    more easily and quickly.
Attributes:
headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
List of image assets that may be displayed
with the ad.
videos (Sequence[google.ads.googleads.v9.common.types.AdVideoAsset]):
List of video assets that may be displayed
with the ad.
"""
headlines = proto.RepeatedField(
proto.MESSAGE, number=1, message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE, number=2, message=ad_asset.AdTextAsset,
)
images = proto.RepeatedField(
proto.MESSAGE, number=3, message=ad_asset.AdImageAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE, number=4, message=ad_asset.AdVideoAsset,
)
class AppPreRegistrationAdInfo(proto.Message):
r"""App pre-registration ads link to your app or game listing on
Google Play, and can run on Google Play, on YouTube (in-stream
only), and within other apps and mobile websites on the Display
Network. It will help capture people’s interest in your app or
game and generate an early install base for your app or game
before a launch.
Attributes:
headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list.
descriptions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list.
images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
List of image asset IDs whose images may be
displayed with the ad.
youtube_videos (Sequence[google.ads.googleads.v9.common.types.AdVideoAsset]):
List of YouTube video asset IDs whose videos
may be displayed with the ad.
"""
headlines = proto.RepeatedField(
proto.MESSAGE, number=1, message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE, number=2, message=ad_asset.AdTextAsset,
)
images = proto.RepeatedField(
proto.MESSAGE, number=3, message=ad_asset.AdImageAsset,
)
youtube_videos = proto.RepeatedField(
proto.MESSAGE, number=4, message=ad_asset.AdVideoAsset,
)
class LegacyAppInstallAdInfo(proto.Message):
r"""A legacy app install ad that only can be used by a few select
customers.
Attributes:
app_id (str):
The ID of the mobile app.
This field is a member of `oneof`_ ``_app_id``.
app_store (google.ads.googleads.v9.enums.types.LegacyAppInstallAdAppStoreEnum.LegacyAppInstallAdAppStore):
The app store the mobile app is available in.
headline (str):
The headline of the ad.
This field is a member of `oneof`_ ``_headline``.
description1 (str):
The first description line of the ad.
This field is a member of `oneof`_ ``_description1``.
description2 (str):
The second description line of the ad.
This field is a member of `oneof`_ ``_description2``.
"""
app_id = proto.Field(proto.STRING, number=6, optional=True,)
app_store = proto.Field(
proto.ENUM,
number=2,
enum=legacy_app_install_ad_app_store.LegacyAppInstallAdAppStoreEnum.LegacyAppInstallAdAppStore,
)
headline = proto.Field(proto.STRING, number=7, optional=True,)
description1 = proto.Field(proto.STRING, number=8, optional=True,)
description2 = proto.Field(proto.STRING, number=9, optional=True,)
class ResponsiveDisplayAdInfo(proto.Message):
r"""A responsive display ad.
Attributes:
marketing_images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
Marketing images to be used in the ad. Valid image types are
GIF, JPEG, and PNG. The minimum size is 600x314 and the
aspect ratio must be 1.91:1 (+-1%). At least one
``marketing_image`` is required. Combined with
``square_marketing_images``, the maximum is 15.
square_marketing_images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
Square marketing images to be used in the ad. Valid image
types are GIF, JPEG, and PNG. The minimum size is 300x300
and the aspect ratio must be 1:1 (+-1%). At least one square
``marketing_image`` is required. Combined with
``marketing_images``, the maximum is 15.
logo_images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
Logo images to be used in the ad. Valid image types are GIF,
JPEG, and PNG. The minimum size is 512x128 and the aspect
ratio must be 4:1 (+-1%). Combined with
``square_logo_images``, the maximum is 5.
square_logo_images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
Square logo images to be used in the ad. Valid image types
are GIF, JPEG, and PNG. The minimum size is 128x128 and the
aspect ratio must be 1:1 (+-1%). Combined with
            ``logo_images``, the maximum is 5.
headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
Short format headlines for the ad. The
maximum length is 30 characters. At least 1 and
max 5 headlines can be specified.
long_headline (google.ads.googleads.v9.common.types.AdTextAsset):
A required long format headline. The maximum
length is 90 characters.
descriptions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
Descriptive texts for the ad. The maximum
length is 90 characters. At least 1 and max 5
            descriptions can be specified.
youtube_videos (Sequence[google.ads.googleads.v9.common.types.AdVideoAsset]):
Optional YouTube videos for the ad. A maximum
of 5 videos can be specified.
business_name (str):
The advertiser/brand name. Maximum display
width is 25.
This field is a member of `oneof`_ ``_business_name``.
main_color (str):
The main color of the ad in hexadecimal, e.g. #ffffff for
white. If one of ``main_color`` and ``accent_color`` is set,
the other is required as well.
This field is a member of `oneof`_ ``_main_color``.
accent_color (str):
The accent color of the ad in hexadecimal, e.g. #ffffff for
white. If one of ``main_color`` and ``accent_color`` is set,
the other is required as well.
This field is a member of `oneof`_ ``_accent_color``.
allow_flexible_color (bool):
Advertiser's consent to allow flexible color. When true, the
ad may be served with different color if necessary. When
false, the ad will be served with the specified colors or a
neutral color. The default value is ``true``. Must be true
if ``main_color`` and ``accent_color`` are not set.
This field is a member of `oneof`_ ``_allow_flexible_color``.
call_to_action_text (str):
The call-to-action text for the ad. Maximum
display width is 30.
This field is a member of `oneof`_ ``_call_to_action_text``.
price_prefix (str):
Prefix before price. E.g. 'as low as'.
This field is a member of `oneof`_ ``_price_prefix``.
promo_text (str):
Promotion text used for dynamic formats of
responsive ads. For example 'Free two-day
shipping'.
This field is a member of `oneof`_ ``_promo_text``.
format_setting (google.ads.googleads.v9.enums.types.DisplayAdFormatSettingEnum.DisplayAdFormatSetting):
Specifies which format the ad will be served in. Default is
ALL_FORMATS.
control_spec (google.ads.googleads.v9.common.types.ResponsiveDisplayAdControlSpec):
Specification for various creative controls.
"""
marketing_images = proto.RepeatedField(
proto.MESSAGE, number=1, message=ad_asset.AdImageAsset,
)
square_marketing_images = proto.RepeatedField(
proto.MESSAGE, number=2, message=ad_asset.AdImageAsset,
)
logo_images = proto.RepeatedField(
proto.MESSAGE, number=3, message=ad_asset.AdImageAsset,
)
square_logo_images = proto.RepeatedField(
proto.MESSAGE, number=4, message=ad_asset.AdImageAsset,
)
headlines = proto.RepeatedField(
proto.MESSAGE, number=5, message=ad_asset.AdTextAsset,
)
long_headline = proto.Field(
proto.MESSAGE, number=6, message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE, number=7, message=ad_asset.AdTextAsset,
)
youtube_videos = proto.RepeatedField(
proto.MESSAGE, number=8, message=ad_asset.AdVideoAsset,
)
business_name = proto.Field(proto.STRING, number=17, optional=True,)
main_color = proto.Field(proto.STRING, number=18, optional=True,)
accent_color = proto.Field(proto.STRING, number=19, optional=True,)
allow_flexible_color = proto.Field(proto.BOOL, number=20, optional=True,)
call_to_action_text = proto.Field(proto.STRING, number=21, optional=True,)
price_prefix = proto.Field(proto.STRING, number=22, optional=True,)
promo_text = proto.Field(proto.STRING, number=23, optional=True,)
format_setting = proto.Field(
proto.ENUM,
number=16,
enum=display_ad_format_setting.DisplayAdFormatSettingEnum.DisplayAdFormatSetting,
)
control_spec = proto.Field(
proto.MESSAGE, number=24, message="ResponsiveDisplayAdControlSpec",
)
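# Illustrative sketch (comments only): assembling a ResponsiveDisplayAdInfo
# that respects the asset limits documented above. The asset resource names
# are hypothetical placeholders, not real customer data.
#
#   ad = ResponsiveDisplayAdInfo(
#       marketing_images=[ad_asset.AdImageAsset(asset="customers/123/assets/111")],
#       square_marketing_images=[ad_asset.AdImageAsset(asset="customers/123/assets/222")],
#       headlines=[ad_asset.AdTextAsset(text="Fast delivery")],  # 1 to 5 allowed
#       long_headline=ad_asset.AdTextAsset(text="Fast delivery on every order"),
#       descriptions=[ad_asset.AdTextAsset(text="Order today, arrives tomorrow.")],
#       business_name="Example Store",
#   )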
class LocalAdInfo(proto.Message):
r"""A local ad.
Attributes:
headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list. At least 1 and at most 5 headlines
must be specified.
descriptions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list. At least 1 and at most 5
descriptions must be specified.
call_to_actions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for call-to-actions. When
the ad serves the call-to-actions will be
selected from this list. Call-to-actions are
optional and at most 5 can be specified.
marketing_images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
List of marketing image assets that may be
displayed with the ad. The images must be
314x600 pixels or 320x320 pixels. At least 1 and
at most 20 image assets must be specified.
logo_images (Sequence[google.ads.googleads.v9.common.types.AdImageAsset]):
List of logo image assets that may be
displayed with the ad. The images must be
128x128 pixels and not larger than 120KB. At
least 1 and at most 5 image assets must be
specified.
videos (Sequence[google.ads.googleads.v9.common.types.AdVideoAsset]):
List of YouTube video assets that may be
displayed with the ad. Videos are optional and
at most 20 can be specified.
path1 (str):
First part of optional text that can be
appended to the URL in the ad.
This field is a member of `oneof`_ ``_path1``.
path2 (str):
Second part of optional text that can be appended to the URL
in the ad. This field can only be set when ``path1`` is also
set.
This field is a member of `oneof`_ ``_path2``.
"""
headlines = proto.RepeatedField(
proto.MESSAGE, number=1, message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE, number=2, message=ad_asset.AdTextAsset,
)
call_to_actions = proto.RepeatedField(
proto.MESSAGE, number=3, message=ad_asset.AdTextAsset,
)
marketing_images = proto.RepeatedField(
proto.MESSAGE, number=4, message=ad_asset.AdImageAsset,
)
logo_images = proto.RepeatedField(
proto.MESSAGE, number=5, message=ad_asset.AdImageAsset,
)
videos = proto.RepeatedField(
proto.MESSAGE, number=6, message=ad_asset.AdVideoAsset,
)
path1 = proto.Field(proto.STRING, number=9, optional=True,)
path2 = proto.Field(proto.STRING, number=10, optional=True,)
class DisplayUploadAdInfo(proto.Message):
r"""A generic type of display ad. The exact ad format is controlled by
the ``display_upload_product_type`` field, which determines what
kinds of data need to be included with the ad.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
display_upload_product_type (google.ads.googleads.v9.enums.types.DisplayUploadProductTypeEnum.DisplayUploadProductType):
The product type of this ad. See comments on
the enum for details.
media_bundle (google.ads.googleads.v9.common.types.AdMediaBundleAsset):
A media bundle asset to be used in the ad. For information
about the media bundle for HTML5_UPLOAD_AD, see
https://support.google.com/google-ads/answer/1722096 Media
bundles that are part of dynamic product types use a special
format that needs to be created through the Google Web
Designer. See
https://support.google.com/webdesigner/answer/7543898 for
more information.
This field is a member of `oneof`_ ``media_asset``.
"""
display_upload_product_type = proto.Field(
proto.ENUM,
number=1,
enum=gage_display_upload_product_type.DisplayUploadProductTypeEnum.DisplayUploadProductType,
)
media_bundle = proto.Field(
proto.MESSAGE,
number=2,
oneof="media_asset",
message=ad_asset.AdMediaBundleAsset,
)
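# Illustrative sketch (comments only): ``media_bundle`` lives in the
# ``media_asset`` oneof, so populating it selects the single media asset for
# this ad. The enum value and resource name below are hypothetical examples.
#
#   ad = DisplayUploadAdInfo(
#       display_upload_product_type=(
#           gage_display_upload_product_type.DisplayUploadProductTypeEnum
#           .DisplayUploadProductType.HTML5_UPLOAD_AD
#       ),
#       media_bundle=ad_asset.AdMediaBundleAsset(asset="customers/123/assets/333"),
#   )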
class ResponsiveDisplayAdControlSpec(proto.Message):
r"""Specification for various creative controls for a responsive
display ad.
Attributes:
enable_asset_enhancements (bool):
Whether the advertiser has opted into the
asset enhancements feature.
enable_autogen_video (bool):
            Whether the advertiser has opted into the
            auto-gen video feature.
"""
enable_asset_enhancements = proto.Field(proto.BOOL, number=1,)
enable_autogen_video = proto.Field(proto.BOOL, number=2,)
class SmartCampaignAdInfo(proto.Message):
r"""A Smart campaign ad.
Attributes:
headlines (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for headlines. When the
ad serves the headlines will be selected from
this list. 3 headlines must be specified.
descriptions (Sequence[google.ads.googleads.v9.common.types.AdTextAsset]):
List of text assets for descriptions. When
the ad serves the descriptions will be selected
from this list. 2 descriptions must be
specified.
"""
headlines = proto.RepeatedField(
proto.MESSAGE, number=1, message=ad_asset.AdTextAsset,
)
descriptions = proto.RepeatedField(
proto.MESSAGE, number=2, message=ad_asset.AdTextAsset,
)
class CallAdInfo(proto.Message):
r"""A call ad.
Attributes:
country_code (str):
The country code in the ad.
phone_number (str):
The phone number in the ad.
business_name (str):
The business name in the ad.
headline1 (str):
First headline in the ad.
headline2 (str):
Second headline in the ad.
description1 (str):
The first line of the ad's description.
description2 (str):
The second line of the ad's description.
call_tracked (bool):
Whether to enable call tracking for the
creative. Enabling call tracking also enables
call conversions.
disable_call_conversion (bool):
Whether to disable call conversion for the creative. If set
to ``true``, disables call conversions even when
``call_tracked`` is ``true``. If ``call_tracked`` is
``false``, this field is ignored.
phone_number_verification_url (str):
The URL to be used for phone number
verification.
conversion_action (str):
The conversion action to attribute a call conversion to. If
not set a default conversion action is used. This field only
has effect if ``call_tracked`` is set to ``true``. Otherwise
this field is ignored.
conversion_reporting_state (google.ads.googleads.v9.enums.types.CallConversionReportingStateEnum.CallConversionReportingState):
The call conversion behavior of this call ad.
It can use its own call conversion setting,
inherit the account level setting, or be
disabled.
path1 (str):
First part of text that can be appended to
the URL in the ad. Optional.
path2 (str):
Second part of text that can be appended to the URL in the
ad. This field can only be set when ``path1`` is also set.
Optional.
"""
country_code = proto.Field(proto.STRING, number=1,)
phone_number = proto.Field(proto.STRING, number=2,)
business_name = proto.Field(proto.STRING, number=3,)
headline1 = proto.Field(proto.STRING, number=11,)
headline2 = proto.Field(proto.STRING, number=12,)
description1 = proto.Field(proto.STRING, number=4,)
description2 = proto.Field(proto.STRING, number=5,)
call_tracked = proto.Field(proto.BOOL, number=6,)
disable_call_conversion = proto.Field(proto.BOOL, number=7,)
phone_number_verification_url = proto.Field(proto.STRING, number=8,)
conversion_action = proto.Field(proto.STRING, number=9,)
conversion_reporting_state = proto.Field(
proto.ENUM,
number=10,
enum=call_conversion_reporting_state.CallConversionReportingStateEnum.CallConversionReportingState,
)
path1 = proto.Field(proto.STRING, number=13,)
path2 = proto.Field(proto.STRING, number=14,)
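# Illustrative note (comments only): the call-conversion fields above only
# take effect together -- ``conversion_action`` and ``disable_call_conversion``
# are read only while ``call_tracked`` is True. A hypothetical minimal instance:
#
#   ad = CallAdInfo(
#       country_code="US",
#       phone_number="8005550100",
#       business_name="Example Plumbing",
#       headline1="24/7 emergency service",
#       headline2="Call now",
#       description1="Licensed and insured.",
#       description2="Free estimates.",
#       call_tracked=True,
#   )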
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v9/common/types/ad_type_infos.py | Python | apache-2.0 | 51,120 |
import unittest
import numpy as np
np.random.seed(42)
import scipy
from scipy.stats import norm
import rampy as rp
class TestML(unittest.TestCase):
def test_mlregressor(self):
x = np.arange(0,600,1.0)
nb_samples = 100 # number of samples in our dataset
# true partial spectra
S_1 = norm.pdf(x,loc=200.,scale=130.)
S_2 = norm.pdf(x,loc=400,scale=70)
S_true = np.vstack((S_1,S_2))
#60 samples with random concentrations between 0 and 1
C_ = np.random.rand(nb_samples)
C_true = np.vstack((C_,(1-C_))).T
# We make some observations with random noise
Obs = np.dot(C_true,S_true) + np.random.randn(nb_samples,len(x))*1e-4
# new observations
C_new_ = np.random.rand(10) #10 samples with random concentrations between 0 and 1
C_new_true = np.vstack((C_new_,(1-C_new_))).T
noise_new = np.random.randn(len(x))*1e-4
Obs_new = np.dot(C_new_true,S_true) + noise_new
model = rp.mlregressor(Obs,C_true[:,0].reshape(-1,1))
for i in ["KernelRidge", "SVM", "LinearRegression", "NeuralNet", "BaggingNeuralNet"]:
            # we do not test on Lasso and ElasticNet as they raise lots of warnings due to convergence issues...
model.algorithm = i
model.user_kernel = 'poly'
model.fit()
C_new_predicted = model.predict(Obs_new)
# testing if refit works
model.refit()
if __name__ == '__main__':
unittest.main()
| charlesll/RamPy | rampy/tests/test_mlregressor.py | Python | gpl-2.0 | 1,520 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Unpack Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
np.squeeze(arr, axis=(axis,))
for arr in np.split(array, axis_len, axis=axis)
]
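# Quick illustration (comments only) of what the helper above returns and how
# it mirrors tf.unpack along the chosen axis; values are for a small example.
#
#   a = np.arange(6).reshape(2, 3)
#   np_split_squeeze(a, 0)  # -> [array([0, 1, 2]), array([3, 4, 5])]
#   np_split_squeeze(a, 1)  # -> [array([0, 3]), array([1, 4]), array([2, 5])]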
class UnpackOpTest(tf.test.TestCase):
def testSimple(self):
np.random.seed(7)
with self.test_session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
# Convert data to a single tensorflow tensor
x = tf.constant(data)
# Unpack into a list of tensors
cs = tf.unpack(x, num=shape[0])
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
cs = [c.eval() for c in cs]
self.assertAllEqual(cs, data)
def testGradientsAxis0(self):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
for i in xrange(shape[0]):
with self.test_session(use_gpu=True):
x = tf.constant(data)
cs = tf.unpack(x, num=shape[0])
err = tf.test.compute_gradient_error(x, shape, cs[i], shapes[i])
self.assertLess(err, 1e-6)
def testGradientsAxis1(self):
for shape in (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
out_shape = list(shape)
del out_shape[1]
for i in xrange(shape[1]):
with self.test_session(use_gpu=True):
x = tf.constant(data)
cs = tf.unpack(x, num=shape[1], axis=1)
err = tf.test.compute_gradient_error(x, shape, cs[i], out_shape)
self.assertLess(err, 1e-6)
def testInferNum(self):
with self.test_session():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
x = tf.placeholder(np.float32, shape=shape)
cs = tf.unpack(x)
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
def testCannotInferNumFromUnknownShape(self):
x = tf.placeholder(np.float32)
with self.assertRaisesRegexp(
ValueError, r'Cannot infer num from shape <unknown>'):
tf.unpack(x)
def testUnknownShapeOkWithNum(self):
x = tf.placeholder(np.float32)
tf.unpack(x, num=2)
def testCannotInferNumFromNoneShape(self):
x = tf.placeholder(np.float32, shape=(None,))
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape \(\?,\)'):
tf.unpack(x)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for i in range(1, 6):
a = np.random.random(np.random.permutation(i) + 1)
# For all the possible axis to split it, including negative indices.
for j in range(-i, i):
expected = np_split_squeeze(a, j)
with self.test_session() as sess:
actual = sess.run(tf.unpack(a, axis=j))
self.assertAllEqual(expected, actual)
def testAxis0Default(self):
with self.test_session() as sess:
a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
unpacked = sess.run(tf.unpack(a))
self.assertEqual(len(unpacked), 2)
self.assertAllEqual(unpacked[0], [1, 2, 3])
self.assertAllEqual(unpacked[1], [4, 5, 6])
def testAxisOutOfRange(self):
a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = 2 not in \[-2, 2\)'):
tf.unpack(a, axis=2)
def testAxisOutOfNegativeRange(self):
a = tf.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = -3 not in \[-2, 2\)'):
tf.unpack(a, axis=-3)
def testZeroLengthDim(self):
with self.test_session():
x = tf.zeros(shape=(0, 1, 2))
y = tf.unpack(x, axis=1)[0].eval()
self.assertEqual(y.shape, (0, 2))
if __name__ == '__main__':
tf.test.main()
| cg31/tensorflow | tensorflow/python/kernel_tests/unpack_op_test.py | Python | apache-2.0 | 4,695 |
#!/usr/bin/env python
from pathlib import Path
import logging
import numpy as np
from scipy.interpolate import interp1d
import h5py
import xarray
# consider atmosphere
try:
import lowtran
except ImportError as e:
logging.error(f"failure to load LOWTRAN, proceeding without atmospheric absorption model. {e}")
lowtran = None
"""
gets optical System Transmittance from filter, sensor window, and QE spec.
Michael Hirsch 2014
references:
BG3 filter datasheet: http://www.howardglass.com/pdf/bg3_datasheet.pdf
QE: http://www.andor.com/pdfs/specifications/Andor_iXon_Ultra_897_Specifications.pdf
http://occult.mit.edu/instrumentation/MORIS/Documents/DU-897_BI.pdf
window: http://www.andor.com/pdfs/specifications/Andor_Camera_Windows_Supplementary_Specifications.pdf
"""
def getSystemT(newLambda, bg3fn: Path, windfn: Path, qefn: Path, obsalt_km, zenang_deg, verbose: bool = False) -> xarray.Dataset:
bg3fn = Path(bg3fn).expanduser()
windfn = Path(windfn).expanduser()
qefn = Path(qefn).expanduser()
newLambda = np.asarray(newLambda)
# %% atmospheric absorption
if lowtran is not None:
c1 = {
"model": 5,
"h1": obsalt_km,
"angle": zenang_deg,
"wlshort": newLambda[0],
"wllong": newLambda[-1],
}
if verbose:
print("loading LOWTRAN7 atmosphere model...")
atmT = lowtran.transmittance(c1)["transmission"].squeeze()
try:
atmTcleaned = atmT.values.squeeze()
            atmTcleaned[atmTcleaned == 0] = np.spacing(1) # to avoid log(0)
fwl = interp1d(atmT.wavelength_nm, np.log(atmTcleaned), axis=0)
except AttributeError: # problem with lowtran
fwl = interp1d(newLambda, np.log(np.ones_like(newLambda)), kind="linear")
else:
fwl = interp1d(newLambda, np.log(np.ones_like(newLambda)), kind="linear")
atmTinterp = np.exp(fwl(newLambda))
if not np.isfinite(atmTinterp).all():
logging.error("problem in computing LOWTRAN atmospheric attenuation, results are suspect!")
# %% BG3 filter
with h5py.File(bg3fn, "r") as f:
try:
assert isinstance(f["/T"], h5py.Dataset), "we only allow one transmission curve per file" # simple legacy behavior
fbg3 = interp1d(f["/wavelength"], np.log(f["/T"]), kind="linear", bounds_error=False)
except KeyError:
raise KeyError("could not find /wavelength in {}".format(f.filename))
try:
fname = f["T"].attrs["name"].item()
if isinstance(fname, bytes):
fname = fname.decode("utf8")
except KeyError:
fname = ""
# %% camera window
with h5py.File(windfn, "r") as f:
fwind = interp1d(f["/lamb"], np.log(f["/T"]), kind="linear")
# %% quantum efficiency
with h5py.File(qefn, "r") as f:
fqe = interp1d(f["/lamb"], np.log(f["/QE"]), kind="linear")
# %% collect results into DataArray
T = xarray.Dataset(
{
"filter": ("wavelength_nm", np.exp(fbg3(newLambda))),
"window": ("wavelength_nm", np.exp(fwind(newLambda))),
"qe": ("wavelength_nm", np.exp(fqe(newLambda))),
"atm": ("wavelength_nm", atmTinterp),
},
coords={"wavelength_nm": newLambda},
attrs={"filename": fname},
)
T["sysNObg3"] = T["window"] * T["qe"] * T["atm"]
T["sys"] = T["sysNObg3"] * T["filter"]
return T
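# Minimal usage sketch (comments only; the file names and wavelength grid are
# assumptions, not fixtures shipped with this module). getSystemT returns an
# xarray.Dataset whose "sys" variable is the product of the filter, window,
# quantum-efficiency and atmospheric transmission curves.
#
#   import numpy as np
#   from gridaurora.filterload import getSystemT
#
#   wavelen_nm = np.arange(200.0, 1000.0, 1.0)
#   T = getSystemT(wavelen_nm, "bg3.h5", "window.h5", "qe.h5",
#                  obsalt_km=0.3, zenang_deg=60.0)
#   T["sys"].plot()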
| scienceopen/gridaurora | gridaurora/filterload.py | Python | gpl-3.0 | 3,478 |
import re, urllib2, cookielib, time, xbmcgui, socket, xbmc, os
from urllib2 import Request, URLError, urlopen as urlopen2
from urlparse import parse_qs
from urllib import quote, unquote_plus, unquote, urlencode
from httplib import HTTPConnection, CannotSendRequest, BadStatusLine, HTTPException
from socket import gaierror, error
from t0mm0.common.net import Net
from jsunpacker import cJsUnpacker
COOKIEFILE = xbmc.translatePath( 'special://temp/dabdate_cookie.lwp' )
cj = cookielib.LWPCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
if os.path.isfile(COOKIEFILE):
cj.load(COOKIEFILE)
xbmc.log( "Cookie is loaded", xbmc.LOGINFO )
xbmc.log( "Cookie is set, " + COOKIEFILE, xbmc.LOGINFO )
hosterlist = [
('youtube', '.*www\.youtube\.com'),
('putlocker', '.*www\.putlocker\.com/(?:file|embed)/'),
('sockshare', '.*www\.sockshare\.com/(?:file|embed)/'),
('videoslasher', '.*www\.videoslasher\.com/embed/'),
('faststream', '.*faststream\.in'),
('flashx', '.*flashx\.tv'),
('vk', '.*vk\.(me|com)/'),
('streamcloud', '.*streamcloud\.eu'),
('vidstream', '.*vidstream\.in'),
('xvidstage', '.*xvidstage\.com'),
('nowvideo', '.*nowvideo\.(?:eu|sx)'),
('movshare', '.*movshare\.net'),
('divxstage', '.*(?:embed\.divxstage\.eu|divxstage\.eu/video)'),
('videoweed', '.*videoweed\.es'),
('novamov', '.*novamov\.com'),
('primeshare', '.*primeshare'),
('videomega', '.*videomega\.tv'),
('bitshare', '.*bitshare\.com'),
('movreel', '.*movreel\.com'),
('uploadc', '.*uploadc\.com'),
('youwatch', '.*youwatch\.org'),
('yandex', '.*yandex\.ru'),
# ('K1no HD', '.*[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'),
('sharedsx', '.*shared\.sx'),
('vivosx', '.*vivo\.sx'),
('cloudyvideos', '.*cloudyvideos\.com'),
('vidx', '.*vidx\.to')]
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.6) Gecko/20100627 Firefox/3.6.6',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-us,en;q=0.5',
}
class get_stream_link:
def __init__(self):
#self._callback = None
self.net = Net()
def get_stream(self, link):
hoster = self.get_hostername(link)
if hoster == 'putlocker': return self.streamPutlockerSockshare(link, 'putlocker')
elif hoster == 'sockshare': return self.streamPutlockerSockshare(link, 'sockshare')
elif hoster == 'youtube': return self.youtube(link)
elif hoster == 'videoslasher': return self.videoslaher(link)
elif hoster == 'faststream': return self.generic1(link, 'Faststream', 10, 0)
elif hoster == 'flashx': return self.flashx(link)
elif hoster == 'vk': return self.vk(link)
elif hoster == 'streamcloud': return self.streamcloud(link)
elif hoster == 'vidstream': return self.vidstream(link)
elif hoster == 'xvidstage': return self.xvidstage(link)
elif hoster == 'videoweed': return self.videoweed(link)
elif hoster == 'nowvideo': return self.generic2(link)
elif hoster == 'movshare': return self.generic2(link)
elif hoster == 'divxstage': return self.generic2(link)
elif hoster == 'novamov': return self.generic2(link)
elif hoster == 'primeshare': return self.primeshare(link)
elif hoster == 'videomega': return self.videomega(link)
elif hoster == 'bitshare': return self.bitshare(link)
elif hoster == 'movreel': return self.movreel(link)
elif hoster == 'uploadc': return self.uploadc(link)
elif hoster == 'youwatch': return self.youwatch(link)
elif hoster == 'yandex': return self.generic1(link, 'Yandex', 0, 0)
elif hoster == 'vidx': return self.generic1(link, 'ViDX', 10, 0)
elif hoster == 'K1no HD': return link
elif hoster == 'sharedsx': return self.generic1(link, 'Shared.sx', 0, 1)
elif hoster == 'vivosx': return self.generic1(link, 'Vivo.sx', 0, 1)
elif hoster == 'cloudyvideos': return self.generic1(link, 'CloudyVideos', 2, 2)
return 'Not Supported'
def getUrl(self, url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
data = response.read()
response.close()
return data
def get_adfly_link(self, adflink):
print 'resolving adfly url: \'%s\' using http://dead.comuv.com/bypasser/process.php' % (adflink)
data = self.net.http_POST('http://dead.comuv.com/bypasser/process.php', {'url':adflink}, {'Referer':'http://dead.comuv.com/', 'X-Requested-With':'XMLHttpRequest'}).content
link = re.findall('<a[^>]*href="([^"]*)"', data, re.S|re.I|re.DOTALL)
if link: return link[0]
else: return 'empty'
def get_adfly_link_2(self, adflink):
print 'resolving adfly url: \'%s\' using http://cyberflux.info/shortlink.php' % (adflink)
data = self.net.http_POST('http://cyberflux.info/shortlink.php', {'urllist':adflink}, {'Referer':'http://cyberflux.info/shortlink.php'}).content
link = re.findall(adflink + '[ ]*=[ ]*<a[^>]*href=([^>]*)>', data, re.S|re.I|re.DOTALL)
if link: return link[0]
else: return 'empty'
def waitmsg(self, sec, msg):
isec = int(sec)
if isec > 0:
dialog = xbmcgui.DialogProgress()
dialog.create('Resolving', '%s Link.. Wait %s sec.' % (msg, sec))
dialog.update(0)
c = 100 / isec
i = 1
p = 0
while i < isec+1:
p += int(c)
time.sleep(1)
dialog.update(int(p))
i += 1
dialog.close()
def get_hostername(self, link):
if link:
for (hoster, urlrex) in hosterlist:
if re.match(urlrex, link, re.S|re.I): return hoster
return 'Not Supported'
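	# Illustrative note (comment only): the regexes in hosterlist are tried in
	# order, so e.g. 'http://streamcloud.eu/abc123/video.mp4.html' maps to
	# 'streamcloud', while an unrecognised host falls through to 'Not Supported'.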
def get_stream_url(self, sUnpacked):
if not sUnpacked: return
stream_url = re.findall('type="video/divx"src="(.*?)"', sUnpacked, re.S|re.I|re.DOTALL)
if not stream_url: stream_url = re.findall("file','(.*?)'", sUnpacked, re.S|re.I|re.DOTALL)
if not stream_url: stream_url = re.findall('file:"(.*?)"', sUnpacked, re.S|re.I|re.DOTALL)
if stream_url: return stream_url[0]
def youtube(self, url):
print url
match = re.compile('youtube.com/embed/([^\?]+)', re.DOTALL).findall(url)
if match:
youtubeID = match[0]
if xbmc.getCondVisibility("System.Platform.xbox") == True:
video_url = "plugin://video/YouTube/?path=/root/video&action=play_video&videoid=" + youtubeID
else:
video_url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=" + youtubeID
return video_url
def uploadc(self, url):
data = self.net.http_GET(url).content
ipcount_val = re.findall('<input type="hidden" name="ipcount_val".*?value="(.*?)">', data)
id = re.findall('<input type="hidden" name="id".*?value="(.*?)">', data)
fname = re.findall('<input type="hidden" name="fname".*?alue="(.*?)">', data)
if id and fname and ipcount_val:
info = {'ipcount_val' : ipcount_val[0], 'op' : 'download2', 'usr_login' : '', 'id' : id[0], 'fname' : fname[0], 'method_free' : 'Slow access'}
data2 = self.net.http_POST(url, info).content
stream_url = self.get_stream_url(data2)
if not stream_url:
get_packedjava = re.findall("(\(p,a,c,k,e,d.*?)</script>", data2, re.S|re.DOTALL)
if get_packedjava:
sUnpacked = cJsUnpacker().unpackByString(get_packedjava[0])
stream_url = self.get_stream_url(sUnpacked)
if stream_url: return stream_url
else: return 'Error: Konnte Datei nicht extrahieren'
def youwatch(self, url):
print url
resp = self.net.http_GET(url)
data = resp.content
for frm in re.findall('<form[^>]*method="POST"[^>]*action=\'\'[^>]*>(.*?)</form>', data, re.S|re.I):
info = {}
for i in re.finditer('<input[^>]*name="([^"]*)"[^>]*value="([^"]*)"', frm): info[i.group(1)] = i.group(2)
if len(info) == 0: return 'Error: konnte Logindaten nicht extrahieren'
info['referer'] = resp.get_url()
self.waitmsg(int(10), 'Youwatch')
data = self.net.http_POST(resp.get_url(), info).content
get_packedjava = re.findall("(\(p,a,c,k,e,d.*?)</script>", data, re.S|re.I)
if get_packedjava:
sJavascript = get_packedjava[0]
sUnpacked = cJsUnpacker().unpackByString(sJavascript)
if sUnpacked:
stream_url = re.findall('file:"([^"]*(?:mkv|mp4|avi|mov|flv|mpg|mpeg))"', sUnpacked)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def movreel(self, url):
data = self.net.http_GET(url).content
id = re.findall('<input type="hidden" name="id".*?value="(.*?)">', data)
fname = re.findall('<input type="hidden" name="fname".*?value="(.*?)">', data)
if id and fname:
info = {'op': 'download1', 'usr_login': '', 'id': id[0], 'fname': fname[0], 'referer': '', 'method_free': ' Kostenloser Download'}
data2 = self.net.http_POST(url, info).content
id = re.findall('<input type="hidden" name="id".*?value="(.*?)">', data2)
rand = re.findall('<input type="hidden" name="rand".*?value="(.*?)">', data2)
if id and rand:
info2 = {'op': 'download2', 'usr_login': '', 'id': id[0], 'rand': rand[0], 'referer': '', 'method_free': ' Kostenloser Download'}
data = self.net.http_POST(url, info2).content
stream_url = re.findall("var file_link = '(.*?)'", data, re.S)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def bitshare(self, url):
data = self.net.http_GET(url).content
if re.match('.*?(Ihr Traffic.*?Heute ist verbraucht|Your Traffic is used up for today)', data, re.S|re.I): return 'Error: Ihr heutiger Traffic ist aufgebraucht'
elif re.match(".*?The file owner decided to limit file.*?access", data, re.S|re.I): return 'Error: Nutzer hat Dateizugriff limitiert'
elif re.match(".*?Sorry aber sie.*?nicht mehr als 1 Dateien gleichzeitig herunterladen", data, re.S|re.I): return 'Error: Mehr als 1 Datei gleichzeitig ist nicht erlaubt'
else:
stream_url = re.findall("url: '(http://.*?.bitshare.com/stream/.*?.avi)'", data)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def videomega(self, url):
if not re.match('.*?iframe.php', url):
id = url.split('ref=')
if id: url = "http://videomega.tv/iframe.php?ref=%s" % id[1]
data = self.net.http_GET(url).content
unescape = re.findall('unescape."(.*?)"', data, re.S)
if unescape:
javadata = urllib2.unquote(unescape[0])
if javadata:
stream_url = re.findall('file: "(.*?)"', javadata, re.S)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def primeshare(self, url):
data = self.getUrl(url)
hash = re.findall('<input type="hidden".*?name="hash".*?value="(.*?)"', data)
if hash:
info = {'hash': hash[0]}
self.waitmsg(16, "Primeshare")
data = self.net.http_POST(url, info).content
stream_url = re.findall('url: \'(.*?)\'', data, re.S)
if stream_url: return stream_url[2]
else: return 'Error: Konnte Datei nicht extrahieren'
def videoweed(self, url):
data = self.net.http_GET(url).content
r = re.search('flashvars.domain="(.+?)".*flashvars.file="(.+?)".*' + 'flashvars.filekey="(.+?)"', data, re.DOTALL)
if r:
domain, fileid, filekey = r.groups()
api_call = ('%s/api/player.api.php?user=undefined&codes=1&file=%s' + '&pass=undefined&key=%s') % (domain, fileid, filekey)
if api_call:
data = self.net.http_GET(api_call).content
rapi = re.search('url=(.+?)&title=', data)
if rapi:
stream_url = rapi.group(1)
if stream_url: return stream_url
else: return 'Error: Konnte Datei nicht extrahieren'
def vk(self, url):
data = self.net.http_GET(url).content
vars = re.findall('<param[^>]*name="flashvars"[^>]*value="([^"]*)"', data, re.I|re.S|re.DOTALL)
if vars:
urls = re.findall('url([0-9]+)=([^&]*)&', vars[0], re.I|re.S|re.DOTALL)
if urls:
maxres = 0
maxurl = ''
for (res, url) in urls:
if (int(res) > maxres):
maxres = int(res)
maxurl = url
return maxurl
def xvidstage(self, url):
data = self.net.http_GET(url).content
info = {}
for i in re.finditer('<input.*?name="(.*?)".*?value="(.*?)">', data):
info[i.group(1)] = i.group(2)
data = self.net.http_POST(url, info).content
get_packedjava = re.findall("(\(p,a,c,k,e,d.*?)</script>", data, re.S|re.DOTALL)
if get_packedjava:
sJavascript = get_packedjava[1]
sUnpacked = cJsUnpacker().unpackByString(sJavascript)
if sUnpacked:
if re.match('.*?type="video/divx', sUnpacked):
stream_url = re.findall('type="video/divx"src="(.*?)"', sUnpacked)
if stream_url: return stream_url[0]
elif re.match(".*?file", sUnpacked):
stream_url = re.findall("file','(.*?)'", sUnpacked)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def vidstream(self, url):
data = self.net.http_GET(url).content
if re.match('.*?maintenance mode', data, re.S): return 'Error: Server wegen Wartungsarbeiten ausser Betrieb'
info = {}
for i in re.finditer('<input[^>]*name="([^"]*)"[^>]*value="([^"]*)">', data):
info[i.group(1)] = i.group(2)
if len(info) == 0: return 'Error: konnte Logindaten nicht extrahieren'
print 'URL: '+ url, info
data = self.net.http_POST(url, info).content
if re.match('.*?not found', data, re.S|re.I): return 'Error: Datei nicht gefunden'
stream_url = re.findall('file: "([^"]*)"', data)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def streamcloud(self, url):
data = self.net.http_GET(url).content
info = {}
print url
if re.match('.*?No such file with this filename', data, re.S|re.I): return 'Error: Dateiname nicht bekannt'
for i in re.finditer('<input[^>]*name="([^"]*)"[^>]*value="([^"]*)">', data):
info[i.group(1)] = i.group(2).replace('download1', 'download2')
if len(info) == 0: return 'Error: konnte Logindaten nicht extrahieren'
#wait required
#print "POSTDATA: " + str(info)
#self.waitmsg(10, "Streamcloud")
data = self.net.http_POST(url, info).content
if re.match('.*?This video is encoding now', data, re.S): return 'Error: Das Video wird aktuell konvertiert'
if re.match('.*?The file you were looking for could not be found', data, re.S): return 'Error: Die Datei existiert nicht'
stream_url = re.findall('file: "(.*?)"', data)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def videoslaher(self, url):
url = url.replace('file','embed')
info = {'foo': "vs", 'confirm': "Close Ad and Watch as Free User"}
data = self.net.http_POST(url, info).content
code = re.findall("code: '(.*?)'", data, re.S)
hash = re.findall("hash: '(.*?)'", data, re.S)
xml_link = re.findall("playlist: '(/playlist/.*?)'", data, re.S)
if code and hash and xml_link:
data = self.net.http_GET("http://www.videoslasher.com"+xml_link[0]).content
stream_url = re.findall('<media:content url="(.*?)"', data)
if stream_url:
info = {'user': "0", 'hash': hash[0], 'code': code[0]}
data = self.net.http_POST("http://www.videoslasher.com/service/player/on-start", info).content
if 'ok' in data: return stream_url[1]
else: return 'Error: konnte stream nicht bestaetigen'
else: return 'Error: Stream-URL nicht gefunden'
else: return 'Error: konnte Logindaten nicht extrahieren'
def streamPutlockerSockshare(self, url, provider):
data = self.getUrl(url.replace('/file/','/embed/'))
if re.match('.*?File Does not Exist', data, re.S): return 'Error: Die Datei existiert nicht'
elif re.match('.*?Encoding to enable streaming is in progresss', data, re.S): return "Error: Die Datei wird aktuell konvertiert"
else:
enter = re.findall('<input type="hidden" value="(.*?)" name="fuck_you">', data)
values = {'fuck_you': enter[0], 'confirm': 'Close+Ad+and+Watch+as+Free+User'}
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = { 'User-Agent' : user_agent}
cookiejar = cookielib.LWPCookieJar()
cookiejar = urllib2.HTTPCookieProcessor(cookiejar)
opener = urllib2.build_opener(cookiejar)
urllib2.install_opener(opener)
data = urlencode(values)
req = urllib2.Request(url, data, headers)
try: response = urllib2.urlopen(req)
except: return 'Error: Error @ urllib2.Request()'
else:
link = response.read()
if link:
embed = re.findall("get_file.php.stream=(.*?)'\,", link, re.S)
if embed:
req = urllib2.Request('http://www.%s.com/get_file.php?stream=%s' %(provider, embed[0]))
req.add_header('User-Agent', user_agent)
try: response = urllib2.urlopen(req)
except: return 'Error: Error @ urllib2.Request()'
else:
link = response.read()
if link:
stream_url = re.findall('<media:content url="(.*?)"', link, re.S)
								filename = stream_url[1].replace('&amp;','&')
if filename: return filename
else: return 'Error: Konnte Datei nicht extrahieren'
def flashx(self, url):
print 'flashx: ' + url
resp = self.net.http_GET(url)
data = resp.content
for frm in re.findall('<form[^>]*method="POST"[^>]*>(.*?)</form>', data, re.S|re.I):
info = {}
for i in re.finditer('<input[^>]*name="([^"]*)"[^>]*value="([^"]*)"', frm): info[i.group(1)] = i.group(2)
if len(info) == 0: return 'Error: konnte Logindaten nicht extrahieren'
info['referer'] = ""
self.waitmsg(int(5), "flashx")
data = self.net.http_POST(resp.get_url(), info).content
get_packedjava = re.findall("(\(p,a,c,k,e,d.*?)</script>", data, re.S|re.DOTALL)
if get_packedjava:
sJavascript = get_packedjava[0]
sUnpacked = cJsUnpacker().unpackByString(sJavascript)
if sUnpacked:
stream_url = re.findall('file:"([^"]*(?:mkv|mp4|avi|mov|flv|mpg|mpeg))"', sUnpacked)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def generic1(self, url, hostername, waitseconds, filerexid):
print hostername + ': ' + url
filerex = [ 'file:[ ]*[\'\"]([^\'\"]+(?:mkv|mp4|avi|mov|flv|mpg|mpeg))[\"\']',
'data-url=[\'\"]([^\'\"]+)[\"\']',
'<a[^>]*href="([^"]+)">[^<]*<input[^>]*value="Click for your file">' ]
resp = self.net.http_GET(url)
data = resp.content
for frm in re.findall('<form[^>]*method="POST"[^>]*>(.*?)</form>', data, re.S|re.I):
info = {}
for i in re.finditer('<input[^>]*name="([^"]*)"[^>]*value="([^"]*)"', frm): info[i.group(1)] = i.group(2)
if len(info) == 0: return 'Error: konnte Logindaten nicht extrahieren'
info['referer'] = resp.get_url()
self.waitmsg(int(waitseconds), hostername)
data = self.net.http_POST(resp.get_url(), info).content
if re.match('.*Video is processing now', data, re.S|re.I): return "Error: Die Datei wird aktuell konvertiert"
print "search for: " + filerex[filerexid]
stream_url = re.findall(filerex[filerexid], data, re.S|re.I)
if stream_url: return stream_url[0]
else: return 'Error: Konnte Datei nicht extrahieren'
def generic2(self, url):
url = re.sub('[ ]+', '', url)
data = self.net.http_GET(url).content
if re.match('.*?The file is being converted', data, re.S|re.I): return "Error: Das Video wird aktuell konvertiert"
dom = re.findall('flashvars.domain="(.*?)"', data)
file = re.findall('flashvars.file="(.*?)"', data)
key = re.findall('flashvars.filekey="(.*?)"', data)
if file and not key:
varname = re.findall('flashvars.filekey=(.*?);', data)
if varname: key = re.findall('var[ ]+%s="(.*?)"'%(varname[0]), data)
if dom and file and key:
url = "%s/api/player.api.php?file=%s&key=%s"%(dom[0], file[0], key[0])
if re.match('.*?The video has failed to convert', data, re.S|re.I): return "Error: Das Video wurde nicht fehlerfrei konvertiert"
data = self.net.http_GET(url).content
rapi = re.search('url=([^&]+)&title=', data)
if rapi:
stream_url = rapi.group(1)
if stream_url: return stream_url
else: return 'Error: Konnte Datei nicht extrahieren'
else: return "Error: Video wurde nicht gefunden"
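# Minimal usage sketch (comments only; the URL is a made-up example):
#
#   resolver = get_stream_link()
#   stream = resolver.get_stream('http://streamcloud.eu/abc123/video.mp4.html')
#   if stream == 'Not Supported' or stream.startswith('Error:'):
#       pass  # resolving failed; show a notification instead of playing
#   else:
#       xbmc.Player().play(stream)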
| Stevie-Bs/Stevie-Bs-Kodi | plugin.video.szenestreams/stream.py | Python | gpl-2.0 | 19,849 |
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.http import HttpResponse
from funfactory.monkeypatches import patch
patch()
from base import views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
handler404 = views.error404
handler500 = views.error500
urlpatterns = patterns('',
(r'', include('django_browserid.urls')),
(r'', include('wprevents.events.urls')),
(r'', include('wprevents.base.urls')),
# API
url(r'^api/', include('wprevents.api.urls')),
(r'^admin/', include('wprevents.admin.urls')),
# Generate a robots.txt
    (r'^robots\.txt$', lambda r: HttpResponse(
        "User-agent: *\n%s: /" % ('Allow' if settings.ENGAGE_ROBOTS else 'Disallow'),
mimetype="text/plain")
),
url(r'^404$', handler404, name='404'),
url(r'^500$', handler500, name='500'),
# Uncomment the admin/doc line below to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
)
## In DEBUG mode, serve media files through Django.
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | yvan-sraka/wprevents | wprevents/urls.py | Python | bsd-3-clause | 1,439 |
# -*- coding: utf-8 -*-
#
# cookiecutter documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 11 11:31:49 2013.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# For building docs in foreign environments where we don't have all our
# dependencies (like readthedocs), mock out imports that cause sphinx to fail.
# see: https://docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules # noqa
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['yaml']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
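# With the stub installed, importing the mocked module succeeds and any
# attribute access resolves to another Mock (or a dummy type), so code such as
# the following would not fail at import time even without PyYAML installed
# (illustrative only):
#
#   import yaml
#   loader = yaml.safe_load  # -> Mock instance; never actually invoked here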
# Add parent dir to path
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import cookiecutter # noqa 402
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.imgmath',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'docs.ccext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cookiecutter'
copyright = u'2013-2016, Audrey Roy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cookiecutter.__version__
# The full version, including alpha/beta/rc tags.
release = cookiecutter.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# Suppress nonlocal image warnings
suppress_warnings = ['image.nonlocal_uri']
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cookiecutterdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
('index', 'cookiecutter.tex', u'cookiecutter Documentation',
u'Audrey Roy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cookiecutter', u'cookiecutter Documentation',
[u'Audrey Roy'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cookiecutter', u'cookiecutter Documentation',
u'Audrey Roy', 'cookiecutter', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output --------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'cookiecutter'
epub_author = u'Audrey Roy'
epub_publisher = u'Audrey Roy'
epub_copyright = u'2013-2016, Audrey Roy'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# If 'no', URL addresses will not be shown.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| hackebrot/cookiecutter | docs/conf.py | Python | bsd-3-clause | 11,072 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
'attrs>=16.0',
'requests==2.11.1',
'email-validator==1.0.1',
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='mgship',
version='0.1.0',
description=("Tool to ship the Mailgun event log for "
"archiving and later analytics."),
long_description=readme + '\n\n' + history,
author="Konstantinos Koukopoulos",
author_email='[email protected]',
url='https://github.com/kouk/mgship',
packages=find_packages(),
entry_points={
'console_scripts': [
'mgship=mgship.cli:main'
]
},
include_package_data=True,
install_requires=requirements,
license="BSD license",
zip_safe=False,
keywords='mgship',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
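# Illustrative usage sketch (hypothetical workflow): install the package and
# run the ``mgship`` console script declared in ``entry_points`` above, e.g.
#   pip install -e .
#   mgship --help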
| kouk/mgship | setup.py | Python | bsd-3-clause | 1,627 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
##
## Test sdbcore.StencilData
##
##===------------------------------------------------------------------------------------------===##
from collections import Counter
from unittest import TestCase, main
from sdbcore.stencildata import StencilData
from sdbtest.serializerdatamock import SerializerDataMock
class TestStencilData(TestCase):
def test_init(self):
serializer_data = SerializerDataMock("Input", "dir", "prefix")
serializer_data.make_serializer_empty()
stencil_data = StencilData(serializer_data)
self.assertEqual(stencil_data.serializer.directory, "dir")
self.assertEqual(stencil_data.serializer.prefix, "prefix")
def test_update_stencil_and_field_list(self):
serializer_data = SerializerDataMock("Input", "dir", "prefix")
serializer_data.add_stencil("s1", 2, ["u", "v", "w"])
serializer_data.add_stencil("s2", 2, ["u", "v", "w", "tt"])
serializer_data.add_stencil("s3", 2, [])
stencil_data = StencilData(serializer_data)
# Check s1, ..., s3 are in stencil list
self.assertTrue(Counter(stencil_data.stencil_list) == Counter(["s1", "s2", "s3"]))
# Check fields of s1
stencil_data.set_selected_stencil(stencil_data.stencil_list.index("s1"))
self.assertTrue(Counter(stencil_data.field_list) == Counter(["u", "v", "w"]))
# Check fields of s2
stencil_data.set_selected_stencil(stencil_data.stencil_list.index("s2"))
self.assertTrue(Counter(stencil_data.field_list) == Counter(["u", "v", "w", "tt"]))
        # Check fields of s3
stencil_data.set_selected_stencil(stencil_data.stencil_list.index("s3"))
self.assertFalse(stencil_data.field_list)
if __name__ == "__main__":
main()
| thfabian/serialbox2 | test/serialbox-python/sdb/sdbcore/test_stencildata.py | Python | bsd-2-clause | 2,179 |
#!/usr/bin/python
import socket
import sys
verbose = False
if len(sys.argv) > 2 and sys.argv[2] == "-v":
verbose = True
def read(f):
    # Read one line from the backend and strip the trailing newline.
    return f.readline().strip()
def main():
    # Connect to the backend's UNIX socket and perform the PowerDNS
    # pipe-backend style handshake (ABI version 1).
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect("/tmp/mysocket.socket")
    f = sock.makefile()
    f.write("HELO\t1\n")
    f.flush()
    host = "vcs-edge-2.c.booking.com"
    host = "c.booking.com"
    myip = "1.0.64.10"
    myip = "37.10.26.3"
    myip = "8.8.8.8"  # only the last host/myip assignment takes effect
    query = "Q\t%s\tIN\tANY\t-1\t%s\n" % (host, myip)
    # Send the same query sys.argv[1] times, draining each response until
    # the backend signals completion with an END line.
    for i in range(1, int(sys.argv[1])):
        f.write(query)
        f.flush()
        line = read(f)
        while line:
            if verbose:
                print line
            if line.startswith("END"):
                break
            line = read(f)
if __name__ == "__main__":
    main()
| wimtie/pdnsbe | testing/iterfromcli.py | Python | gpl-2.0 | 802 |
#
# PEDA - Python Exploit Development Assistance for GDB
#
# Copyright (C) 2012 Long Le Dinh <longld at vnsecurity.net>
#
# License: see LICENSE file for details
#
# change below settings to match your needs
## BEGIN OF SETTINGS ##
# external binaries, required for some commands
READELF = "/usr/bin/readelf"
OBJDUMP = "/usr/bin/objdump"
NASM = "/usr/bin/nasm"
NDISASM = "/usr/bin/ndisasm"
# PEDA global options
OPTIONS = {
    "badchars" : ("", "bad characters to be filtered in payload/output, e.g: '\\x0a\\x00'"),
    "pattern" : (1, "pattern type, 0 = basic, 1 = extended, 2 = maximum"),
    "p_charset" : ("", "custom charset for pattern_create"),
    "indent" : (4, "number of indent spaces for output python payload, e.g: 0|4|8"),
    "ansicolor" : ("on", "enable/disable colorized output, e.g: on|off"),
    "pagesize" : (25, "number of lines to display per page, 0 = disable paging"),
    "session" : ("peda-session-#FILENAME#.txt", "target file to save peda session"),
    "tracedepth": (0, "max depth for calls/instructions tracing, 0 means no limit"),
    "tracelog" : ("peda-trace-#FILENAME#.txt", "target file to save tracecall output"),
    "crashlog" : ("peda-crashdump-#FILENAME#.txt", "target file to save crash dump of fuzzing"),
    "snapshot" : ("peda-snapshot-#FILENAME#.raw", "target file to save process snapshot"),
"autosave" : ("on", "auto saving peda session, e.g: on|off"),
"payload" : ("peda-payload-#FILENAME#.txt", "target file to save output of payload command"),
"context" : ("register,code,stack", "context display setting, e.g: register, code, stack, all"),
"verbose" : ("off", "show detail execution of commands, e.g: on|off"),
"debug" : ("off", "show detail error of peda commands, e.g: on|off"),
"_teefd" : ("", "internal use only for tracelog/crashlog writing")
}
## END OF SETTINGS ##
class Option(object):
"""
Class to access global options of PEDA commands and functions
TODO: save/load option to/from file
"""
options = OPTIONS.copy()
def __init__(self):
"""option format: name = (value, 'help message')"""
pass
@staticmethod
def reset():
"""reset to default options"""
Option.options = OPTIONS.copy()
return True
@staticmethod
def show(name=""):
"""display options"""
result = {}
for opt in Option.options:
if name in opt and not opt.startswith("_"):
result[opt] = Option.options[opt][0]
return result
@staticmethod
def get(name):
"""get option"""
if name in Option.options:
return Option.options[name][0]
else:
return None
@staticmethod
def set(name, value):
"""set option"""
if name in Option.options:
Option.options[name] = (value, Option.options[name][1])
return True
else:
return False
@staticmethod
def help(name=""):
"""display help info of options"""
result = {}
for opt in Option.options:
if name in opt and not opt.startswith("_"):
result[opt] = Option.options[opt][1]
return result
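# Illustrative usage sketch (hypothetical calls, mirroring the static methods
# defined above):
#   Option.set("verbose", "on")   # enable verbose command output
#   Option.get("pagesize")        # -> 25, the current value
#   Option.show("trace")          # options whose names contain "trace"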
| m42e/gdbinvim | peda/lib/config.py | Python | gpl-3.0 | 3,279 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import SlicedIndex, TableIndices, TableLoc, TableILoc, TableLocIndices
import sys
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
import warnings
from copy import deepcopy
import types
import itertools
import weakref
import numpy as np
from numpy import ma
from astropy import log
from astropy.units import Quantity, QuantityInfo
from astropy.utils import isiterable, ShapedLikeNDArray
from astropy.utils.console import color_print
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from astropy.utils.metadata import MetaData, MetaAttribute
from astropy.utils.data_info import BaseColumnInfo, MixinInfo, DataInfo
from astropy.utils.decorators import format_doc
from astropy.io.registry import UnifiedReadWriteMethod
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy, _convert_sequence_data_to_array)
from .row import Row
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from .connect import TableRead, TableWrite
from .ndarray_mixin import NdarrayMixin
from .mixins.registry import get_mixin_handler
from . import conf
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = ['Table.read', 'Table.write', 'Table._read',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
__doctest_requires__ = {'*pandas': ['pandas>=1.1']}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of lines in table output.
max_width : int or None
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
    Include a header row for column dtypes. Default is False.
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
    Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
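    # For example (illustrative): a float64 Column named 'a' with shape (5, 3)
    # would be described as ('a', dtype('float64'), (3,)), i.e. the name, the
    # dtype, and the per-row shape with the length axis dropped.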
def has_info_class(obj, cls):
"""Check if the object's info is an instance of cls."""
# We check info on the class of the instance, since on the instance
# itself accessing 'info' has side effects in that it sets
# obj.__dict__['info'] if it does not exist already.
return isinstance(getattr(obj.__class__, 'info', None), cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, Mapping):
return None
names.update(row)
return list(names)
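    # For example (illustrative): rows like [{'a': 1, 'b': 2}, {'b': 3, 'c': 4}]
    # yield the union of keys ['a', 'b', 'c'] (set order, not input order),
    # whereas a list of lists or tuples yields None.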
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError("Cannot replace column '{}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return f"<{self.__class__.__name__} names=({','.join(names)})>"
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
# Rename column names in pprint include/exclude attributes as needed
parent_table = self[name].info.parent_table
if parent_table is not None:
parent_table.pprint_exclude_names._rename(name, new_name)
parent_table.pprint_include_names._rename(name, new_name)
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def __delitem__(self, name):
# Remove column names from pprint include/exclude attributes as needed.
# __delitem__ also gets called for pop() and popitem().
parent_table = self[name].info.parent_table
if parent_table is not None:
# _remove() method does not require that `name` is in the attribute
parent_table.pprint_exclude_names._remove(name)
parent_table.pprint_include_names._remove(name)
return super().__delitem__(name)
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
class PprintIncludeExclude(TableAttribute):
"""Maintain tuple that controls table column visibility for print output.
This is a descriptor that inherits from MetaAttribute so that the attribute
value is stored in the table meta['__attributes__'].
This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
attributes.
"""
def __get__(self, instance, owner_cls):
"""Get the attribute.
This normally returns an instance of this class which is stored on the
owner object.
"""
# For getting from class not an instance
if instance is None:
return self
# If not already stored on `instance`, make a copy of the class
# descriptor object and put it onto the instance.
value = instance.__dict__.get(self.name)
if value is None:
value = deepcopy(self)
instance.__dict__[self.name] = value
# We set _instance_ref on every call, since if one makes copies of
# instances, this attribute will be copied as well, which will lose the
# reference.
value._instance_ref = weakref.ref(instance)
return value
def __set__(self, instance, names):
"""Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
if isinstance(names, str):
names = [names]
if names is None:
# Remove attribute value from the meta['__attributes__'] dict.
# Subsequent access will just return None.
delattr(instance, self.name)
else:
# This stores names into instance.meta['__attributes__'] as tuple
return super().__set__(instance, tuple(names))
def __call__(self):
"""Get the value of the attribute.
Returns
-------
names : None, tuple
Include/exclude names
"""
# Get the value from instance.meta['__attributes__']
instance = self._instance_ref()
return super().__get__(instance, instance.__class__)
def __repr__(self):
if hasattr(self, '_instance_ref'):
out = f'<{self.__class__.__name__} name={self.name} value={self()}>'
else:
out = super().__repr__()
return out
def _add_remove_setup(self, names):
"""Common setup for add and remove.
- Coerce attribute value to a list
- Coerce names into a list
- Get the parent table instance
"""
names = [names] if isinstance(names, str) else list(names)
# Get the value. This is the same as self() but we need `instance` here.
instance = self._instance_ref()
value = super().__get__(instance, instance.__class__)
value = [] if value is None else list(value)
return instance, names, value
def add(self, names):
"""Add ``names`` to the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to add
"""
instance, names, value = self._add_remove_setup(names)
value.extend(name for name in names if name not in value)
super().__set__(instance, tuple(value))
def remove(self, names):
"""Remove ``names`` from the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to remove
"""
self._remove(names, raise_exc=True)
def _remove(self, names, raise_exc=False):
"""Remove ``names`` with optional checking if they exist"""
instance, names, value = self._add_remove_setup(names)
# Return now if there are no attributes and thus no action to be taken.
if not raise_exc and '__attributes__' not in instance.meta:
return
# Remove one by one, optionally raising an exception if name is missing.
for name in names:
if name in value:
value.remove(name) # Using the list.remove method
elif raise_exc:
raise ValueError(f'{name} not in {self.name}')
# Change to either None or a tuple for storing back to attribute
value = None if value == [] else tuple(value)
self.__set__(instance, value)
def _rename(self, name, new_name):
"""Rename ``name`` to ``new_name`` if ``name`` is in the list"""
names = self() or ()
if name in names:
new_names = list(names)
new_names[new_names.index(name)] = new_name
self.set(new_names)
def set(self, names):
"""Set value of include/exclude attribute to ``names``.
Parameters
----------
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
class _Context:
def __init__(self, descriptor_self):
self.descriptor_self = descriptor_self
self.names_orig = descriptor_self()
def __enter__(self):
pass
def __exit__(self, type, value, tb):
descriptor_self = self.descriptor_self
instance = descriptor_self._instance_ref()
descriptor_self.__set__(instance, self.names_orig)
def __repr__(self):
return repr(self.descriptor_self)
ctx = _Context(descriptor_self=self)
instance = self._instance_ref()
self.__set__(instance, names)
return ctx
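    # Illustrative usage sketch (hypothetical Table instance ``t``; the
    # descriptors are attached to Table below as ``pprint_include_names`` and
    # ``pprint_exclude_names``):
    #   t.pprint_exclude_names.add(['b', 'c'])  # hide columns 'b' and 'c'
    #   t.pprint_exclude_names()                # -> ('b', 'c')
    #   t.pprint_exclude_names.remove('b')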
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: https://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
pprint_exclude_names = PprintIncludeExclude()
pprint_include_names = PprintIncludeExclude()
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : array or `~numpy.ma.MaskedArray`
Copy of table as a numpy structured array.
ndarray for unmasked or `~numpy.ma.MaskedArray` for masked.
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
if not (col.info.dtype.isnative or keep_byteorder):
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
# For masked out, masked mixin columns need to set output mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, 'mask'):
data[col.info.name].mask = col.mask
return data
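    # Illustrative usage sketch (hypothetical table ``t``):
    #   arr = t.as_array(names=['a', 'b'])  # structured copy of just two columns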
def __init__(self, data=None, masked=False, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
units=None, descriptions=None,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if isinstance(rows, types.GeneratorType):
# Without this then the all(..) test below uses up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the TableAttribute
# descriptor. Any such attributes get removed from kwargs here and
# stored for use after the table is otherwise initialized. Any values
# provided via kwargs will have precedence over existing values from
# meta (e.g. from data as a Table or meta via kwargs).
meta_table_attrs = {}
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
meta_table_attrs[attr] = kwargs.pop(attr)
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray)
and data.shape == (0,)
and not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (names_from_list_of_dict
or _get_names_from_list_of_dict(data))
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
# gets a key copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
# Table was initialized as `t = Table()`. Set up for empty
# table with names=[], data=[], and n_cols=0.
# self._init_from_list() will simply return, giving the
# expected empty table.
names = []
else:
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError(f'Data type {type(data)} not allowed to init Table')
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
names = [None if name is None else str(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Update meta with TableAttributes supplied as kwargs in Table init.
# This takes precedence over previously-defined meta.
if meta_table_attrs:
for attr, value in meta_table_attrs.items():
setattr(self, attr, value)
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute('unit', units)
self._set_column_attribute('description', descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
name) or a dict of name: value pairs. This is used for handling the ``units`` and
``descriptions`` kwargs to ``__init__``.
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, Mapping):
# If not a dict map, assume iterable and map to dict if the right length
if len(values) != len(self.columns):
raise ValueError(f'sequence of {attr} values must match number of columns')
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(f'invalid column name {name} for setting {attr} attribute')
# Special case: ignore unit if it is an empty or blank string
if attr == 'unit' and isinstance(value, str):
if value.strip() == '':
value = None
if value not in (np.ma.masked, None):
setattr(self[name].info, attr, value)
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table([getattr(col, 'mask', FalseArray(col.shape))
for col in self.itercols()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : `~astropy.table.Table`
New table with masked values filled
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [col.filled(fill_value) if hasattr(col, 'filled') else col
for col in self.itercols()]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
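    # Illustrative usage sketch (hypothetical masked table ``t``):
    #   t_plain = t.filled(-999)  # every masked entry replaced by -999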
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
and SCEngine. If the supplied argument is None
(by default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{}", of '
'type "{}"'.format(col.info.name, type(col)))
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
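    # Illustrative usage sketch (hypothetical table ``t`` with a 'name' column):
    #   t.add_index('name')
    #   row = t.loc['M31']  # value-based row lookup via the ``loc`` property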
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
return _IndexModeContext(self, mode)
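    # Illustrative usage sketch (hypothetical indexed table ``t``):
    #   with t.index_mode('freeze'):
    #       t['a'][0] = 42  # index refresh is deferred until the block exits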
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError(f'{inp_str} must be a list or None')
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns')
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
        # ever occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
if set(data[0].keys()) == names_from_data:
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
# Fill the missing entries with first values
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
# Special case of initializing an empty table like `t = Table()`. No
# action required at this point.
if n_cols == 0:
return
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(self, data, copy=True, default_name=None, dtype=None, name=None):
"""
Convert any allowed sequence data ``col`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
name or data.info.name or def_name
If ``data`` has no ``info`` then ``name = name or def_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# If the data is not an instance of Column or a mixin class, we can
# check the registry of mixin 'handlers' to see if the column can be
# converted to a mixin class
if (handler := get_mixin_handler(data)) is not None:
original_data = data
data = handler(data)
if not (data_is_mixin := self._is_mixin_for_table(data)):
fully_qualified_name = (original_data.__class__.__module__ + '.'
+ original_data.__class__.__name__)
raise TypeError('Mixin handler for object of type '
f'{fully_qualified_name} '
'did not return a valid mixin column')
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (not isinstance(data, Column) and not data_is_mixin
and isinstance(data, np.ndarray) and len(data.dtype) > 1):
data = data.view(NdarrayMixin)
data_is_mixin = True
# Get the final column name using precedence. Some objects may not
# have an info attribute. Also avoid creating info as a side effect.
if not name:
if isinstance(data, Column):
name = data.name or default_name
elif 'info' in getattr(data, '__dict__', ()):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
col = col_copy(data, copy_indices=self._init_indices) if copy else data
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, (np.ma.MaskedArray, Masked)):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more-likely
# could be MaskedColumn).
col_cls = masked_col_cls
elif data is None:
# Special case for data passed as the None object (for broadcasting
# to an object column). Need to turn data into numpy `None` scalar
# object, otherwise `Column` interprets data=None as no data instead
# of a object column of `None`.
data = np.array(None)
col_cls = self.ColumnClass
elif not hasattr(data, 'dtype'):
# `data` is none of the above, convert to numpy array or MaskedArray
# assuming only that it is a scalar or sequence or N-d nested
# sequence. This function is relatively intricate and tries to
# maintain performance for common cases while handling things like
# list input with embedded np.ma.masked entries. If `data` is a
# scalar then it gets returned unchanged so the original object gets
# passed to `Column` later.
data = _convert_sequence_data_to_array(data, dtype)
copy = False # Already made a copy above
col_cls = masked_col_cls if isinstance(data, np.ma.MaskedArray) else self.ColumnClass
else:
col_cls = self.ColumnClass
try:
col = col_cls(name=name, data=data, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError('unable to convert data to Column for Table')
col = self._convert_col_for_table(col)
return col
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) > 1:
raise ValueError(f'Inconsistent data column lengths: {lengths}')
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(table, newcols, verify=False, names=self.columns.keys())
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
        # case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError('Cannot have None for column name')
if len(set(names)) != len(names):
raise ValueError('Duplicate column names')
table.columns = table.TableColumns((name, col) for name, col in zip(names, cols))
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append(f'length={len(self)}')
descr = ' '.join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = f'<i>{xml_escape(descr)}</i>\n'
else:
descr = f'<{descr}>\n'
if tableid is None:
tableid = f'table{id(self)}'
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
out = self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
# Wrap <table> in <div>. This follows the pattern in pandas and allows
# table to be scrollable horizontally in VS Code notebook display.
out = f'<div>{out}</div>'
return out
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
This does not check for mixin columns that may have masked values, use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
    def has_masked_values(self):
        """True if any column in the table has masked values.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
"""
for col in self.itercols():
if hasattr(col, 'mask') and np.any(col.mask):
return True
else:
return False
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not [Masked]Quantity (which gets converted to
# [Masked]Column with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append(f'Length = {len(self)} rows')
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
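    # Illustrative usage sketch for ``pprint`` (hypothetical table, kept as comments so
    # the module imports unchanged):
    #
    #   t = Table({'a': range(100), 'b': range(100)})
    #   t.pprint()                            # truncated to the terminal height/width
    #   t.pprint(max_lines=-1, max_width=-1)  # no limits, equivalent to t.pprint_all()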
@format_doc(_pprint_docs)
def pprint_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(max_lines, max_width, show_name,
show_unit, show_dtype, align)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or None
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or None
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = f'table{id(self)}-{np.random.randint(1, 1e6)}'
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.info.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
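    # Illustrative usage sketch for ``show_in_notebook`` (requires a running notebook
    # and, as noted above, online access to the DataTables javascript; hypothetical
    # table, comments only):
    #
    #   t = Table({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    #   t.show_in_notebook(display_length=10)   # returns an IPython.display.HTML object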
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or None
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or None
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin('file:', pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append(f'Length = {len(self)} rows')
return lines
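    # Illustrative usage sketch for ``pformat`` (hypothetical table, comments only):
    #
    #   t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
    #   lines = t.pformat()       # list of str, one entry per output line
    #   print('\n'.join(lines))   # same text that str(t) produces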
@format_doc(_pformat_docs, id="{id}")
def pformat_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(max_lines, max_width, show_name,
show_unit, show_dtype, html, tableid,
align, tableclass)
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif ((isinstance(item, np.ndarray) and item.size == 0)
or (isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError(f'Illegal type {type(item)} for table item access')
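    # Illustrative usage sketch for the item access modes handled above (hypothetical
    # table, comments only):
    #
    #   t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
    #   t['a']                    # Column
    #   t[0]                      # Row
    #   t[['a', 'b']]             # new Table with the named columns
    #   t[1:3]                    # new Table sliced by row
    #   t[t['a'] > 1]             # new Table with rows where the boolean mask is True
    #   t[np.where(t['a'] > 1)]   # same, via the tuple returned by np.where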
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or (isinstance(item, tuple) # output from np.where
and all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError(f'Illegal type {type(item)} for table item access')
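    # Illustrative usage sketch for item assignment (hypothetical table, comments only):
    #
    #   t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
    #   t['c'] = ['x', 'y']       # add a new column
    #   t['d'] = 0                # scalar broadcast to a full new column
    #   t[0] = (9, 9.0, 'z', 0)   # set one row, one value per column
    #   t[:1] = 0                 # scalar broadcast into a row slice of every column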
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray))
and all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray))
and np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
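    # Illustrative usage sketch for ``del`` on a table (hypothetical table, comments only):
    #
    #   t = Table({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
    #   del t[0]            # remove one row
    #   del t[0:1]          # remove a slice of rows
    #   del t['a']          # remove one column
    #   del t[['b', 'c']]   # remove several columns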
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names
and all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
        # and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True,
default_name=None):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
in the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or None
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
            Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or None
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = f'col{len(self.columns)}'
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(col, name=name, copy=copy,
default_name=default_name)
# Assigning a scalar column to an empty table should result in an
# exception (see #3811).
if col.shape == () and len(self) == 0:
raise TypeError('Empty table cannot have column set to scalar value')
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, 'shape', ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape,
subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape,
subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError('Inconsistent data column lengths')
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + '_' + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new columns the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of object
List of data objects for the new columns
indexes : list of int or None
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
See Also
--------
astropy.table.hstack, update, replace_column
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
default_names = [f'col{ii + len(self.columns)}'
for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes)):
self.add_column(cols[ii], index=indexes[ii], name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate, copy=copy)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
refcount = None
old_col = None
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn(f"replaced column '{name}'",
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : `~astropy.table.Column` or `~numpy.ndarray` or sequence
New column object to replace the existing column.
copy : bool
Make copy of the input ``col``, default=True
See Also
--------
add_columns, astropy.table.hstack, update
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f'column name {name} is not in the table')
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError('length of new column must match table length')
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice or int or array of int
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
``~astropy.table.Row`` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
        names : list
            List of column names (defaults to all columns if no names are provided)
Returns
-------
rows : iterable
Iterator returns tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f'{name} is not a valid column name')
cols = (self[name] for name in names)
out = zip(*cols)
return out
def _set_of_names_in_colnames(self, names):
"""Return ``names`` as a set if valid, or raise a `KeyError`.
``names`` is valid if all elements in it are in ``self.colnames``.
If ``names`` is a string then it is interpreted as a single column
name.
"""
names = {names} if isinstance(names, str) else set(names)
invalid_names = names.difference(self.colnames)
if len(invalid_names) == 1:
raise KeyError(f'column "{invalid_names.pop()}" does not exist')
elif len(invalid_names) > 1:
raise KeyError(f'columns {invalid_names} do not exist')
return names
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : str or iterable of str
Names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
'''
for name in self._set_of_names_in_colnames(names):
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
            Output dtype.kind
        encode_decode_func : callable
            Function used to encode or decode the column data, e.g. ``np.char.encode``
            or ``np.char.decode``.
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, 'utf-8'))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in col.info.attr_names - col.info._attrs_no_copy - set(['dtype']):
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
"""
self._convert_string_dtype('S', 'U', np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype('U', 'S', np.char.encode)
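    # Illustrative round-trip sketch for the two conversion methods above (hypothetical
    # table, comments only):
    #
    #   t = Table({'s': np.array([b'abc', b'de'])})
    #   t['s'].dtype.kind                    # 'S' (bytestring)
    #   t.convert_bytestring_to_unicode()
    #   t['s'].dtype.kind                    # 'U'
    #   t.convert_unicode_to_bytestring()    # back to 'S'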
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : str or iterable of str
The columns to keep. All other columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
names = self._set_of_names_in_colnames(names)
for colname in self.colnames:
if colname not in names:
self.columns.pop(colname)
def rename_column(self, name, new_name):
'''
Rename a column.
        This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
'''
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
'''
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError("input 'new_names' must be a tuple or a list of column names")
if len(names) != len(new_names):
raise ValueError("input 'names' and 'new_names' list arguments must be the same length")
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {} is out of bounds for table with length {}"
.format(index, N))
if index < 0:
index += N
if isinstance(vals, Mapping) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not isinstance(mask, Mapping):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
# Insert val at index for each column
columns = self.TableColumns()
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
try:
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn):
col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {} after inserting {}'
' (expected {}, got {})'
.format(name, val, len(newcol), N + 1))
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, 'mask'):
newcol[index] = np.ma.masked
else:
raise TypeError("mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name))
columns[name] = newcol
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{}':\n{}"
.format(name, err)) from err
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
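    # Illustrative usage sketch for ``insert_row`` (hypothetical table, comments only):
    #
    #   t = Table({'a': [1, 3], 'b': [10.0, 30.0]})
    #   t.insert_row(1, vals=[2, 20.0])       # sequence, one value per column
    #   t.insert_row(1, vals={'a': 99})       # mapping; column 'b' is zero-filled
    #   t.insert_row(0, vals={'a': 0, 'b': 0.0},
    #                mask={'a': False, 'b': True})   # masks 'b' in the new row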
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def update(self, other, copy=True):
"""
Perform a dictionary-style update and merge metadata.
The argument ``other`` must be a |Table|, or something that can be used
to initialize a table. Columns from (possibly converted) ``other`` are
added to this table. In case of matching column names the column from
this table is replaced with the one from ``other``.
Parameters
----------
other : table-like
Data to update this table with.
copy : bool
Whether the updated columns should be copies of or references to
the originals.
See Also
--------
add_columns, astropy.table.hstack, replace_column
Examples
--------
Update a table with another table::
>>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0})
>>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2})
>>> t1.update(t2)
>>> t1
<Table length=2>
a b c
str3 float64 float64
---- ------- -------
foo 1.0 7.0
bar 2.0 11.0
>>> t1.meta
{'i': 0, 'n': 2}
Update a table with a dictionary::
>>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]})
>>> t.update({'b': [1., 2.]})
>>> t
<Table length=2>
a b
str3 float64
---- -------
foo 1.0
bar 2.0
"""
from .operations import _merge_table_meta
if not isinstance(other, Table):
other = self.__class__(other, copy=copy)
common_cols = set(self.colnames).intersection(other.colnames)
for name, col in other.items():
if name in common_cols:
self.replace_column(name, col, copy=copy)
else:
self.add_column(col, name=name, copy=copy)
_merge_table_meta(self, [self, other], metadata_conflicts='silent')
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for in principle sortable columns like Time, which get stored as
# object arrays.
if len(keys) > 1:
kwargs['order'] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs['kind'] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
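    # Illustrative usage sketch for ``argsort`` (hypothetical table, comments only):
    #
    #   t = Table({'a': [3, 1, 2], 'b': [6.0, 4.0, 5.0]})
    #   idx = t.argsort('a')                  # array([1, 2, 0])
    #   t_sorted = t[idx]                     # new table ordered by column 'a'
    #   t.argsort(['a', 'b'], reverse=True)   # multi-key sort, descending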
def sort(self, keys=None, *, kind=None, reverse=False):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode('freeze'):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
'''
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
        decimals : int, dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
If a certain column is not in the dict given, it will remain the
same.
'''
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
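    # Illustrative usage sketch for ``copy`` (hypothetical table, comments only):
    #
    #   t = Table({'a': [1, 2]})
    #   t_deep = t.copy()                  # independent copy of the data
    #   t_view = t.copy(copy_data=False)   # shares the underlying column data
    #   t_view['a'][0] = 99                # also changes t['a'][0]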
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
This is actual implementation for __eq__.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
Comparing one Table with other::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : table-like object or list or scalar
Object to compare with table
Examples
--------
Compare one Table with other::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError('cannot compare tables with different column names')
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
eq = self[name] == other[name]
if (warns and issubclass(warns[-1].category, FutureWarning)
and 'elementwise comparison failed' in str(warns[-1].message)):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f'unable to compare column {name}') from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (isinstance(eq, np.ndarray)
and eq.dtype is np.dtype('bool')
and len(eq) == len(self)):
raise TypeError(f'comparison for column {name} returned {eq} '
f'instead of the expected boolean ndarray')
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
"""
return groups.table_group_by(self, keys)
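    # Illustrative usage sketch for ``group_by`` (hypothetical table, comments only):
    #
    #   t = Table({'key': ['x', 'y', 'x'], 'val': [1.0, 2.0, 3.0]})
    #   tg = t.group_by('key')
    #   tg.groups.keys                   # table of unique key values ('x', 'y')
    #   for group in tg.groups:          # each ``group`` is a Table for one key value
    #       group['val'].mean()
    #   tg.groups.aggregate(np.mean)     # per-group aggregation of the data columns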
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
        objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 0 days 00:00:03
2002-01-01 2.0 6.0 8.0 0 days 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError('index must be None, False, True or a table '
'column name')
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from . import serialize
from astropy.time import TimeBase, TimeDelta
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
# Certain subclasses (e.g. TimeSeries) may generate new indices on
# table creation, so make sure there are no indices on the table.
for col in tbl.itercols():
col.info.indices.clear()
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype('timedelta64[ns]')
nat = np.timedelta64('NaT')
else:
new_col = col.datetime64.copy()
nat = np.datetime64('NaT')
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)')
out = OrderedDict()
for name, column in tbl.columns.items():
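            # pandas expects data in native byte order, so columns stored with
            # a non-native dtype (e.g. big-endian data read from FITS) are
            # byteswapped to the native order first.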
if getattr(column.dtype, 'isnative', True):
out[name] = column
else:
out[name] = column.data.byteswap().newbyteorder('=')
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ['i', 'u']:
pd_dtype = column.dtype.name
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace('i', 'I').replace('u', 'U')
out[name] = Series(out[name], dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to {out[name].dtype}",
TableReplaceWarning, stacklevel=3)
elif column.dtype.kind not in ['f', 'c']:
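                    # Masked non-numeric data (e.g. string or bool columns)
                    # cannot hold NaN natively, so fall back to an object-dtype
                    # column with np.nan marking the missing entries.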
out[name] = column.astype(object).filled(np.nan)
kwargs = {}
if index:
idx = out.pop(index)
kwargs['index'] = idx
# We add the table index to Series inputs (MaskedColumn with int values) to override
# its default RangeIndex, see #11432
for v in out.values():
if isinstance(v, Series):
v.index = idx
df = DataFrame(out, **kwargs)
if index:
# Explicitly set the pandas DataFrame index to the original table
# index name.
df.index.name = idx.info.name
return df
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
units: dict
A dict mapping column names to to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
        ...     print(df)
                time              dt    x
        0 1998-01-01 0 days 00:00:01  3.0
        1 2002-01-01 0 days 00:05:00  4.0

>>> QTable.from_pandas(df)
        <QTable length=2>
                  time             dt       x
                  Time         TimeDelta float64
        ----------------------- --------- -------
        1998-01-01T00:00:00.000       1.0     3.0
        2002-01-01T00:00:00.000     300.0     4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
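        # Materialize each column as a plain ndarray and record its null mask;
        # the masks are used below to decide between Column and MaskedColumn.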
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
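            # Give the index a column name that does not collide with an
            # existing column, wrapping it in underscores until it is unique.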
index_name = dataframe.index.name or 'index'
while index_name in names:
index_name = '_' + index_name + '_'
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
warnings.warn(f'`units` contains additional columns: {not_found}')
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ['u', 'i'] and np.any(mask):
# Special-case support for pandas nullable int
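                # The nullable dtype name (e.g. 'Int64') lowercases to the
                # matching numpy dtype ('int64'); masked entries keep a zero
                # placeholder that stays hidden behind ``mask``.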
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit, copy=False)
continue
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == 'M':
from astropy.time import Time
out[name] = Time(data, format='datetime64')
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = 'isot'
# Numpy timedelta64
elif data.dtype.kind == 'm':
from astropy.time import TimeDelta
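                # Convert via nanoseconds to floating-point seconds, the unit
                # expected by TimeDelta with format='sec'.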
data_sec = data.astype('timedelta64[ns]').astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format='sec')
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
See also:
- https://docs.astropy.org/en/stable/table/
- https://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, 'unit', None) is not None:
# We need to turn the column into a quantity; use subok=True to allow
# Quantity subclasses identified in the unit (such as u.mag()).
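            # MaskedColumn data becomes a Masked Quantity so that the mask is
            # preserved through the conversion.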
q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity
try:
qcol = q_cls(col.data, col.unit, copy=False, subok=True)
except Exception as exc:
warnings.warn(f"column {col.info.name} has a unit but is kept as "
f"a {col.__class__.__name__} as an attempt to "
f"convert it to Quantity failed with:\n{exc!r}",
AstropyUserWarning)
else:
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
| pllim/astropy | astropy/table/table.py | Python | bsd-3-clause | 150,650 |