code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
This directory contains estimations of the noise power spectral density of sets of natural earthquakes.
You can use them to generate synthetic earthquakes contaminated with realistic-like seismic noise.
E.g. given two seismic signals, 'meq.bin' and 'meq2.txt', sampled at 50 Hz, the
following command adds background noise of 2.0 dB. The noise is modeled by a FIR
filter whose coefficients are stored in the file 'IAGPDS-bfirls.txt'.
Results will be saved to 'eq00.out' and 'eq01.out'.
python apasvo-generator.py meq.bin meq2.txt -f 50 -np 2 -fir IAGPDS-bfirls.txt
For the estimation method of the background seismic noise see:
> Peterson, J. (1993). Observations and modeling of seismic background noise.
| APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/bfirls/README.bfirls.txt | README.bfirls.txt |
from PySide import QtCore
import traceback
from apasvo._version import _application_name
from apasvo._version import _organization
from apasvo.gui.models import eventcommands as commands
import sys
class PickingTask(QtCore.QObject):
    """A class to handle an event picking/detection task.

    PickingTask objects are meant to be passed to a QThread instance
    that controls their execution.

    Signals:
        finished: Emitted when the task ends, whether it succeeded or not.
        error: Emitted on failure with the error message and the traceback.
    """

    finished = QtCore.Signal()
    error = QtCore.Signal(str, str)

    def __init__(self, document, alg, threshold=None):
        """
        Args:
            document: Object exposing detectEvents() (an event-list model).
            alg: Picking/detection algorithm instance to apply.
            threshold: Optional detection threshold forwarded to the algorithm.
        """
        super(PickingTask, self).__init__()
        self.document = document
        self.alg = alg
        self.threshold = threshold

    def run(self):
        """Run the detection algorithm on the document's record.

        Always emits `finished`; emits `error` first if an exception occurs.
        """
        settings = QtCore.QSettings(_organization, _application_name)
        takanami = int(settings.value('takanami_settings/takanami', False))
        # NOTE(review): TakanamiDialog reads 'takanami_margin' inside the
        # 'takanami_settings' group, while this reads it at top level --
        # confirm both lookups hit the same settings entry.
        takanami_margin = float(settings.value('takanami_margin', 5.0))
        try:
            self.document.detectEvents(self.alg, threshold=self.threshold,
                                       takanami=takanami,
                                       takanami_margin=takanami_margin)
        except Exception as e:  # 'as' form: valid on Python 2.6+ and 3.x
            self.error.emit(str(e), traceback.format_exc())
        finally:
            self.finished.emit()

    def abort(self):
        # Detection is not interruptible; provided for interface symmetry
        # with other task classes.
        pass
class PickingStreamTask(QtCore.QObject):
    """A class to handle an event picking/detection task over a stream.

    PickingStreamTask objects are meant to be passed to a QThread instance
    that controls their execution.

    Signals:
        finished: Emitted when the task ends, whether it succeeded or not.
        error: Emitted on failure with the error message and the traceback.
    """

    finished = QtCore.Signal()
    error = QtCore.Signal(str, str)

    def __init__(self, trace_selector_widget, alg, trace_list=None, threshold=None):
        """
        Args:
            trace_selector_widget: Widget exposing the stream and main window.
            alg: Picking/detection algorithm instance to apply.
            trace_list: Traces to process; defaults to all traces in the stream.
            threshold: Optional detection threshold forwarded to the algorithm.
        """
        super(PickingStreamTask, self).__init__()
        self.trace_selector = trace_selector_widget
        self.alg = alg
        self.trace_list = self.trace_selector.stream.traces if trace_list is None else trace_list
        self.threshold = threshold

    def run(self):
        """Push an undoable DetectStreamEvents command onto the command stack.

        Always emits `finished`; emits `error` first if an exception occurs.
        """
        try:
            settings = QtCore.QSettings(_organization, _application_name)
            takanami = int(settings.value('takanami_settings/takanami', False))
            takanami_margin = float(settings.value('takanami_margin', 5.0))
            detect_command = commands.DetectStreamEvents(self.trace_selector,
                                                         self.alg,
                                                         self.trace_list,
                                                         threshold=self.threshold,
                                                         takanami=takanami,
                                                         takanami_margin=takanami_margin)
            self.trace_selector.main_window.command_stack.push(detect_command)
        except Exception as e:  # 'as' form: valid on Python 2.6+ and 3.x
            self.error.emit(str(e), traceback.format_exc())
        finally:
            self.finished.emit()

    def abort(self):
        # Detection is not interruptible; provided for interface symmetry.
        pass
from PySide import QtCore
from PySide import QtGui
import obspy as op
import eventcommands as commands
from apasvo.gui.views.settingsdialog import COLOR_KEYS
from apasvo.gui.views.settingsdialog import DEFAULT_COLOR_KEY
from apasvo.gui.views.settingsdialog import DEFAULT_COLOR_SCHEME
from apasvo.picking import apasvotrace as rc
from apasvo._version import _application_name
from apasvo._version import _organization
class EventListModel(QtCore.QAbstractTableModel):
    """A Table Model class to handle a list of seismic events.

    Exposes the events of `record` as a table: one row per event, one
    column per descriptor in `attributes`. All mutations are pushed onto
    `command_stack` so they are undoable.
    """
    emptyList = QtCore.Signal(bool)
    eventCreated = QtCore.Signal(rc.ApasvoEvent)
    eventDeleted = QtCore.Signal(rc.ApasvoEvent)
    eventModified = QtCore.Signal(rc.ApasvoEvent)
    detectionPerformed = QtCore.Signal()
    # Column descriptors: display name, source kind ('event' attribute),
    # attribute to read, editability, plus optional type/format/value-list
    # metadata used for rendering and editing.
    DEFAULT_ATTRIBUTES = [
        {'name': 'Label', 'type': 'event', 'attribute_name': 'name', 'editable': True},
        {'name': 'Time', 'type': 'event', 'attribute_name': 'time', 'editable': False,
         'attribute_type': 'date'},
        {'name': 'Sample', 'type': 'event', 'attribute_name': 'stime', 'editable': False},
        {'name': 'CF Value', 'type': 'event', 'attribute_name': 'cf_value', 'editable': False,
         'format': "{:.6g}"},
        {'name': 'Mode', 'type': 'event', 'attribute_name': 'evaluation_mode', 'editable': True,
         'attribute_type': 'enum', 'value_list': op.core.event_header.EvaluationMode.keys()},
        {'name': 'Phase hint', 'type': 'event', 'attribute_name': 'phase_hint', 'editable': True,
         'attribute_type': 'enum', 'value_list': rc.PHASE_VALUES},
        {'name': 'Method', 'type': 'event', 'attribute_name': 'method', 'editable': False,
         'attribute_type': 'enum', 'value_list': rc.ALLOWED_METHODS},
        {'name': 'Polarity', 'type': 'event', 'attribute_name': 'polarity', 'editable': True,
         'attribute_type': 'enum', 'value_list': op.core.event_header.PickPolarity.keys()},
        {'name': 'Status', 'type': 'event', 'attribute_name': 'evaluation_status', 'editable': True,
         'attribute_type': 'enum', 'value_list': op.core.event_header.EvaluationStatus.keys()},
        {'name': 'Comments', 'type': 'event', 'attribute_name': 'comments', 'editable': True},
    ]
    def __init__(self, record, command_stack, attributes=None):
        QtCore.QAbstractTableModel.__init__(self)
        self.record = record
        self.attributes = attributes if attributes is not None else self.DEFAULT_ATTRIBUTES
        self.command_stack = command_stack
        self.color_key = None   # event attribute that drives row coloring
        self.color_map = {}     # attribute value -> QColor
        self.loadColorMap()
    @property
    def empty(self):
        # NOTE(review): despite its name this returns True when the list is
        # NOT empty; every `emptyList.emit(self.empty)` call therefore sends
        # the inverse of "is empty". Listeners appear to rely on this
        # convention -- confirm before changing.
        return (len(self.record.events) != 0)
    def rowCount(self, parent=QtCore.QModelIndex()):
        # One row per event.
        return len(self.record.events)
    def columnCount(self, parent=QtCore.QModelIndex()):
        # One column per attribute descriptor.
        return len(self.attributes)
    def data(self, index, role=QtCore.Qt.DisplayRole):
        """Return the formatted cell text (DisplayRole) or the row color
        (BackgroundRole); None otherwise."""
        if not index.isValid():
            return None
        elif role == QtCore.Qt.DisplayRole:
            attribute = self.attributes[index.column()]
            data = None
            if attribute['type'] == 'event':
                data = self.record.events[index.row()].__getattribute__(attribute['attribute_name'])
                # Dates get an optional strftime format before the generic
                # 'format' template is applied.
                if attribute.get('attribute_type') == 'date':
                    dateformat = attribute.get('dateformat')
                    if dateformat is not None:
                        data = data.strftime(dateformat)
            rep_format = attribute.get('format', '{}')
            return rep_format.format(data)
        elif role == QtCore.Qt.BackgroundRole:
            return self.calculateEventColor(index)
        else:
            return None
    def calculateEventColor(self, index):
        """Return the QColor mapped to the event's color-key value, or None."""
        if index.isValid():
            if self.color_key is not None:
                value = self.record.events[index.row()].__getattribute__(self.color_key)
                return self.color_map.get(value)
        return None
    def loadColorMap(self):
        """Populate color_key/color_map from persistent settings, falling
        back to the default color scheme."""
        settings = QtCore.QSettings(_organization, _application_name)
        self.color_map = {}
        # load color settings
        if "color_settings" in settings.childGroups():
            settings.beginGroup("color_settings")
            for key in settings.childKeys():
                if key == 'color_key':
                    # Stored as an index into COLOR_KEYS.
                    self.color_key = COLOR_KEYS[int(settings.value(key))]
                else:
                    self.color_map[key] = QtGui.QColor(settings.value(key))
            settings.endGroup()
        # load default color scheme otherwise
        else:
            self.color_key = DEFAULT_COLOR_KEY
            for key, color in DEFAULT_COLOR_SCHEME:
                self.color_map[key] = QtGui.QColor(color)
    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        # Horizontal headers only: the column's display name.
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return self.attributes[section].get('name')
        return None
    def sort(self, column, order=QtCore.Qt.AscendingOrder):
        """Sort events by the given column's attribute (undoable)."""
        self.command_stack.push(commands.SortEventList(self,
                                                       self.attributes[column]['attribute_name'],
                                                       order))
    def setData(self, index, value, role=QtCore.Qt.EditRole):
        """Edit a cell via an undoable EditEvent command; no-op when the
        value is unchanged."""
        if role == QtCore.Qt.EditRole:
            key = self.attributes[index.column()]['attribute_name']
            event = self.record.events[index.row()]
            if event.__getattribute__(key) != value:
                self.command_stack.push(commands.EditEvent(self, event,
                                                           **{key: value}))
            return True
        return False
    def editEvent(self, event, **kwargs):
        # Undoable edit of arbitrary event attributes.
        self.command_stack.push(commands.EditEvent(self, event, **kwargs))
    def flags(self, index):
        # Editable flag depends on the column descriptor.
        attribute = self.attributes[index.column()]
        if not attribute.get('editable'):
            return QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
        return (QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable |
                QtCore.Qt.ItemIsEnabled)
    def removeRows(self, row_list, parent=QtCore.QModelIndex()):
        # NOTE(review): signature differs from the standard
        # QAbstractTableModel.removeRows(row, count, parent) -- this takes a
        # list of row indices instead; confirm no view calls it generically.
        self.command_stack.push(commands.DeleteEvents(self, row_list))
        self.emptyList.emit(self.empty)
    def createEvent(self, time, name='', comments='', method=rc.method_other,
                    evaluation_mode=rc.mode_manual, evaluation_status=rc.status_preliminary):
        """Create a new ApasvoEvent at `time` and append it (undoable)."""
        event = rc.ApasvoEvent(self.record, time, name=name, comments=comments,
                               method=method, evaluation_mode=evaluation_mode, evaluation_status=evaluation_status)
        self.addEvent(event)
        self.emptyList.emit(self.empty)
        return event
    def addEvent(self, event):
        # Undoable append of an existing event.
        self.command_stack.push(commands.AppendEvent(self, event))
        self.emptyList.emit(self.empty)
    def detectEvents(self, alg, **kwargs):
        """Run a detection algorithm on the record (undoable)."""
        self.command_stack.push(commands.DetectEvents(self, alg, **kwargs))
        self.emptyList.emit(self.empty)
    def clearEvents(self):
        # Undoable removal of all events; no-op when already empty.
        if len(self.record.events) > 0:
            self.command_stack.push(commands.ClearEventList(self))
            self.emptyList.emit(self.empty)
    def updateList(self):
        """Force views to re-read the whole model.

        NOTE(review): emits the reset signals directly instead of calling
        beginResetModel()/endResetModel() -- works in PySide but confirm.
        """
        self.modelAboutToBeReset.emit()
        self.emptyList.emit(self.empty)
        self.modelReset.emit()
    def indexOf(self, event):
        # Row index of `event`, or None (not -1) if absent.
        if event in self.record.events:
            return self.record.events.index(event)
        return None
    def getEventByRow(self, row):
        return self.record.events[row]
from PySide import QtCore
class FilterListModel(QtCore.QAbstractTableModel):
"""A Table Model class to handle a list of length values.
"""
sizeChanged = QtCore.Signal(int)
def __init__(self, listobj, header=None):
QtCore.QAbstractTableModel.__init__(self)
self._list = listobj
if header is None:
header = ['Length (in seconds)']
self._header = header
def rowCount(self, parent=QtCore.QModelIndex()):
return len(self._list)
def columnCount(self, parent=QtCore.QModelIndex()):
return len(self._header)
def data(self, index, role=QtCore.Qt.DisplayRole):
if not index.isValid():
return None
elif role != QtCore.Qt.DisplayRole:
return None
return "%s" % self._list[index.row()]
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
return self._header[section]
return None
def sort(self, column, order=QtCore.Qt.AscendingOrder):
self.layoutAboutToBeChanged.emit()
self._list.sort(reverse=(order == QtCore.Qt.DescendingOrder))
self.layoutChanged.emit()
def setData(self, index, value, role=QtCore.Qt.EditRole):
if role == QtCore.Qt.EditRole:
self._list[index.row()] = value
self.dataChanged.emit(index, index)
return True
return False
def flags(self, index):
return (QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsEnabled)
def removeRows(self, row, count, parent=QtCore.QModelIndex()):
if row < 0 or row > len(self._list):
return False
self.beginRemoveRows(parent, row, row + count - 1)
while count != 0:
del self._list[row]
count -= 1
self.sizeChanged.emit(len(self._list))
self.endRemoveRows()
return True
def addFilter(self, value=10.0):
self.beginInsertRows(QtCore.QModelIndex(), len(self._list),
len(self._list))
self._list.append(value)
self.sizeChanged.emit(len(self._list))
self.endInsertRows()
def clearFilters(self):
if self.rowCount() > 0:
self.removeRows(0, self.rowCount())
def list(self):
return self._list | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/gui/models/filterlistmodel.py | filterlistmodel.py |
from PySide import QtCore
from PySide import QtGui
from apasvo.gui.models.eventlistmodel import EventListModel
class AppendEvent(QtGui.QUndoCommand):
    """Undoable command that appends a single event to a model's record."""

    def __init__(self, model, event):
        super(AppendEvent, self).__init__('Create event')
        self.model = model
        self.event = event

    def undo(self):
        """Drop the event added by redo(); it is always the last row."""
        events = self.model.record.events
        last_row = len(events) - 1
        self.model.beginRemoveRows(QtCore.QModelIndex(), last_row, last_row)
        events.pop()
        self.model.eventDeleted.emit(self.event)
        self.model.endRemoveRows()

    def redo(self):
        """Append the event as a new last row of the model."""
        events = self.model.record.events
        new_row = len(events)
        self.model.beginInsertRows(QtCore.QModelIndex(), new_row, new_row)
        events.append(self.event)
        self.model.eventCreated.emit(self.event)
        self.model.endInsertRows()

    def id(self):
        return 1
class DeleteEvents(QtGui.QUndoCommand):
    """Undoable command that deletes a set of events, identified by row."""
    def __init__(self, model, row_list):
        super(DeleteEvents, self).__init__('Delete events')
        self.model = model
        # Rows kept ascending; self.events is position-aligned with it.
        self.row_list = sorted(row_list)
        self.events = [self.model.record.events[i] for i in self.row_list]
    def undo(self):
        # Re-insert each event at its original row, lowest row first, so
        # earlier insertions do not shift later target rows.
        for row, event in zip(self.row_list, self.events):
            self.model.beginInsertRows(QtCore.QModelIndex(), row, row)
            self.model.record.events.insert(row, event)
            self.model.eventCreated.emit(event)
            self.model.endInsertRows()
    def redo(self):
        # Delete from the highest row down so removals do not shift the
        # remaining target rows.
        # NOTE(review): this pairs rows sorted descending with the *events
        # themselves* sorted descending; that only matches row order if
        # events compare consistently with their positions -- confirm.
        for row, event in zip(sorted(self.row_list, reverse=True), sorted(self.events, reverse=True)):
            self.model.beginRemoveRows(QtCore.QModelIndex(), row, row)
            self.model.record.events.remove(event)
            self.model.eventDeleted.emit(event)
            self.model.endRemoveRows()
    def id(self):
        return 2
class EditEvent(QtGui.QUndoCommand):
    """Undoable command that updates one or more attributes of an event."""

    def __init__(self, model, event, **kwargs):
        super(EditEvent, self).__init__('Edit event')
        self.model = model
        self.event = event
        self.params = kwargs
        # Capture current values of the attributes being changed so undo
        # can restore them.
        self.old_params = {name: self.event.__getattribute__(name)
                           for name in self.params.keys()}

    def _apply(self, values):
        # Set the given attribute values inside a layout-change bracket.
        self.model.layoutAboutToBeChanged.emit()
        for name, value in values.items():
            self.event.__setattr__(name, value)
        self.model.eventModified.emit(self.event)
        self.model.layoutChanged.emit()

    def undo(self):
        """Restore the attributes' previous values."""
        self._apply(self.old_params)

    def redo(self):
        """Apply the requested attribute values."""
        self._apply(self.params)

    def id(self):
        return 3
class ClearEventList(QtGui.QUndoCommand):
    """Undoable command that empties a model's event list."""

    def __init__(self, model):
        super(ClearEventList, self).__init__('Clear event list')
        self.model = model
        # Full snapshot of the event list so undo can restore it.
        self.events = list(self.model.record.events)

    def undo(self):
        """Put every saved event back into the model."""
        last_row = len(self.events) - 1
        self.model.beginInsertRows(QtCore.QModelIndex(), 0, last_row)
        self.model.record.events = list(self.events)
        for restored in self.model.record.events:
            self.model.eventCreated.emit(restored)
        self.model.endInsertRows()

    def redo(self):
        """Remove every event from the model."""
        last_row = len(self.events) - 1
        self.model.beginRemoveRows(QtCore.QModelIndex(), 0, last_row)
        self.model.record.events = []
        for removed in self.events:
            self.model.eventDeleted.emit(removed)
        self.model.endRemoveRows()

    def id(self):
        return 4
class SortEventList(QtGui.QUndoCommand):
    """Undoable command that sorts a model's events by an attribute."""

    def __init__(self, model, key, order):
        super(SortEventList, self).__init__('Sort event list')
        self.model = model
        self.key = key
        self.order = order
        # Keep the pre-sort ordering so undo can restore it.
        self.old_events = list(self.model.record.events)

    def undo(self):
        """Restore the original (pre-sort) event ordering."""
        self.model.layoutAboutToBeChanged.emit()
        self.model.record.events = list(self.old_events)
        self.model.layoutChanged.emit()

    def redo(self):
        """Sort the events by the stored key and direction."""
        descending = (self.order == QtCore.Qt.DescendingOrder)
        self.model.layoutAboutToBeChanged.emit()
        self.model.record.sort_events(key=self.key, reverse=descending)
        self.model.layoutChanged.emit()

    def id(self):
        return 5
class DetectEvents(QtGui.QUndoCommand):
    """Undoable command that runs a detection algorithm on a record.

    NOTE: the detection itself is performed here in __init__, not in
    redo(); redo()/undo() only swap the before/after event lists.
    """
    def __init__(self, model, alg, **kwargs):
        super(DetectEvents, self).__init__('Apply %s' % alg.__class__.__name__.
                                           upper())
        self.model = model
        # Bookkeeping: rows [n_events_before, n_events_after) are the
        # newly detected events appended by detect().
        self.n_events_before = len(self.model.record.events)
        self.events_old = self.model.record.events[:]
        self.events = list(self.model.record.detect(alg, **kwargs))
        self.n_events_after = len(self.events)
        self.model.detectionPerformed.emit()
    def undo(self):
        # Restore the pre-detection list and announce removal of the
        # newly detected rows.
        self.model.beginRemoveRows(QtCore.QModelIndex(), self.n_events_before,
                                   self.n_events_after - 1)
        self.model.record.events = self.events_old[:]
        for i in range(self.n_events_before, self.n_events_after):
            self.model.eventDeleted.emit(self.events[i])
        self.model.endRemoveRows()
    def redo(self):
        # Install the post-detection list and announce the new rows.
        self.model.beginInsertRows(QtCore.QModelIndex(), self.n_events_before,
                                   self.n_events_after - 1)
        self.model.record.events = self.events[:]
        for i in range(self.n_events_before, self.n_events_after):
            self.model.eventCreated.emit(self.events[i])
        self.model.endInsertRows()
    def id(self):
        return 6
class DetectStreamEvents(QtGui.QUndoCommand):
    """Undoable command that runs a detection algorithm over a stream.

    NOTE: detection is performed here in __init__; redo()/undo() only
    swap each trace's before/after event lists (keyed by trace.uuid).
    """
    def __init__(self, trace_selector_widget, alg, trace_list=None, **kwargs):
        super(DetectStreamEvents, self).__init__('Apply %s' % alg.__class__.__name__.
                                                 upper())
        self.trace_selector = trace_selector_widget
        # Per-trace snapshots before and after detection.
        self.events_old = {trace.uuid: trace.events[:] for trace in self.trace_selector.stream.traces}
        self.trace_selector.stream.detect(alg, trace_list, **kwargs)
        self.events = {trace.uuid: trace.events[:] for trace in self.trace_selector.stream.traces}
        # Events present after detection but not before, per trace.
        self.new_events = {trace.uuid: [event for event in self.events[trace.uuid] if event not in self.events_old[trace.uuid]] \
                           for trace in self.trace_selector.stream.traces}
        self.trace_selector.detection_performed.emit()
    def undo(self):
        # Restore each trace's pre-detection event list.
        for trace in self.trace_selector.stream.traces:
            if trace.uuid in self.events_old:
                trace.events = self.events_old[trace.uuid][:]
        # Update current model data
        self.trace_selector.events_deleted.emit(self.new_events)
    def redo(self):
        # Install each trace's post-detection event list.
        for trace in self.trace_selector.stream.traces:
            if trace.uuid in self.events_old:
                trace.events = self.events[trace.uuid][:]
        # Update current model data
        self.trace_selector.events_created.emit(self.new_events)
    def id(self):
        return 7
class OpenStream(QtGui.QUndoCommand):
    """Undoable command that opens a stream: appends its traces (and one
    EventListModel per trace) to the main window."""

    def __init__(self, main_window, stream):
        super(OpenStream, self).__init__('Open stream')
        self.main_window = main_window
        # Snapshots of the current state for undo().
        self.old_stream = self.main_window.stream[:]
        self.old_document_list = self.main_window.document_list[:]
        # Build the post-open snapshots: current contents plus one trace
        # and one document per newly opened trace.
        # (Fix: removed a dead pre-assignment built on list.extend() --
        # which returns None -- that was immediately overwritten anyway.)
        self.stream = self.main_window.stream[:]
        self.document_list = self.main_window.document_list[:]
        for trace in stream:
            self.stream.append(trace)
            self.document_list.append(EventListModel(trace, self.main_window.command_stack))

    def undo(self):
        """Restore the pre-open stream and document list."""
        self.main_window.stream = self.old_stream[:]
        self.main_window.document_list = self.old_document_list[:]
        self.main_window.trace_selector.set_stream(self.main_window.stream)
        if self.main_window.stream:
            # Hide the trace selector when fewer than two traces remain.
            if len(self.main_window.stream) < 2:
                self.main_window.action_show_trace_selector.setEnabled(False)
                self.main_window.action_show_trace_selector.setChecked(False)
            if self.main_window.current_document_idx not in range(len(self.main_window.document_list)):
                self.main_window.toogle_document(0)
        else:
            self.main_window.close()

    def redo(self):
        """Install the post-open stream and document list."""
        self.main_window.stream = self.stream[:]
        self.main_window.document_list = self.document_list[:]
        self.main_window.trace_selector.set_stream(self.main_window.stream)
        if len(self.main_window.stream) > 1:
            self.main_window.action_show_trace_selector.setEnabled(True)
            self.main_window.action_show_trace_selector.setChecked(True)
        if self.main_window.document is None:
            self.main_window.toogle_document(0)

    def id(self):
        return 8
class CloseTraces(QtGui.QUndoCommand):
    """Undoable command that removes a set of traces (and their event-list
    documents) from the main window."""

    def __init__(self, main_window, trace_idx_list):
        super(CloseTraces, self).__init__('Close trace')
        self.trace_idx_set = set(trace_idx_list)
        self.main_window = main_window
        # Snapshot the traces/documents being closed, keyed by position,
        # so undo can re-insert them at their original indices.
        self.removed_traces_list = {}
        self.removed_documents_list = {}
        for idx in self.trace_idx_set:
            self.removed_traces_list[idx] = self.main_window.stream[idx]
            self.removed_documents_list[idx] = self.main_window.document_list[idx]

    def undo(self):
        """Re-insert the removed traces at their former positions."""
        window = self.main_window
        for idx in sorted(self.trace_idx_set):
            window.stream.insert(idx, self.removed_traces_list[idx])
            window.document_list.insert(idx, self.removed_documents_list[idx])
        window.trace_selector.set_stream(window.stream)
        n_traces = len(window.stream)
        if n_traces > 1:
            window.action_show_trace_selector.setEnabled(True)
            window.action_show_trace_selector.setChecked(True)
        elif n_traces == 1:
            window.toogle_document(0)

    def redo(self):
        """Remove the selected traces, highest index first."""
        window = self.main_window
        for idx in sorted(self.trace_idx_set, reverse=True):
            window.stream.pop(idx)
            window.document_list.pop(idx)
        window.trace_selector.set_stream(window.stream)
        if not window.stream:
            window.close()
            return
        if len(window.stream) < 2:
            window.action_show_trace_selector.setEnabled(False)
            window.action_show_trace_selector.setChecked(False)
        if window.current_document_idx in self.trace_idx_set:
            window.toogle_document(0)

    def id(self):
        return 9
from PySide import QtCore
from PySide import QtGui
import matplotlib
# Configure matplotlib for the PySide (Qt4) backend before any figure is
# created; disable patch antialiasing and raise the agg chunk size, which
# speeds up drawing of long seismogram paths.
matplotlib.rcParams['backend'] = 'qt4agg'
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.rcParams['patch.antialiased'] = False
matplotlib.rcParams['agg.path.chunksize'] = 80000
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from apasvo.gui.views import navigationtoolbar
from apasvo.gui.views import processingdialog
from apasvo.utils import clt
import matplotlib.pyplot as plt
import numpy as np
import traceback
from apasvo.picking import apasvotrace as rc
from apasvo.picking import takanami
from apasvo._version import _application_name
from apasvo._version import _organization
# Minimum allowed width (in seconds) of the analysis window.
MINIMUM_MARGIN_IN_SECS = 0.5


class TakanamiTask(QtCore.QObject):
    """A class to handle a Takanami exec. task.

    Attributes:
        record: An opened seismic record.
        start: Start point (in samples) of the signal segment where
            the algorithm is going to be applied.
        end: End point (in samples) of the signal segment where
            the algorithm is going to be applied.

    Signals:
        finished: Task finishes.
        error: An exception occurred; carries the message and traceback.
        position_estimated: Return values of Takanami method are ready.
    """

    finished = QtCore.Signal()
    error = QtCore.Signal(str, str)
    position_estimated = QtCore.Signal(int, np.ndarray, int)

    def __init__(self, record, start, end):
        super(TakanamiTask, self).__init__()
        self.record = record
        self.start = start
        self.end = end
        self.algorithm = takanami.Takanami()
        self._abort = False

    def run(self):
        """Apply the Takanami AR method to the configured signal segment."""
        self._abort = False
        # Clamp the segment to the record limits and convert to seconds.
        start_time_in_secs = max(0.0, self.start) / self.record.fs
        end_time_in_secs = (min(len(self.record.signal), self.end) /
                            self.record.fs)
        if self._abort:  # checkpoint
            return
        try:
            et, aic, n0_aic = self.algorithm.run(self.record.signal,
                                                 self.record.fs,
                                                 start_time_in_secs,
                                                 end_time_in_secs)
        except Exception as e:  # 'as' form: valid on Python 2.6+ and 3.x
            self.error.emit(str(e), traceback.format_exc())
            # NOTE(review): 'finished' is not emitted on this error path,
            # unlike PickingTask.run(); confirm listeners handle that.
            return
        if self._abort:  # checkpoint
            return
        self.position_estimated.emit(et, aic, n0_aic)
        self.finished.emit()

    def abort(self):
        """Request cooperative cancellation at the next checkpoint."""
        self._abort = True
class TakanamiDialog(QtGui.QDialog):
    """A dialog to apply Takanami's AR picking method to a selected piece of a
    seismic signal.

    Attributes:
        document: Current opened document containing a seismic record.
        seismic_event: A seismic event to be refined by using Takanami method.
            If no event is provided, then a new seismic event will be created
            by using the estimated arrival time after clicking on 'Accept'
    """
    def __init__(self, document, t_start=None, t_end=None, seismic_event=None, parent=None):
        super(TakanamiDialog, self).__init__(parent)
        self.document = document
        self.record = self.document.record
        self.load_settings()
        self.seismic_event = seismic_event
        # _start/_end are kept in samples; the t_start/t_end arguments are
        # in seconds and converted below via record.fs.
        self._start = t_start
        self._end = t_end
        if self.seismic_event is not None:
            # Refining an existing event: default window is
            # +/- default_margin samples around its pick sample.
            self.event_time = self.seismic_event.stime
            if self._start is None:
                self._start = max(0, self.event_time - self.default_margin)
            if self._end is None:
                self._end = min(len(self.record.signal) - 1, self.event_time + self.default_margin)
        else:
            if self._start is None or self._end is None:
                raise ValueError("t_start and t_end values not specified")
            else:
                self._start = max(0, int(t_start * self.record.fs))
                self._end = min(len(self.record.signal) - 1, int(t_end * self.record.fs))
                # Initial estimate: midpoint of the selected window.
                self.event_time = self._start + int((self._end - self._start) / 2)
        # Validate the analysis window before building the UI.
        if not 0 <= self._start < self._end:
            raise ValueError("Invalid t_start value")
        if not self._start < self._end < len(self.record.signal):
            raise ValueError("Invalid t_end value")
        if (self._end - self._start) < (MINIMUM_MARGIN_IN_SECS * self.record.fs):
            raise ValueError("Distance between t_start and t_end must be"
                             " at least of %g seconds" % MINIMUM_MARGIN_IN_SECS)
        if not self._start < self.event_time < self._end:
            raise ValueError("Event time must be a value between t-start and t_end")
        self._init_ui()
        self.button_box.accepted.connect(self.accept)
        self.button_box.rejected.connect(self.reject)
        self.button_box.clicked.connect(self.on_click)
        self.start_point_spinbox.timeChanged.connect(self.on_start_point_changed)
        self.end_point_spinbox.timeChanged.connect(self.on_end_point_changed)
    def _init_ui(self):
        """Build the dialog: figure canvas with toolbar, estimated-time
        label, start/end limit spinboxes and the Apply/Cancel/Ok buttons."""
        self.setWindowTitle("Takanami's Autoregressive Method")
        self.fig, _ = plt.subplots(2, 1, sharex=True)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setMinimumSize(self.canvas.size())
        self.canvas.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Expanding,
                                                    QtGui.QSizePolicy.Policy.Expanding))
        self.toolBarNavigation = navigationtoolbar.NavigationToolBar(self.canvas, self)
        self.position_label = QtGui.QLabel("Estimated Arrival Time: 00 h 00 m 00.000 s")
        self.group_box = QtGui.QGroupBox(self)
        self.group_box.setTitle("Limits")
        self.start_point_label = QtGui.QLabel("Start point:")
        self.start_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
                                                               QtGui.QSizePolicy.Policy.Preferred))
        self.start_point_spinbox = QtGui.QTimeEdit(self.group_box)
        self.start_point_spinbox.setDisplayFormat("hh 'h' mm 'm' ss.zzz 's'")
        self.end_point_label = QtGui.QLabel("End point:")
        self.end_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
                                                             QtGui.QSizePolicy.Policy.Preferred))
        self.end_point_spinbox = QtGui.QTimeEdit(self.group_box)
        self.end_point_spinbox.setDisplayFormat("hh 'h' mm 'm' ss.zzz 's'")
        self.group_box_layout = QtGui.QHBoxLayout(self.group_box)
        self.group_box_layout.setContentsMargins(9, 9, 9, 9)
        self.group_box_layout.setSpacing(12)
        self.group_box_layout.addWidget(self.start_point_label)
        self.group_box_layout.addWidget(self.start_point_spinbox)
        self.group_box_layout.addWidget(self.end_point_label)
        self.group_box_layout.addWidget(self.end_point_spinbox)
        self.button_box = QtGui.QDialogButtonBox(self)
        self.button_box.setOrientation(QtCore.Qt.Horizontal)
        self.button_box.setStandardButtons(QtGui.QDialogButtonBox.Apply |
                                           QtGui.QDialogButtonBox.Cancel |
                                           QtGui.QDialogButtonBox.Ok)
        self.layout = QtGui.QVBoxLayout(self)
        self.layout.setContentsMargins(9, 9, 9, 9)
        self.layout.setSpacing(6)
        self.layout.addWidget(self.toolBarNavigation)
        self.layout.addWidget(self.canvas)
        self.layout.addWidget(self.position_label)
        self.layout.addWidget(self.group_box)
        self.layout.addWidget(self.button_box)
        # set spinboxes's initial values and limits
        max_time_in_msecs = int(((len(self.record.signal) - 1) * 1000) / self.record.fs)
        start_time_in_msecs = int((self._start * 1000.0) / self.record.fs)
        end_time_in_msecs = int((self._end * 1000.0) / self.record.fs)
        self.start_point_spinbox.setTime(QtCore.QTime().addMSecs(start_time_in_msecs))
        self.end_point_spinbox.setTime(QtCore.QTime().addMSecs(end_time_in_msecs))
        # Each spinbox's range is constrained by the other so the window
        # never shrinks below MINIMUM_MARGIN_IN_SECS.
        self.start_point_spinbox.setMinimumTime(QtCore.QTime().addMSecs(0))
        self.end_point_spinbox.setMinimumTime(QtCore.QTime().addMSecs(start_time_in_msecs + MINIMUM_MARGIN_IN_SECS * 1000))
        self.start_point_spinbox.setMaximumTime(QtCore.QTime().addMSecs(end_time_in_msecs - MINIMUM_MARGIN_IN_SECS * 1000))
        self.end_point_spinbox.setMaximumTime(QtCore.QTime().addMSecs(max_time_in_msecs))
    def on_click(self, button):
        """Button-box dispatcher: Ok saves the event, Apply re-runs Takanami."""
        if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Ok:
            self.save_event()
        if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Apply:
            self.do_takanami()
    def on_start_point_changed(self, value):
        """Track the start spinbox (QTime) and convert it to samples; keeps
        the end spinbox's minimum MINIMUM_MARGIN_IN_SECS ahead."""
        time_in_msecs = QtCore.QTime().msecsTo(value)
        t_start = int(max(0, (time_in_msecs / 1000.0) *
                          self.record.fs))
        if self._start != t_start:
            self._start = t_start
            self.end_point_spinbox.setMinimumTime(QtCore.QTime().
                                                  addMSecs(time_in_msecs + MINIMUM_MARGIN_IN_SECS * 1000))
    def on_end_point_changed(self, value):
        """Track the end spinbox (QTime) and convert it to samples; keeps
        the start spinbox's maximum MINIMUM_MARGIN_IN_SECS behind."""
        time_in_msecs = QtCore.QTime().msecsTo(value)
        t_end = int(min(len(self.record.signal),
                        ((time_in_msecs / 1000.0) *
                         self.record.fs)))
        if self._end != t_end:
            self._end = t_end
            self.start_point_spinbox.setMaximumTime(QtCore.QTime().
                                                    addMSecs(time_in_msecs - MINIMUM_MARGIN_IN_SECS * 1000))
    def on_position_estimated(self, time, aic, n0_aic):
        """Slot for TakanamiTask.position_estimated: update the label and
        plot the AIC curve on the dialog's figure."""
        self.event_time = time
        time_in_secs = self.event_time / self.record.fs
        self.position_label.setText("Estimated Arrival Time: {}".format(
            clt.float_secs_2_string_date(time_in_secs, starttime=self.record.starttime)))
        # Plot estimated arrival time
        m_event = rc.ApasvoEvent(self.record, time, aic=aic, n0_aic=n0_aic)
        m_event.plot_aic(show_envelope=True, num=self.fig.number)
        self.fig.canvas.draw_idle()
    def load_settings(self):
        """Loads settings from persistent storage."""
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup("takanami_settings")
        # Margin stored in seconds; converted to samples here.
        self.default_margin = int(float(settings.value('takanami_margin', 5.0)) *
                                  self.record.fs)
        settings.endGroup()
    def save_event(self):
        """Persist the estimated arrival time: edit the existing event if
        one was provided, otherwise create a new event on the document."""
        if self.seismic_event is not None:
            if self.seismic_event.stime != self.event_time:
                self.document.editEvent(self.seismic_event,
                                        stime=self.event_time,
                                        method=rc.method_takanami,
                                        evaluation_mode=rc.mode_automatic,
                                        evaluation_status=rc.status_preliminary)
        else:
            self.document.createEvent(self.event_time,
                                      method=rc.method_takanami,
                                      evaluation_mode=rc.mode_automatic,
                                      evaluation_status=rc.status_preliminary)
    def do_takanami(self):
        """Run TakanamiTask behind a modal progress dialog; returns the
        progress dialog's result code."""
        self._task = TakanamiTask(self.record, self._start, self._end)
        self._task.position_estimated.connect(self.on_position_estimated)
        self.wait_dialog = processingdialog.ProcessingDialog(label_text="Applying Takanami's AR method...")
        self.wait_dialog.setWindowTitle("Event detection")
        return self.wait_dialog.run(self._task)
    def exec_(self, *args, **kwargs):
        # Run Takanami first; only show the dialog if the task finished
        # successfully (otherwise returns None implicitly).
        return_code = self.do_takanami()
        if return_code == QtGui.QDialog.Accepted:
            return QtGui.QDialog.exec_(self, *args, **kwargs)
from PySide import QtGui
from PySide import QtCore
from apasvo.gui.views import playertoolbar
from apasvo.picking import apasvotrace as rc
from apasvo.utils import plotting
from apasvo._version import _application_name
from apasvo._version import _organization
# Event attributes that may drive row coloring in the event list, and
# their user-facing labels (index-aligned with COLOR_KEYS).
COLOR_KEYS = ("method", "evaluation_mode", "evaluation_status")
COLOR_KEYS_LABELS = ("Picking method", "Picking mode", "Status")
DEFAULT_COLOR_KEY = "method"
# Default colors (hex RGB) per picking method.
DEFAULT_STALTA_COLOR = "#ffa500"
DEFAULT_STALTA_TAKANAMI_COLOR = "#ffa500"
DEFAULT_AMPA_COLOR = "#adff2f"
DEFAULT_AMPA_TAKANAMI_COLOR = "#adff2f"
DEFAULT_TAKANAMI_COLOR = "#dda0dd"
DEFAULT_OTHER_COLOR = "#d3d3d3"
# Default colors per evaluation mode.
DEFAULT_MANUAL_COLOR = "#f08080"
DEFAULT_AUTOMATIC_COLOR = "#87ceeb"
# Default colors per evaluation status.
DEFAULT_PRELIMINARY_COLOR = "#ffa500"
DEFAULT_REVIEWED_COLOR = "#87ceeb"
DEFAULT_CONFIRMED_COLOR = "#adff2f"
DEFAULT_REJECTED_COLOR = "#f08080"
DEFAULT_FINAL_COLOR = "#d3d3d3"
# Selectable FFT window lengths (samples) for the spectrogram view.
SPECGRAM_WINDOW_LENGTHS = (16, 32, 64, 128, 256, 512, 1024, 2048)
# (method, color) pairs used when no color settings are stored.
DEFAULT_COLOR_SCHEME = ((rc.method_stalta, DEFAULT_STALTA_COLOR),
                        (rc.method_stalta_takanami, DEFAULT_STALTA_TAKANAMI_COLOR),
                        (rc.method_ampa, DEFAULT_AMPA_COLOR),
                        (rc.method_ampa_takanami, DEFAULT_AMPA_TAKANAMI_COLOR),
                        (rc.method_takanami, DEFAULT_TAKANAMI_COLOR),
                        (rc.method_other, DEFAULT_OTHER_COLOR))
class SettingsDialog(QtGui.QDialog):
    """A dialog window to edit application settings.

    Presents three settings pages (audio player, event colors and
    spectrogram), selectable through a tree widget on the left side,
    and persists values via QSettings under three groups:
    'player_settings', 'color_settings' and 'specgram_settings'.
    """

    # Emitted after settings have been written to persistent storage.
    saved = QtCore.Signal()

    def __init__(self, parent=None):
        super(SettingsDialog, self).__init__(parent)
        self.setup_ui()
        # Show the color-button panel matching the selected coloring key.
        self.colorKeyComboBox.currentIndexChanged.connect(self._keyChanged)
        # Every color button opens a color picker that restyles itself.
        self.colorMethodOtherButton.clicked.connect(self._colorButtonClicked)
        self.colorMethodTakanamiButton.clicked.connect(self._colorButtonClicked)
        self.colorMethodStaLtaButton.clicked.connect(self._colorButtonClicked)
        self.colorMethodStaLtaTakanamiButton.clicked.connect(self._colorButtonClicked)
        self.colorMethodAmpaButton.clicked.connect(self._colorButtonClicked)
        self.colorMethodAmpaTakanamiButton.clicked.connect(self._colorButtonClicked)
        self.colorModeManualButton.clicked.connect(self._colorButtonClicked)
        self.colorModeAutomaticButton.clicked.connect(self._colorButtonClicked)
        self.colorStatusPreliminaryButton.clicked.connect(self._colorButtonClicked)
        self.colorStatusReviewedButton.clicked.connect(self._colorButtonClicked)
        self.colorStatusConfirmedButton.clicked.connect(self._colorButtonClicked)
        self.colorStatusRejectedButton.clicked.connect(self._colorButtonClicked)
        self.colorStatusFinalButton.clicked.connect(self._colorButtonClicked)
        # Keep the overlap strictly below the selected window length.
        self.windowlenComboBox.currentIndexChanged.connect(lambda i: self.noverlapSpinBox.setMaximum(SPECGRAM_WINDOW_LENGTHS[i] - 1))
        self.treeWidget.currentItemChanged.connect(self._itemChanged)
        self.buttonBox.clicked.connect(self.onclick)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.loadSettings()

    def setup_ui(self):
        """Build the dialog widgets: settings tree, the three settings
        pages (only one visible at a time) and the button box."""
        self.setWindowTitle("Settings")
        self.setMinimumHeight(480)
        self.setMinimumWidth(640)
        # Set the settings tree widget
        self.treeWidget = QtGui.QTreeWidget(self)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum,
                                       QtGui.QSizePolicy.Expanding)
        sizePolicy.setHeightForWidth(self.treeWidget.sizePolicy().
                                     hasHeightForWidth())
        self.treeWidget.setSizePolicy(sizePolicy)
        self.treeWidget.setMaximumWidth(180)
        self.treeWidget.setAnimated(False)
        self.treeWidget.setHeaderHidden(True)
        # Set Player Group Box
        self.playerGroupBox = QtGui.QGroupBox("Audio Player", self)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                       QtGui.QSizePolicy.Expanding)
        sizePolicy.setHeightForWidth(self.playerGroupBox.sizePolicy().
                                     hasHeightForWidth())
        self.playerGroupBox.setSizePolicy(sizePolicy)
        self.playerGroupBox.setAlignment(QtCore.Qt.AlignLeading |
                                         QtCore.Qt.AlignLeft |
                                         QtCore.Qt.AlignVCenter)
        # The player page is the one shown initially.
        self.playerGroupBox.setVisible(True)
        self.formLayout = QtGui.QFormLayout(self.playerGroupBox)
        self.formLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
        self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setRowWrapPolicy(QtGui.QFormLayout.DontWrapRows)
        self.formLayout.setLabelAlignment(QtCore.Qt.AlignRight |
                                          QtCore.Qt.AlignTrailing |
                                          QtCore.Qt.AlignVCenter)
        self.formLayout.setFormAlignment(QtCore.Qt.AlignLeading |
                                         QtCore.Qt.AlignLeft |
                                         QtCore.Qt.AlignTop)
        self.formLayout.setContentsMargins(24, 24, 24, 24)
        self.formLayout.setHorizontalSpacing(24)
        self.formLayout.setVerticalSpacing(24)
        self.playbackfreqLabel = QtGui.QLabel("Playback frequency (Hz):",
                                              self.playerGroupBox)
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole,
                                  self.playbackfreqLabel)
        self.playbackrateSpinBox = QtGui.QSpinBox(self.playerGroupBox)
        self.playbackrateSpinBox.setMinimum(100)
        self.playbackrateSpinBox.setMaximum(16000)
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole,
                                  self.playbackrateSpinBox)
        self.bitdepthLabel = QtGui.QLabel("Sample Format:", self.playerGroupBox)
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole,
                                  self.bitdepthLabel)
        self.bitdepthComboBox = QtGui.QComboBox(self.playerGroupBox)
        self.bitdepthComboBox.addItems(playertoolbar.bit_depths.keys())
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole,
                                  self.bitdepthComboBox)
        # set colors group box
        self.colorsGroupBox = QtGui.QGroupBox("Colors", self)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                       QtGui.QSizePolicy.Expanding)
        sizePolicy.setHeightForWidth(self.colorsGroupBox.sizePolicy().
                                     hasHeightForWidth())
        self.colorsGroupBox.setSizePolicy(sizePolicy)
        self.colorsGroupBox.setAlignment(QtCore.Qt.AlignLeading |
                                         QtCore.Qt.AlignLeft |
                                         QtCore.Qt.AlignVCenter)
        self.colorsGroupBox.setVisible(False)
        self.colorsLayout = QtGui.QVBoxLayout(self.colorsGroupBox)
        self.colorKeyWidget = QtGui.QWidget(self.colorsGroupBox)
        self.colorKeyLayout = QtGui.QFormLayout(self.colorKeyWidget)
        self.colorKeyLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
        self.colorKeyLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.colorKeyLayout.setRowWrapPolicy(QtGui.QFormLayout.DontWrapRows)
        self.colorKeyLayout.setLabelAlignment(QtCore.Qt.AlignRight |
                                              QtCore.Qt.AlignTrailing |
                                              QtCore.Qt.AlignVCenter)
        self.colorKeyLayout.setFormAlignment(QtCore.Qt.AlignLeading |
                                             QtCore.Qt.AlignLeft |
                                             QtCore.Qt.AlignTop)
        self.colorKeyLayout.setContentsMargins(24, 24, 24, 24)
        self.colorKeyLayout.setHorizontalSpacing(24)
        self.colorKeyLabel = QtGui.QLabel("Key to color the events:", self.colorsGroupBox)
        self.colorKeyComboBox = QtGui.QComboBox(self.colorsGroupBox)
        self.colorKeyComboBox.addItems(COLOR_KEYS_LABELS)
        self.colorKeyLayout.setWidget(0, QtGui.QFormLayout.LabelRole,
                                      self.colorKeyLabel)
        self.colorKeyLayout.setWidget(0, QtGui.QFormLayout.FieldRole,
                                      self.colorKeyComboBox)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                       QtGui.QSizePolicy.Expanding)
        # color by method buttons
        self.colorMethodButtonsWidget = QtGui.QWidget(self.colorsGroupBox)
        self.colorMethodButtonsWidget.setSizePolicy(sizePolicy)
        self.colorMethodButtonsLayout = QtGui.QVBoxLayout(self.colorMethodButtonsWidget)
        self.colorMethodButtonsLayout.setAlignment(QtCore.Qt.AlignTop)
        self.colorMethodTakanamiButton = QtGui.QPushButton("Takanami method", self.colorMethodButtonsWidget)
        self.colorMethodStaLtaButton = QtGui.QPushButton("STA-LTA method", self.colorMethodButtonsWidget)
        self.colorMethodStaLtaTakanamiButton = QtGui.QPushButton("STA-LTA + Takanami method", self.colorMethodButtonsWidget)
        self.colorMethodAmpaButton = QtGui.QPushButton("AMPA method", self.colorMethodButtonsWidget)
        self.colorMethodAmpaTakanamiButton = QtGui.QPushButton("AMPA + Takanami method", self.colorMethodButtonsWidget)
        self.colorMethodOtherButton = QtGui.QPushButton("Other method", self.colorMethodButtonsWidget)
        self.colorMethodButtonsLayout.addWidget(self.colorMethodStaLtaButton)
        self.colorMethodButtonsLayout.addWidget(self.colorMethodStaLtaTakanamiButton)
        self.colorMethodButtonsLayout.addWidget(self.colorMethodAmpaButton)
        self.colorMethodButtonsLayout.addWidget(self.colorMethodAmpaTakanamiButton)
        self.colorMethodButtonsLayout.addWidget(self.colorMethodTakanamiButton)
        self.colorMethodButtonsLayout.addWidget(self.colorMethodOtherButton)
        self.colorMethodButtonsWidget.setVisible(False)
        # color by mode buttons
        self.colorModeButtonsWidget = QtGui.QWidget(self.colorsGroupBox)
        self.colorModeButtonsWidget.setSizePolicy(sizePolicy)
        self.colorModeButtonsLayout = QtGui.QVBoxLayout(self.colorModeButtonsWidget)
        self.colorModeButtonsLayout.setAlignment(QtCore.Qt.AlignTop)
        self.colorModeManualButton = QtGui.QPushButton("Manual", self.colorModeButtonsWidget)
        self.colorModeAutomaticButton = QtGui.QPushButton("Automatic", self.colorModeButtonsWidget)
        self.colorModeButtonsLayout.addWidget(self.colorModeManualButton)
        self.colorModeButtonsLayout.addWidget(self.colorModeAutomaticButton)
        self.colorModeButtonsWidget.setVisible(False)
        # color by status buttons
        self.colorStatusButtonsWidget = QtGui.QWidget(self.colorsGroupBox)
        self.colorStatusButtonsWidget.setSizePolicy(sizePolicy)
        self.colorStatusButtonsLayout = QtGui.QVBoxLayout(self.colorStatusButtonsWidget)
        self.colorStatusButtonsLayout.setAlignment(QtCore.Qt.AlignTop)
        self.colorStatusPreliminaryButton = QtGui.QPushButton("Preliminary", self.colorStatusButtonsWidget)
        self.colorStatusReviewedButton = QtGui.QPushButton("Reviewed", self.colorStatusButtonsWidget)
        self.colorStatusConfirmedButton = QtGui.QPushButton("Confirmed", self.colorStatusButtonsWidget)
        self.colorStatusRejectedButton = QtGui.QPushButton("Rejected", self.colorStatusButtonsWidget)
        self.colorStatusFinalButton = QtGui.QPushButton("Final", self.colorStatusButtonsWidget)
        self.colorStatusButtonsLayout.addWidget(self.colorStatusPreliminaryButton)
        self.colorStatusButtonsLayout.addWidget(self.colorStatusReviewedButton)
        self.colorStatusButtonsLayout.addWidget(self.colorStatusConfirmedButton)
        self.colorStatusButtonsLayout.addWidget(self.colorStatusRejectedButton)
        self.colorStatusButtonsLayout.addWidget(self.colorStatusFinalButton)
        self.colorStatusButtonsWidget.setVisible(False)
        self.colorsLayout.addWidget(self.colorKeyWidget)
        self.colorsLayout.addWidget(self.colorMethodButtonsWidget)
        self.colorsLayout.addWidget(self.colorModeButtonsWidget)
        self.colorsLayout.addWidget(self.colorStatusButtonsWidget)
        # Set Spectrogram Group Box
        self.specgramGroupBox = QtGui.QGroupBox("Spectrogram", self)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                       QtGui.QSizePolicy.Expanding)
        sizePolicy.setHeightForWidth(self.specgramGroupBox.sizePolicy().
                                     hasHeightForWidth())
        self.specgramGroupBox.setSizePolicy(sizePolicy)
        self.specgramGroupBox.setAlignment(QtCore.Qt.AlignLeading |
                                           QtCore.Qt.AlignLeft |
                                           QtCore.Qt.AlignVCenter)
        self.specgramGroupBox.setVisible(False)
        self.specgramFormLayout = QtGui.QFormLayout(self.specgramGroupBox)
        self.specgramFormLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
        self.specgramFormLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.specgramFormLayout.setRowWrapPolicy(QtGui.QFormLayout.DontWrapRows)
        self.specgramFormLayout.setLabelAlignment(QtCore.Qt.AlignRight |
                                                  QtCore.Qt.AlignTrailing |
                                                  QtCore.Qt.AlignVCenter)
        self.specgramFormLayout.setFormAlignment(QtCore.Qt.AlignLeading |
                                                 QtCore.Qt.AlignLeft |
                                                 QtCore.Qt.AlignTop)
        self.specgramFormLayout.setContentsMargins(24, 24, 24, 24)
        self.specgramFormLayout.setHorizontalSpacing(24)
        self.specgramFormLayout.setVerticalSpacing(24)
        self.windowlenLabel = QtGui.QLabel("Window length (in samples):",
                                           self.specgramGroupBox)
        self.windowlenComboBox = QtGui.QComboBox(self.specgramGroupBox)
        self.windowlenComboBox.addItems(map(str, SPECGRAM_WINDOW_LENGTHS))
        self.noverlapLabel = QtGui.QLabel("Overlap (in samples):",
                                          self.specgramGroupBox)
        self.noverlapSpinBox = QtGui.QSpinBox(self.specgramGroupBox)
        self.noverlapSpinBox.setMinimum(0)
        self.windowLabel = QtGui.QLabel("Window type:", self.specgramGroupBox)
        self.windowComboBox = QtGui.QComboBox(self.specgramGroupBox)
        self.windowComboBox.addItems(plotting.SPECGRAM_WINDOWS_NAMES)
        self.specgramFormLayout.setWidget(0, QtGui.QFormLayout.LabelRole,
                                          self.windowlenLabel)
        self.specgramFormLayout.setWidget(0, QtGui.QFormLayout.FieldRole,
                                          self.windowlenComboBox)
        self.specgramFormLayout.setWidget(1, QtGui.QFormLayout.LabelRole,
                                          self.noverlapLabel)
        self.specgramFormLayout.setWidget(1, QtGui.QFormLayout.FieldRole,
                                          self.noverlapSpinBox)
        self.specgramFormLayout.setWidget(2, QtGui.QFormLayout.LabelRole,
                                          self.windowLabel)
        self.specgramFormLayout.setWidget(2, QtGui.QFormLayout.FieldRole,
                                          self.windowComboBox)
        # Button Box
        self.buttonBox = QtGui.QDialogButtonBox(self)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Apply |
                                          QtGui.QDialogButtonBox.Cancel |
                                          QtGui.QDialogButtonBox.Ok)
        self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setDefault(True)
        # Set layouts
        self.settings_frame = QtGui.QFrame(self)
        self.horizontalLayout = QtGui.QHBoxLayout(self.settings_frame)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.addWidget(self.treeWidget)
        self.horizontalLayout.addWidget(self.playerGroupBox)
        self.horizontalLayout.addWidget(self.colorsGroupBox)
        self.horizontalLayout.addWidget(self.specgramGroupBox)
        self.verticalLayout = QtGui.QVBoxLayout(self)
        self.verticalLayout.addWidget(self.settings_frame)
        self.verticalLayout.addWidget(self.buttonBox)
        self.setLayout(self.verticalLayout)
        # Tree items for the three settings pages. Items built with the tree
        # as parent are inserted on construction; re-adding item_player below
        # mirrors the original code and appears harmless.
        self.item_player = QtGui.QTreeWidgetItem(self.treeWidget)
        self.item_colors = QtGui.QTreeWidgetItem(self.treeWidget)
        self.item_specgram = QtGui.QTreeWidgetItem(self.treeWidget)
        self.item_player.setText(0, self.playerGroupBox.title())
        self.item_colors.setText(0, self.colorsGroupBox.title())
        self.item_specgram.setText(0, self.specgramGroupBox.title())
        self.treeWidget.addTopLevelItem(self.item_player)
        self.treeWidget.setSortingEnabled(False)
        # Map tree-item labels to the settings page they should display.
        self._settingsMenus = {}
        self._settingsMenus[self.treeWidget.topLevelItem(0).text(0)] = self.playerGroupBox
        self._settingsMenus[self.treeWidget.topLevelItem(1).text(0)] = self.colorsGroupBox
        self._settingsMenus[self.treeWidget.topLevelItem(2).text(0)] = self.specgramGroupBox
        self.treeWidget.setCurrentItem(self.treeWidget.topLevelItem(0))
        self.currentMenu = self.playerGroupBox

    def loadSettings(self):
        """Loads settings from persistent storage."""
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup("player_settings")
        self.playbackrateSpinBox.setValue(int(settings.value('playback_freq', playertoolbar.DEFAULT_REAL_FREQ)))
        # Look up the combo index of the stored bit depth value.
        bit_depth_index = playertoolbar.bit_depths.values().index(settings.value('bit_depth', playertoolbar.DEFAULT_BIT_DEPTH))
        self.bitdepthComboBox.setCurrentIndex(bit_depth_index)
        settings.endGroup()
        settings.beginGroup("color_settings")
        key = int(settings.value('color_key', COLOR_KEYS.index(DEFAULT_COLOR_KEY)))
        self.colorKeyComboBox.setCurrentIndex(key)
        self._keyChanged(key)
        # Paint each color button with its stored (or default) color.
        mColor = QtGui.QColor(settings.value(rc.method_stalta, DEFAULT_STALTA_COLOR))
        self.colorMethodStaLtaButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.method_stalta_takanami, DEFAULT_STALTA_TAKANAMI_COLOR))
        self.colorMethodStaLtaTakanamiButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.method_ampa, DEFAULT_AMPA_COLOR))
        self.colorMethodAmpaButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.method_ampa_takanami, DEFAULT_AMPA_TAKANAMI_COLOR))
        self.colorMethodAmpaTakanamiButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.method_takanami, DEFAULT_TAKANAMI_COLOR))
        self.colorMethodTakanamiButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.method_other, DEFAULT_OTHER_COLOR))
        self.colorMethodOtherButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.mode_manual, DEFAULT_MANUAL_COLOR))
        self.colorModeManualButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.mode_automatic, DEFAULT_AUTOMATIC_COLOR))
        self.colorModeAutomaticButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.status_preliminary, DEFAULT_PRELIMINARY_COLOR))
        self.colorStatusPreliminaryButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.status_reviewed, DEFAULT_REVIEWED_COLOR))
        self.colorStatusReviewedButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.status_confirmed, DEFAULT_CONFIRMED_COLOR))
        self.colorStatusConfirmedButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.status_rejected, DEFAULT_REJECTED_COLOR))
        self.colorStatusRejectedButton.setStyleSheet("background-color: %s" % mColor.name())
        mColor = QtGui.QColor(settings.value(rc.status_final, DEFAULT_FINAL_COLOR))
        self.colorStatusFinalButton.setStyleSheet("background-color: %s" % mColor.name())
        settings.endGroup()
        settings.beginGroup("specgram_settings")
        windowlen = int(settings.value('window_len', SPECGRAM_WINDOW_LENGTHS[4]))
        self.windowlenComboBox.setCurrentIndex(SPECGRAM_WINDOW_LENGTHS.index(windowlen))
        # Overlap must stay below the window length; default is 50% overlap.
        self.noverlapSpinBox.setMaximum(windowlen - 1)
        self.noverlapSpinBox.setValue(int(settings.value('noverlap', windowlen / 2)))
        mwindow = settings.value('window', plotting.SPECGRAM_WINDOWS[2])
        self.windowComboBox.setCurrentIndex(plotting.SPECGRAM_WINDOWS.index(mwindow))
        settings.endGroup()

    def saveSettings(self):
        """Saves settings to persistent storage and emits `saved`."""
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup("player_settings")
        settings.setValue('playback_freq', self.playbackrateSpinBox.value())
        settings.setValue('bit_depth',
                          playertoolbar.bit_depths[self.bitdepthComboBox.currentText()])
        settings.endGroup()
        settings.beginGroup("color_settings")
        settings.setValue('color_key', self.colorKeyComboBox.currentIndex())
        # Each color is read back from the corresponding button's palette.
        settings.setValue(rc.method_stalta, self.colorMethodStaLtaButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.method_stalta_takanami, self.colorMethodStaLtaTakanamiButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.method_ampa, self.colorMethodAmpaButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.method_ampa_takanami, self.colorMethodAmpaTakanamiButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.method_takanami, self.colorMethodTakanamiButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.method_other, self.colorMethodOtherButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.mode_manual, self.colorModeManualButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.mode_automatic, self.colorModeAutomaticButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.status_preliminary, self.colorStatusPreliminaryButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.status_reviewed, self.colorStatusReviewedButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.status_confirmed, self.colorStatusConfirmedButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.status_rejected, self.colorStatusRejectedButton.palette().color(QtGui.QPalette.Background))
        settings.setValue(rc.status_final, self.colorStatusFinalButton.palette().color(QtGui.QPalette.Background))
        settings.endGroup()
        settings.beginGroup("specgram_settings")
        settings.setValue('window_len', SPECGRAM_WINDOW_LENGTHS[self.windowlenComboBox.currentIndex()])
        settings.setValue('noverlap', self.noverlapSpinBox.value())
        settings.setValue('window', plotting.SPECGRAM_WINDOWS[self.windowComboBox.currentIndex()])
        settings.endGroup()
        self.saved.emit()

    def onclick(self, button):
        """Persists settings when the Apply or Ok button is pressed."""
        if self.buttonBox.standardButton(button) == QtGui.QDialogButtonBox.Apply:
            self.saveSettings()
        if self.buttonBox.standardButton(button) == QtGui.QDialogButtonBox.Ok:
            self.saveSettings()

    def _itemChanged(self, current, previous):
        """Shows the settings page associated with the selected tree item."""
        item_name = current.text(0)
        if item_name in self._settingsMenus:
            if self.currentMenu != self._settingsMenus[item_name]:
                self.currentMenu.setVisible(False)
                self._settingsMenus[item_name].setVisible(True)
                self.currentMenu = self._settingsMenus[item_name]

    def _keyChanged(self, index):
        """Shows the color-button panel for the selected coloring key
        (0: method, 1: mode, 2: status) and hides the others."""
        if index == 0:
            self.colorMethodButtonsWidget.setVisible(True)
            self.colorModeButtonsWidget.setVisible(False)
            self.colorStatusButtonsWidget.setVisible(False)
        elif index == 1:
            self.colorMethodButtonsWidget.setVisible(False)
            self.colorModeButtonsWidget.setVisible(True)
            self.colorStatusButtonsWidget.setVisible(False)
        elif index == 2:
            self.colorMethodButtonsWidget.setVisible(False)
            self.colorModeButtonsWidget.setVisible(False)
            self.colorStatusButtonsWidget.setVisible(True)

    def _colorButtonClicked(self):
        """Opens a color picker for the clicked button and repaints it."""
        button = self.sender()
        mBackgroundColor = button.palette().color(QtGui.QPalette.Background)
        mColor = QtGui.QColorDialog.getColor(mBackgroundColor)
        # Bug fix: QColorDialog.getColor returns an *invalid* color when the
        # user cancels the dialog; applying it unconditionally painted the
        # button black (#000000). Only restyle on a valid selection.
        if mColor.isValid():
            button.setStyleSheet("background-color: %s" % mColor.name())
from PySide import QtGui
from PySide import QtCore
import numpy as np
import matplotlib
# Configure matplotlib for the Qt4/PySide backend and tune rendering
# before pyplot and the backend canvas are imported below.
matplotlib.rcParams['backend'] = 'qt4agg'
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.rcParams['patch.antialiased'] = False
matplotlib.rcParams['figure.dpi'] = 65
# Split very long paths into chunks when rendering with Agg.
matplotlib.rcParams['agg.path.chunksize'] = 80000
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
from apasvo.utils import plotting
from apasvo.utils import clt
class TracePlot(QtCore.QObject):
    """Plot of a single seismic trace inside a shared matplotlib figure.

    Draws the trace signal (decimated to the visible pixel width), event
    markers as vertical lines, and a translucent span used to show that
    the trace is selected.
    """

    def __init__(self, parent, trace, fig_nrows=1, fig_ncols=1, ax_pos=1):
        """
        Args:
            parent: Owner widget; must expose `fig` (matplotlib Figure)
                and `draw()`/`subplots_adjust()` methods.
            trace: Trace object with `signal`, `fs`, `events`,
                `starttime` and `short_name` attributes.
            fig_nrows/fig_ncols/ax_pos: Subplot grid position.
        """
        super(TracePlot, self).__init__()
        self.parent = parent
        self.fig = parent.fig
        self.ax = self.fig.add_subplot(fig_nrows, fig_ncols, ax_pos, visible=False)
        self.trace = trace
        # Get trace dataseries
        self.signal = trace.signal
        self.time = np.linspace(0, len(self.signal) / trace.fs, num=len(self.signal), endpoint=False)
        self.xmin, self.xmax = 0, self.time[-1]
        # Plot current data
        self._plot_data = self.ax.plot(self.time, self.signal, color='black', rasterized=True)[0]
        self.ax.callbacks.connect('xlim_changed', self.on_xlim_change)
        self.ax.set_xlim(self.xmin, self.xmax)
        # Format axes: show x positions as absolute dates from trace start.
        axes_formatter = FuncFormatter(lambda x, pos: clt.float_secs_2_string_date(x, trace.starttime))
        self.ax.xaxis.set_major_formatter(axes_formatter)
        plt.setp(self.ax.get_xticklabels(), visible=False)
        plt.setp(self.ax.get_yticklabels(), visible=False)
        self.ax.grid(True, which='both')
        # Set event markers, keyed by event resource id (uuid string).
        self.marker_select_color = 'r'
        self.marker_color = 'b'
        self.markers = {}
        self.update_markers()
        # Selection parameters
        self.selected = False
        self.selector = self.ax.axvspan(0, self.xmax, fc='LightCoral', ec='r', alpha=0.5, visible=False)#, animated=True)
        # Place legend with the trace name.
        at = AnchoredText(self.trace.short_name, prop=dict(size=12), frameon=True, loc=2)
        at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
        self.ax.add_artist(at)

    def on_xlim_change(self, ax):
        """Clamp the view to the trace limits and re-decimate the data."""
        xmin, xmax = ax.get_xlim()
        if self.xmin <= xmin <= xmax <= self.xmax:
            # Update data
            self.update_data(ax)
        else:
            # Out-of-range view: clamp, which re-triggers this callback.
            xmin = max(self.xmin, xmin)
            xmax = min(self.xmax, xmax)
            ax.set_xlim(xmin, xmax)

    def update_data(self, ax=None):
        """Re-decimate the plotted signal for the current view & DPI."""
        ax = self.ax if ax is None else ax
        xmin, xmax = ax.get_xlim()
        # Bug fix: the lower bound used `max(0, self.xmin)` (two constants),
        # so decimation always started at sample 0 regardless of zoom.
        # Clamp the *view* limits to the trace limits instead.
        xmin = int(max(self.xmin, xmin) * self.trace.fs)
        xmax = int(min(self.xmax, xmax) * self.trace.fs)
        pixel_width = np.ceil(self.fig.get_figwidth() * self.fig.get_dpi())
        x_data, y_data = plotting.reduce_data(self.time, self.trace.signal, pixel_width, xmin, xmax)
        self._plot_data.set_xdata(x_data)
        self._plot_data.set_ydata(y_data)
        self.parent.draw()

    def create_marker(self, event, **kwargs):
        """Add a vertical-line marker for `event`; kwargs style the line."""
        event_id = event.resource_id.uuid
        position = event.stime / self.trace.fs
        marker = self.ax.axvline(position, color=self.marker_color, ls='--', lw=3, visible=False)#, animated=True)
        self.markers[event_id] = marker
        self.markers[event_id].set(**kwargs)

    def delete_marker(self, event_id):
        """Remove the marker line for `event_id` from the axes and dict."""
        marker = self.markers[event_id]
        if marker is not None:
            self.ax.lines.remove(marker)
            self.markers.pop(event_id)

    def update_markers(self, draw=False):
        """Rebuild all markers from the trace's current event list."""
        # Snapshot the keys: delete_marker mutates the dict while we iterate.
        for event_id in list(self.markers.keys()):
            self.delete_marker(event_id)
        for event in self.trace.events:
            self.create_marker(event)
        if draw:
            self.parent.draw()

    def set_selected_marker(self, selected):
        """Color every marker with the selected/unselected color."""
        color = self.marker_select_color if selected else self.marker_color
        # Bug fix: the original iterated the dict itself (uuid strings) and
        # called .set() on them, raising AttributeError. Iterate the lines.
        for marker in self.markers.values():
            marker.set(color=color)

    def set_event_selection(self, event_list):
        """Highlight the markers of the events in `event_list` and redraw."""
        event_id_list = [event.resource_id.uuid for event in event_list]
        for event_id in self.markers.keys():
            # Bug fix: Line2D has no `select_marker` method (the original
            # call raised AttributeError); set the marker color directly,
            # mirroring set_selected_marker's behavior.
            selected = event_id in event_id_list
            color = self.marker_select_color if selected else self.marker_color
            self.markers[event_id].set(color=color)
        self.parent.draw()

    def set_selected(self, selected):
        """Show/hide the translucent selection span over the trace."""
        if self.selected != selected:
            self.selected = selected
            if self.selected:
                self.selector.set_visible(True)
                # self.ax.set_axis_bgcolor('LightCoral')
            else:
                self.selector.set_visible(False)
                # self.ax.set_axis_bgcolor('white')

    def remove(self):
        """Detach this plot's axes from the figure and redraw the parent."""
        self.fig.delaxes(self.ax)
        self.parent.subplots_adjust()
        self.parent.draw()
class StreamViewerWidget(QtGui.QWidget):
    """Shows the entire signal and allows the user to navigate through it.

    Provides a scrollable selector over the entire signal: one TracePlot
    per trace of the current stream, with click (and Ctrl+click) trace
    selection.

    Attributes:
        xmin: Selector lower limit (measured in h-axis units).
        xmax: Selector upper limit (measured in h-axis units).
        step: Selector length (measured in h-axis units).
    """

    # Emitted with the index of the trace clicked by the user.
    trace_selected = QtCore.Signal(int)
    # Emitted with True when at least one trace is selected, False otherwise.
    selection_made = QtCore.Signal(bool)

    def __init__(self, parent, stream=None):
        super(StreamViewerWidget, self).__init__(parent)
        self.fig = plt.figure()
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Expanding,
                                                    QtGui.QSizePolicy.Policy.Expanding))
        self.canvas.setMinimumHeight(320)
        self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.canvas.setFocus()
        self.graphArea = QtGui.QScrollArea(self)
        self.graphArea.setWidget(self.canvas)
        self.graphArea.setWidgetResizable(True)
        self.graphArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        # Set the layout
        self.layout = QtGui.QVBoxLayout(self)
        self.layout.addWidget(self.graphArea)
        # Animation related attrs.
        self.background = []
        self.animated = False
        self.size = (self.fig.bbox.width, self.fig.bbox.height)
        # Set TracePlot list
        self.trace_plots = []
        self.stream = None
        if stream is not None:
            self.set_stream(stream)
        # Event handling. _selected_traces holds *indices* into stream.traces.
        self.visible_axes = []
        self._selected_traces = set()
        self.shift_pressed = False
        self.press_selector = None
        self.fig.canvas.mpl_connect('motion_notify_event', self.on_move)
        self.fig.canvas.mpl_connect('button_press_event', self.on_press)
        self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
        self.fig.canvas.mpl_connect('key_release_event', self.on_key_release)

    @property
    def selected_traces(self):
        # Returns the selected trace objects (not their indices).
        if self.stream is not None:
            return [self.stream.traces[i] for i in self._selected_traces]
        return []

    def on_move(self, event):
        # Show the hovered trace's name as the canvas tooltip.
        axes_selected = False
        for i, axes in enumerate(self.fig.axes):
            if axes.get_visible():
                ymin, ymax = axes.get_position().ymin, axes.get_position().ymax
                xmin, xmax = axes.get_position().xmin, axes.get_position().xmax
                xfig, yfig = self._event_to_fig_coords(event)
                if ymin <= yfig <= ymax and xmin <= xfig <= xmax:
                    self.canvas.setToolTip(self.stream.traces[i].name)
                    axes_selected = True
                    break
        if not axes_selected:
            self.canvas.setToolTip("")

    def on_key_press(self, event):
        # NOTE(review): tracks the 'control' key but the flag is named
        # shift_pressed -- the multi-select modifier is actually Ctrl.
        # Behavior kept as-is; confirm intended.
        if event.key == 'control':
            self.shift_pressed = True

    def on_key_release(self, event):
        self.shift_pressed = False

    def on_press(self, event):
        """Handle left-clicks: select the clicked trace (Ctrl adds to the
        current selection); clicking outside any trace clears it."""
        trace_selected = False
        if event.button == 1:# and event.dblclick:
            for i, ax in enumerate(self.fig.axes):
                if ax.get_visible():
                    ymin, ymax = ax.get_position().ymin, ax.get_position().ymax
                    xmin, xmax = ax.get_position().xmin, ax.get_position().xmax
                    xfig, yfig = self._event_to_fig_coords(event)
                    if ymin <= yfig <= ymax and xmin <= xfig <= xmax:
                        trace_selected = True
                        if self.shift_pressed:
                            if self._selected_traces:
                                self.trace_selected.emit(i)
                                self.selection_made.emit(True)
                            self._selected_traces.add(i)
                        else:
                            self.trace_selected.emit(i)
                            self.selection_made.emit(True)
                            self._selected_traces = {i}
                        break
            # if the user clicked out of any trace (and he's not using shift), then deselect all
            if not trace_selected and not self.shift_pressed:
                self._selected_traces = set()
                self.selection_made.emit(False)
            # Now update selection status on plots
            for i, plot in enumerate(self.trace_plots):
                plot.set_selected(i in self._selected_traces)
            self.draw()

    def _event_to_fig_coords(self, event):
        # Convert a mouse event's pixel position to figure-fraction coords.
        inv = self.fig.transFigure.inverted()
        return inv.transform((event.x, event.y))

    def set_stream(self, stream):
        """Replace the displayed stream, rebuilding one TracePlot per trace."""
        self.stream = stream
        self._selected_traces = set()
        # Clear canvas
        for plot in self.trace_plots:
            plot.remove()
        self.trace_plots = []
        # Plot stream traces
        for i, trace in enumerate(self.stream.traces):
            self.trace_plots.append(TracePlot(self, trace, fig_nrows=len(stream), ax_pos=i + 1))
        # Draw canvas and cache the background for blitting.
        self.subplots_adjust()
        self.canvas.draw()
        self.background = self.canvas.copy_from_bbox(self.fig.bbox)
        self.draw()

    def refresh_stream_data(self):
        # Re-decimate every trace plot for the current view.
        for plot in self.trace_plots:
            plot.update_data()

    def draw(self):
        self.canvas.draw()
        #self.draw_animate()

    def draw_animate(self):
        """Blit-based redraw: restore the cached background, then draw only
        the animated artists on visible axes."""
        size = self.fig.bbox.width, self.fig.bbox.height
        if size != self.size:
            # Figure was resized: the cached background is stale, rebuild it.
            self.size = size
            self.canvas.draw()
            self.background = self.canvas.copy_from_bbox(self.fig.bbox)
        self.canvas.restore_region(self.background)
        for artist in self._get_animated_artists():
            if artist.get_visible():
                ax = artist.get_axes()
                if ax is not None:
                    if artist.get_axes().get_visible():
                        self.fig.draw_artist(artist)
                else:
                    self.fig.draw_artist(artist)
        self.canvas.blit(self.fig.bbox)

    def _get_animated_artists(self):
        # Generator over every animated artist in the figure's axes.
        artists = []
        for ax in self.fig.axes:
            artists.extend(ax.images)
            artists.extend(ax.lines)
            artists.append(ax.xaxis)
            artists.append(ax.yaxis)
            artists.extend(ax.patches)
            artists.extend(ax.spines.values())
        for artist in artists:
            if artist.get_animated():
                yield artist

    def set_visible(self, value):
        self.canvas.setVisible(value)

    def get_visible(self):
        return self.canvas.isVisible()

    def remove_trace(self, idx):
        # Drop both the TracePlot and its axes for the trace at `idx`.
        self.trace_plots.pop(idx).remove()

    def subplots_adjust(self):
        """Renumber the visible subplots into a single column and set the
        spacing between them."""
        visible_subplots = [ax for ax in self.fig.get_axes() if ax.get_visible()]
        for i, ax in enumerate(visible_subplots):
            correct_geometry = (len(visible_subplots), 1, i + 1)
            if correct_geometry != ax.get_geometry():
                ax.change_geometry(len(visible_subplots), 1, i + 1)
        # Adjust space between subplots
        self.fig.subplots_adjust(left=0.02, right=0.98, bottom=0.02,
                                 top=0.98, hspace=0.05)

    def showEvent(self, event):
        self.draw()

    def resizeEvent(self, event):
        self.draw()

    def update_markers(self):
        # Rebuild event markers on every trace plot, then redraw once.
        for plot in self.trace_plots:
            plot.update_markers()
        self.draw()

    def visualize_stream_range(self, start_trace=None, end_trace=None):
        """Show only the traces with index in [start_trace, end_trace)."""
        for i, ax in enumerate(self.fig.axes):
            ax.set_visible(start_trace <= i < end_trace)
        self.subplots_adjust()
        self.canvas.draw()
from PySide import QtCore
from PySide import QtGui
from apasvo.gui.models import filterlistmodel
from apasvo.gui.delegates import dsbdelegate
from apasvo._version import _application_name
from apasvo._version import _organization
class AmpaDialog(QtGui.QDialog):
"""
"""
    def __init__(self, stream, trace_list=None, parent=None):
        """Build the AMPA settings dialog for a set of traces.

        Args:
            stream: Stream whose traces will be analyzed.
            trace_list: Optional subset of traces to use instead of
                ``stream.traces``.
            parent: Parent widget.
        """
        super(AmpaDialog, self).__init__(parent)
        traces = stream.traces if not trace_list else trace_list
        # Time resolution (s) of the fastest trace: spin-box step size.
        self.step = 1.0 / max([trace.fs for trace in traces])
        # Duration (s) of the shortest trace: upper bound for window widgets.
        self.max_value = min([((len(trace) - 1) / trace.fs) for trace in traces])
        self.nyquist_freq = max([trace.fs for trace in traces]) / 2.0
        self.setup_ui()
        # Table of bandpass filters, editable through a spin-box delegate.
        self._filters = filterlistmodel.FilterListModel([])
        self.filtersTable.setModel(self._filters)
        self._filters.sizeChanged.connect(self._on_size_changed)
        self._filters.dataChanged.connect(self._on_data_changed)
        filterDelegate = dsbdelegate.DoubleSpinBoxDelegate(self.filtersTable,
                                                           minimum=self.step,
                                                           maximum=self.max_value - self.step,
                                                           step=self.step)
        self.filtersTable.setItemDelegateForColumn(0, filterDelegate)
        self.ampawindowSpinBox.valueChanged.connect(self.on_ampa_window_changed)
        self.ampawindowstepSpinBox.valueChanged.connect(self.on_ampa_window_step_changed)
        # Takanami margin widgets are only enabled while Takanami is checked.
        self.takanamiCheckBox.toggled.connect(self.takanamiMarginLabel.setEnabled)
        self.takanamiCheckBox.toggled.connect(self.takanamiMarginSpinBox.setEnabled)
        self.startfSpinBox.valueChanged.connect(self.on_startf_changed)
        self.endfSpinBox.valueChanged.connect(self.on_endf_changed)
        self.bandwidthSpinBox.valueChanged.connect(self.on_bandwidth_changed)
        self.actionAddFilter.triggered.connect(self.addFilter)
        self.actionRemoveFilter.triggered.connect(self.removeFilter)
        model = self.filtersTable.selectionModel()
        model.selectionChanged.connect(self._on_filter_selected)
        self.buttonBox.clicked.connect(self.onclick)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.load_settings()
def setup_ui(self):
self.setWindowTitle("AMPA Settings")
self.verticalLayout = QtGui.QVBoxLayout(self)
self.setMinimumWidth(480)
# Set AMPA General Settings Group Box
self.ampaGeneralSettingsGroupBox = QtGui.QGroupBox("General settings", self)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHeightForWidth(self.ampaGeneralSettingsGroupBox.sizePolicy().hasHeightForWidth())
self.ampaGeneralSettingsGroupBox.setSizePolicy(sizePolicy)
self.formLayout_3 = QtGui.QFormLayout(self.ampaGeneralSettingsGroupBox)
self.formLayout_3.setLabelAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.formLayout_3.setContentsMargins(12, 12, 12, 12)
self.formLayout_3.setHorizontalSpacing(24)
self.formLayout_3.setObjectName("formLayout_3")
self.ampawindowLabel = QtGui.QLabel("Sliding Window Length (in seconds):", self.ampaGeneralSettingsGroupBox)
self.formLayout_3.setWidget(0, QtGui.QFormLayout.LabelRole, self.ampawindowLabel)
self.ampawindowSpinBox = QtGui.QDoubleSpinBox(self.ampaGeneralSettingsGroupBox)
self.ampawindowSpinBox.setAccelerated(True)
self.ampawindowSpinBox.setMaximum(self.max_value)
self.ampawindowSpinBox.setSingleStep(self.step)
self.formLayout_3.setWidget(0, QtGui.QFormLayout.FieldRole, self.ampawindowSpinBox)
self.ampawindowstepLabel = QtGui.QLabel("Sliding Window Step (in seconds):", self.ampaGeneralSettingsGroupBox)
self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.ampawindowstepLabel)
self.ampawindowstepSpinBox = QtGui.QDoubleSpinBox(self.ampaGeneralSettingsGroupBox)
self.ampawindowstepSpinBox.setAccelerated(True)
self.ampawindowstepSpinBox.setMinimum(self.step)
self.ampawindowstepSpinBox.setSingleStep(self.step)
self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.ampawindowstepSpinBox)
self.ampanoisethresholdLabel = QtGui.QLabel("Noise Threshold Percentile:", self.ampaGeneralSettingsGroupBox)
self.formLayout_3.setWidget(2, QtGui.QFormLayout.LabelRole, self.ampanoisethresholdLabel)
self.ampanoisethresholdSpinBox = QtGui.QSpinBox(self.ampaGeneralSettingsGroupBox)
self.ampanoisethresholdSpinBox.setAccelerated(True)
self.ampanoisethresholdSpinBox.setMinimum(1)
self.formLayout_3.setWidget(2, QtGui.QFormLayout.FieldRole, self.ampanoisethresholdSpinBox)
self.verticalLayout.addWidget(self.ampaGeneralSettingsGroupBox)
# Set AMPA filter bank settings Group Box
self.filterbankGroupBox = QtGui.QGroupBox("Filter Bank Settings", self)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHeightForWidth(self.filterbankGroupBox.sizePolicy().hasHeightForWidth())
self.filterbankGroupBox.setSizePolicy(sizePolicy)
self.formLayout_2 = QtGui.QFormLayout(self.filterbankGroupBox)
self.formLayout_2.setLabelAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.formLayout_2.setContentsMargins(12, 12, 12, 12)
self.formLayout_2.setHorizontalSpacing(24)
self.startfLabel = QtGui.QLabel("Start Frequency (Hz):", self.filterbankGroupBox)
self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.startfLabel)
self.startfSpinBox = QtGui.QDoubleSpinBox(self.filterbankGroupBox)
self.startfSpinBox.setAccelerated(True)
self.startfSpinBox.setMinimum(0.0)
self.startfSpinBox.setSingleStep(0.01)
self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.startfSpinBox)
self.endfLabel = QtGui.QLabel("Max. End Frequency (Hz):", self.filterbankGroupBox)
self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.endfLabel)
self.endfSpinBox = QtGui.QDoubleSpinBox(self.filterbankGroupBox)
self.endfSpinBox.setAccelerated(True)
self.endfSpinBox.setMaximum(self.nyquist_freq)
self.endfSpinBox.setSingleStep(0.01)
self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.endfSpinBox)
self.bandwidthLabel = QtGui.QLabel("Channel Bandwidth (Hz):", self.filterbankGroupBox)
self.formLayout_2.setWidget(2, QtGui.QFormLayout.LabelRole, self.bandwidthLabel)
self.bandwidthSpinBox = QtGui.QDoubleSpinBox(self.filterbankGroupBox)
self.bandwidthSpinBox.setAccelerated(True)
self.bandwidthSpinBox.setMinimum(0.1)
self.bandwidthSpinBox.setSingleStep(0.01)
self.formLayout_2.setWidget(2, QtGui.QFormLayout.FieldRole, self.bandwidthSpinBox)
self.overlapLabel = QtGui.QLabel("Channel Overlap (Hz):", self.filterbankGroupBox)
self.formLayout_2.setWidget(3, QtGui.QFormLayout.LabelRole, self.overlapLabel)
self.overlapSpinBox = QtGui.QDoubleSpinBox(self.filterbankGroupBox)
self.overlapSpinBox.setAccelerated(True)
self.overlapSpinBox.setMinimum(0.0)
self.overlapSpinBox.setSingleStep(0.01)
self.formLayout_2.setWidget(3, QtGui.QFormLayout.FieldRole, self.overlapSpinBox)
self.verticalLayout.addWidget(self.filterbankGroupBox)
# Set AMPA filters Group Box
self.ampaFiltersGroupBox = QtGui.QGroupBox("Filter Lengths", self)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHeightForWidth(self.ampaFiltersGroupBox.sizePolicy().hasHeightForWidth())
self.ampaFiltersGroupBox.setSizePolicy(sizePolicy)
self.verticalLayout_2 = QtGui.QVBoxLayout(self.ampaFiltersGroupBox)
self.verticalLayout_2.setContentsMargins(12, 12, 12, 12)
self.ampafiltersToolBar = QtGui.QToolBar(self.ampaFiltersGroupBox)
self.ampafiltersToolBar.setMovable(False)
self.actionAddFilter = QtGui.QAction(self)
self.actionAddFilter.setIcon(QtGui.QIcon(":/add.png"))
self.actionRemoveFilter = QtGui.QAction(self)
self.actionRemoveFilter.setIcon(QtGui.QIcon(":/remove.png"))
self.actionRemoveFilter.setEnabled(False)
self.ampafiltersToolBar.addAction(self.actionAddFilter)
self.ampafiltersToolBar.addAction(self.actionRemoveFilter)
self.filtersTable = QtGui.QTableView(self.ampaFiltersGroupBox)
self.filtersTable.setCornerButtonEnabled(True)
self.filtersTable.horizontalHeader().setStretchLastSection(True)
self.filtersTable.verticalHeader().setVisible(False)
self.filtersTable.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.filtersTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.filtersTable.setShowGrid(False)
self.verticalLayout_2.addWidget(self.ampafiltersToolBar)
self.verticalLayout_2.addWidget(self.filtersTable)
self.verticalLayout.addWidget(self.ampaFiltersGroupBox)
# Set Takanami Group Box
self.takanamiGroupBox = QtGui.QGroupBox("Takanami Settings", self)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHeightForWidth(self.takanamiGroupBox.sizePolicy().hasHeightForWidth())
self.takanamiGroupBox.setSizePolicy(sizePolicy)
self.takanamiformLayout = QtGui.QFormLayout(self.takanamiGroupBox)
self.takanamiformLayout.setLabelAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.takanamiformLayout.setContentsMargins(12, 12, 12, 12)
self.takanamiformLayout.setHorizontalSpacing(24)
self.takanamiCheckBox = QtGui.QCheckBox("Apply Takanami on results", self.takanamiGroupBox)
self.takanamiCheckBox.setChecked(True)
self.takanamiformLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.takanamiCheckBox)
self.takanamiMarginLabel = QtGui.QLabel("Takanami Max. Margin (in seconds):", self.takanamiGroupBox)
self.takanamiformLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.takanamiMarginLabel)
self.takanamiMarginSpinBox = QtGui.QDoubleSpinBox(self.takanamiGroupBox)
self.takanamiMarginSpinBox.setAccelerated(True)
self.takanamiMarginSpinBox.setMinimum(1.0)
self.takanamiMarginSpinBox.setMaximum(20.0)
self.takanamiMarginSpinBox.setSingleStep(self.step)
self.takanamiformLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.takanamiMarginSpinBox)
self.verticalLayout.addWidget(self.takanamiGroupBox)
# Button Box
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.RestoreDefaults |
QtGui.QDialogButtonBox.Cancel |
QtGui.QDialogButtonBox.Ok)
self.verticalLayout.addWidget(self.buttonBox)
def on_ampa_window_changed(self, value):
self.ampawindowstepSpinBox.setMaximum(value)
def on_ampa_window_step_changed(self, value):
pass
def on_startf_changed(self, value):
self.endfSpinBox.setMinimum(value + self.endfSpinBox.singleStep())
self.bandwidthSpinBox.setMaximum(self.nyquist_freq - value - self.bandwidthSpinBox.singleStep())
def on_endf_changed(self, value):
self.startfSpinBox.setMaximum(value - self.startfSpinBox.singleStep())
def on_bandwidth_changed(self, value):
self.overlapSpinBox.setMaximum(value - self.overlapSpinBox.singleStep())
def addFilter(self, value=10.0):
self._filters.addFilter(value)
self.ampawindowSpinBox.setMinimum(max(self._filters.list()) +
self.step)
def removeFilter(self):
if len(self.filtersTable.selectionModel().selectedRows()) > 0:
self._filters.removeRow(self.filtersTable.currentIndex().row())
if self._filters.rowCount() <= 1:
self.actionRemoveFilter.setEnabled(False)
self.ampawindowSpinBox.setMinimum(max(self._filters.list()) +
self.step)
def load_settings(self):
# Read settings
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("ampa_settings")
self.ampawindowSpinBox.setValue(float(settings.value('window_len', 100.0)))
self.ampawindowstepSpinBox.setValue(float(settings.value('overlap', 50.0)))
self.ampanoisethresholdSpinBox.setValue(int(settings.value('noise_threshold', 90)))
self._filters.clearFilters()
for value in self._load_filters():
self.addFilter(float(value))
settings.beginGroup("filter_bank_settings")
self.startfSpinBox.setValue(float(settings.value('startf', 2.0)))
self.endfSpinBox.setValue(float(settings.value('endf', 12.0)))
self.bandwidthSpinBox.setValue(float(settings.value('bandwidth', 3.0)))
self.overlapSpinBox.setValue(float(settings.value('overlap', 1.0)))
settings.endGroup()
settings.endGroup()
settings.beginGroup("takanami_settings")
self.takanamiCheckBox.setChecked(int(settings.value('takanami', True)))
self.takanamiMarginSpinBox.setValue(float(settings.value('takanami_margin', 5.0)))
settings.endGroup()
def save_settings(self):
"""Saves settings to persistent storage."""
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("ampa_settings")
settings.setValue('window_len', self.ampawindowSpinBox.value())
settings.setValue('overlap', self.ampawindowstepSpinBox.value())
settings.setValue('step', self.ampawindowstepSpinBox.value())
settings.setValue('noise_threshold', self.ampanoisethresholdSpinBox.value())
settings.setValue('filters', self._filters.list())
settings.beginGroup("filter_bank_settings")
settings.setValue('startf', self.startfSpinBox.value())
settings.setValue('endf', self.endfSpinBox.value())
settings.setValue('bandwidth', self.bandwidthSpinBox.value())
settings.setValue('overlap', self.overlapSpinBox.value())
settings.endGroup()
settings.endGroup()
settings.beginGroup("takanami_settings")
settings.setValue('takanami', self.takanamiCheckBox.checkState())
settings.setValue('takanami_margin', self.takanamiMarginSpinBox.value())
settings.endGroup()
def onclick(self, button):
if self.buttonBox.standardButton(button) == QtGui.QDialogButtonBox.RestoreDefaults:
self.load_settings()
if self.buttonBox.standardButton(button) == QtGui.QDialogButtonBox.Apply:
self.save_settings()
if self.buttonBox.standardButton(button) == QtGui.QDialogButtonBox.Ok:
self.save_settings()
def _on_size_changed(self, size):
if size <= 1:
self.actionRemoveFilter.setEnabled(False)
def _on_data_changed(self, top_left, bottom_right):
self.ampawindowSpinBox.setMinimum(max(self._filters.list()))
def _load_filters(self, default=None):
if default is None:
default = [30.0, 20.0, 10.0, 5.0, 2.5]
settings = QtCore.QSettings(_organization, _application_name)
filters = settings.value('ampa_settings/filters', default)
if filters:
if isinstance(filters, list):
filter_list = list(filters)
else:
filter_list = [filters]
else:
filter_list = default
# Drop filter lengths larger than max_value
return [float(f) for f in filter_list if 0 < float(f) < self.max_value]
def _on_filter_selected(self, s, d):
self.actionRemoveFilter.setEnabled(len(self.filtersTable.selectionModel()
.selectedRows()) > 0) | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/gui/views/ampadialog.py | ampadialog.py |
from PySide import QtCore
from PySide import QtGui
from apasvo._version import _application_name
from apasvo._version import _organization
class StaLtaDialog(QtGui.QDialog):
    """Configuration dialog for the STA-LTA event-picking algorithm.

    Lets the user choose the STA and LTA window lengths and whether to
    apply the Takanami refinement stage on detected events.  Values are
    loaded from and persisted to QSettings under the 'stalta_settings'
    and 'takanami_settings' groups.
    """
    def __init__(self, stream, trace_list=None, parent=None):
        """Builds the dialog, constrained by the traces to be analyzed.

        Args:
            stream: Stream object holding the candidate traces.
            trace_list: Optional subset of traces to analyze. If None or
                empty, every trace in 'stream' is used.
            parent: Parent widget.
        """
        super(StaLtaDialog, self).__init__(parent)
        traces = stream.traces if not trace_list else trace_list
        # Finest time resolution (s) and largest usable window length (s),
        # derived from the traces' sampling rates and lengths.
        self.step = 1.0 / max([trace.fs for trace in traces])
        self.max_value = min([((len(trace) - 1) / trace.fs) for trace in traces])
        self.setup_ui()
        # Keep STA strictly shorter than LTA at all times.
        self.staSpinBox.valueChanged.connect(self.on_sta_changed)
        self.ltaSpinBox.valueChanged.connect(self.on_lta_changed)
        self.takanamiCheckBox.toggled.connect(self.takanamiMarginLabel.setEnabled)
        self.takanamiCheckBox.toggled.connect(self.takanamiMarginSpinBox.setEnabled)
        self.buttonBox.clicked.connect(self.onclick)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.load_settings()
    def setup_ui(self):
        """Creates and lays out every widget of the dialog."""
        self.setWindowTitle("STA-LTA settings")
        self.verticalLayout = QtGui.QVBoxLayout(self)
        # Set STA-LTA Group Box
        self.staltaGroupBox = QtGui.QGroupBox("STA-LTA", self)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHeightForWidth(self.staltaGroupBox.sizePolicy().hasHeightForWidth())
        self.staltaGroupBox.setSizePolicy(sizePolicy)
        self.staltaGroupBox.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.formLayout = QtGui.QFormLayout(self.staltaGroupBox)
        self.formLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
        self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setRowWrapPolicy(QtGui.QFormLayout.DontWrapRows)
        self.formLayout.setLabelAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.formLayout.setFormAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
        self.formLayout.setContentsMargins(12, 12, 12, 12)
        self.formLayout.setHorizontalSpacing(24)
        self.staLabel = QtGui.QLabel("STA window (in seconds):", self.staltaGroupBox)
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.staLabel)
        self.staSpinBox = QtGui.QDoubleSpinBox(self.staltaGroupBox)
        self.staSpinBox.setAccelerated(True)
        self.staSpinBox.setMinimum(self.step)
        self.staSpinBox.setSingleStep(self.step)
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.staSpinBox)
        self.ltaLabel = QtGui.QLabel("LTA window (in seconds):", self.staltaGroupBox)
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.ltaLabel)
        self.ltaSpinBox = QtGui.QDoubleSpinBox(self.staltaGroupBox)
        self.ltaSpinBox.setAccelerated(True)
        self.ltaSpinBox.setMaximum(self.max_value)
        self.ltaSpinBox.setSingleStep(self.step)
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.ltaSpinBox)
        self.verticalLayout.addWidget(self.staltaGroupBox)
        # Set Takanami Group Box
        self.takanamiGroupBox = QtGui.QGroupBox("Takanami Settings", self)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHeightForWidth(self.takanamiGroupBox.sizePolicy().hasHeightForWidth())
        self.takanamiGroupBox.setSizePolicy(sizePolicy)
        self.takanamiformLayout = QtGui.QFormLayout(self.takanamiGroupBox)
        self.takanamiformLayout.setLabelAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.takanamiformLayout.setContentsMargins(12, 12, 12, 12)
        self.takanamiformLayout.setHorizontalSpacing(24)
        self.takanamiCheckBox = QtGui.QCheckBox("Apply Takanami on results", self.takanamiGroupBox)
        self.takanamiCheckBox.setChecked(True)
        self.takanamiformLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.takanamiCheckBox)
        self.takanamiMarginLabel = QtGui.QLabel("Takanami Max. Margin (in seconds):", self.takanamiGroupBox)
        self.takanamiformLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.takanamiMarginLabel)
        self.takanamiMarginSpinBox = QtGui.QDoubleSpinBox(self.takanamiGroupBox)
        self.takanamiMarginSpinBox.setAccelerated(True)
        self.takanamiMarginSpinBox.setMinimum(1.0)
        self.takanamiMarginSpinBox.setMaximum(20.0)
        self.takanamiMarginSpinBox.setSingleStep(self.step)
        self.takanamiformLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.takanamiMarginSpinBox)
        self.verticalLayout.addWidget(self.takanamiGroupBox)
        # Button Box
        self.buttonBox = QtGui.QDialogButtonBox(self)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.RestoreDefaults |
                                          QtGui.QDialogButtonBox.Cancel |
                                          QtGui.QDialogButtonBox.Ok)
        self.verticalLayout.addWidget(self.buttonBox)
    def on_sta_changed(self, value):
        """Keeps the LTA window strictly longer than the STA window."""
        self.ltaSpinBox.setMinimum(value + self.step)
    def on_lta_changed(self, value):
        """Keeps the STA window strictly shorter than the LTA window."""
        self.staSpinBox.setMaximum(value - self.step)
    def load_settings(self):
        """Populates every widget from persistent storage (QSettings)."""
        # Read settings
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup('stalta_settings')
        self.staSpinBox.setValue(float(settings.value('sta_window_len', 5.0)))
        self.ltaSpinBox.setValue(float(settings.value('lta_window_len', 100.0)))
        settings.endGroup()
        settings.beginGroup("takanami_settings")
        # NOTE(review): the stored value is a Qt.CheckState int; on backends
        # where QSettings returns it as a non-numeric string, int() would
        # raise -- confirm the stored representation on all platforms.
        self.takanamiCheckBox.setChecked(int(settings.value('takanami', True)))
        self.takanamiMarginSpinBox.setValue(float(settings.value('takanami_margin', 5.0)))
        settings.endGroup()
    def save_settings(self):
        """Saves settings to persistent storage."""
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup("stalta_settings")
        settings.setValue('sta_window_len', self.staSpinBox.value())
        settings.setValue('lta_window_len', self.ltaSpinBox.value())
        settings.endGroup()
        settings.beginGroup("takanami_settings")
        settings.setValue('takanami', self.takanamiCheckBox.checkState())
        settings.setValue('takanami_margin', self.takanamiMarginSpinBox.value())
        settings.endGroup()
    def onclick(self, button):
        """Dispatches the dialog's button-box clicks.

        RestoreDefaults reloads the stored settings; Ok (and Apply, if
        ever added to the button box) persists the current values.
        """
        if self.buttonBox.standardButton(button) == QtGui.QDialogButtonBox.RestoreDefaults:
            self.load_settings()
        if self.buttonBox.standardButton(button) == QtGui.QDialogButtonBox.Apply:
            self.save_settings()
        if self.buttonBox.standardButton(button) == QtGui.QDialogButtonBox.Ok:
            self.save_settings()
from PySide import QtGui
from PySide import QtCore
import matplotlib
matplotlib.rcParams['backend'] = 'qt4agg'
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.rcParams['patch.antialiased'] = False
matplotlib.rcParams['figure.dpi'] = 65
matplotlib.rcParams['agg.path.chunksize'] = 80000
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.ticker import FuncFormatter
import matplotlib.pyplot as plt
import numpy as np
import datetime
from apasvo.gui.views import takanamidialog
from apasvo.gui.views import settingsdialog
from apasvo.picking import envelope as env
from apasvo.picking import apasvotrace as rc
from apasvo.utils import plotting
from apasvo.utils import clt
from apasvo._version import _application_name
from apasvo._version import _organization
class SpanSelector(QtCore.QObject):
    """Allows the user to manually select a piece of a seismic signal on a
    SignalViewerWidget object.

    The selection is drawn as a shaded vertical span replicated on every
    axes of the figure and is dragged out with the left mouse button.
    Limits are stored internally in samples; the public API exposes them
    in seconds (h-axis units).

    Attributes:
        xleft: Current selection lower limit (measured in h-axis units).
        xright: Current selection upper limit (measured in h-axis units).
        xmin: Minimum selection lower limit allowed (in h-axis units).
        xmax: Maximum selection upper limit allowed (in h-axis units).
        active: Indicates whether the selector object is active or not.
        minstep: Minimum selection step allowed.

    Signals:
        toggled: 'active' state changes.
        valueChanged: 'xleft', 'xright' values changes.
        right_clicked: The selection span was right-clicked.
    """
    toggled = QtCore.Signal(bool)
    valueChanged = QtCore.Signal(float, float)
    right_clicked = QtCore.Signal()
    def __init__(self, fig, fs=50.0, xmin=0.0, xmax=0.0):
        """Creates a selector over all axes of 'fig'.

        Args:
            fig: Matplotlib figure to draw the selection on.
            fs: Sampling rate used to convert between seconds and samples.
            xmin: Lowest selectable position (seconds).
            xmax: Highest selectable position (seconds).
        """
        super(SpanSelector, self).__init__()
        self.fig = fig
        # Limits are kept in samples; converted to seconds on access.
        self._xleft_in_samples = 0
        self._xright_in_samples = 0
        self.fs = fs
        self._xmin_in_samples = int(xmin * self.fs)
        self._xmax_in_samples = int(xmax * self.fs)
        self.active = False
        self.enabled = True
        # One shaded span per axes; all kept in sync.
        self.selectors = [ax.axvspan(0, 1, fc='LightCoral', ec='r', alpha=0.7, picker=5)
                          for ax in self.fig.axes]
        for s in self.selectors:
            s.set_visible(False)
        self.pick_threshold = None
        self.press_selector = None
        self.canvas = self.fig.canvas
        self.canvas.mpl_connect('pick_event', self.on_pick)
        self.canvas.mpl_connect('button_press_event', self.onpress)
        self.canvas.mpl_connect('button_release_event', self.onrelease)
        self.canvas.mpl_connect('motion_notify_event', self.onmove)
        # Blitting state used while the selection is being dragged.
        self.background = None
        self.animated = False
    @property
    def xleft(self):
        # Selection lower limit in seconds.
        return self._xleft_in_samples / self.fs
    @property
    def xright(self):
        # Selection upper limit in seconds.
        return self._xright_in_samples / self.fs
    @property
    def xmin(self):
        # Minimum allowed position in seconds.
        return self._xmin_in_samples / self.fs
    @property
    def xmax(self):
        # Maximum allowed position in seconds.
        return self._xmax_in_samples / self.fs
    @xmin.setter
    def xmin(self, value):
        self._xmin_in_samples = int(value * self.fs)
    @xmax.setter
    def xmax(self, value):
        self._xmax_in_samples = int(value * self.fs)
    def on_pick(self, pick_event):
        """Emits right_clicked when an active selection span is right-clicked."""
        if self.active:
            if pick_event.mouseevent.button == 3:  # Right button clicked
                if pick_event.artist in self.selectors:
                    if not self.canvas.widgetlock.locked():
                        self.right_clicked.emit()
    def onpress(self, event):
        """Starts a new selection drag on a left-button press."""
        if self.enabled:
            if event.button == 1:  # Left button clicked
                if not self.canvas.widgetlock.locked():
                    # Take exclusive ownership of mouse events while dragging.
                    self.canvas.widgetlock(self)
                    if self.active:
                        self.set_active(False)
                    self.press_selector = event
                    # Start animation
                    self._set_animated(True)
                    xpos = self.get_xdata(self.press_selector)
                    self.set_selector_limits(xpos, xpos, adjust_to_viewport=True)
    def onrelease(self, event):
        """Finishes the drag and releases the canvas widget lock."""
        if self.canvas.widgetlock.isowner(self):
            self.press_selector = None
            # End animation
            self._set_animated(False)
            self.canvas.widgetlock.release(self)
    def onmove(self, event):
        """Grows/shrinks the selection while the mouse is dragged."""
        if self.press_selector is not None:
            xleft = self.get_xdata(self.press_selector)
            xright = self.get_xdata(event)
            # Dragging leftwards is allowed: keep limits ordered.
            if xright < xleft:
                xleft, xright = xright, xleft
            if not self.active:
                self.set_active(True)
            self.set_selector_limits(xleft, xright, adjust_to_viewport=True)
    def get_xdata(self, event):
        """Converts an event's display coordinates into data (h-axis) units."""
        inv = self.fig.axes[0].transData.inverted()
        xdata, _ = inv.transform((event.x, event.y))
        return xdata
    def set_selector_limits(self, xleft, xright, adjust_to_viewport=False):
        """Sets the selection limits (in seconds), clamping them if needed.

        Args:
            xleft: New lower limit (seconds).
            xright: New upper limit (seconds).
            adjust_to_viewport: If True, also clamp the limits to the
                currently visible x-range of the figure.
        """
        xleft = int(xleft * self.fs)
        xright = int(xright * self.fs)
        if (xleft, xright) != (self._xleft_in_samples, self._xright_in_samples):
            if adjust_to_viewport:
                xmin, xmax = self.fig.axes[0].get_xlim()
                xmin, xmax = int(xmin * self.fs), int(xmax * self.fs)
                if xleft < xmin:
                    xleft = xmin
                elif xleft > xmax:
                    xleft = xmax
                if xright > xmax:
                    xright = xmax
                elif xright < xmin:
                    xright = xmin
            if xleft < self._xmin_in_samples:
                xleft = self._xmin_in_samples
            if xright > self._xmax_in_samples:
                xright = self._xmax_in_samples
            self._xleft_in_samples, self._xright_in_samples = xleft, xright
            # Move the polygon vertices of every span to the new limits.
            for s in self.selectors:
                s.xy[:2, 0] = self.xleft
                s.xy[2:4, 0] = self.xright
            self.valueChanged.emit(self.xleft, self.xright)
            self.draw()
    def get_selector_limits(self):
        """Returns the current selection limits (in seconds)."""
        return self.xleft, self.xright
    def set_selection_limits(self, xmin, xmax):
        """Sets the allowed selection range (in seconds)."""
        self.xmin, self.xmax = xmin, xmax
    def get_selection_limits(self):
        """Returns the allowed selection range (in seconds)."""
        return self.xmin, self.xmax
    def set_active(self, value):
        """Shows/hides the selection and emits toggled on change."""
        if value != self.active:
            self.active = value
            self.toggled.emit(value)
            for s in self.selectors:
                s.set_visible(value)
            self.draw()
    def set_enabled(self, value):
        """Enables/disables interaction, recoloring the spans accordingly."""
        if value != self.enabled:
            self.enabled = value
            for s in self.selectors:
                if value == True:
                    s.set_edgecolor('Red')
                    s.set_facecolor('LightCoral')
                else:
                    s.set_edgecolor('DarkSlateGray')
                    s.set_facecolor('Gray')
    def draw(self):
        """Redraws: blitting while dragging, full idle draw otherwise."""
        if self.animated:
            self._draw_animate()
        else:
            self.canvas.draw_idle()
    def _draw_animate(self):
        """Restores the cached background and blits only the spans."""
        self.canvas.restore_region(self.background)
        if self.active:
            for s in self.selectors:
                if s.get_axes().get_visible():
                    self.fig.draw_artist(s)
        self.canvas.blit(self.fig.bbox)
    def _set_animated(self, value):
        """Enters/leaves blitting mode, caching the background on entry."""
        if self.animated != value:
            self.animated = value
            for s in self.selectors:
                s.set_animated(value)
            if self.animated == True:
                self.canvas.draw()
                self.background = self.canvas.copy_from_bbox(self.fig.bbox)
class EventMarker(QtCore.QObject):
    """Plots a vertical line marker to indicate the arrival time of
    a detected event on a SignalViewerWidget object.

    The marker is replicated on every axes of the figure and mirrored on
    the minimap.  It can be dragged with the left mouse button; on
    release, the new arrival time is committed to the document.

    Attributes:
        event: Marked event.

    Signals:
        event_selected: The marker was clicked, selecting its event.
        right_clicked: The marker was right-clicked.
    """
    event_selected = QtCore.Signal(rc.ApasvoEvent)
    right_clicked = QtCore.Signal(rc.ApasvoEvent)
    def __init__(self, fig, minimap, document, event, color='b', selected_color='r'):
        """Creates markers for 'event' on 'fig' and on the minimap.

        Args:
            fig: Matplotlib figure to draw the markers on.
            minimap: MiniMap widget mirroring the marker.
            document: Document used to commit arrival-time edits.
            event: The event to mark.
            color: Marker color when not selected.
            selected_color: Marker color when selected.
        """
        super(EventMarker, self).__init__()
        self.fig = fig
        self.minimap = minimap
        self.event = event
        self.document = document
        # Position is kept in samples, same unit as event.stime
        # (see set_position, which compares it to a sample index).
        self.position = self.event.stime
        self.selected = False
        self.color = color
        self.selected_color = selected_color
        self.markers = []
        # draw markers
        pos = self.event.stime / self.event.trace.fs
        for ax in self.fig.axes:
            marker = ax.axvline(pos)
            marker.set(color=self.color, ls='--', lw=2, picker=5)
            self.markers.append(marker)
        # draw minimap marker
        self.minimap.create_marker(event.resource_id.uuid, pos, color=self.color, ls='-', lw=1)
        # draw label
        bbox = dict(boxstyle="round", fc="LightCoral", ec="r", alpha=0.8)
        self.position_label = self.fig.text(0, 0,
                                            "Time: 00:00:00.000 seconds\nCF value: 0.000",
                                            bbox=bbox)
        self.position_label.set_visible(False)
        self.canvas = self.fig.canvas
        self.canvas.mpl_connect('pick_event', self.onpick)
        self.canvas.mpl_connect('button_release_event', self.onrelease)
        self.canvas.mpl_connect('motion_notify_event', self.onmove)
        self.pick_event = None
        # Animation related attrs.
        self.background = None
        self.animated = False
        self.draw()
    def onpick(self, pick_event):
        """Starts a drag (left click) or selects/emits (right click)."""
        if pick_event.artist in self.markers:
            if not self.canvas.widgetlock.locked():
                if pick_event.mouseevent.button == 1:  # left button clicked
                    # Take exclusive ownership of mouse events while dragging.
                    self.canvas.widgetlock(self)
                    self.pick_event = pick_event
                    xfig, yfig = self._event_to_fig_coords(pick_event.mouseevent)
                    self.position_label.set_position((xfig, yfig))
                    self.event_selected.emit(self.event)
                    self.draw()
                elif pick_event.mouseevent.button == 3:  # Right button clicked
                    self.event_selected.emit(self.event)
                    self.draw()
                    self.right_clicked.emit(self.event)
    def onrelease(self, mouse_event):
        """Finishes a drag; commits the new arrival time if it changed."""
        if self.canvas.widgetlock.isowner(self):
            self.position_label.set_visible(False)
            self.pick_event = None
            # End animation
            self.draw()
            self._set_animated(False)
            self.canvas.widgetlock.release(self)
            if self.position != self.event.stime:
                # Dragging a marker counts as a manual re-pick.
                self.document.editEvent(self.event, stime=self.position,
                                        evaluation_mode=rc.mode_manual,
                                        method=rc.method_other)
    def onmove(self, mouse_event):
        """Moves the marker and its tooltip-like label during a drag."""
        if self.pick_event is not None:
            xdata = self.get_xdata(mouse_event)
            self.set_position(xdata)
            xfig, yfig = self._event_to_fig_coords(mouse_event)
            self.position_label.set_position((xfig, yfig))
            self.position_label.set_visible(True)
            self._set_animated(True)
            self.draw()
    def get_xdata(self, event):
        """Converts an event's display coordinates into data (h-axis) units."""
        inv = self.fig.axes[0].transData.inverted()
        xdata, _ = inv.transform((event.x, event.y))
        return xdata
    def _event_to_fig_coords(self, event):
        """Converts an event's display coordinates into figure coordinates."""
        inv = self.fig.transFigure.inverted()
        return inv.transform((event.x, event.y))
    def set_position(self, value):
        """Moves the marker to 'value' (seconds), updating label and minimap.

        Positions outside the trace are ignored.
        """
        time_in_samples = int(value * self.event.trace.fs)
        if time_in_samples != self.position:
            if 0 <= time_in_samples <= len(self.event.trace.signal):
                self.position = time_in_samples
                time_in_seconds = time_in_samples / float(self.event.trace.fs)
                for marker in self.markers:
                    marker.set_xdata(time_in_seconds)
                # The CF may be shorter than the signal; show NaN outside it.
                if 0 <= self.position < len(self.event.trace.cf):
                    cf_value = self.event.trace.cf[self.position]
                else:
                    cf_value = np.nan
                self.position_label.set_text("Time: %s seconds.\nCF value: %.6g" %
                                             (clt.float_secs_2_string_date(time_in_seconds,
                                                                           starttime=self.event.trace.starttime), cf_value))
                self.minimap.set_marker_position(self.event.resource_id.uuid, time_in_seconds)
    def remove(self):
        """Removes the marker lines from every axes and from the minimap."""
        for ax, marker in zip(self.fig.axes, self.markers):
            ax.lines.remove(marker)
        self.minimap.delete_marker(self.event.resource_id.uuid)
        self.draw()
    def set_selected(self, value):
        """Switches between the normal and selected marker colors."""
        if self.selected != value:
            self.selected = value
            color = self.selected_color if self.selected else self.color
            for marker in self.markers:
                marker.set(color=color)
            self.minimap.set_marker(self.event.resource_id.uuid, color=color)
    def update(self):
        """Re-syncs the marker with the event's current arrival time."""
        if self.event.stime != self.position:
            self.set_position(self.event.stime / float(self.event.trace.fs))
            self.draw()
    def draw(self):
        """Redraws: blitting while dragging, full idle draw otherwise."""
        if self.animated:
            self._draw_animate()
        else:
            self.canvas.draw_idle()
        self.minimap.draw()
    def _draw_animate(self):
        """Restores the cached background and blits marker and label only."""
        self.canvas.restore_region(self.background)
        for marker in self.markers:
            if marker.get_axes().get_visible() and marker.get_visible():
                self.fig.draw_artist(marker)
        if self.position_label.get_visible():
            self.fig.draw_artist(self.position_label)
        self.canvas.blit(self.fig.bbox)
    def _set_animated(self, value):
        """Enters/leaves blitting mode, caching the background on entry."""
        if self.animated != value:
            self.animated = value
            for marker in self.markers:
                marker.set_animated(value)
            self.position_label.set_animated(value)
            if self.animated == True:
                self.canvas.draw()
                self.background = self.canvas.copy_from_bbox(self.fig.bbox)
class ThresholdMarker(QtCore.QObject):
    """Plots an horizontal line marker on a SignalViewerWidget to
    indicate a selected threshold value for the computed
    characteristic function.

    The line can be dragged vertically with the left mouse button; a
    label showing the current value follows the cursor while dragging.

    Attributes:
        threshold: A threshold value. Default: 0.0.
        active: Indicates whether the marker is active or not.

    Signals:
        thresholdChanged: 'threshold' value changed.
    """
    thresholdChanged = QtCore.Signal(float)
    def __init__(self, ax, threshold=0.0):
        """Creates the threshold line and its label on the given axes.

        Args:
            ax: Matplotlib axes to draw on (the CF axes).
            threshold: Initial threshold value.
        """
        super(ThresholdMarker, self).__init__()
        self.ax = ax
        self.threshold = threshold
        self.active = False
        # Set threshold line
        self.figThreshold = self.ax.axhline(self.threshold)
        self.figThreshold.set(color='b', ls='--', lw=2, alpha=0.8, picker=5)
        self.figThreshold.set_visible(False)
        # Set threshold label
        bbox = dict(boxstyle="round", fc="Lightblue", ec="b", alpha=0.8)
        self.figThresholdLabel = self.ax.text(0, 0, "0.00", bbox=bbox)
        self.figThresholdLabel.set_visible(False)
        self.pick_threshold = None
        self.canvas = self.ax.figure.canvas
        self.canvas.mpl_connect('pick_event', self.onpick)
        self.canvas.mpl_connect('button_release_event', self.onrelease)
        self.canvas.mpl_connect('motion_notify_event', self.onmove)
        # Animation related attrs.
        self.background = None
        self.animated = False
    def onpick(self, event):
        """Starts dragging the line on a left-button pick."""
        if self.active:
            if event.mouseevent.button == 1:  # left button clicked
                if event.artist == self.figThreshold:
                    if not self.canvas.widgetlock.locked():
                        # Take exclusive ownership of mouse events.
                        self.canvas.widgetlock(self)
                        self.pick_threshold = event
                        xdata, ydata = self.get_data(event.mouseevent)
                        # Draw legend
                        self.figThresholdLabel.set_position((xdata, ydata))
                        self.figThresholdLabel.set_visible(True)
                        self.draw()
    def onrelease(self, event):
        """Finishes the drag, hiding the label and releasing the lock."""
        if self.canvas.widgetlock.isowner(self):
            self.figThresholdLabel.set_visible(False)
            self.pick_threshold = None
            # End animation
            self._set_animated(False)
            self.draw()
            self.canvas.widgetlock.release(self)
    def onmove(self, event):
        """Updates threshold and label position while dragging."""
        if self.pick_threshold is not None:
            xdata, ydata = self.get_data(event)
            self.set_threshold(round(ydata, 2))
            # Draw legend
            self.figThresholdLabel.set_position((xdata, ydata))
            self._set_animated(True)
            self.draw()
    def get_data(self, event):
        """Converts display coordinates into data units, clamped to the
        axes limits (and to a non-negative y value)."""
        inv = self.ax.transData.inverted()
        xdata, ydata = inv.transform((event.x, event.y))
        ymin, ymax = self.ax.get_ylim()
        xmin, xmax = self.ax.get_xlim()
        if ydata < ymin:
            ydata = ymin
        elif ydata > ymax:
            ydata = ymax
        if ydata < 0.0:
            ydata = 0.0
        if xdata < xmin:
            xdata = xmin
        elif xdata > xmax:
            xdata = xmax
        return xdata, ydata
    def set_threshold(self, value):
        """Sets a new (non-negative) threshold and emits thresholdChanged."""
        if self.threshold != value:
            if value >= 0:
                self.threshold = value
                self.thresholdChanged.emit(self.threshold)
                self.figThreshold.set_ydata(self.threshold)
                self.figThresholdLabel.set_text("Threshold: %.2f" % self.threshold)
                self.draw()
    def set_visible(self, value):
        """Shows/hides the threshold line."""
        if self.active != value:
            self.figThreshold.set_visible(value)
            self.active = value
            self.draw()
    def get_visible(self):
        """Returns whether the threshold line is currently shown."""
        return self.active
    def draw(self):
        """Redraws: blitting while dragging, full idle draw otherwise."""
        if self.animated:
            self._draw_animate()
        else:
            self.canvas.draw_idle()
    def _draw_animate(self):
        """Restores the cached background and blits line and label only."""
        self.canvas.restore_region(self.background)
        if self.ax.get_visible() and self.figThreshold.get_visible():
            self.ax.draw_artist(self.figThreshold)
        if self.figThresholdLabel.get_visible():
            self.ax.draw_artist(self.figThresholdLabel)
        self.canvas.blit(self.ax.bbox)
    def _set_animated(self, value):
        """Enters/leaves blitting mode, caching the background on entry."""
        if self.animated != value:
            self.animated = value
            self.figThreshold.set_animated(value)
            self.figThresholdLabel.set_animated(value)
            if self.animated == True:
                self.canvas.draw()
                self.background = self.canvas.copy_from_bbox(self.ax.bbox)
class PlayBackMarker(QtCore.QObject):
    """Plots a vertical line marker on a SignalViewerWidget when
    signal is played to indicate the current position.

    Attributes:
        position: Current position of the marker.
        active: Indicates whether the marker is active or not.
    """

    def __init__(self, fig, parent, position=0.0, active=False):
        """Create one playback line on every axes of ``fig``.

        Args:
            fig: Figure on whose axes the marker lines are drawn.
            parent: Widget providing a draw() method used to repaint.
            position: Initial marker position (h-axis units).
            active: Initial visibility of the marker.
        """
        super(PlayBackMarker, self).__init__()
        self.fig = fig
        self.parent = parent
        self.position = position
        self.active = active
        # One vertical line per axes, all kept at the same position
        self.markers = [self._new_marker(ax) for ax in self.fig.axes]
        self.canvas = self.fig.canvas
        if self.active:
            self.parent.draw()

    def _new_marker(self, ax):
        """Create and style the playback line for a single axes."""
        marker = ax.axvline(self.position)
        marker.set(color='k', lw=1, alpha=0.6)
        marker.set_visible(self.active)
        return marker

    def set_position(self, value):
        """Move every per-axes line to the given position."""
        if value == self.position:
            return
        self.position = value
        for marker in self.markers:
            marker.set_xdata(self.position)
        if self.active:
            self.parent.draw()

    def set_visible(self, value):
        """Show or hide the playback marker on every axes."""
        if value == self.active:
            return
        self.active = value
        for marker in self.markers:
            marker.set_visible(self.active)
        self.parent.draw()

    def get_visible(self):
        """Return whether the marker is currently shown."""
        return self.active
class MiniMap(QtGui.QWidget):
    """Shows the entire signal and allows the user to navigate through it.

    Provides an scrollable selector over the entire signal.

    Attributes:
        xmin: Selector lower limit (measured in h-axis units).
        xmax: Selector upper limit (measured in h-axis units).
        step: Selector length (measured in h-axis units).
    """

    def __init__(self, parent, ax, record=None):
        """Create the minimap canvas, selector patches and mouse bindings.

        Args:
            parent: Parent viewer; must expose draw() and _set_animated().
            ax: Main axes whose x-limits are driven by the selector.
            record: Optional record to load immediately.
        """
        super(MiniMap, self).__init__(parent)
        self.ax = ax
        self.xmin = 0.0
        self.xmax = 0.0
        self.step = 10.0
        self.xrange = np.array([])
        self.minimapFig = plt.figure()
        self.minimapFig.set_figheight(0.75)
        self.minimapFig.add_axes((0, 0, 1, 1))
        self.minimapCanvas = FigureCanvas(self.minimapFig)
        self.minimapCanvas.setFixedHeight(64)
        self.minimapSelector = self.minimapFig.axes[0].axvspan(0, self.step,
                                                               color='gray',
                                                               alpha=0.5,
                                                               animated=True)
        self.minimapSelection = self.minimapFig.axes[0].axvspan(0, self.step,
                                                                color='LightCoral',
                                                                alpha = 0.5,
                                                                animated=True)
        self.minimapSelection.set_visible(False)
        self.minimapBackground = []
        self.minimapSize = (self.minimapFig.bbox.width,
                            self.minimapFig.bbox.height)
        self.press_selector = None
        self.playback_marker = None
        self.minimapCanvas.mpl_connect('button_press_event', self.onpress)
        self.minimapCanvas.mpl_connect('button_release_event', self.onrelease)
        self.minimapCanvas.mpl_connect('motion_notify_event', self.onmove)
        # Animation related attrs.
        self.background = None
        self.animated = False
        # Set the layout
        self.layout = QtGui.QVBoxLayout(self)
        self.layout.addWidget(self.minimapCanvas)
        # Animation related attributes
        self.parentViewer = parent
        # Set Markers dict
        self.markers = {}
        self.record = None
        if record is not None:
            self.set_record(record)

    def set_record(self, record, step=None):
        """Load a record into the minimap and redraw it.

        Args:
            record: Record whose signal is plotted.
            step: Initial selector width in seconds. Defaults to the current
                width. (Bug fix: ``step`` used to be a required parameter,
                but ``__init__`` calls ``set_record(record)`` without it,
                which raised TypeError.)
        """
        if step is None:
            step = self.step
        self.record = record
        self.step = step
        self.xrange = np.linspace(0, len(self.record.signal) / self.record.fs,
                                  num=len(self.record.signal), endpoint=False)
        self.xmin = self.xrange[0]
        self.xmax = self.xrange[-1]
        self.markers = {}
        ax = self.minimapFig.axes[0]
        ax.lines = []
        formatter = FuncFormatter(lambda x, pos: str(datetime.timedelta(seconds=x)))
        ax.xaxis.set_major_formatter(formatter)
        ax.grid(True, which='both')
        # Set dataseries to plot, decimated to the available pixel width
        xmin = self.xmin * self.record.fs
        xmax = self.xmax * self.record.fs
        pixel_width = np.ceil(self.minimapFig.get_figwidth() * self.minimapFig.get_dpi())
        x_data, y_data = plotting.reduce_data(self.xrange, self.record.signal, pixel_width, xmin, xmax)
        ax.plot(x_data, y_data, color='black', rasterized=True)
        ax.set_xlim(self.xmin, self.xmax)
        plotting.adjust_axes_height(ax)
        # Set the playback marker (animated so it can be blitted)
        self.playback_marker = PlayBackMarker(self.minimapFig, self)
        self.playback_marker.markers[0].set_animated(True)
        # Draw canvas and cache the background used for blitting
        self.minimapCanvas.draw()
        self.minimapBackground = self.minimapCanvas.copy_from_bbox(self.minimapFig.bbox)
        self.draw_animate()

    def onpress(self, event):
        """Center the selector on the clicked position and start a drag."""
        self.press_selector = event
        xdata = round(self.get_xdata(event), 2)
        xmin = round(xdata - (self.step / 2.0), 2)
        xmax = round(xdata + (self.step / 2.0), 2)
        self.parentViewer._set_animated(True)
        self.set_selector_limits(xmin, xmax)

    def onrelease(self, event):
        """Finish dragging the selector."""
        self.press_selector = None
        # Finish parent animation
        self.parentViewer._set_animated(False)

    def onmove(self, event):
        """While dragging, keep the selector centered on the cursor."""
        if self.press_selector is not None:
            xdata = round(self.get_xdata(event), 2)
            xmin = round(xdata - (self.step / 2.0), 2)
            xmax = round(xdata + (self.step / 2.0), 2)
            self.set_selector_limits(xmin, xmax)

    def get_xdata(self, event):
        """Convert an event's pixel position to a minimap x data value."""
        inv = self.minimapFig.axes[0].transData.inverted()
        xdata, _ = inv.transform((event.x, event.y))
        return xdata

    def set_selector_limits(self, xmin, xmax):
        """Move/resize the selector, clamped inside the signal range, and
        update the main axes x-limits accordingly.

        Bug fix: the clamping used to be two independent ``if`` statements,
        so the "wider than the whole signal" case could be overridden, and
        the left-edge clamp used ``xright = self.step`` (stale width that
        also assumed ``self.xmin == 0``). Now a single if/elif chain keeps
        the requested width relative to the clamped edge.
        """
        step = xmax - xmin
        if step >= self.xmax - self.xmin:
            # Requested window covers (at least) the whole signal
            xleft = self.xmin
            xright = self.xmax
        elif xmin < self.xmin:
            # Clamp to the left edge, preserving the requested width
            xleft = self.xmin
            xright = self.xmin + step
        elif xmax > self.xmax:
            # Clamp to the right edge, preserving the requested width
            xleft = self.xmax - step
            xright = self.xmax
        else:
            xleft = xmin
            xright = xmax
        if (xleft, xright) != (self.minimapSelector.xy[1, 0], self.minimapSelector.xy[2, 0]):
            self.step = step
            self.minimapSelector.xy[:2, 0] = xleft
            self.minimapSelector.xy[2:4, 0] = xright
            self.ax.set_xlim(xleft, xright)
            self.draw_animate()
        else:
            self.parentViewer.draw()

    def get_selector_limits(self):
        """Return the selector's (left, right) limits in h-axis units."""
        return self.minimapSelector.xy[0, 0], self.minimapSelector.xy[2, 0]

    def draw(self):
        """Repaint the minimap (always blitted)."""
        self.draw_animate()

    def draw_animate(self):
        """Blit the selector, selection and markers over the cached
        background, refreshing the background if the canvas was resized."""
        size = self.minimapFig.bbox.width, self.minimapFig.bbox.height
        if size != self.minimapSize:
            self.minimapSize = size
            self.minimapCanvas.draw()
            self.minimapBackground = self.minimapCanvas.copy_from_bbox(self.minimapFig.bbox)
        self.minimapCanvas.restore_region(self.minimapBackground)
        self.minimapFig.draw_artist(self.minimapSelection)
        self.minimapFig.draw_artist(self.minimapSelector)
        self.minimapFig.draw_artist(self.playback_marker.markers[0])
        for marker in self.markers.values():
            self.minimapFig.draw_artist(marker)
        self.minimapCanvas.blit(self.minimapFig.bbox)

    def set_visible(self, value):
        """Show or hide the minimap canvas."""
        self.minimapCanvas.setVisible(value)

    def get_visible(self):
        """Return whether the minimap canvas is visible."""
        return self.minimapCanvas.isVisible()

    def set_selection_limits(self, xleft, xright):
        """Move the (red) selection span to the given limits."""
        self.minimapSelection.xy[:2, 0] = xleft
        self.minimapSelection.xy[2:4, 0] = xright
        self.draw_animate()

    def set_selection_visible(self, value):
        """Show or hide the (red) selection span."""
        self.minimapSelection.set_visible(value)
        self.draw_animate()

    def create_marker(self, key, position, **kwargs):
        """Add a vertical marker line identified by ``key`` at ``position``.

        Positions outside the signal range are silently ignored.
        """
        if self.xmin <= position <= self.xmax:
            marker = self.minimapFig.axes[0].axvline(position, animated=True)
            self.markers[key] = marker
            self.markers[key].set(**kwargs)

    def set_marker_position(self, key, value):
        """Move an existing marker; out-of-range values are ignored."""
        marker = self.markers.get(key)
        if marker is not None:
            if self.xmin <= value <= self.xmax:
                marker.set_xdata(value)

    def set_marker(self, key, **kwargs):
        """Update an existing marker's artist properties."""
        marker = self.markers.get(key)
        if marker is not None:
            kwargs.pop("animated", None)  # marker's animated property must be always true to be drawn properly
            marker.set(**kwargs)

    def delete_marker(self, key):
        """Remove the marker identified by ``key``, if present."""
        marker = self.markers.get(key)
        if marker is not None:
            self.minimapFig.axes[0].lines.remove(marker)
            self.markers.pop(key)
class SignalViewerWidget(QtGui.QWidget):
    """Shows different visualizations of a seismic signal (magnitude, envelope,
    spectrogram, characteristic function).
    Allows the user to manipulate it (navigate through it, zoom in/out,
    edit detected events, select threshold value, etc...)
    """
    CF_loaded = QtCore.Signal(bool)  # emitted when a CF becomes (un)available
    event_selected = QtCore.Signal(rc.ApasvoEvent)  # user picked an event marker
    def __init__(self, parent, document=None):
        """Build the three-axes figure, minimap, span selector, context
        menus and axis formatters; load ``document`` if one is given."""
        super(SignalViewerWidget, self).__init__(parent)
        self.document = document
        # Full-signal limits (xmin/xmax) and current view limits (xleft/xright),
        # all in seconds.
        self.xmin = 0.0
        self.xmax = 0.0
        self.xleft = 0.0
        self.xright = 0.0
        self.time = np.array([])
        self.fs = 0.0
        self.signal = None
        self.envelope = None
        self.cf = None
        self.time = None
        # Line2D artists for the signal, its envelope and the CF
        self._signal_data = None
        self._envelope_data = None
        self._cf_data = None
        # Three stacked axes: signal+envelope, CF, spectrogram
        self.fig, _ = plt.subplots(3, 1)
        self.signal_ax = self.fig.axes[0]
        self.cf_ax = self.fig.axes[1]
        self.specgram_ax = self.fig.axes[2]
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Expanding,
                                                    QtGui.QSizePolicy.Policy.Expanding))
        self.canvas.setMinimumHeight(320)
        self.graphArea = QtGui.QScrollArea(self)
        self.graphArea.setWidget(self.canvas)
        self.graphArea.setWidgetResizable(True)
        # Event markers keyed by event resource uuid
        self.eventMarkers = {}
        self.last_right_clicked_event = None
        self.thresholdMarker = None
        self.playback_marker = None
        self.selector = SpanSelector(self.fig)
        self.minimap = MiniMap(self, self.signal_ax, None)
        # Load Spectrogram settings
        self.update_specgram_settings()
        # Animation related attributes
        self.background = None
        self.animated = False
        # Create context menus
        self.event_context_menu = QtGui.QMenu(self)
        self.takanami_on_event_action = QtGui.QAction("Apply Takanami to Event", self)
        self.takanami_on_event_action.setStatusTip("Refine event position by using Takanami algorithm")
        self.event_context_menu.addAction(self.takanami_on_event_action)
        self.takanami_on_event_action.triggered.connect(self.apply_takanami_to_selected_event)
        self.selection_context_menu = QtGui.QMenu(self)
        self.create_event_action = QtGui.QAction("Create New Event on Selection", self)
        self.create_event_action.setStatusTip("Create a new event on selection")
        self.takanami_on_selection_action = QtGui.QAction("Apply Takanami to Selection", self)
        self.takanami_on_selection_action.setStatusTip("Apply Takanami algorithm to selection")
        self.selection_context_menu.addAction(self.create_event_action)
        self.selection_context_menu.addAction(self.takanami_on_selection_action)
        self.create_event_action.triggered.connect(self.create_event_on_selection)
        self.takanami_on_selection_action.triggered.connect(self.apply_takanami_to_selection)
        # format axes: x tick labels show absolute record time
        formatter = FuncFormatter(lambda x, pos: clt.float_secs_2_string_date(x, self.document.record.starttime))
        for ax in self.fig.axes:
            ax.callbacks.connect('xlim_changed', self.on_xlim_change)
            ax.xaxis.set_major_formatter(formatter)
            plt.setp(ax.get_xticklabels(), visible=True)
            ax.grid(True, which='both')
        self.specgram_ax.callbacks.connect('ylim_changed', self.on_ylim_change)
        self.specgram_ax.set_xlabel('Time (seconds)')
        plt.setp(self.signal_ax.get_yticklabels(), visible=False)
        #self.signal_ax.set_ylabel('Signal Amp.')
        self.cf_ax.set_ylabel('CF Amp.')
        self.specgram_ax.set_ylabel('Frequency (Hz)')
        # Set the layout
        self.layout = QtGui.QVBoxLayout(self)
        self.layout.addWidget(self.graphArea)
        self.layout.addWidget(self.minimap)
        # Keep the minimap selection in sync with the span selector
        self.selector.toggled.connect(self.minimap.set_selection_visible)
        self.selector.valueChanged.connect(self.minimap.set_selection_limits)
        self.selector.right_clicked.connect(self.on_selector_right_clicked)
        if self.document is not None:
            self.set_record(document)
    @property
    def data_loaded(self):
        """Return True if a document is currently loaded."""
        return self.document is not None
    def set_record(self, document, step=120.0):
        """Load a document's record: plot signal, envelope, CF, spectrogram,
        markers and minimap. ``step`` is the initial view width in seconds.
        NOTE(review): step * fs yields a float used in slices below --
        relies on deprecated float slicing; confirm under the targeted
        Python/NumPy versions.
        """
        self.document = document
        self.fs = self.document.record.fs
        self.signal = self.document.record.signal
        self.envelope = env.envelope(self.signal)
        self.cf = self.document.record.cf
        self.time = np.linspace(0, len(self.signal) / self.fs, num=len(self.signal), endpoint=False)
        self.xmax = self.time[-1]
        # Draw minimap
        self.minimap.minimapSelector.set(visible=False)  # Hide minimap selector while loading
        self.minimap.set_record(self.document.record, step)
        # Plot signal
        step_samples = step * self.fs
        self._signal_data = self.signal_ax.plot(self.time[:step_samples],
                                                self.signal[:step_samples],
                                                color='black',
                                                rasterized=True)[0]
        # Plot envelope
        self._envelope_data = self.signal_ax.plot(self.time[:step_samples],
                                                  self.envelope[:step_samples],
                                                  color='red',
                                                  rasterized=True)[0]
        # Adjust y axis for signal plot
        signal_yaxis_max_value = max(np.max(self.signal), np.max(self.envelope))
        signal_yaxis_min_value = np.min(self.signal)
        plotting.adjust_axes_height(self.signal_ax,
                                    max_value=signal_yaxis_max_value,
                                    min_value=signal_yaxis_min_value)
        # Plot CF
        cf_loaded = (self.cf.size != 0)
        self.set_cf_visible(cf_loaded)
        self.CF_loaded.emit(cf_loaded)
        cf_step_samples = min(step_samples,len(self.cf))
        self._cf_data = self.cf_ax.plot(self.time[:cf_step_samples],
                                        self.cf[:cf_step_samples],
                                        color='black',
                                        rasterized=True)[0]
        # Adjust y axis for CF plot
        if cf_loaded:
            plotting.adjust_axes_height(self.cf_ax,
                                        max_value=np.max(self.cf),
                                        min_value=np.min(self.cf))
        self.thresholdMarker = ThresholdMarker(self.cf_ax)
        # Plot espectrogram
        plotting.plot_specgram(self.specgram_ax, self.signal, self.fs,
                               nfft=self.specgram_windowlen,
                               noverlap=self.specgram_noverlap,
                               window=self.specgram_window)
        # Set the span selector
        self.selector.fs = self.fs
        self.selector.set_active(False)
        self.selector.set_selection_limits(self.xmin, self.xmax)
        # Set the playback marker
        self.playback_marker = PlayBackMarker(self.fig, self)
        # Set the initial xlimits
        self.set_xlim(0, step)
        self.subplots_adjust()
        # Set event markers
        self.eventMarkers = {}
        for event in self.document.record.events:
            self.create_event(event)
        # Now activate selector again on minimap
        self.minimap.minimapSelector.set(visible=True)
        self.minimap.draw()
    def unset_record(self):
        """Clear the loaded document and empty every axes."""
        self.document = None
        self.signal = None
        self.envelope = None
        self.cf = None
        self.time = None
        self._signal_data = None
        self._envelope_data = None
        self._cf_data = None
        self.xmin, self.xmax = 0.0, 0.0
        self.eventMarkers = {}
        # Clear axes
        self.signal_ax.lines = []
        self.cf_ax.lines = []
        self.specgram_ax.lines = []
        self.specgram_ax.images = []
        self.CF_loaded.emit(False)
    def update_cf(self):
        """Refresh the CF plot from the record's (possibly new) CF data."""
        if self.data_loaded:
            self.cf = self.document.record.cf
            self._cf_data.set_xdata(self.time[:len(self.cf)])
            self._cf_data.set_ydata(self.cf)
            plotting.adjust_axes_height(self.cf_ax)
            cf_loaded = (self.cf.size != 0)
            self.CF_loaded.emit(cf_loaded)
            self.set_cf_visible(cf_loaded)
            self.draw()
    def create_events(self, new_events_set):
        """Create markers for every new event belonging to this record.

        ``new_events_set`` maps record uuids to lists of events.
        """
        for event in new_events_set.get(self.document.record.uuid, []):
            self.create_event(event)
    def create_event(self, event):
        """Create a marker for ``event`` (no-op if one already exists)."""
        event_id = event.resource_id.uuid
        if event_id not in self.eventMarkers:
            marker = EventMarker(self.fig, self.minimap, self.document, event)
            self.eventMarkers[event_id] = marker
            marker.event_selected.connect(self.event_selected.emit)
            marker.right_clicked.connect(self.on_event_right_clicked)
    def delete_events(self, new_events_set):
        """Delete the markers of every listed event belonging to this record."""
        for event in new_events_set.get(self.document.record.uuid, []):
            self.delete_event(event)
    def delete_event(self, event):
        """Remove the marker associated with ``event``."""
        event_id = event.resource_id.uuid
        self.eventMarkers[event_id].remove()
        self.eventMarkers.pop(event_id)
    def update_event(self, event):
        """Refresh the marker associated with ``event``."""
        self.eventMarkers[event.resource_id.uuid].update()
    def set_xlim(self, l, r):
        """Set the view limits (seconds), clamped to the signal range."""
        xmin = max(0, l)
        xmax = min(self.xmax, r)
        self.signal_ax.set_xlim(xmin, xmax)
    def on_xlim_change(self, ax):
        """Propagate an x-limit change to the other axes and the minimap,
        and re-decimate the plotted data for the new view window."""
        xmin, xmax = ax.get_xlim()
        if (self.xleft, self.xright) != (xmin, xmax):
            self.xleft, self.xright = xmin, xmax
            if self.xmin <= xmin <= xmax <= self.xmax:
                # Update minimap selector
                if (xmin, xmax) != self.minimap.get_selector_limits():
                    self.minimap.set_selector_limits(xmin, xmax)
                # Update axes
                for axes in self.fig.axes:
                    if ax != axes:
                        axes.set_xlim(xmin, xmax)
                # Update data
                # (xmin/xmax are reused as sample indices from here on)
                xmin = int(max(0, xmin) * self.fs)
                xmax = int(min(self.xmax, xmax) * self.fs)
                pixel_width = np.ceil(self.fig.get_figwidth() * self.fig.get_dpi())
                if self._signal_data is not None:
                    x_data, y_data = plotting.reduce_data(self.time, self.signal,
                                                          pixel_width, xmin, xmax)
                    self._signal_data.set_xdata(x_data)
                    self._signal_data.set_ydata(y_data)
                if self._envelope_data is not None:
                    x_data, y_data = plotting.reduce_data(self.time, self.envelope,
                                                          pixel_width, xmin, xmax)
                    self._envelope_data.set_xdata(x_data)
                    self._envelope_data.set_ydata(y_data)
                if self._cf_data is not None and self.cf_ax.get_visible():
                    x_data, y_data = plotting.reduce_data(self.time[:len(self.cf)],
                                                          self.cf, pixel_width,
                                                          xmin, xmax)
                    self._cf_data.set_xdata(x_data)
                    self._cf_data.set_ydata(y_data)
                # Draw graph
                self.draw()
            else:
                # Out-of-range request: clamp and retrigger this callback
                xmin = max(self.xmin, xmin)
                xmax = min(self.xmax, xmax)
                ax.set_xlim(xmin, xmax)
    def on_ylim_change(self, ax):
        """Keep the spectrogram's y-limits inside [0, Nyquist]."""
        if self.data_loaded:
            if ax == self.specgram_ax:
                ymin, ymax = ax.get_ylim()
                nyquist_freq = (self.fs / 2.0)
                if ymin < 0.0:
                    ax.set_ylim(0.0, ymax)
                elif ymax > nyquist_freq:
                    ax.set_ylim(ymin, nyquist_freq)
    def set_event_selection(self, events):
        """Mark exactly the given events as selected; deselect the rest."""
        event_id_list = [event.resource_id.uuid for event in events]
        for event_id in self.eventMarkers:
            self.eventMarkers[event_id].set_selected(event_id in event_id_list)
        self.draw()
        self.minimap.draw()
    def set_position(self, pos):
        """Center the current view window on ``pos`` (seconds), keeping the
        window width and clamping to the signal range."""
        xmin, xmax = self.signal_ax.get_xlim()
        mrange = xmax - xmin
        l, r = pos - mrange / 2.0, pos + mrange / 2.0
        if l < self.xmin:
            l, r = self.xmin, mrange
        elif r > self.xmax:
            l, r = self.xmax - mrange, self.xmax
        self.set_xlim(l, r)
    def goto_event(self, event):
        """Center the view on ``event``.
        NOTE(review): event.stime appears to be in samples (divided by fs
        here) -- confirm against the event model.
        """
        if event.resource_id.uuid in self.eventMarkers:
            self.set_position(event.stime / self.fs)
    def showEvent(self, event):
        # Qt show handler: repaint everything when the widget appears.
        self.draw()
        self.minimap.draw_animate()
    def resizeEvent(self, event):
        # Qt resize handler: repaint everything at the new size.
        self.draw()
        self.minimap.draw_animate()
    def set_signal_amplitude_visible(self, show_sa):
        """Show/hide the signal amplitude line; hide the whole axis when
        neither signal nor envelope is visible."""
        if self._signal_data is not None and self._envelope_data is not None:
            if self._signal_data.get_visible() != show_sa:
                self._signal_data.set_visible(show_sa)
                show_axis = (self._signal_data.get_visible() +
                             self._envelope_data.get_visible())
                self.signal_ax.set_visible(show_axis)
                if self.data_loaded:
                    self.subplots_adjust()
                    self.draw()
    def set_signal_envelope_visible(self, show_se):
        """Show/hide the envelope line; hide the whole axis when neither
        signal nor envelope is visible."""
        if self._signal_data is not None and self._envelope_data is not None:
            if self._envelope_data.get_visible() != show_se:
                self._envelope_data.set_visible(show_se)
                show_axis = (self._signal_data.get_visible() +
                             self._envelope_data.get_visible())
                self.signal_ax.set_visible(show_axis)
                if self.data_loaded:
                    self.subplots_adjust()
                    self.draw()
    def set_cf_visible(self, show_cf):
        """Show/hide the CF axis (forced hidden when no CF is loaded)."""
        if self.cf_ax.get_visible() != show_cf:
            if self.data_loaded:
                if len(self.cf) <= 0:
                    self.cf_ax.set_visible(False)
                else:
                    self.cf_ax.set_visible(show_cf)
                    self.subplots_adjust()
                    self.draw()
    def set_espectrogram_visible(self, show_eg):
        """Show/hide the spectrogram axis."""
        if self.specgram_ax.get_visible() != show_eg:
            self.specgram_ax.set_visible(show_eg)
            if self.data_loaded:
                self.subplots_adjust()
                self.draw()
    def set_minimap_visible(self, show_mm):
        """Show/hide the minimap widget."""
        if self.minimap.get_visible() != show_mm:
            self.minimap.set_visible(show_mm)
            self.minimap.draw_animate()
    def set_threshold_visible(self, show_thr):
        """Show/hide the threshold marker on the CF axis."""
        if self.thresholdMarker:
            if self.thresholdMarker.get_visible() != show_thr:
                self.thresholdMarker.set_visible(show_thr)
                self.draw()
    def subplots_adjust(self):
        """Re-stack the visible axes so they share the figure evenly."""
        visible_subplots = [ax for ax in self.fig.get_axes() if ax.get_visible()]
        for i, ax in enumerate(visible_subplots):
            correct_geometry = (len(visible_subplots), 1, i + 1)
            if correct_geometry != ax.get_geometry():
                ax.change_geometry(len(visible_subplots), 1, i + 1)
        # Adjust space between subplots
        self.fig.subplots_adjust(left=0.06, right=0.95, bottom=0.14,
                                 top=0.95, hspace=0.22)
    def get_selector_limits(self):
        """Return the span selector's (left, right) limits in seconds."""
        return self.selector.get_selector_limits()
    def set_selector_limits(self, xleft, xright):
        """Set the span selector's limits (seconds)."""
        self.selector.set_selector_limits(xleft, xright)
    def set_selection_enabled(self, value):
        """Enable/disable the span selector."""
        self.selector.set_enabled(value)
    def set_playback_position(self, position):
        """Move both playback markers (main figure and minimap)."""
        if self.playback_marker is not None:
            self.playback_marker.set_position(position)
            self.minimap.playback_marker.set_position(position)
    def set_playback_marker_visible(self, show_marker):
        """Show/hide both playback markers (main figure and minimap)."""
        if self.playback_marker is not None:
            self.playback_marker.set_visible(show_marker)
            self.minimap.playback_marker.set_visible(show_marker)
    def on_event_right_clicked(self, event):
        """Remember the clicked event and pop up its context menu."""
        self.last_right_clicked_event = event
        self.event_context_menu.exec_(QtGui.QCursor.pos())
    def apply_takanami_to_selected_event(self):
        """Refine the last right-clicked event with the Takanami dialog."""
        takanamidialog.TakanamiDialog(self.document,
                                      seismic_event=self.last_right_clicked_event).exec_()
    def apply_takanami_to_selection(self):
        """Run the Takanami dialog over the current selection."""
        xleft, xright = self.get_selector_limits()
        takanamidialog.TakanamiDialog(self.document, xleft, xright).exec_()
    def create_event_on_selection(self):
        """Create an event inside the selection: at the CF maximum if a CF
        is available, otherwise at the selection midpoint.
        NOTE(review): xleft/xright are floats here, so self.cf[xleft:xright]
        relies on deprecated float slicing -- confirm under the targeted
        Python/NumPy versions.
        """
        xleft, xright = self.get_selector_limits()
        xleft, xright = xleft * self.fs, xright * self.fs
        cf = self.cf[xleft:xright]
        if cf.size > 0:
            time = (xleft + np.argmax(cf))
        else:
            time = (xleft + ((xright - xleft) / 2.0))
        self.document.createEvent(time=time)
    def draw(self):
        """Repaint the figure, blitting when animation mode is enabled."""
        if self.animated:
            self._draw_animate()
        else:
            self.canvas.draw_idle()
    def _draw_animate(self):
        """Blit every visible animated artist over the cached background."""
        self.canvas.restore_region(self.background)
        for artist in self._get_animated_artists():
            if artist.get_visible():
                ax = artist.get_axes()
                if ax is not None:
                    if artist.get_axes().get_visible():
                        self.fig.draw_artist(artist)
                else:
                    # Artists not attached to an axes (e.g. figure-level)
                    self.fig.draw_artist(artist)
        self.canvas.blit(self.fig.bbox)
    def _set_animated(self, value):
        """Toggle blitting mode. When enabling, the background is captured
        with images (spectrogram) hidden -- presumably so they need not be
        redrawn on every blit -- and they are restored afterwards."""
        if self.animated != value:
            self.animated = value
            for artist in self._get_animated_artists():
                artist.set_animated(value)
            if self.animated == True:
                images = []
                for ax in self.fig.axes:
                    images.extend(ax.images)
                for image in images:
                    image.set_visible(False)
                self.canvas.draw()
                self.background = self.canvas.copy_from_bbox(self.fig.bbox)
                for image in images:
                    image.set_visible(True)
    def _get_animated_artists(self):
        """Yield every artist that takes part in blitting (images, lines,
        axis objects, patches and spines of every axes)."""
        artists = []
        for ax in self.fig.axes:
            artists.extend(ax.images)
            artists.extend(ax.lines)
            artists.append(ax.xaxis)
            artists.append(ax.yaxis)
            artists.extend(ax.patches)
            artists.extend(ax.spines.values())
        for artist in artists:
            yield artist
    def update_specgram_settings(self):
        """Reload spectrogram settings from persistent storage and, if data
        is loaded, redraw the spectrogram keeping the current view."""
        # load specgram settings
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup("specgram_settings")
        self.specgram_windowlen = int(settings.value('window_len', settingsdialog.SPECGRAM_WINDOW_LENGTHS[4]))
        self.specgram_noverlap = int(settings.value('noverlap', self.specgram_windowlen / 2))
        self.specgram_window = settings.value('window', plotting.SPECGRAM_WINDOWS[2])
        settings.endGroup()
        if self.data_loaded:
            # Plot espectrogram
            self.specgram_ax.images = []
            # Save x-axis limits
            limits = self.signal_ax.get_xlim()
            # Draw spectrogram
            plotting.plot_specgram(self.specgram_ax, self.signal, self.fs,
                                   nfft=self.specgram_windowlen,
                                   noverlap=self.specgram_noverlap,
                                   window=self.specgram_window)
            # Restore x-axis limits
            self.signal_ax.set_xlim(*limits)
    def paintEvent(self, paintEvent):
        # Qt paint handler: default behavior only.
        super(SignalViewerWidget, self).paintEvent(paintEvent)
    def on_selector_right_clicked(self):
        """Pop up the selection context menu, disabling the Takanami action
        when the selection is narrower than twice the minimum margin."""
        xleft, xright = self.get_selector_limits()
        self.takanami_on_selection_action.setEnabled((xright - xleft) >=
                                                     (takanamidialog.MINIMUM_MARGIN_IN_SECS * 2))
        self.selection_context_menu.exec_(QtGui.QCursor.pos())
from PySide import QtCore
from PySide import QtGui
import matplotlib
# Bind matplotlib to the Qt4Agg backend using PySide (must be set before
# pyplot is imported).
matplotlib.rcParams['backend'] = 'qt4agg'
matplotlib.rcParams['backend.qt4'] = 'PySide'
# Trade some drawing quality for speed when rendering long waveforms.
matplotlib.rcParams['patch.antialiased'] = False
matplotlib.rcParams['agg.path.chunksize'] = 80000
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from apasvo.gui.views import navigationtoolbar
from apasvo.gui.views import processingdialog
from apasvo.utils import clt
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import butter, lfilter, freqz
import numpy as np
import traceback
from apasvo.picking import apasvotrace as rc
from apasvo.picking import takanami
from apasvo._version import _application_name
from apasvo._version import _organization
MINIMUM_MARGIN_IN_SECS = 0.5
class FilterDesignTask(QtCore.QObject):
    """A task object holding the record a filter is being designed for.

    NOTE(review): the ``error`` and ``position_estimated`` signals (and the
    docstring this replaced) look copied from the Takanami task class; only
    ``record`` and ``finished`` appear to be meaningful here -- confirm
    before removing the extra signals.

    Attributes:
        record: An opened seismic record.

    Signals:
        finished: Task finishes.
    """

    finished = QtCore.Signal()
    error = QtCore.Signal(str, str)
    position_estimated = QtCore.Signal(int, np.ndarray, int)

    def __init__(self, record):
        """Store the record this task operates on."""
        super(FilterDesignTask, self).__init__()
        self.record = record
class FilterDesignDialog(QtGui.QDialog):
"""A dialog to apply Takanami's AR picking method to a selected piece of a
seismic signal.
Attributes:
document: Current opened document containing a seismic record.
seismic_event: A seismic event to be refined by using Takanami method.
If no event is provided, then a new seismic event will be created
by using the estimated arrival time after clicking on 'Accept'
"""
def __init__(self, stream, trace_list=None, parent=None):
super(FilterDesignDialog, self).__init__(parent)
# Calc max. frequency
traces = stream.traces if not trace_list else trace_list
self.max_freq = max([trace.fs for trace in traces])
self._init_ui()
self.load_settings()
# Initial draw
w, h_db, angles = self._retrieve_filter_plot_data()
self._module_data = self.module_axes.plot(w, h_db, 'b')[0]
self._phase_data = self.phase_axes.plot(w, angles, 'g')[0]
self.module_axes.set_ylim([-60,10])
self.phase_axes.set_ylim([min(angles), max(angles)])
self.canvas.draw_idle()
self.start_point_spinbox.valueChanged.connect(self.on_freq_min_changed)
self.end_point_spinbox.valueChanged.connect(self.on_freq_max_changed)
self.start_point_spinbox.valueChanged.connect(self._draw_filter_response)
self.end_point_spinbox.valueChanged.connect(self._draw_filter_response)
self.number_coefficient_spinbox.valueChanged.connect(self._draw_filter_response)
self.zeroPhaseCheckBox.toggled.connect(self._draw_filter_response)
self.button_box.accepted.connect(self.accept)
self.button_box.rejected.connect(self.reject)
self.button_box.clicked.connect(self.on_click)
def _init_ui(self):
self.setWindowTitle("Filter Design (Butterworth-Bandpass Filter)")
self.fig, _ = plt.subplots(1, 1, sharex=True)
# Set up filter axes
self.module_axes = self.fig.axes[0]
self.phase_axes = self.module_axes.twinx()
self.module_axes.set_title('Digital filter frequency response (Butterworth-Bandpass filter)')
self.module_axes.set_xlabel('Frequency [Hz]')
self.module_axes.set_ylabel('Amplitude [dB]', color='b')
self.module_axes.axis('tight')
self.module_axes.grid(which='both', axis='both')
self.phase_axes.set_ylabel('Angle (radians)', color='g')
self.canvas = FigureCanvas(self.fig)
self.canvas.setMinimumSize(self.canvas.size())
self.canvas.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Expanding,
QtGui.QSizePolicy.Policy.Expanding))
self.toolBarNavigation = navigationtoolbar.NavigationToolBar(self.canvas, self)
self.group_box = QtGui.QGroupBox(self)
self.group_box2 = QtGui.QGroupBox(self)
self.group_box3 = QtGui.QGroupBox(self)
self.group_box4 = QtGui.QGroupBox(self)
self.group_box.setTitle("")
self.group_box2.setTitle("")
self.group_box3.setTitle("Parameters")
self.start_point_label = QtGui.QLabel("Lower cutoff frequency (Hz): ")
self.start_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.start_point_spinbox = QtGui.QDoubleSpinBox(self.group_box)
self.start_point_spinbox.setMinimum(1.0)
self.start_point_spinbox.setSingleStep(1.00)
self.start_point_spinbox.setAccelerated(True)
self.start_point_spinbox.setMaximum(self.max_freq * 0.5)
self.end_point_label = QtGui.QLabel("Higher cutoff frequency (Hz):")
self.end_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.end_point_spinbox = QtGui.QDoubleSpinBox(self.group_box4)
self.end_point_spinbox.setMinimum(1.0)
self.end_point_spinbox.setSingleStep(1.00)
self.end_point_spinbox.setAccelerated(True)
self.end_point_spinbox.setMaximum(self.max_freq * 0.5)
self.end_point_spinbox.setValue(5.0)
#######################################################################
self.number_coefficient_label = QtGui.QLabel("Order: ")
self.number_coefficient_label2 = QtGui.QLabel("")
self.number_coefficient_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.number_coefficient_label2.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.number_coefficient_spinbox = QtGui.QSpinBox(self.group_box3)
self.number_coefficient_spinbox.adjustSize()
self.number_coefficient_spinbox.setMinimum(1)
self.number_coefficient_spinbox.setSingleStep(1)
self.number_coefficient_spinbox.setAccelerated(True)
self.zeroPhaseCheckBox = QtGui.QCheckBox("Zero phase filtering", self.group_box2)
self.zeroPhaseCheckBox.setChecked(True)
#######################################################################
self.group_box_layout = QtGui.QHBoxLayout(self.group_box)
self.group_box_layout.setContentsMargins(9, 9, 9, 9)
self.group_box_layout.setSpacing(12)
self.group_box_layout.addWidget(self.start_point_label)
self.group_box_layout.addWidget(self.start_point_spinbox)
self.group_box4_layout = QtGui.QHBoxLayout(self.group_box4)
self.group_box4_layout.setContentsMargins(9, 9, 9, 9)
self.group_box4_layout.setSpacing(12)
self.group_box4_layout.addWidget(self.end_point_label)
self.group_box4_layout.addWidget(self.end_point_spinbox)
#####################################################################
self.group_box2_layout = QtGui.QHBoxLayout(self.group_box2)
self.group_box2_layout.setContentsMargins(9, 9, 9, 9)
self.group_box2_layout.setSpacing(12)
self.group_box2_layout.addWidget(self.zeroPhaseCheckBox)
###################################################################
self.group_box3_layout = QtGui.QHBoxLayout(self.group_box3)
self.group_box3_layout.setContentsMargins(9, 9, 9, 9)
self.group_box3_layout.setSpacing(12)
self.group_box3_layout.addWidget(self.number_coefficient_label)
self.group_box3_layout.addWidget(self.number_coefficient_spinbox)
self.group_box3_layout.addWidget(self.number_coefficient_label2)
#####################################################################
self.button_box = QtGui.QDialogButtonBox(self)
self.button_box.setOrientation(QtCore.Qt.Horizontal)
self.button_box.setStandardButtons(QtGui.QDialogButtonBox.Apply |
QtGui.QDialogButtonBox.Cancel |
QtGui.QDialogButtonBox.Ok)
self.layout = QtGui.QVBoxLayout(self)
self.layout.setContentsMargins(9, 9, 9, 9)
self.layout.setSpacing(6)
self.layout.addWidget(self.toolBarNavigation)
self.layout.addWidget(self.canvas)
self.layout.addWidget(self.group_box3)
self.layout.addWidget(self.group_box)
self.layout.addWidget(self.group_box4)
#self.layout.addWidget(self.group_box2)
self.layout.addWidget(self.zeroPhaseCheckBox)
self.layout.addWidget(self.button_box)
def on_freq_min_changed(self, value):
self.end_point_spinbox.setMinimum(value + 1.0)
def on_freq_max_changed(self, value):
self.start_point_spinbox.setMaximum(value - 1.0)
def on_click(self, button):
if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Ok:
self.save_settings()
if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Apply:
self._draw_filter_response()
def save_settings(self):
"""Save settings to persistent storage."""
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("filterdesign_settings")
#self.default_margin = int(float(settings.value('filterdesign_margin', 5.0)) *
#self.record.fs)
settings.setValue('freq_min', self.start_point_spinbox.value())
settings.setValue('freq_max', self.end_point_spinbox.value())
settings.setValue('coef_number', self.number_coefficient_spinbox.value())
settings.setValue('zero_phase', self.zeroPhaseCheckBox.isChecked())
settings.endGroup()
def load_settings(self):
"""Loads settings from persistent storage."""
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("filterdesign_settings")
self.start_point_spinbox.setValue(float(settings.value('freq_min', 0.0)))
self.end_point_spinbox.setValue(float(settings.value('freq_max', self.max_freq * 0.5)))
self.number_coefficient_spinbox.setValue(int(settings.value('coef_number', 1)))
self.zeroPhaseCheckBox.setChecked(bool(settings.value('zero_phase', True)))
settings.endGroup()
def _butter_bandpass(self, lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def _retrieve_filter_plot_data(self):
    """Compute the frequency response of the currently configured filter.

    Returns:
        Tuple (freqs, magnitude_db, phase) where freqs is in Hz,
        magnitude_db is the gain in decibels and phase is the unwrapped
        phase response in radians.
    """
    b, a = self._butter_bandpass(self.start_point_spinbox.value(),
                                 self.end_point_spinbox.value(),
                                 self.max_freq,
                                 order=self.number_coefficient_spinbox.value())
    # Evaluate the response on 1024 points of the unit half-circle.
    w, h = freqz(b, a, 1024)
    phase = np.unwrap(np.angle(h))
    # Map normalized angular frequency [0, pi] onto [0, Nyquist] in Hz.
    freqs = (self.max_freq / 2) * (w / np.pi)
    return freqs, 20 * np.log10(abs(h)), phase
def _draw_filter_response(self, *args, **kwargs):
    """Refresh the magnitude and phase plots for the configured filter.

    Extra positional/keyword arguments are accepted (and ignored) so the
    method can be connected directly as a Qt slot.
    """
    freqs, magnitude_db, phase = self._retrieve_filter_plot_data()
    self._module_data.set_xdata(freqs)
    self._module_data.set_ydata(magnitude_db)
    self._phase_data.set_xdata(freqs)
    self._phase_data.set_ydata(phase)
    # Rescale the phase axis to the new data range before repainting.
    self.phase_axes.set_ylim([min(phase), max(phase)])
    self.canvas.draw_idle()
# Configure matplotlib for the Qt4/PySide backend. This must run before any
# module that pulls in matplotlib's plotting machinery.
import matplotlib
matplotlib.rcParams['backend'] = 'qt4agg'
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.rcParams['patch.antialiased'] = False  # trade smoothing for speed on dense plots
matplotlib.rcParams['agg.path.chunksize'] = 80000  # chunk very long paths for the Agg renderer
import numpy as np
import traceback
import os
from apasvo.picking import stalta
from apasvo.picking import ampa
from apasvo.picking import apasvotrace as rc
from PySide import QtGui, QtCore
from apasvo._version import __version__
from apasvo._version import _application_name
from apasvo._version import _organization
from apasvo.gui.views.generated import ui_mainwindow
from apasvo.utils.formats import rawfile
from apasvo.gui.views.generated import qrc_icons
from apasvo.gui.delegates import cbdelegate
from apasvo.gui.models import pickingtask
from apasvo.gui.models import eventcommands as commands
from apasvo.gui.views import aboutdialog
from apasvo.gui.views import svwidget
from apasvo.gui.views import navigationtoolbar
from apasvo.gui.views import loaddialog
from apasvo.gui.views import savedialog
from apasvo.gui.views import save_events_dialog
from apasvo.gui.views import settingsdialog
from apasvo.gui.views import takanamidialog
from apasvo.gui.views import FilterDesing
from apasvo.gui.views import trace_selector_dialog
from apasvo.gui.views import staltadialog
from apasvo.gui.views import ampadialog
from apasvo.gui.views import playertoolbar
from apasvo.gui.views import error
from apasvo.gui.views import processingdialog
# Identifiers for the supported event-summary export formats.
format_csv = 'csv'
format_xml = 'xml'
format_nlloc = 'hyp'
format_json = 'json'
format_other = 'other'

# File-dialog filter strings (mapped to formats in MainWindow._file_filters).
binary_files_filter = 'Binary Files (*.bin)'
text_files_filter = 'Text Files (*.txt)'
all_files_filter = 'All Files (*.*)'
csv_files_filter = 'CSV Files (*.csv)'
xml_files_filter = 'XML Files (*.xml)'
nlloc_files_filter = 'NLLoc Files (*.hyp)'
json_files_filter = 'JSON Files (*.json)'

# Project wiki, opened from the Help menu.
APASVO_URL = 'https://github.com/jemromerol/apasvo/wiki'
class MainWindow(QtGui.QMainWindow, ui_mainwindow.Ui_MainWindow):
    """Application Main Window class. SDI GUI style.

    Attributes:
        record: Current opened seismic document.
        isModified: Indicates whether there are any changes in results to save
            or not.
        saved_filename: Name of the file where results are being saved.
    """

    windowList = []  # A list of opened application windows
    MaxRecentFiles = 10  # Recent files list max size

    # File-dialog filter string -> rawfile format identifier, for opening
    # signal files, saving characteristic functions and saving summaries.
    _file_filters = {binary_files_filter: rawfile.format_binary,
                     text_files_filter: rawfile.format_text,
                     all_files_filter: format_other}
    _cf_file_filters = {binary_files_filter: rawfile.format_binary,
                        text_files_filter: rawfile.format_text,
                        all_files_filter: format_other}
    _summary_file_filters = {xml_files_filter: format_xml,
                             nlloc_files_filter: format_nlloc,
                             json_files_filter: format_json,
                             text_files_filter: rawfile.format_text,
                             all_files_filter: format_other}

    def __init__(self, parent=None, filename=None):
        """Build the main window: state, menus, toolbars and signal wiring.

        Args:
            parent: Optional parent widget.
            filename: Accepted for API symmetry; not used here.
        """
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        # Document/stream state.
        self.stream = rc.ApasvoStream([])
        self.document_list = []
        self.current_document_idx = -1
        self.document = None
        self.isModified = False
        # Remembered save targets/formats for events and CF re-saves.
        self.saved_filename = None
        self.saved_event_format = None
        self.saved_cf_filename = None
        self.saved_cf_format = None
        self.saved_cf_dtype = None
        self.saved_cf_byteorder = None
        # Create context menu for events table
        self.event_context_menu = QtGui.QMenu(self)
        self.event_context_menu.addAction(self.actionDelete_Selected)
        self.EventsTableView.customContextMenuRequested.connect(lambda: self.event_context_menu.exec_(QtGui.QCursor.pos()))
        self.EventsTableView.clicked.connect(self.goto_event_position)
        # Wire menu actions.
        self.actionOpen.triggered.connect(self.load)
        self.actionSaveEvents.triggered.connect(self.save_events)
        self.actionSaveEvents_As.triggered.connect(self.save_events_as)
        self.actionSaveCF.triggered.connect(self.save_cf)
        self.actionSaveCF_As.triggered.connect(self.save_cf_as)
        # NOTE(review): command_stack is presumably created by setupUi or a
        # base class — confirm it exists before actionClose fires.
        self.actionClose.triggered.connect(lambda: self.command_stack.push(commands.CloseTraces(self, [self.current_document_idx])))
        self.actionQuit.triggered.connect(QtGui.qApp.closeAllWindows)
        self.actionClearRecent.triggered.connect(self.clear_recent_list)
        self.actionSettings.triggered.connect(self.edit_settings)
        self.actionSTA_LTA.triggered.connect(self.doSTALTA)
        self.actionAMPA.triggered.connect(self.doAMPA)
        self.actionTakanami.triggered.connect(self.doTakanami)
        self.actionFilterDesing.triggered.connect(self.doFilterDesing)
        self.actionClear_Event_List.triggered.connect(self.clear_events)
        self.actionDelete_Selected.triggered.connect(self.delete_selected_events)
        self.actionAbout.triggered.connect(self.show_about)
        self.actionOnlineHelp.triggered.connect(lambda: QtGui.QDesktopServices.openUrl(QtCore.QUrl(APASVO_URL)))
        # Create stream viewer dialog
        self.trace_selector = trace_selector_dialog.TraceSelectorDialog(self.stream, parent=self)
        self.action_show_trace_selector.toggled.connect(self.trace_selector.setVisible)
        self.trace_selector.closed.connect(lambda: self.action_show_trace_selector.setChecked(False))
        self.trace_selector.selection_changed.connect(self.toogle_document)
        self.viewFilteredCheckBox.toggled.connect(self.toggle_filtered)
        # add navigation toolbar
        self.signalViewer = svwidget.SignalViewerWidget(self.splitter)
        self.splitter.addWidget(self.signalViewer)
        self.toolBarNavigation = navigationtoolbar.NavigationToolBar(self.signalViewer.canvas, self)
        self.toolBarNavigation.setEnabled(False)
        self.toolBarNavigation.view_restored.connect(self.signalViewer.subplots_adjust)
        self.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBarNavigation)
        self.addToolBarBreak()
        # add analysis toolbar
        self.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBarAnalysis)
        self.addToolBarBreak()
        # add media toolbar (playback frequency/bit depth from settings)
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup('player_settings')
        fs = int(settings.value('playback_freq', playertoolbar.DEFAULT_REAL_FREQ))
        bd = settings.value('bit_depth', playertoolbar.DEFAULT_BIT_DEPTH)
        settings.endGroup()
        self.toolBarMedia = playertoolbar.PlayerToolBar(self, sample_freq=fs, bd=bd)
        self.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBarMedia)
        # Keep the player and the signal viewer's selector in sync.
        self.toolBarMedia.intervalChanged.connect(self.signalViewer.set_selector_limits)
        self.toolBarMedia.intervalSelected.connect(self.signalViewer.selector.set_active)
        self.toolBarMedia.tick.connect(self.signalViewer.set_playback_position)
        self.toolBarMedia.playingStateChanged.connect(lambda x: self.signalViewer.set_selection_enabled(not x))
        self.toolBarMedia.playingStateSelected.connect(lambda: self.signalViewer.set_playback_marker_visible(True))
        self.toolBarMedia.stoppedStateSelected.connect(lambda: self.signalViewer.set_playback_marker_visible(False))
        self.signalViewer.selector.toggled.connect(self.toolBarMedia.toggle_interval_selected)
        self.signalViewer.selector.valueChanged.connect(self.toolBarMedia.set_limits)
        self.addToolBarBreak()
        # View-menu toggles for the signal viewer's sub-plots.
        self.actionEvent_List.toggled.connect(self.EventsTableView.setVisible)
        self.actionSignal_Amplitude.toggled.connect(self.signalViewer.set_signal_amplitude_visible)
        self.actionSignal_Envelope.toggled.connect(self.signalViewer.set_signal_envelope_visible)
        self.actionEspectrogram.toggled.connect(self.signalViewer.set_espectrogram_visible)
        self.actionCharacteristic_Function.toggled.connect(self.signalViewer.set_cf_visible)
        self.actionSignal_MiniMap.toggled.connect(self.signalViewer.set_minimap_visible)
        self.signalViewer.selector.toggled.connect(self.on_selection_toggled)
        self.signalViewer.selector.valueChanged.connect(self.on_selection_changed)
        self.signalViewer.CF_loaded.connect(self.actionCharacteristic_Function.setEnabled)
        self.signalViewer.CF_loaded.connect(self.actionCharacteristic_Function.setChecked)
        self.signalViewer.event_selected.connect(self.on_event_picked)
        self.actionActivateThreshold.toggled.connect(self.toggle_threshold)
        # Toolbar visibility toggles.
        self.actionMain_Toolbar.toggled.connect(self.toolBarMain.setVisible)
        self.actionMedia_Toolbar.toggled.connect(self.toolBarMedia.setVisible)
        self.actionAnalysis_Toolbar.toggled.connect(self.toolBarAnalysis.setVisible)
        self.actionNavigation_Toolbar.toggled.connect(self.toolBarNavigation.setVisible)
        # Connect trace selector to signal viewer
        self.trace_selector.detection_performed.connect(self.signalViewer.update_cf)
        self.set_title()
        self.set_recent_menu()

    def load(self, filename=None):
        """Opens a new document.

        Opens selected document in the current window if it isn't currently
        showing a document, otherwise the document is opened in a new
        window.

        Args:
            filename: Selected document. If None is provided launches a
                file dialog to let the user select a file.
                Default: None.
        """
        if filename is None:
            filename, _ = QtGui.QFileDialog.getOpenFileName(self, "Open Data File", ".",
                                                            ";;".join(self._file_filters), all_files_filter)
        if filename != '':
            dialog = loaddialog.LoadDialog(self, filename)
            return_code = dialog.exec_()
            if return_code == QtGui.QDialog.Accepted:
                try:
                    values = dialog.get_values()
                    # Load and visualize the opened record
                    QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
                    self.analysis_label.setText("Loading {}...".format(os.path.basename(filename)))
                    self.analysis_progress_bar.show()
                    stream = rc.read(filename, **values)
                    # Opening is undoable: push onto the command stack.
                    self.command_stack.push(commands.OpenStream(self, stream))
                    # Update recent list
                    self.push_recent_list(filename)
                except Exception as e:
                    error.display_error_dlg(str(e), traceback.format_exc())
                finally:
                    self.analysis_progress_bar.hide()
                    self.analysis_label.setText("")
                    QtGui.QApplication.restoreOverrideCursor()

    def open_recent(self):
        """Opens a document from recent opened list."""
        # The triggering QAction carries the filename in its data().
        action = self.sender()
        if action:
            self.load(action.data())

    def save_events(self):
        """Saves event list to file.

        If no events has been saved yet, opens a save file dialog.
        """
        if self.saved_filename is None:
            return self.save_events_as()
        else:
            return self.save_event_list(self.saved_filename)

    def save_events_as(self):
        """Opens a save file dialog to save event list to file."""
        filename, _ = QtGui.QFileDialog.getSaveFileName(self, "Save Event List to File", ".",
                                                        ";;".join(self._summary_file_filters))
        if filename != '':
            # Show dialog
            dialog = save_events_dialog.SaveEventsDialog(self, fmt=self.saved_event_format)
            return_code = dialog.exec_()
            # Save Events to file and store settings
            if return_code == QtGui.QDialog.Accepted:
                values = dialog.get_values()
                self.save_event_list(filename, format=values.get('fmt'))

    def save_event_list(self, filename, **kwargs):
        """Saves a results summary to file.

        Generates a results CSV summary.

        Args:
            filename: Output file name.
            **kwargs: Forwarded to ApasvoStream.export_picks (e.g. 'format').
        """
        rc.ApasvoStream([self.document.record]).export_picks(filename, **kwargs)
        # Remember target so subsequent "Save" calls skip the dialog.
        self.saved_filename = filename
        self.saved_event_format = kwargs.get('format')

    def save_cf(self):
        """Saves characteristic function to file.

        If no characteristic function has been saved yet, opens a save file dialog.
        """
        if self.saved_cf_filename is None:
            return self.save_cf_as()
        else:
            return self.document.record.save_cf(self.saved_cf_filename,
                                                fmt=self.saved_cf_format,
                                                dtype=self.saved_cf_dtype,
                                                byteorder=self.saved_cf_byteorder)

    def save_cf_as(self):
        """Open a save file dialog to save computed characteristic function."""
        filename, selected_filter = QtGui.QFileDialog.getSaveFileName(self, "Save Characteristic Function to File", ".",
                                                                      ";;".join(self._cf_file_filters.keys()))
        if filename != '':
            # Set defaults: selected filter wins, then last-used, then binary.
            if self._cf_file_filters[selected_filter] != format_other:
                fmt = self._cf_file_filters[selected_filter]
            elif self.saved_cf_format is not None:
                fmt = self.saved_cf_format
            else:
                fmt = rawfile.format_binary
            if self.saved_cf_dtype is not None:
                dtype = self.saved_cf_dtype
            else:
                dtype = rawfile.datatype_float64
            if self.saved_cf_byteorder is not None:
                byteorder = self.saved_cf_byteorder
            else:
                byteorder = rawfile.byteorder_native
            # Show dialog
            dialog = savedialog.SaveDialog(self, fmt=fmt,
                                           dtype=dtype,
                                           byteorder=byteorder)
            return_code = dialog.exec_()
            # Save CF to file and store settings
            if return_code == QtGui.QDialog.Accepted:
                values = dialog.get_values()
                self.document.record.save_cf(filename, **values)
                self.saved_cf_filename = filename
                self.saved_cf_format = values['fmt']
                self.saved_cf_dtype = values['dtype']
                self.saved_cf_byteorder = values['byteorder']

    def close(self):
        """Closes current document.

        If there are any changes to save, shows a dialog asking
        the user whether to save data or not.
        """
        if self.maybeSave():
            if self.document is not None:
                # Disconnect all signals!!
                self.document = None
                self.set_modified(False)
                self.saved_filename = None
                self.saved_event_format = None
                self.signalViewer.unset_record()
                self.toolBarMedia.disconnect_path()
                # Update GUI
                self.centralwidget.setVisible(False)
                self.actionClose.setEnabled(False)
                self.actionClear_Event_List.setEnabled(False)
                self.actionSTA_LTA.setEnabled(False)
                self.actionAMPA.setEnabled(False)
                self.toolBarNavigation.setEnabled(False)
                self.toolBarAnalysis.setEnabled(False)
                self.adjustSize()
                self.set_title()

    def toogle_document(self, document_idx):
        """Switch the main view to the document at the given stream index.

        NOTE(review): method name is misspelled ("toogle") but is part of
        the public signal/slot interface, so it is kept as-is.

        Args:
            document_idx: Index into self.document_list of the document to show.
        """
        self.current_document_idx = document_idx
        document = self.document_list[document_idx]
        self.disconnect_document()
        # Load and visualize the opened record
        self.document = document
        self.document.record.use_filtered = self.viewFilteredCheckBox.isChecked()
        self.document.emptyList.connect(self.set_modified)
        ########
        self.EventsTableView.setModel(self.document)
        model = self.EventsTableView.selectionModel()
        model.selectionChanged.connect(self.on_event_selection)
        # Connect Delegates
        for i, attribute in enumerate(self.document.attributes):
            if attribute.get('attribute_type') == 'enum' and attribute.get('editable', False):
                delegate = cbdelegate.ComboBoxDelegate(self.EventsTableView,
                                                       attribute.get('value_list', []))
                self.EventsTableView.setItemDelegateForColumn(i, delegate)
            else:
                self.EventsTableView.setItemDelegateForColumn(i, None)
        # connect trace selector to document
        self.trace_selector.events_created.connect(lambda x: self.document.updateList())
        self.trace_selector.events_deleted.connect(lambda x: self.document.updateList())
        self.trace_selector.events_created.connect(self.signalViewer.create_events)
        self.trace_selector.events_deleted.connect(self.signalViewer.delete_events)
        # connect document model to signalViewer
        self.document.eventCreated.connect(self.signalViewer.create_event)
        self.document.eventCreated.connect(self.trace_selector.update_events)
        self.document.eventDeleted.connect(self.signalViewer.delete_event)
        self.document.eventDeleted.connect(self.trace_selector.update_events)
        self.document.eventModified.connect(self.signalViewer.update_event)
        self.document.eventModified.connect(self.trace_selector.update_events)
        self.document.detectionPerformed.connect(self.signalViewer.update_cf)
        self.document.detectionPerformed.connect(self.toolBarNavigation.update)
        # load document data into signal viewer
        self.signalViewer.unset_record()
        self.signalViewer.set_record(self.document)
        self.signalViewer.thresholdMarker.thresholdChanged.connect(self.thresholdSpinBox.setValue)
        self.signalViewer.set_signal_amplitude_visible(self.actionSignal_Amplitude.isChecked())
        self.signalViewer.set_signal_envelope_visible(self.actionSignal_Envelope.isChecked())
        self.signalViewer.set_cf_visible(self.actionCharacteristic_Function.isChecked())
        self.signalViewer.set_espectrogram_visible(self.actionEspectrogram.isChecked())
        self.signalViewer.set_minimap_visible(self.actionSignal_MiniMap.isChecked())
        self.signalViewer.set_threshold_visible(self.actionActivateThreshold.isChecked())
        self.signalViewer.thresholdMarker.set_threshold(self.thresholdSpinBox.value())
        self.thresholdSpinBox.valueChanged.connect(self.signalViewer.thresholdMarker.set_threshold)
        self.toolBarMedia.load_data(self.document.record.signal, self.document.record.fs)
        self.toolBarMedia.connect_path()
        # Update GUI
        self.centralwidget.setVisible(True)
        self.actionClose.setEnabled(True)
        self.actionClear_Event_List.setEnabled(True)
        self.actionSTA_LTA.setEnabled(True)
        self.actionAMPA.setEnabled(True)
        self.actionFilterDesing.setEnabled(True)
        self.toolBarNavigation.setEnabled(True)
        self.toolBarAnalysis.setEnabled(True)
        self.toolBarMedia.set_enabled(True)
        self.set_title()

    def disconnect_document(self):
        """Disconnect every signal wired to the current document (if any)."""
        if self.document is not None:
            # Disconnect existing signals
            self.trace_selector.events_created.disconnect()
            self.trace_selector.events_deleted.disconnect()
            # Disconnect document signal
            self.document.emptyList.disconnect(self.set_modified)
            self.document.eventCreated.disconnect()
            self.document.eventDeleted.disconnect()
            self.document.eventModified.disconnect()
            self.document.detectionPerformed.disconnect(self.signalViewer.update_cf)
            self.document.detectionPerformed.disconnect(self.toolBarNavigation.update)
            model = self.EventsTableView.selectionModel()
            model.selectionChanged.disconnect(self.on_event_selection)

    def toggle_filtered(self, value):
        """Show the filtered or the raw signal and reload the current view.

        Args:
            value: True to display the band-pass-filtered signal.
        """
        if self.document is not None:
            self.document.record.use_filtered = value
            # Reload the current document so all plots pick up the change.
            self.toogle_document(self.current_document_idx)

    def edit_settings(self):
        """Opens settings dialog."""
        dialog = settingsdialog.SettingsDialog(self)
        dialog.saved.connect(self.update_settings)
        dialog.exec_()

    def update_settings(self):
        """Re-read persistent settings and apply them to the open widgets."""
        QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
        settings = QtCore.QSettings(_organization, _application_name)
        # update player settings
        settings.beginGroup('player_settings')
        fs = int(settings.value('playback_freq', playertoolbar.DEFAULT_REAL_FREQ))
        bd = settings.value('bit_depth', playertoolbar.DEFAULT_BIT_DEPTH)
        settings.endGroup()
        self.toolBarMedia.set_audio_format(fs, bd)
        # update event colors
        if self.document is not None:
            self.document.loadColorMap()
        # update spectrogram
        if self.signalViewer is not None:
            self.signalViewer.update_specgram_settings()
        QtGui.QApplication.restoreOverrideCursor()

    def push_recent_list(self, filename):
        """Adds a document to recent opened list.

        Args:
            filename: Name of the file to add.
        """
        settings = QtCore.QSettings(_organization, _application_name)
        files = self.get_recent_list()
        # Move an already-listed file to the front instead of duplicating it.
        if filename in files:
            files.remove(filename)
        files.insert(0, filename)
        settings.setValue('recentFileList', files)
        self.set_recent_menu()

    def get_recent_list(self):
        """Gets a list of recent opened documents.

        Returns:
            out: A list of filenames.
        """
        settings = QtCore.QSettings(_organization, _application_name)
        files = settings.value('recentFileList')
        if files:
            # QSettings may return a single value instead of a list when only
            # one entry was stored.
            if isinstance(files, list):
                return list(files)
            else:
                return [files]
        return []

    def clear_recent_list(self):
        """Clears recent opened documents list."""
        settings = QtCore.QSettings(_organization, _application_name)
        settings.setValue('recentFileList', [])
        self.set_recent_menu()

    def set_recent_menu(self):
        """Fills 'File -> Open Recent' menu with a list of recent opened
        docs.
        """
        files = self.get_recent_list()
        files_no = len(files)
        num_recent_files = min(files_no, MainWindow.MaxRecentFiles)
        self.menuOpen_Recent.clear()
        for i in xrange(num_recent_files):
            action = QtGui.QAction("&%d %s" %
                                   (i + 1, self.strippedName(files[i])), self)
            action.setData(files[i])
            action.triggered.connect(self.open_recent)
            self.menuOpen_Recent.addAction(action)
        self.menuOpen_Recent.addSeparator()
        self.menuOpen_Recent.addAction(self.actionClearRecent)
        if num_recent_files == 0:
            self.actionClearRecent.setEnabled(False)
        else:
            self.actionClearRecent.setEnabled(True)

    def maybeSave(self):
        """If there are any changes to save, shows a dialog asking
        the user whether to save data or not.

        Returns:
            out: False if the user cancels the dialog, True otherwise.
        """
        if self.isModified:
            ret = QtGui.QMessageBox.warning(self, "Save changes",
                                            "The document has been modified.\nDo you want to save "
                                            "your changes?",
                                            QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard |
                                            QtGui.QMessageBox.Cancel)
            if ret == QtGui.QMessageBox.Save:
                self.save_events()
                self.save_cf()
            elif ret == QtGui.QMessageBox.Cancel:
                return False
        return True

    def closeEvent(self, event):
        """Current window's close event"""
        if self.maybeSave():
            # prevent toolBarMedia firing signals if it's on playing or paused state
            self.toolBarMedia.blockSignals(True)
            self.toolBarMedia.disconnect_path()
            event.accept()
        else:
            event.ignore()

    def set_modified(self, value):
        """Sets 'isModified' attribute's value"""
        self.isModified = value
        self.actionSaveEvents.setEnabled(value)
        self.actionSaveEvents_As.setEnabled(value)
        # If already computed, enable save CF
        cf_computed = False if self.document is None else len(self.document.record.cf) != 0
        self.actionSaveCF.setEnabled(cf_computed)
        self.actionSaveCF_As.setEnabled(cf_computed)

    def set_title(self):
        """Sets current window's title."""
        prefix = '' if self.document is None else "%s - " % self.document.record.name
        self.setWindowTitle('%s%s v.%s' % (prefix, _application_name, __version__))

    def strippedName(self, fullFileName):
        """Return just the file-name component of a full path."""
        return QtCore.QFileInfo(fullFileName).fileName()

    def toggle_threshold(self, value):
        """Set threshold checkbox's value"""
        self.thresholdLabel.setEnabled(value)
        self.thresholdSpinBox.setEnabled(value)
        self.signalViewer.thresholdMarker.set_visible(value)

    def doSTALTA(self):
        """Performs event detection/picking by using STA-LTA method."""
        dialog = staltadialog.StaLtaDialog(self.stream,
                                           trace_list=[self.document.record])
        return_code = dialog.exec_()
        if return_code == QtGui.QDialog.Accepted:
            # Read settings
            settings = QtCore.QSettings(_organization, _application_name)
            settings.beginGroup('stalta_settings')
            sta_length = float(settings.value('sta_window_len', 5.0))
            lta_length = float(settings.value('lta_window_len', 100.0))
            settings.endGroup()
            # Get threshold value
            if self.actionActivateThreshold.isChecked():
                threshold = self.thresholdSpinBox.value()
            else:
                threshold = None
            # Create an STA-LTA algorithm instance with selected settings
            alg = stalta.StaLta(sta_length, lta_length)
            # perform task
            self._analysis_task = pickingtask.PickingTask(self.document, alg,
                                                          threshold)
            self.launch_analysis_task(self._analysis_task,
                                      label="Applying %s..." % alg.__class__.__name__.upper())

    def doAMPA(self):
        """Performs event detection/picking by using AMPA method."""
        dialog = ampadialog.AmpaDialog(self.stream,
                                       trace_list=[self.document.record])
        return_code = dialog.exec_()
        if return_code == QtGui.QDialog.Accepted:
            # Read settings
            settings = QtCore.QSettings(_organization, _application_name)
            settings.beginGroup('ampa_settings')
            wlen = float(settings.value('window_len', 100.0))
            wstep = float(settings.value('step', 50.0))
            nthres = float(settings.value('noise_threshold', 90))
            filters = settings.value('filters', [30.0, 20.0, 10.0,
                                                 5.0, 2.5])
            # QSettings may hand back a scalar when one filter was stored.
            filters = list(filters) if isinstance(filters, list) else [filters]
            filters = np.array(filters).astype(float)
            settings.beginGroup('filter_bank_settings')
            startf = float(settings.value('startf', 2.0))
            endf = float(settings.value('endf', 12.0))
            bandwidth = float(settings.value('bandwidth', 3.0))
            overlap = float(settings.value('overlap', 1.0))
            settings.endGroup()
            settings.endGroup()
            # Get threshold value
            if self.actionActivateThreshold.isChecked():
                threshold = self.thresholdSpinBox.value()
            else:
                threshold = None
            # Create an AMPA algorithm instance with selected settings
            alg = ampa.Ampa(wlen, wstep, filters, noise_thr=nthres,
                            bandwidth=bandwidth, overlap=overlap,
                            f_start=startf, f_end=endf)
            # perform task
            self._analysis_task = pickingtask.PickingTask(self.document, alg,
                                                          threshold)
            self.launch_analysis_task(self._analysis_task,
                                      label="Applying %s..." % alg.__class__.__name__.upper())

    def launch_analysis_task(self, task, label=""):
        """Run a picking task behind a modal progress dialog.

        Args:
            task: A PickingTask-like object to execute.
            label: Text shown in the progress dialog.
        """
        wait_dialog = processingdialog.ProcessingDialog(label_text=label)
        wait_dialog.setWindowTitle("Event detection")
        wait_dialog.run(task)

    def doTakanami(self):
        """Refine picks on the current selection with the Takanami AR method."""
        xleft, xright = self.signalViewer.get_selector_limits()
        takanamidialog.TakanamiDialog(self.document, xleft, xright).exec_()

    def doFilterDesing(self):
        """Performs event filtering using bandpass filter ."""
        dialog = FilterDesing.FilterDesignDialog(self.stream, trace_list=[self.document.record])
        return_code = dialog.exec_()
        if return_code == QtGui.QDialog.Accepted:
            self.apply_filter()

    def apply_filter(self):
        """Band-pass filter the current record with the stored design settings."""
        if self.document is not None:
            settings = QtCore.QSettings(_organization, _application_name)
            settings.beginGroup('filterdesign_settings')
            freq_1 = float(settings.value('freq_min', 0.0))
            freq_2 = float(settings.value('freq_max', 25))
            # NOTE(review): 'corners' is a filter order and is read as float
            # here — confirm bandpass_filter accepts a non-int order.
            coefficients = float(settings.value('coef_number', 3))
            # NOTE(review): QSettings may return 'false' as a string, which
            # is truthy — verify zero_phase round-trips correctly.
            zero_phase = settings.value('zero_phase', True)
            settings.endGroup()
            self.document.record.bandpass_filter(freq_1, freq_2, corners=coefficients, zerophase=zero_phase)
            self.toogle_document(self.current_document_idx)

    def clear_events(self):
        """Remove every event from the current document."""
        if self.document is not None:
            self.document.clearEvents()

    def delete_selected_events(self):
        """Delete the events currently selected in the events table."""
        if self.document is not None:
            selected_rows = self.EventsTableView.selectionModel().selectedRows()
            self.document.removeRows([row.row() for row in selected_rows])

    def goto_event_position(self, index):
        """Center the signal viewer on the event of the clicked table row."""
        self.signalViewer.goto_event(self.document.record.events[index.row()])

    def on_event_selection(self, s, d):
        """Slot: mirror the events-table selection in the signal viewer."""
        selected_events = [self.document.getEventByRow(index.row())
                           for index in self.EventsTableView.selectionModel().selectedRows()]
        self.actionDelete_Selected.setEnabled(len(selected_events) > 0)
        self.signalViewer.set_event_selection(selected_events)

    def on_event_picked(self, event):
        """Slot: select in the table the event picked on the plot."""
        if self.document is not None:
            self.EventsTableView.selectionModel().clear()
            self.EventsTableView.selectionModel().select(self.document.index(self.document.indexOf(event), 0),
                                                         QtGui.QItemSelectionModel.ClearAndSelect |
                                                         QtGui.QItemSelectionModel.Rows)

    def on_selection_toggled(self, value):
        """Slot: re-evaluate selection-dependent actions on selector toggle."""
        self.on_selection_changed(*self.signalViewer.get_selector_limits())

    def on_selection_changed(self, xleft, xright):
        """Slot: enable Takanami only for sufficiently long selections."""
        selection_length = abs(xleft - xright)
        enable_takanami = (self.signalViewer.selector.active and
                           (selection_length >= (takanamidialog.MINIMUM_MARGIN_IN_SECS * 2)))
        self.actionTakanami.setEnabled(enable_takanami)

    def show_about(self):
        """Show the application's About dialog."""
        aboutdialog.AboutDialog(self).exec_()
from PySide import QtCore
from PySide import QtGui
import numpy as np
from apasvo.gui.views import processingdialog
from apasvo.gui.views import tsvwidget
from apasvo.gui.views import staltadialog
from apasvo.gui.views import ampadialog
from apasvo.picking import stalta
from apasvo.picking import ampa
from apasvo.gui.views import FilterDesing
from apasvo.gui.models import pickingtask
from apasvo.gui.models import eventcommands as commands
from apasvo._version import _application_name
from apasvo._version import _organization
class TraceSelectorDialog(QtGui.QMainWindow):
"""A dialog to apply Takanami's AR picking method to a selected piece of a
seismic signal.
Attributes:
document: Current opened document containing a seismic record.
seismic_event: A seismic event to be refined by using Takanami method.
If no event is provided, then a new seismic event will be created
by using the estimated arrival time after clicking on 'Accept'
"""
closed = QtCore.Signal()
selection_changed = QtCore.Signal(int)
events_created = QtCore.Signal(dict)
events_deleted = QtCore.Signal(dict)
detection_performed = QtCore.Signal()
def __init__(self, stream, parent):
    """Initialize the trace selector.

    Args:
        stream: ApasvoStream whose traces are listed and selectable.
        parent: The application's main window (also kept as main_window).
    """
    super(TraceSelectorDialog, self).__init__(parent)
    self.main_window = parent
    self.stream = stream
    self.skip = 0   # number of leading traces hidden from the view
    self.step = 5   # number of traces displayed at once
    self._init_ui()
def _init_ui(self):
    """Build the dialog's widgets, toolbars, status bar and signal wiring.

    Order matters here: widgets must exist before actions are connected,
    and the status bar is assembled last.
    """
    self.set_title()
    # Create main structure
    self.centralwidget = QtGui.QWidget(self)
    self.centralwidget.setVisible(False)
    self.setCentralWidget(self.centralwidget)
    self.layout = QtGui.QVBoxLayout(self.centralwidget)
    self.stream_viewer = tsvwidget.StreamViewerWidget(self)
    self.layout.addWidget(self.stream_viewer)
    # Add main toolbar
    self.tool_bar_main = QtGui.QToolBar(self)
    # self.action_save = QtGui.QAction(self)
    # self.action_save.setIcon(QtGui.QIcon(":/save.png"))
    # self.action_save.setEnabled(False)
    self.action_close = QtGui.QAction(self)
    self.action_close.setIcon(QtGui.QIcon(":/close.png"))
    self.action_close.setEnabled(False)
    self.action_previous_view = QtGui.QAction(self)
    self.action_previous_view.setIcon(QtGui.QIcon(":/go-previous-view.png"))
    self.action_previous_view.setEnabled(True)
    self.action_next_view = QtGui.QAction(self)
    self.action_next_view.setIcon(QtGui.QIcon(":/go-next-view.png"))
    self.action_next_view.setEnabled(True)
    # self.tool_bar_main.addAction(self.action_save)
    self.tool_bar_main.addAction(self.action_close)
    self.tool_bar_main.addSeparator()
    self.tool_bar_main.addAction(self.action_previous_view)
    # Skip/step spinboxes control which window of traces is visible.
    self.skip_label = QtGui.QLabel(" Skip: ", parent=self.tool_bar_main)
    self.tool_bar_main.addWidget(self.skip_label)
    self.skip_spinbox = QtGui.QSpinBox(self.tool_bar_main)
    self.skip_spinbox.setMinimum(0)
    self.skip_spinbox.setValue(self.skip)
    self.skip_spinbox.setAccelerated(True)
    self.skip_spinbox.setToolTip("Number of traces to skip")
    self.tool_bar_main.addWidget(self.skip_spinbox)
    self.step_label = QtGui.QLabel(" Step: ", parent=self.tool_bar_main)
    self.tool_bar_main.addWidget(self.step_label)
    self.step_spinbox = QtGui.QSpinBox(self.tool_bar_main)
    self.step_spinbox.setMinimum(1)
    self.step_spinbox.setValue(self.step)
    self.step_spinbox.setAccelerated(True)
    self.step_spinbox.setToolTip("Number of traces shown at once")
    self.tool_bar_main.addWidget(self.step_spinbox)
    self.tool_bar_main.addAction(self.action_next_view)
    self.addToolBar(QtCore.Qt.TopToolBarArea, self.tool_bar_main)
    # Add analysis toolbar
    self.tool_bar_analysis = QtGui.QToolBar(self)
    self.action_sta_lta = QtGui.QAction(self)
    self.action_sta_lta.setIcon(QtGui.QIcon(":/stalta.png"))
    self.action_sta_lta.setEnabled(False)
    self.action_sta_lta.setToolTip("Apply STA-LTA algorithm")
    self.action_ampa = QtGui.QAction(self)
    self.action_ampa.setIcon(QtGui.QIcon(":/ampa.png"))
    self.action_ampa.setEnabled(False)
    self.action_ampa.setToolTip("Apply AMPA algorithm")
    ##############################################################################
    self.action_filter_design = QtGui.QAction(self)
    self.action_filter_design.setIcon(QtGui.QIcon(":/filter.png"))
    self.action_filter_design.setEnabled(False)
    self.action_filter_design.setToolTip("Filter design")
    self.viewFilteredCheckBox = QtGui.QCheckBox("View filtered signal")
    self.viewFilteredCheckBox.setChecked(True)
    ###############################################################################
    self.tool_bar_analysis.addAction(self.action_sta_lta)
    self.tool_bar_analysis.addAction(self.action_ampa)
    self.tool_bar_analysis.addAction(self.action_filter_design)
    self.tool_bar_analysis.addWidget(self.viewFilteredCheckBox)
    # self.tool_bar_analysis.addSeparator()
    # self.action_activate_threshold = QtGui.QAction(self)
    # self.action_activate_threshold.setIcon(QtGui.QIcon(":/threshold.png"))
    # self.action_activate_threshold.setCheckable(True)
    # self.action_activate_threshold.setChecked(False)
    # self.action_activate_threshold.setToolTip("Enable/Disable Threshold")
    # self.tool_bar_analysis.addAction(self.action_activate_threshold)
    # self.threshold_label = QtGui.QLabel(" Threshold value: ", parent=self.tool_bar_analysis)
    # self.threshold_label.setEnabled(False)
    # self.tool_bar_analysis.addWidget(self.threshold_label)
    # self.threshold_spinbox = QtGui.QDoubleSpinBox(self.tool_bar_analysis)
    # self.threshold_spinbox.setMinimum(0.0)
    # self.threshold_spinbox.setMaximum(20.0)
    # self.threshold_spinbox.setSingleStep(0.01)
    # self.threshold_spinbox.setValue(1.0)
    # self.threshold_spinbox.setAccelerated(True)
    # self.threshold_spinbox.setEnabled(False)
    # self.tool_bar_analysis.addWidget(self.threshold_spinbox)
    self.addToolBar(QtCore.Qt.TopToolBarArea, self.tool_bar_analysis)
    # Add navigation toolbar
    # self.tool_bar_navigation = navigationtoolbar.NavigationToolBar(self.stream_viewer.canvas, self)
    # self.addToolBar(QtCore.Qt.TopToolBarArea, self.tool_bar_navigation)
    # self.addToolBarBreak()
    # Connect actions
    self.action_sta_lta.triggered.connect(self.doSTALTA)
    self.action_ampa.triggered.connect(self.doAMPA)
    self.action_filter_design.triggered.connect(self.doFilterDesing)
    self.action_close.triggered.connect(self.close_selected_traces)
    self.action_previous_view.triggered.connect(self.on_previous_view)
    self.action_next_view.triggered.connect(self.on_next_view)
    self.step_spinbox.valueChanged.connect(self.on_step_modified)
    self.skip_spinbox.valueChanged.connect(self.on_skip_modified)
    # set up status bar
    self.statusbar = QtGui.QStatusBar(self)
    self.statusbar.setObjectName("statusbar")
    self.setStatusBar(self.statusbar)
    self.analysis_label = QtGui.QLabel("", self.statusbar)
    self.analysis_progress_bar = QtGui.QProgressBar(self.statusbar)
    self.analysis_progress_bar.setOrientation(QtCore.Qt.Horizontal)
    # Range (0, 0) makes the progress bar an indeterminate "busy" indicator.
    self.analysis_progress_bar.setRange(0, 0)
    self.analysis_progress_bar.hide()
    self.statusbar.addPermanentWidget(self.analysis_label)
    self.statusbar.addPermanentWidget(self.analysis_progress_bar)
    # Connect widget signals
    self.events_created.connect(self.stream_viewer.update_markers)
    self.events_deleted.connect(self.stream_viewer.update_markers)
    self.stream_viewer.trace_selected.connect(lambda x: self.selection_changed.emit(x))
    self.stream_viewer.selection_made.connect(self.action_close.setEnabled)
    self.viewFilteredCheckBox.toggled.connect(self.toggle_filtered)
def closeEvent(self, event):
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("geometry")
settings.setValue("trace_selector", self.saveGeometry())
settings.endGroup()
self.closed.emit()
super(TraceSelectorDialog, self).closeEvent(event)
def showEvent(self, event):
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("geometry")
self.restoreGeometry(settings.value("trace_selector"))
settings.endGroup()
super(TraceSelectorDialog, self).showEvent(event)
def set_title(self):
traces_description = " - ".join([trace.short_name for trace in self.stream.traces[:3]])
if len(self.stream) > 3:
traces_description = "{0} ...".format(traces_description)
self.setWindowTitle("{0} Traces Opened - {1}".format(len(self.stream.traces), traces_description))
def set_stream(self, stream):
self.stream = stream
self.stream_viewer.set_stream(self.stream)
self._visualize_current_stream_range()
stream_has_any_trace = len(self.stream)
self.centralwidget.setVisible(stream_has_any_trace)
self.action_sta_lta.setEnabled(stream_has_any_trace)
self.action_ampa.setEnabled(stream_has_any_trace)
self.action_filter_design.setEnabled(stream_has_any_trace)
self.set_title()
def update_events(self, *args, **kwargs):
self.stream_viewer.update_markers()
def doSTALTA(self):
"""Performs event detection/picking by using STA-LTA method."""
selected_traces = self.stream_viewer.selected_traces
dialog = staltadialog.StaLtaDialog(self.stream, trace_list=selected_traces)
return_code = dialog.exec_()
if return_code == QtGui.QDialog.Accepted:
# Read settings
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup('stalta_settings')
sta_length = float(settings.value('sta_window_len', 5.0))
lta_length = float(settings.value('lta_window_len', 100.0))
settings.endGroup()
# # Create an STA-LTA algorithm instance with selected settings
alg = stalta.StaLta(sta_length, lta_length)
# perform task
selected_traces = self.stream_viewer.selected_traces
selected_traces = selected_traces if selected_traces else self.stream_viewer.stream.traces
analysis_task = pickingtask.PickingStreamTask(self,
alg,
trace_list=selected_traces)
self.launch_analysis_task(analysis_task,
label="Applying %s..." % alg.__class__.__name__.upper())
def doAMPA(self):
"""Performs event detection/picking by using AMPA method."""
selected_traces = self.stream_viewer.selected_traces
dialog = ampadialog.AmpaDialog(self.stream, trace_list=selected_traces)
return_code = dialog.exec_()
if return_code == QtGui.QDialog.Accepted:
# Read settings
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup('ampa_settings')
wlen = float(settings.value('window_len', 100.0))
wstep = float(settings.value('step', 50.0))
nthres = float(settings.value('noise_threshold', 90))
filters = settings.value('filters', [30.0, 20.0, 10.0,
5.0, 2.5])
filters = list(filters) if isinstance(filters, list) else [filters]
filters = np.array(filters).astype(float)
settings.beginGroup('filter_bank_settings')
startf = float(settings.value('startf', 2.0))
endf = float(settings.value('endf', 12.0))
bandwidth = float(settings.value('bandwidth', 3.0))
overlap = float(settings.value('overlap', 1.0))
settings.endGroup()
settings.endGroup()
# Create an AMPA algorithm instance with selected settings
alg = ampa.Ampa(wlen, wstep, filters, noise_thr=nthres,
bandwidth=bandwidth, overlap=overlap,
f_start=startf, f_end=endf)
# perform task
selected_traces = self.stream_viewer.selected_traces
selected_traces = selected_traces if selected_traces else self.stream_viewer.stream.traces
analysis_task = pickingtask.PickingStreamTask(self,
alg,
trace_list=selected_traces)
self.launch_analysis_task(analysis_task,
label="Applying %s..." % alg.name)
def doFilterDesing(self):
"""Performs event filtering using bandpass filter ."""
selected_traces = self.stream_viewer.selected_traces
selected_traces = selected_traces if selected_traces else self.stream_viewer.stream.traces
dialog = FilterDesing.FilterDesignDialog(self.stream, trace_list= selected_traces)
return_code = dialog.exec_()
if return_code == QtGui.QDialog.Accepted:
# Read settings
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup('filterdesign_settings')
freq_1 = float(settings.value('freq_min', 0.0))
freq_2 = float(settings.value('freq_max', 25))
coefficients = float(settings.value('coef_number', 3))
zero_phase = (settings.value('zero_phase', True))
settings.endGroup()
for trace in selected_traces:
trace.bandpass_filter(freq_1, freq_2, corners=coefficients, zerophase=zero_phase)
self.stream_viewer.refresh_stream_data()
def launch_analysis_task(self, task, label=""):
wait_dialog = processingdialog.ProcessingDialog(label_text=label)
wait_dialog.setWindowTitle("Event detection")
wait_dialog.run(task)
def toggle_filtered(self, value):
for trace in self.stream.traces:
trace.use_filtered = value
self.stream_viewer.refresh_stream_data()
def close_selected_traces(self):
selected_traces_idx = [self.stream.traces.index(trace) for trace in self.stream_viewer.selected_traces]
if selected_traces_idx:
self.main_window.command_stack.push(commands.CloseTraces(self.main_window, selected_traces_idx))
def on_skip_modified(self, skip):
self.skip = skip
self._visualize_current_stream_range()
def on_step_modified(self, step):
self.step = step
self._visualize_current_stream_range()
def on_previous_view(self):
self.skip_spinbox.setValue(max(0, self.skip - self.step))
def on_next_view(self):
self.skip_spinbox.setValue(self.skip + self.step)
def _visualize_current_stream_range(self):
self.stream_viewer.visualize_stream_range(start_trace=self.skip, end_trace=self.skip + self.step) | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/gui/views/trace_selector_dialog.py | trace_selector_dialog.py |
from PySide import QtCore
from PySide import QtGui
from apasvo.gui.views import error
class ProcessingDialog(QtGui.QDialog):
    """Modal dialog showing a busy progress bar while a background task runs.

    The task object is expected to expose ``run()``, ``abort()`` and the
    signals ``finished`` and ``error`` (see e.g. PickingTask). It is moved to
    a private QThread; the dialog accepts itself when the task finishes and
    rejects (aborting the task) when the user cancels or an error is reported.
    """
    def __init__(self, label_text='', cancel_button_text='&Cancel',
                 cancel_label_text='Canceling...'):
        """Create the dialog.

        :param label_text: Message shown while the task is running.
        :param cancel_button_text: Caption of the cancel push button.
        :param cancel_label_text: Message shown while canceling is in progress.
        """
        QtGui.QDialog.__init__(self)
        self.label_text = label_text
        self.cancel_button_text = cancel_button_text
        self.cancel_label_text = cancel_label_text
        self._init_ui()
    def _init_ui(self):
        """Build the label, the indeterminate progress bar and the cancel button."""
        self.label = QtGui.QLabel(self.label_text)
        self.pbar_widget = QtGui.QWidget(self)
        self.pbar = QtGui.QProgressBar(self.pbar_widget)
        # minimum == maximum == 0 puts the bar in "busy" (indeterminate) mode.
        self.pbar.setMinimum(0)
        self.pbar.setMaximum(0)
        self.button_cancel = QtGui.QPushButton(self.cancel_button_text,
                                               self.pbar_widget)
        self.hlayout = QtGui.QHBoxLayout(self.pbar_widget)
        self.hlayout.addWidget(self.pbar)
        self.hlayout.addWidget(self.button_cancel)
        self.layout = QtGui.QVBoxLayout(self)
        self.layout.addWidget(self.label)
        self.layout.addWidget(self.pbar_widget)
        self.button_cancel.clicked.connect(self.reject)
    def run(self, task):
        """Execute *task* on a private thread and block until it completes.

        :param task: Worker object with ``run``/``abort`` methods and
            ``finished``/``error`` signals.
        :return: The dialog result code (the ``exec_()`` return value).
        """
        self.label.setText(self.label_text)
        self._task = task
        self._thread = QtCore.QThread(self)
        # Move the worker onto the thread so run() executes off the GUI thread.
        self._task.moveToThread(self._thread)
        self._thread.started.connect(self._task.run)
        self._task.finished.connect(self._thread.quit)
        self._task.finished.connect(self.accept)
        self._task.finished.connect(self._task.deleteLater)
        self._task.error.connect(self.on_error)
        self._thread.finished.connect(self._thread.deleteLater)
        self._thread.start()
        return self.exec_()
    def set_label_text(self, label_text):
        """Update the message shown while the task is running."""
        self.label_text = label_text
        self.label.setText(self.label_text)
    def set_cancel_button_text(self, cancel_button_text):
        """Update the caption of the cancel button."""
        self.cancel_button_text = cancel_button_text
        self.button_cancel.setText(self.cancel_button_text)
    def reject(self):
        """Abort the running task, wait for its thread to stop, then close."""
        self.label.setText(self.cancel_label_text)
        self._task.abort()
        self._thread.quit()
        # Block until the worker thread has really terminated.
        self._thread.wait()
        return QtGui.QDialog.reject(self)
    def on_error(self, *args, **kwargs):
        """Display an error dialog for the reported failure, then cancel."""
        error.display_error_dlg(*args, **kwargs)
        self.reject()
# Resource object code
#
# Created: dom feb 7 18:18:05 2016
# by: The Resource Compiler for PySide (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = "\x00\x00\x07\x06\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x07tIME\x07\xdb\x05\x02\x16\x024\xdd\xedHB\x00\x00\x06\x86IDATX\xc3\xa5\x97]\x88]W\x15\xc7\x7fk\xed}?\x92I2C\xbeD[m XM\x8d\x9a\x07\x05i$\x1a4\x94\xfa \x92\x8e_P_E\xa4\x0f\x82\xe2\x8bJ\x11\x0aA\x11\x8c\xf8$-\xbeY\xc4\x88\x22\x8a\xa8\x89\x11#TI\x03M\x8b\x09\xfd\xc8\x84&\xc1$\x9dd&3w\xe6\xce\xdc{\xcf\xd9{\xf9\xb0\xf7\xb9\xe7\xf4N*)n\xe6r\xce\xd9g\x9d\xbd\xfek\xfd\xffk\xed=Bc\xec\xdd\xbb\xb7377\xb7\x1bh\xf3\xd6\xc3\x1a\xf7\x92\x9f\xe7\x8f\x1c9\xb2z\xf2\xe4I\xfe\xdfq\xc0\xde\xc6\x08ei\xab++v\xf9\xf2\xe5\xab{\xf6\xecy\xe4\xe0\xc1\x83\xfav\x1d\xca\xc4\xf3'\xcd\xecok\xfd~m \xb5\x89\x99af\x84\x10(\xcb\x92\xe1pH\xaf\xd7cqq\xd1>|\xe0\x80\x1c|\xf8\xe1\xd9m\xd3\xd3\xbf=s\xe6L\xbcW\x00o\x89XDPUD\xa4\xfe5A\x89 
\xaa\xf8V\x0b@.^\xbch\xa7N\x9d\xfa\xb5\xf7\xfe\xe8\xa1C\x87\xee9\x13\xfa\xa5\xa7\xfe\xdc|\xeeW\x0e\xc6\x91\x9b\xd5\xa47\x00i\xfe9UZ\xad\x16\x8b\x0b\x0b2=3c?\x7f\xe6\x99\x13\xdb\xb7o?\xba\x7f\xff\xfe{\x02\xa1\xbf\xfc\xee#|\xf1\xa9\xbf\x9e\xfc\xdc\x93\x7f\xb4\x07\x1f\xfd\xe6\xd9\xb1\xc8*\xc7\xcd,Ldh\x0cF\x95\x95\x95\x15\xbc\xf7\xe2\x9c\xb3\x9f\x1e?~b\xffC\xfb\x1e\xbb\x17\x00\x1e`\xf9\xce\xfc\xa7\x1f\x9f}\x94\xdf\xad^\xadu\xd1\xc8\x82d@\x93\xf3\xe3{\x11b~\xbfyjJ\xd6\xfa};v\xec\x07\xbf\x0a\xef\xfe\x14\xe5\xd4{P\x84\x80\xe1\x80\xd2@\xc5\x0a)\x06G~s\xec\xb1\xbf{\x80a\x7f\x99\x97^\xefq\xe9z\x7f\xbc0\x0d\x0a\x10\x01U,FD\x04\xcb\xfck\xb6SUbL\xbak\xb7\xdb`&kk}~\xf8\xb5\xc3<\xfd\xa7\xd7\xd8\xb1\xe7\x83\xb4\x9d\x07\x01\x15\xa1\x14\xf3\xff:w\xe1[@\x02\x10F\x03\xe6\x17\xfa`\xc5F1\xaabf\x1b\xca\xa5\x02Z\x09U5Q\xee[-\x10\xc1\x8053\xber\xf8\x01\x9e|\xf69\xfc\xce\xf7\xe1\x9d\x12\x02\xec\x9c\xee\xca\xed\x9b\xff\xd1\xba\x0a\x9c\xa3\x1c\xae3\x1a\x8d6TBu5\xb37q\xae\xaa8\xe7p\xce\xd1j\xb5\xd8\xb2e\xcb\xd8\xd6{O\xbb\xd3a\xf3\xe6)\xba\x9b\xa6\xf8\xce\xec{\xe9];\xcfp\xb0\x8e\x95C\x16W\x07\xac,\xdd\xb4\xb1\x06\xcc\x8c\xf5\xd1\x80\xb2\x1c\xdd\xb5\xf6+\xa7f\xb6a\xce9G\xb7\xdbezz\x1a\x80N\xa7\x03@\xb7\xdb\x85\xad[\xd9\xb1k\x17eq\x1f?\xf9\xfa4_\xfd\xd1_\x98\xb9\xff\x03\xb8h\x14\x83\xf50\x06P\x8e\x0a\xcabH,G\x1bzl\x15urn\x84`\x181kRp\xce\xd3\xednb\xe7\xae]<\x7f\xee\x1cEQ\x82\x19\xa2\x99\x16K\xba\x98\xde\xb6\x8d_|\xff\xf3|\xe1{'\x98\xb9\xef!\xb0X\x03\x88aD(F\xc4\xb2\xa8\x85\xa7\x8aT@\xa2Q\x84\x92\xa5^\x9f\xe5\xde\x1aEY\x12b\xc4\xcc\x88!P\x86@Y\x94\x94\xc1\x88!U\x84a\x98%0\xb17\xe0\xda\xcdEF\xc5\x88'>\xbb\x8f\xe3\xbf\xbfD\x88\xe6\xc6\x00\x8a\xa2`0Xg\x905\x10,\x22\xc1\x10IX\xa2\xc1\xcd[K\x9c}\xf1\x15.\xbcv\x85\xfe\xda 
\xe7\xc7\xd2\x9f%Z\xa2\x19X\xca\x8e\x8d\xdf\x19\x16\x0d\x13#\x84\xc8\xee]\xdby\xf9\xfcst;\x9d\xd1\x18\xc0h\xb4F\x7fy\x9eP\x0c\xb9z\xfd\x16\xb7\x17\x97k\xd5\x0b\x94\xa1d\xee\xca\x1b\x9c\xfe\xe7\x8b\xcc>\xfee\xda\xdd.w\xfa%\x22\xff{\x93\xb1\xbb\xcd\x9b\xf0\xb1\xcf\x1c\x05\x98\xbdq\xed\x89\xd3\x1e\xe0\x8d\x1b\xd7Y\xec\xad\xd3[^\xe4g\xcf\xfe\x81~\x7f\x1d\xcd\xf97 \x04\xe3N\xafO\x7fX\xe0|\x97\xb3/-\xe0\xf2;i8s\xf9\x1a\x1b \xe2\x04\x18\xcd\xf7-u\xbc|\xfe\xf9\xc3\xfe\xdbO\x9f>\xfb\x89\x8f\x7f\x84\xb2(\x11\xa0(#&\x96\xd1\xe6\x0f\x05\x14\xc1\x0c.\x5cZ\xc0\x11q\x02!\x82\x0a\x04\x03'PV\xa7\x85L\x9d\x0a\xa8\xd56*\x102\xd0\x02%D\xc3\xcf]\xb9\xfd\xd1w\xed^\xa0_\x94\xe3\x08&\xd1V\xcf\x16\x0d\xa7\xa9*\xca\x98\xa2\x8bc\x0dd\x9b\x1c\xae\x90\x00:I\xed7\x92@di\xa4\x00\x0c<\x06E4\x1c\x96\xd0\xe5h\xaa\xc5[\x0a\x85%\xd4h\x16\x15)\x12\xb1\xdc\xc9$S!P\xc6\xdc\xc5\xabu\xaa,Zm3\xa6\xce\xc0\x1b\xa4\xb2\xc9\x11\x89\xa6\x8f\xa8R\x96\xe7\xadq\x18\xd3\xea^\xeb\xa8\x84D\xb85*\xc7\xe7 $\x7f\x14b\x9a3 \xc4\x08f\xb9\x13\x02\x11K)\x0f\x10$\x19\x16\xb1\xd6\x80\x00-I\xd9\xb0\x9c\xda\x10\x92C\xa1Nm\x15\x9d\xe6\x94\x8f\x05\x183-\x96S\x9f\xa9\xf1f\x10\xa3!V\x7f \x96\xc5\x22\xf5\x8a\xa3X\x9f\xdfT\x12\xb8\xa6m'sJ\xe5$\x8b-J\xb2\xa9F\x09l\x12(,\xd1\xee\x05C\x0d\x06\xd1\xf09B\x80\xb6\xd6\xe9'\xd4\xe5\x16\xf3\x9en\x99\xcf\x22\xebd\x10\x19\xef\xf7\xbey|\xce`\xaaJ\xf0\xd9V\xc7\xb6\x22\xa8w\xb4%\x1d\x18\x5c3\x8d\xbe\xe67S\xfe\xa6\x9a\xf6\xd4\xce\xc4\xa5\xf7\x15\xc7m`\xd4\xe8\x0f>\x0b\xb7\xb2u\xeaq\x22\xf8\x17\xe6\xe6\xf9\xf7\x8d\x7f \x16\x93\xb2'\x1a\xc8\xe4\xd1\xb9\x02r\xb7f\xb3a#k\xdc\xc7\x89\xb5\x1d\xca\xda\x9dy\xfc\xfd;6\xb1}\xf7;\x89\x96t 
\xb9t\x94Z\x07\xd5sl,\xa0Y|fuF\xaa\xbdc\xb2\x0dK\xb6\x8dV\x03\x14\xa7\xdc\x1c]\xc3\x87`\x0c\x86E\xda\x80&\x9aP\x988\xb7[c\xdegA\xc9]\xa2\x9e\xfc\x17\xaa\x99\xb1\x8a\x12T)B\xc4\xbf\xff\x81\xdd|h\xdf>,\x04\xa2\x82d\x81\x14@G\x1b\xa5\x98W\x8a\x11Z\xa4UB\xc8\xb5^5\xa9X_c\xf2\x81\x8b\x89\xf3\x22$\xc7UO\x88\xa2\xbc\xbey\x09\x7fki\x8d^\x7f\xc8j\x11\xe8d\xc7M\xe4\x9a#\xad2\xe1r?\xd7F\xdd\x87\x86\x18c\xc3\xb6\x02\xa1\x19PSc\xa8r\xeb\xce*\x1e\x81\x85\xfe:N\x8c\xa1\xa5\xb3\xbf\xcby,-\x95\xe3(\xa6:/\x1ay\x0d\xcd\x8a\xc9\x1bQ\xd5\x8a}\xae\x7f2\xd0\xd0\xa0H\xa5j\xc7\xe9\xe0\xea\x05]\x9a_\x5c\x99\x11\x97#\x91\xba\xb9\x18\xd0\x01\x06\x0d\xe4\x15\x9f\xd5b\xce\xf2.\xd8\xd0E%V\xdf\xc8\x9e\xe5\x9d\xb1\x02/\xd1\xd8:\xf3\x8e9\x01\xda[\x1f<\xf4\x0d\x8c\xa9\xa4\xea\xa8\x06\x22\x22*f\x1a,\x8a\x80X4A\xac\x12\xbf`\xa6`bf\x22\xaa\x11\xb0\x181U\xa2\xa5\xfd<\x22b*\x12M0A\xd2IM\xc4D$\x22\xb2\xbe\xf2\xea\x99\x1f\xff\x17\x07v\xb3_\x89\xda\xcfC\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x05\xfa\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x05wIDATx\xda\xbd\x96\x7fh\xd4e\x1c\xc7_\xcf\xf7{\xf7\xbd\xddyssns:u\x9bfNm\x1a\x88\xc3\xd0\x96s\xf8_a4$\xff3\xc3@P\x98\x04\x92\xc5\xb4H\x9b\xce_S\x98\xe2\xef\x90\x08\x84\x880,\x0cZS\x14\xc96\x95\x99\x99cK\x97\xce\xa9\x1b\xcen\xbb\xdf\xbf\x9e\x1e\xbe\x83yz\xf7\xf5\xa6D\x1f\xf8\xf0\xf9\xdeq\xc7\xeb\xfd\xbc\xdf\xcf\xf3\xf0\xb5I)\xf9/\xeb\xa8\x10\x93\x9c\xf0\x89\x0b\xe6I\xc8\x0e\xa9\xb6\xc3\x03\xe0[\x1d\xbe|[\xca[$\x94\x8d\x84\x12B\x14\x03\xaa\x9f\xbf\xc6\x80m=\xd4\xbaa\xc1\xe4\x8a\x0a[\xf1\x92%\xb8\xa7N%\x06x;;\xc7v\x9f>=\xf3\xd6\x85\x0b\xb5;\x85\xf8\xde\x07\xef|*e<I\x00\x0a\x1e\x0a\x85\x9a\x83\xc1\x10\x08\x10\x88\xa7\xa6\xa9rh&<\xc7c1~\xa8\xaa\xc2\xdb\xd6FyM\x0d\xd9\xa5\xa5\xe8\x00\xc1\xa09GO\x9c\xc8\x8cU\xab(\x98?_\xfc\xd6\xd8\xb84#\x1c>\xd3,\xc4\xe2J)\xa3O\x0b@\xc1\x19\xf4z\xd14\x81\x10I\xfdX\x84j\xc0\x9c\xe7V\xaed\xb0\xa5\x859\xcb\x97\xe3\xccp\x10\xec\xb9\x83=\x1eCJ\x901\x88)\x818\x1c\xd8T\xcf\xa9\xae\xe6\xf2\x89\x13\xaf_\x93\xf2\xc4b!\x96%\x09\x10\x82\x04\xb8fNM{\x0cK\x04\x0b\xc0\xd3\xde\xce\xdd\x93')\x9eT\x08\x8e8\x8e\x05\xe5\xe4\xcc*\xe3\x8f]\x9b\xc8\xd6$\x01\x7f\x84\x90p\xf2\xca\xc7\x9f\xd3\xdb\xdaJ\x7fG\x07\x93\x0a\x0b\xb9\xd1\xdd\xfdV\x0d\xbc\x9c$\x80!p\xb2\x03\x00)\x9e[\xeb\xeb\x19%%\xeel\x17\xc1\xd0\x03F\xcf\x98\x89\xab\xb0\x88\x19\xebji\xda\xb0\x02\xb7r\xa4\xfc\xb3\xa38U\x0c9\x910\xd7v\xefb\x94\xd3I\x16\x18%\xb09\xd9\x01Dj\xfb\x01\x9e\xfc\xac\x80!z\xcf\x9f'\x1f\x10\x8f:0z{8\xb3\xeeM*\x1aN\x92=y\x1ao\xd4\x1d1\x7f\x9b9y\x0a\x9e[\xed\xfc\xf8\xdeR\xc6\xdd\xbeK\xbc\xc7\xc7(\xe0\x1fx-U\x04V\xd9\x9b\x93D\x01\xc1 
\x9a\xd7\x8b\xae\x81\xe1\x968\xed^\x8a\xec\x7f\xf2\xcd\x07\x15T\x1fl&\xa7x&\x00\xfd7\xaf\xf1\xf5\x8a*^5zq\x8d\x05\xbf\x1f\x1c}\x00\xb8-\x22\x10\xe9E\x00\x86\xdd\x8ef\xb3\xa1E\x876\x1b\x1a\xf8b\xa8\xef$\x9a\xcep\xc5\xe2\x12\x87K2:\x0f4\x07\xc4\x02j\xf6\x81\x1b\x1c)\x22`d\x02T\xc7\xe3q\x5c\x86\x81\xf0A\xc4\x0f\x8f\x06\xc0c\xe4S\xbd\xff\x17\xb2'\x95\xe2\xef\xbdA,\x06y/\x95\xf1\xee\xe1&\xcen\xa8bjv\x1fR\xc3l=\x8e\xffE\x1c0\x9b!\x01dL\x9f\x8e\xbcp\x81\x81\x87\x10\x1d?\x9a\x8a\x9dMd\x17\xcd\xc4\xd7s\x9d\xd6\xbd\x8b\xd1\x9d\x82\xb9k\x9a\xc9\x9a\x5c\xc6\xa2\xadM\x5cZ\xbf\x90\x88R\xaa\xfeJ?\x5c}\x11\x07\x86\xdb\xe9t2f\xedZ|\x17/\x12\x0e\xc6\x88\xde\x19TG\xed\x1c*\x15\xda\xea\xaa\xc8\xcbR\x99;\xe0\xafC\x95LY\xd5\xcc`\xc7YB\x7f\x0f\x12\xee\x03\x1d\xe4\xcfpx\xa4\x0e\xa4\x14\xe6v\xbb\x99\xb6p!\xbf\xce\x99\x83\xfb\xf2ed\x9f\xe4\xea\xba5\xdc.\xcd\xc4p\x0f\x10\x0cC<\x00\xba\xed>7w\xcc\xe7\xd1\xef\x1e\xa27\xc0\x1e\x81.h\xf9\x09.&;`\xd5\x16\xc2rrr\x18SW\x87\x7f\xf5j2\xbb\xba\x18\xec\x95\xf4{\x07\xb0\xe7\x83?\x17tch\x83\x06\xba<\x88\x87\xe0\x0aC/\xdc\xfc\x10\xd6\x03\xf7\x92\x1d\x18\x86\xa5o\x00\xc30\x985{6m\x8d\x8dx6n$\xff\xca\x15\xbc^\x08\xa8\xf6w\x810@\xc6\x87\xc0\x99*\x9bKD;>\x82\x9a \x5c\x91R\x06RF`v\x1ax\xa2\xd8\xdc\xdc\x5c\xe6\x96\x97s\xbd\xa1\x81\xf63g\xc8=u\x8a\xac\xceND \x80M\xb7\x11\xcf\xb0s\x7fF\x11w\x97-\xa3\xa6\xb6\xb6\x11hU\xf0A\x80gnB\xac\xe1O\x88\xd04\xcd\x8cb\xde\xbcyLTW\xee\xdd\xcaJ\xba\xbb\xbb\xf1\xf9|\xb8\x5c.s\xb3\x16\xe6\xe5QRR\x02\xb5\xb5\xed\x0a\xde\x97\xf4>`\xb5\x0f\xb0X9\x90$$##\xc3\x84\x14\x14\x14PVV\x86\xae\xeb\x84\xc3a\x14pX\x08\x10\x02\xb0\x12\x90l?XB\xd5L)D\xc1\xccNWV\x11<\x86\xa6\xb6>\xad\x88\xc4\x92R&*O#\xe0i0\xa4\x85&\xc3M\xa8yK&N\x00\xa1\xe9#\x12\x90\x08N\x09\xc3Z\x98\xf9\xf6\xa3\xa0f\xf6\x91H\x04\xf5\xd9l\x00_8\xea\x05\x02#v 
\xa9\xd2\xb8\xa1@&T\xbdW\x9a'\xc0\xe3\xf1\xa8\xe9\x07!\xc8\xcc\x1a\x13\xd9Z\xbfe/\xd0\x92v\x13\x0a+\xa0u\x99\xab\x8eF\xa3\xe6;\xc2\xbd{\xf7y\xf0\xe0>\x0eG\x06\xf9\xe3\xf2\xd15]n\xd9\xb6\xed\xc0\xf1#\xc76\xa98\xe2\xcf\x12\x90\x0a\x9e&\x82\xc7\x02\xd4\xca\xcdU\xf7\xf4\xf4PY\xb9H9\x125\xc5|\xb1c\xfb\xf1\xa3\x07\x0f\xac\x1b\x86[\x0a\xb0\x82\xa7qC\xc1\x87\xed\x7f\xf8\xb0\x1f\xc3p\x0c\xc3w\xec\xdd\xf3\xdd\xa1\xc6}\xefKU\xc0\xc8O\xc1\xf3\x94\x94\xd2\xb4\xdf\xeb\xf5200\xc0\x84\x09\x13\x86\xe1\x8d\xbb\x1b\xaaS\xc0\xd3;\xc0\x88\xa3H\x8c L\x1c\xb0\x1bv\xb9e{\xfd\xf1\xc3\xfb\xf6'\xad\xfc\xb9\x8e\xe1H\xc5)\x06\xaaM\xdb\xf3\xc6\x8d\x8fn\xdeV\xbf\xff\xcb\xa1\xcc\xe5s\xdf\x84\xba\xae\x0f\x9d\x84\xa4\xcc\xad/\x1d\xa1i\xa0\xd9\x88h\x86w\xe7\xf6\xba=_\x1d3w\xbb|\x91\xab\xb8K\x09\xa8\xe4\xc5\xcb\x0f\xb4X\xc0\xd3\x0b\x90Rv\x01\xaa\xff\xbf\xfa\x17\x1ej[\x84\xf4\xbf\xc4\xa8\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x03\x88\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x03\x05IDATX\x85\xbd\xd7Kh]U\x14\xc6\xf1\xdf\x92\xf4!\xd1\xd6\xd6\xa6\xb6\xd6\xa2`\x06\x95\xd4G'\x828\xb0C':\xab\x04,\xbe\xc0\x81\x95\xaa\x93\x8eUD\x07\x82\x8a\x95\xcc\x9c\x14A\x1d8q\xe6\xa0\x22\x86ZD\x05\x05\x15c)\xb4\xda\x16\xd1\xabIS[LA\xb3\x1c\xecs\x9a\x93\x9b\xd7\xcd\xcdM\x17\x1c\xb8\xfb\x9c\xfd\xf8\xaf\xef[{\x9fs#3u\x1b\x11\xd1\x07\x99\xf9o\xb7s\x5c\xd3\xf5\xea%^\xc5\xcb+\x99`\xa5\x00[q\xdd\xaa\x01D\xc4R\x93\x0f\xa0\x7fU\x00\x22b#\x8eE\xc4b\x90\x03VQ\x81'q7\x1e\xea\x04 \x22\x86:P\xac3\x80\x88\x08\xec\xc3E\xbc\xb0\x04@m\xc1\xfb\xb8\xb7'\x00x\x10\x9f\xe1=\xec\x8d\x88{\xe6\x81\x5c\xafd_g}=nX.\x80\xcc\x9cs\xe1c\xdc\x82!$\x8e\xcc\xd3gg\xf5\xec\xa7\xaa\xfd\xa7b[?\xbe\x9fo\xde\xf9\xae9\x0aD\xc4 
\xa62\xf3lf\xfe\x88\xcf1\x1c\x11\xdb\xda\xba\x0eT\x8b\xd6\x0al\xa8\x14\xd8\x8c\xdd\x11\xb1\xa1\x13\x01\xe6\xb3\xe0Y\x8c4\xda#X\x8b\x03m\xfd\xb6\xe24\xfa+;\xd6`#65\x00\x97\x8c\xbef#\x22\xfa\xb1\x1f\x9b\x22\xe2\xa9\xb6>\xcfD\xc4k\x999\xd5X\xe0\x94\xb2S\xealk\x05` \x22&\xb1-3\x7f\xe8\x08\x00\x8f\xe1\xb0R|\xcd\xf8\x0b\xcfUp\xef6\x00\xce(*ni\x004\x15\xd8\x85\xa7q\xffB\x00W,\x88\x8858\x88\x91\xcc<\xdd\xbc\xf0&\xa6\xcd\xde\x92u\x0d\x5c\xc4\xf6\xea^\xd3\x82-\xd8\x81\xdb\x16Z|\x16\x00\x1e\xc7\xa9\xcc\x1co\xef\x94\x99\xbf\xe0+\x0cE\xc4#\x0d\x80V\x05ps\x058\xcb\x02e'm\x8f\x88u\x8b\x02D\xc4\x13x\x1d{\x22b\x7f{\xa7\xea^\x9d\xe5\xe1\x0a\xa2]\x81\xdf\xcd(0Q=\xdf\x81\xc0\xad\x0b\x01D\xb7\xdf\x03\x11q\x1c\x87\xf06\x8e\xe1\xbej\xd1O\xb0\x07?\xe3\xcej\xf1G+\xb8\xf1\xcc<\xda\x9c\xa7/\x22\x861\xdc\x05\xc3n%\xebZ\x813\x18T,8aF\x81/\x94:8\x80\x7fp4\x22\xde\xc0\xed\xf8\xb0\x0f\xdfa\xca\xf2\xe3\xad\xcc<\x19\x11u\x0d|\xa3d\xb9Y9\xbc\xee\xc2\x8d\xf8\xb2\xfa=\x88\xf3\x11\xf1\x12\xf6\xe2\x15\x8c\xf5e\xe6\x18\xc6\xba\x00\xa8\xe3\x92\xb2\xdd\xc6\xabDv*\x0a\xdc\x81?p\x12\xcf\xe3[\x8c\xe2E<\x90\x99\xa3\xcc=\x07\xba\x89Z\x81\x0b8\xaf\xc8}\x02\xebpN9-o\xc2\x11\xbc\xa3\xbc'F\xeb\xc1+\xfd$\xab\x01\xae\xad\x00&\xb1\x1e\xbf\xe22\xceV\x00p<3\x7f\xcb\xcc\x0f\x9a\x83{\x05\x00\x7f+\x0aLW -\x9c\xcb\xcc\x16>R\x8aqN\xf4\xca\x02f\x14\x98\xcc\xcc\xe9\x88h)\x16\xc8\xcc}\x0b\x0d\xee\xa5\x02u\x0dLT\xed\x96b\xc1\xa2\xb1\x1a\x0a\xd4\x00\x0f\xe3\xbf\xab\x09P\xd7\xc0\x04d\xe6\xe5N\x06\xf7\xda\x82\xa6\x02\x1dE\xaf\x15\xf8Z9\x9e\xaf:\xc0\xa5\xcc\x9c\xc6\xa7\xcb\x1d\xdc+\x0b.t;\xb8\xeb\xd7\xf1\x95\x09\xca\x9f\x98\xb5\x9d\x16]{\xfc\x0f\x8a\x1e.\x8f\xba\xdd\x8b\xb9\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x07\xce\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x07KIDATx\xda\xc5W[l\x9cG\x19=\xdf\xfc\xd7]\xef\xae\xed\xf55Y\xdfs\xb7\x93@\xd3\x5ch\xd2\xd2\x14$\xc2\xfd\x22\xd4\x08\x95\x07x\xe1\xa2\xbe\x10\xd4\xc2\x03\x12*\x95\x0a\x82\x16\x91\x8a\x07\x10\xd0\x07T(T\x89D\x1f@\x82 U\x80B\x15\x10\xa9\xdb&!W\x12\xe3\xfa\x12g\xbd\xeb\xb5w\xff\xfd\xef3\xc3\xfc\xeb\xd4Q\x12\xdb\x84\x12\xa9guvf\xb5;s\xcew\xbeY\xe9\x1f\x92R\xe2\xff\x01\x81H\xe2\xedor\xc7\x06\x0e\x1e%\xad\xb2f\xfd\x039\xab\xf5\xe1\x1e\xab\x7fo!\xd5\xbfvm\xaa\xbf\xad\xc5\xc8\xd3|XZ\x98\x0d\xa6f\xae\x05\x93\xff(\x05\x13\xbf\xd6\xa4\xf7\xf2O\xee=\x19\xdd\x15\x03;_-\xa4m\xdf\xfc\x8aI\xc6c\xdb[v\xb6\x0df\x87\xd0n\xe7\x916l\xd8\x9a\x09\x8e\x08\x95\xb0\x82RT\x82\xe3\xfb\xf0\x03\x17c\xce\xa9y\x08\xf1\xc3\xb8)~\xe6\xc8\xf0\x19\x07\xb7\xe0\xf0\xa5G{6d\xdf\xdd\xfd\xd1\xee/\x9e\x5c\xd5\xc0\xae\xe3}\x9f\xb1\xa0\xfd`O\xfb\x83k\xb6\xe7\x87\x11P\x15\x91\x0c\xb1\x1at2\x00n\xe2\xd4\xdc(\x16\xdc\x89\x22\x11\xbe\xf6\x8b\x9d\xa3\xcf\xe3:\xbe4\xba\xe7[\x9bZ\xf6=\x91\x86^\xf9\xf2\xd0\xd3\xf9e\x0d\x10\x88\xbd\xebwk\x9f.\xb4\xf5<\xf6P\xe1}\x10\xe4\x82K\xfe\xbf\x9e\x0d\xccE\x0b\xb8V\xfd\x17$\x0f\x9e\xf5\xc7\xd6=\x9e]7\xf6\xcd\x8c\xd1\xfd\xc4H\xfb}x}\xe6\xb7\xf8\xd1='H_\xeePmz\xb1\xf3\xa5\x8d=[>>\xd25\x82rT\xc4\xdb\x85\x06\x03\xeb\x9awb\xc1/\x1d*\x0d]\xf8\x90\xadwojk\x1aB)\x9c]2y[\x02\x1b~\xdevx\xf3\xdam\x876\x0f\xac\x87\xc7]\xac\x06[K\x81K\x89X\x04\x90\xb8\xb1\x0f\x03\x81\x96*\x224iM\xd0\xa4\x0e\xc1\x04jQ\x15\x19=\x8b\xb9\xda\x1bX\xb3\xe3o7'0\xf0\xb3\xfcg{\x9az\x0e\x15\x0a\x05\xcc\x06\xe5\xdb+\x22\x0d\x0b\xb1\x87qw\x1a\xa5\xa0\x84Io\xa6\xd1\x9av3\x8fV+\x8b\x1e\xab\x0b\x85T'R\xcc\x04\x11\xe1-O\x01W\x06%\x96L\xead\x81\xcb[\xfe\x05\x85\x9f\x16R\x96\x17\x8e\xdd\x7f\xdf\xfd]\xae\xee\xe1V$\x8bN\xd7.\xe0\xacb\x02F\x84\x9c\x96i\x88\xd4\xb8\xa3D\x0dX\xa4#\xad\xa7\xb0-\xbb\x11\x9dv\x07\x08K\x1e 
\xaf\xcb'z\x19#\x8b0\xb8\x82\xea\xe5\xbe\x1b\x09\xc4\xe5\xdaS\xc3\x03[\xbbJ4\x0f\xc9on\x0b\x07\xc3\x89\xf2+p\x94P\x07o\x81<\x1b\xa1#P\xa3\x17:\x9ae\x08\xbb\xd5\xca\xd5\x9a\x00\xa3\xcf\x82/=\x5cr.\xa1\x12T0\x90\xee\x87\xa6i7\x89s\x08X<\x82\x90\xc0H\xc7,\x18\x14\xe8 i\xa6c~\xc1\xe8\xb1\xe1D\x1e\xea\x91\xbf\xc40\xe6x\xa5\xfcw\x842@v\xc2B\xee\xaf\x86\xdf\xeaY\xcfhF\xb4\xed\xdcw\x8b\xd93ON5\x0b\xae\x0f\xa6+\xe6\x93\x99\x7f\x9ano\xdc\x85\x0c\x99\xf0\xe2*\xa6\xfcq\xc4q\xd4`\xa4\x18r\xc58F\xa0F\x90\xc4t\xb6\xb6\xd8\x82\xce\xa72\x1f\xeco\xe9\xfb\xbd\xdcn\xde\xdas9\x11L\xcb\xab\xc1\x8c\xd8\x12\xf4\xf2\xe8\xa2{\xce\xb6\xcd\x03\xa7\x0fM\x17\xb1\x0c\xf6<70\xc88\x9dh\xde\x9c\xe9\xaa\xc9\x1a\x04$\xf2\x86:\x13z\x06\x22\xa9\xfez\x0293\x03\x8b\xca0\xa5Xl\x01\xd7\xe4'\xecnK\xceqO\x02H\xc8\x13\xfa2\x8c\x94x\xd4\xa7w\x06\xf2Z8\x9f\xea\xc6\xfeS\x9f\x9f^\xc0\x0a\xd02\xe2\xd1\xf6\xd6|W\xc4B\xa4\xa4\x0e\x92\x1a4\x02\x84\x88\x95\xb8@\xac\xc8\xa1(B\x84\x9e@\xbd\xddG\xc3\x80\x8c\xe4\xb0\xc8\x90\xe7\x8b0\x11\x0e\x15\x03E\xb7\x18\x16]\x00~{\x90v\xddf\xef\xfb\xa7>]ZQ\xfc\x81c\x03_\xef\xe8k{\x9c\x91\x04\x13\x1a\xb2\xd4\x0e\xc6\xccDT\x91\x83+J\x09\xf8\xd2\x05\xe2\x08\x99(F\xb6X_4`\x93e9pJ\x9e\xf0\xc3DP\xd1#\x90\xebJ\xcfk\xd2-/\xf6#\x8fl\x9c\xc0\x0a\xf8\xdc\x99\x87z\x87\xba\x0b\xdf3b\x80%\x95\x93\xd6\x88;\x96\x00'B\xc0\x95\x09\x11\xa2\xce\x1d\x84\xe0h\xd2\xb2\x88<Y\xac{\xbb\x16\x0dd\x0c\x0bspf\x03\x1e\x04o\x19\x10R\xf8\x8a^\xb7\xd5\xe2;\xd3~\xf5\xca\x99r\x05\x1f\xc1\xb2H3t:uY\x9d\xa8\x96\xd5\xce\x04\x8aI \x82$\x0e\xa8wR#) G\x16\x18#\x80\xc5\x13\xaa\xed\x07\x8f<|\x04:\x1a\x10s^\xec\xb2\x1a\x0b\x02\x02\xf9D\xa4\xc4\xb9\x9f\x98a$\x03!\xe1`\x18+\xe2\xc7\xc3\x7fz\x15@\xf3M-\xf9U?\x8e?2\x8e\xff\x86\x86\x81j\x10\x8e+w\xb9\xb2\xac-& 
\x91\x8c\x8d\xf9\xd5`!\xd8\x9c\xef\x0e6\xeaZ\x0e\xc0\x02\xee\x00\xefy\xa1\x07b\x81\xe9\xbb\x9f\xed\x95BJq\xf2\xab\x93rU\x03N\xe8\xbe\xd9Kk\xba\xc7\xe4\xd2\x01l\x8c\x09\x9d(\x08r\xcdv\xe8T\xeb}\x00N\xe3\x0e\xc0=b\x83\xeb\xdb\xb4+\xa9\x127#\x9d\xed\xfb\xc3\xa04#M\x9a\xb1\x8ec\x9f:/o3@&\xce\xa5|\xdd\x82\x95\x08'\xbcaD'\x16:\x96\x17f\xac\xd4\xc0=\x7f\xee\xbc\xf8\xda\xfeb\x80U\xb0\xf7\x97\xeb\x91\x13)\xebM}N\x5cvK\x88U\x06\x91\xe0\xd2\xffX\xbcl\x0a\x0c\x0949\xaa\xd5\xb4\x0a\x80\x84\xf3\xd7YU\xe2U\x93\xb4\xda\x85Z\xb1V\xe8l\xa9\xeb\xc2\xdc\xb2\xfb/k\xb4U{Z\x87^\xd8\xd2\x82I1\xcf,\xd2I\xad\x87\xff\xe1X\xae\xfaHF \x0c}\xbb\xfd\x11{\x97\x8e\xb3\xd1L\x04 \xd6@\xa1\xc9\xf4\xc8bZd\x90\x16\xe5\x8dt\xc4\x04\xdc~;?_\x0d\xfc\xa2\x88dx\xe2\xc0\xb8lT\xfd\xd2 \xa4'(\x17\xa7\xa9w}+\x1d\xf7/1U9\x8b\x92\xea%\x8f\x93\x04J\x07\xear\xc53 \xd5\xab)\xb6^\x7f\xaf\xb3a\xf0\xac5\x13\x13\x10\x1bL\x8b\x13a\xc5\xd8T\xf3P\xc4\xc9\xdcK\xa5\xcdh!\xf4R[;z\x8dO\xfeq+g!\xf1\x91\x965\xe2t0)\x9b6\x19b\xd4\x1f\x97\x16\xd3\x85\xa6\x0c0\xc9A\xa2\x91\xf3\x8aX\xfa\xcaEx\xfe\xc2\x85k\x95\xdd\xf6\x80\xa3\xa2o\xd0 VW\xa2\x0d\x9aJ\x5c\x19\x09\xc6\xab\xe5\x90k2>\xefN\xf3t\x97\xc5\xad\x1eC\x8cYEi\xf5\xe8r6\xaaa\xf4\xc1)yj\xffU\xa9~+LRd\x9aT\xeb\xef\xec\xa98\xf3\x1d+\xbf\xa3\xaf\xafwfm-\x9e\x8d\x16\xb8\xaa\x84[\xa4\xc8\x12\xaa$H\x8f\xd4\x86\xbe2\x16\xe9\xa4q\x9dH\x10\x91$\x90Lpl\xef\xc5ec^\xf7r\x1b\xedh\xed\xc7\xd1\x1d\xa3r\xc5\x04\x128\xdf\x08\xe6NN\x8dO\x8d\xc4]nNO\xb9:\x98\xa7S\x83JT\x0b\x0c\xc6BU\x952\xd10%,f\xc8\x143\x84\xcd\xf4\x15\xc5\x13\x5c~\x7fYz\x93\xfe\x9d\xdf\x0b\xb2\x87\x8d\xd6\x03\xf7n5\xa6XE\x94\xc3z\x22 
\x12A{1\x89H%!\x92\x88U\x12R#\x86\x17w\xbdq\xf7oF\x99\xe7R\xd6\xbe\x0d\x03f\xbe5%\xceWf\x92\x83%mf\xf0\x86\x19\xd2D\xf2\xd9`\x1a^\xd8\xf9\xda\xd2\x06w\xfdj\xd6z\xd4fm\xad\x19-\x04\xc7\x07\x0a\x9b\xe4\xbf+s\xd2$&\x93\xc8\x7f\xb3\xe7\xcc*\xc2w\xf1nHG\x09\xf9\x5c\x8a\xda\xed\x0c\xf6t\xf6\xe1\xf9-'\x97\x16\xdc5\x03\xef$\x18\xdea\xfc\x07;\x00\xdf\x09\xb1\xdb|\x84\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x04y\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x04\x00\x00\x00\xd9s\xb2\x7f\x00\x00\x00\x02sBIT\x08\x08U\xecF\x04\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x03\xf8IDATx\xda}\x95Oh\x5cU\x14\xc6\x7f\xf7\xcdd&3\xc9\xa4\x9a\xaad`R\xff \xd8 n\x84:\x82\x88\xb5\xcd\xaa\x8b\xb4 \xf8\x17\xf7\xba\x10,.\xc4\x12#\x86\xa0\xb8\x90\xb8S\xec\xc2\x95\x9aV\x10\xdbE7\x9aP]\x08!\x0b\x11)\x01\x89\xb808\xa1\xa1\x7f\xd2\x99Lf&\xf3\xee\xf5\xf3p\x99\x10Gr>\x0eo\x0e\xef\x9c\xef|\xef\xce{\xe7\xb8@\xbf}\x98\xf7'\xdd\x14\x13\x94\x05\xa8\x09\xab\xe1r\xb2\xf8N\x9b>\xeb#\x98+\x87\x19^\xf1\xa5\x80'\x10\x93H\x90\xd7\xf9\xd2\xcdN\xd7\x0e \x98\x1d\x0c\xd3\x9c\xf5\xc5.\xbb\xb4\xe9\x90\xe2\x81\x84\x0c9\xf2\x0c\x90%i2\xef\xe6fZ\xffK\xf0\xdeX\xf8.TSZl\x93P\xa6B\x89!PTg\x9d\x1a^\xd1\xa0\xc8\xdc\xb2;\xf3\xfeF\x1f\xc1\xf4c\xe1\x8a\xafth\x8a\xe0\x18\x8fp\x88A\xeb\x09)\x1da\x87_Ya\x90\x229\x92uwj\xee\xb7}\x04\xe7\xc6\xc2\x8a\xaf\xb4\xb8\xc3(\xc79\xc2]\xe4q\x06\x08\xa4BWJ6\xb8\xc2MFD#\x8ac\x1fl\xf4\x08\xde\x1e\x0cW}U\xd2y\x5c\xdd\xc7(\x90%#8!\x00^\xd8\xa5C[\x0d~f\xc5\x1e%Yv\xc7?jA\x02\x90N\xfbj\x9b-\x95W\x19gX\xb7\xf3B\xce\x5c\xbf\xcd\x0b\x0c\x09w\xf3\x0cO(\xb3\x8d\xaf\xa6\xd3Q\xc1[\xe5\xb0\xd6-6(\xf1\x12G\xe2\x93gH\xac?f\xc1\xd05\x15;Rq^>L\xb6\xe9\x1e\xfe\xb8\x96@:\xd3-\xb6h1IE\xe5\xb9H 
\x8a\x1e2\x86\x01\xd3UP\xe9s\xb4\x84n1\x9d\x01\xf7F>l\xee\x96\xb6x\x8ag\x19SyV\x89_\xa8OK\xd7\xb3`v\x8e\x86\xe2<\x9f\xd0\xa5\xad{\xdb,\xb1\xc8!\x06\xea\xee\xde\xc4\x9fHK\x1d%O0\x1a;9\x82\x0anq\x03\xa2y\xb6\xb8\xce\x06\x89\xe9\xc8\x09O\xea\xda!-\xf9\x13Iz:\x15\xeb\xb8\xfdqY\x94\x22\xdf\xa5\xc5\x8e\x10M\xf16\x0d\xb9#!+\xe4(\xf2\xa0\xaaR\xd2\xd3\x89\x9f\x10\x01\x0f\x10\x94\x10\xec\xe80\xa1My\xb4xtM\xc0\x09\x19;\xa3\xa3F\xe0'\xb2\xbe\x9c\xaa`D\x92\x1c)\x01\xd0\xd5\x08\x02\xd1LQ\x03G$0\x9d\xf7\xa8*\xc5\x95E\xe0I\x19F\xf2c\x7f\xf8\x85un2N4\x91\xdd\xe6:\x09D\x8aD\x18E\xfd\xf1\xe5l@\x8870\x87\x06\x7f\xf3;-\x88V\xa7\xc1\x0d\x0ec\xd6S\x12\x0cI\xa8\x05\x1c\x0d\x0b0\xc7d\x96\xe4@\x7f\x1c\xf3nc\x14\xb5\xc4\xffK\xa0\xb0\x8b\x17\x8c\xc6\xceyDn\xb6?\xb6\x0c/l\x1a\x81\xaf%a\x15q\xff\xc9\xae=Sj\x099\x0a\x94\xe4\xd1z\xb1\x95\xc7\xbcUS\x14V\x93p)\xe8\xe7\x9a\xfd)\xddH\x90\xa7\xc8\x88<Z/6\x82\x98wMU\x8a.%a\x89z\x86\x1dVh\xd2\xa1+x\x15\x0cS\x92G\xeb\xc5\x92l\x19\x1d~\xa2)\x02\xeaa\xc9\x05^\xfe4\xbc\xa6\x09\xc8\x9b<\xc4\x10\x85\xf8F:\xfek*\x8f\xdfB\x8dw\xed\xd3r\x9f}\xf5z\x02a64\xc5\xc6\x02wh\xd1\x11$\x11O\xd8_l\xd2uW9\x9f\x13\xd0\x034\xc3,$\xf0u\x8d\xf9\x8c\xf8\xfe\xe2*\x0d\xf1\xb7\x05\x1d\xa9\xc1\x1bR\x83&\xb5\xf5\xff\x81?\xc8\x8b\x80yU\x92\x05`\x8e\xc9L\xb5\xc0\xf78&\xb1\x82}C-NE\xd4]\xe5\xdfR@\xe5\xcb\xaa\xda\x1b\xaa/j\xa8\x06\x9b\xc9\xf7\xf3*\x15r\x82\xbd\xdc\x00H\x85\x95or\x9e5\x9b\xcbNCuao\xa8\x1a\x85\xc6z\xa8\xe8\x90\xf0L\xf14\xc58\x95\xa4 j\xfa\x91opv\xc4*?\xb5\xb0o\xac\x9b\xbd0\x86\x16\x8b7\xa1C\x1c\xe5Q\xee\xe30\x8e[\xea|Mh\xd8\xc0\x13\xe52g.\xf4/\x16\xa3\xb0\xd5\x16\x8avd\xf6F\x04\x88cd\x00;\x13[m\x17Z\x07,\xd7\xe7m\xb9\x86\xd2\xde\xc7\xe5\xcc\x05[\xae\x17\x0fZ\xae=\x92|8\xc9\x94\x9b\x08q\xbd\xbbZX\xe5\xb2[\xbc\xd8\xa6\xcf\xfe\x01\x04\xa8\xe5\xf1\xf0\xc2\xe8\x8a\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x07\xc1\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x07tIME\x07\xdb\x05\x1b\x00/\x07w\xad\xb4l\x00\x00\x07AIDATX\xc3\xbd\x97kpU\xd5\x15\xc7\x7f{\x9fs\xee#OB\x1eD%\x90\x84\xf0\x08\xa1@\xc0\xd1\xc4\x947\xa6L\x91\x19F\xedT\xd0\xa9C5\xd2Z\xa6\xd3\xb1\x1d\xfa\xc12\x8aX\xc7\x19\xa7\x9dQ\x99*\xb6\xa0-\xa1\x8e\xd62N\x19Z\xa0P\x98N\xe1\x03#B\x1b\xd2\x87\xd4\x84\x0b)\xc1\x90'\x09\xb9\xb9\xb9g\xef\xd5\x0f\xe7&\x04\x84\x00U\xbb\xbe\x9c}\xce\xdeg\xef\xb5\xd7\xff\xff_ko\xf8?X~~\xfeu\xfb\x14\xc0\x92%K\xbc\x03\x07\x0e\xe4\x03\xde\xe7\xbc\xb6\x02\xba7o\xde\xdc\xbdn\xdd\xbaQ\x07N\x93/\xc8:::\xfe>\xb4\xd1k\x99\x9bz\xa6\x03|\xf8q;\xbd\xc6C\xac\xc5\x1a\xc1\xf5<\x10\x8bR\x1a\xc1\x82\x0c\xedK\x0d\xb7\x05\xc1X\x83\x12A$\xf8h\x81\xbb\xc7k|\xabioo\xf7\x01\x0d\x98\xd1\x1c\x00 \x1c\x8e\xf0\xfe\xdb;\x984\xad\x82\xb2\xa938|p7\xc6\xf8\xb4\xb6\x9c\xa1\xb8l*ZkD\x84\xb6\xf3\xadD\xa3Q\xa2i\xe9\xb4\x7f\xd2J\xcd\xe2e\xe4\xe6\x15\xa0\x94\x06`\xd0\x08Z;h@k=j\xe8\xafp`p0A\xed\xca\x87\xb1bI\x1a\x9f\xaa\xc5\xcbq\x1d\x07\xad5\xbe\xf1A@\xa1p\x5c\x07D\x10\x04\xa5\x1d\x06\x13\x03$\x8da(,F@)\x85R7&\xc9\x15\x0e\x94\xe6@4\xa20Va\x8c\xc2s\x15Z\x03J\x00\x8dH\x10}\xa5\xc0Z\xc1\xd1\x1a\x94\xa0u\x14k-\xbe\xef#\xd6\x22\x02\xbe\x0d\x9c\x18\x82\xe5\xa6\x1cp\xbc0/o\xfe\x19UU\xd5TV\xce\xe6\x97\xf5\xbf&+;\x9b\xf6\x0b\x17\x18\x18\x18\xa0\xa6\xa6\x86#G\x8e\xe0\xb8.s*+illd``\x00+BNN\x0e\x8b\x16.$77\xf7\x0a\x09\xdc\x08\x82\xa1 \xcd\x15\x91\x0fz\xba:H\x8bF1\xc6`\xc5\x10\x0e\x87\x11\xc0\xd5.h\xcd\xc5\x9e\x1e\xb2\xc6\x8c\x01\x11\xac\xf1\xd1\x8e\x03\xca\x01\xb1\x80\xe0'}|?\x80\xc27\xc1\xec\xe7\xcf\x9fo\x98<yr\xe5M\x91\xb0\xa9\x1b\xfa:,b\x05k,\xa1\xb0`\xad\x05e\x10\x11\x14!\xb8\xd0\x1f\xb8\xad\xc0u\x04k\x13\x18k\xb1\xc6\x80\x5c\xe6@M\xb1\x87o\xd5\xadq 
\x14\x0a\xb3\xeb\xcd\xad\x14\x97N\xe6\xce\xea\x05\xec\xde\xb9\x9d\xc4\xc0\x00\xd1\xb4t\xb2\xc6\x8c\xa5\xbf\xf7\x22\x91\xb44\xbcP\x88\xb3\xb1fl\xd2'-3\x13%\xf0\x95\x95_\xbf\x1cO\xcbh\xd2\xbf>\x04\xc7\x9b;\xb8d\xc2\x00\xf8~\x12\xcf\xf5p\x5c\x0f+\x16D\x02R\x01\x88\xe0z.\xd6XTj\x8a\xc4`\x02\x11\x1b\xac/PS\x1c\xc2\xb7p\xee\xdc\xb9\x9b\x87\xa04G\x91\x1eu\xb0\xd6b\xc5\xc3u]\x1cG\xa3\x94\xc6Z!\x99\x1c\x1c\xf6Y\x91\xbc\xec\x10\xa0\x94{y;#TpK\x10x\xa1\x08ol}\x93\xf2\xf2r\xaa\xaa\xaax\xebW\xf5degs&\x16\xa3\xa8h\x02\xf7\xdd\xb7<\xe0\x82R\xc1\xe42\xb4\xf8\xe5\xf6\xd5vK*\xe8\xed\xe9&\x1a\x0d\x07$\x14K(\x14\x0e\x068A\xe2I&\x12#v\xac\x86\xb3\xf2P[\x13$\x09\x11!\x19d#ZZZ\x1a\xca\xca\xcanR\x05]\x96K\x9d`}\x83o|\xc2!\x8dEp\x1d\x17k-\x92\xcaI\xdaq\x82* A\xb8\xadX\xb4\x93\xda\xa9\x18\x06\x93>\xf3&E0V\xdd\x10\x86O\xa9\xe0\x17\xaf\xfe\x94\xb9\xd5\x0b(\x9f1\x8b\xdf\xef\xdcN$-\x9d\xd6\xb31\x0a\xef(\xa2\xbb\xbb\x93\xb2\xa9\x15\x9c<v\x94\xec\xb1\xb9\x8c\xcd\xcb'\xde\xdfGn^!\x9d\x1dm\xf4vw16\x7f\x1c\x15s\xef\xa1\xbb+LvN\xde\xad\xa9\xe0D\xac\x93>?\x8c\xb5\x06\xb1\x16\xcf\xf5PZ\xa3RUO;.\xc6\xf8\xb8\x8e;\x9cr\x83\xfa\xa3\xd0J\xa1\xb4\xc6\x18\x9fd\xd2\xf0\xe5\xb2t\xac\xb5\xc4b\xb1\x86\xd2\xd2\xd2\x9b\x83\xa0,\xc7%##\x84\xef\xfbX+x\xae\x8b\x00\xda\xd5\x80\x93\x12\xb8\x9b\xaa\xae\xa3\x07U\xaeA\xc0]\xc0\x8a\x11\xcfk\xd4\x02\x8fM?~\x91\xa5\xf7\xde\xcb\x9c\xcaJ\xde\xda\xbe\x83\xe2\xe2bZZZ\x88\xc7\xe3$\x93If\xce\x9c\xc9\x89\x13'\xc8\xc9\xc9\xa1\xbc\xbc\x9c\xdd\xbbwS[[KSS\x13\x1d\x9d\x9d\xac^\xb3\x86\xc4\xe1#\x84\x0b\x0a\xc8\x9d=\xeb\x0a'V\x00{\x97\xd6nj\xde\xbf\xef\xb5\xf7\xe0\xdc\x83#\xd6\x9e+\x22\xd2w\xb1G\x8c?(~\x22.\x83\xf1K\x22&)b}\x111W\x9ds\xccuO@'\x9f\xd9(\xa7\xea\x1e\x90C\x95\x93\xa4\xe3\x83\x0f%\xd6\xd2\xf2\xb7T\xf8x\x17\xb6wl~^\xf6\x94\xde\xd6\xff.\x14\xd4_\x1d\x81S\x1d\x86\xbeO\x06\x111\x18kp\xb4\xc1s\xbd\x14S\x14\x82\x0d\xf0\x17\x092\xa3v0~\x12\xa558\x9a\xf6\xd7_\xa6\xe4\xe4\x1f(\xacX\xca\x1d\x8f\xdc\xcd\xe1\xba\x07\xf1\xbf\xf9\x94\x05\xcc6\xa8\x9f\xff\xc2\x0f\x1evO_d\xfe\xf7\x7f\x18\xf9\xd
3\xab?9\x13\xff\xe7\xd9\x82\xabNDa\xde\xff\xed\x0e\xa6L\x9fI\xd9\xb4\x194\x1c?\xca\xc9\xe3G\xc9\xcd+\xc0\x8apG\xd1D\x0e\x1f\xdc\xcb\xe4i3\xb8\xd4\xd7KFF&\xb3\xab\xe6q`\xf7N\x1c/D\xb4\xbd\x95\xf4\xd3\xa7\xc9,j#~\xf6\x0c\xd3\x97\xdd\xcf\xbe\xfaWf=\x06{\xe7m\xfcN\xed\xc5\xc66\xba\x8cO\xc1\xc41\xaa\xf7?m\xb1t\xe8\xbdB\x05\x7f=\xddE\xaf\xef\xa6R\xb1%\xe4\x84\xd0\x9e\x0b\xd6\x22\xa8@\x09Z\xe1h' \x9a\xd8\xa0\x1e\xa4p6\xda\xa1\xe3\xe0>\xf4K\xdf\xa0\xb4b\x1e\xfd]q\xa2\x13\xf2%9>\xac\x92\x7f\x8e!I\xc3\xed\x8b\xca\xe5\xc0K[\xfe\xfdh\xdcL\xd9v\xb5\x0c/\xf5\xf6\x92\x9e\x91\x86\xf1\x93\x81\x0a<\x0f\xb4\x93\x1a&\xa3T\xb8T~\xb4\x06\xa34g\xf6\x1f\xa2a\xfd*J\x0a\xa73hB\x10M#\xd9\x1f\xe7\xb6\xda\x89\xf2\xc7\x0do\x9c\xaaK0\xf5u\xe0[Ws\xc0\x0d\x85\xd8\xf4\xfc\x0b,X\xb0\x80\xaa\xaa*\xde~\xe77$\x12\x09\xb2\xb2\xb2hkk\xa3\xbf\xbf\x1f\xc7q\xa8\xa8\xa8\xa0\xa9\xa9\x89h4Juu5{\xf6\xec\xc1\xf3<222\xe8\x1f\x18\xc0D\xa2dd\xce@\x17%h\xfd\xf9!\x14\x10\xf1\xa01\xf2%u!\xc1c\xa4\x16\xff\xf4\x89\xa8\xa7\x87H$\x12@`-\x9e\xe7\x0d\xcb\xc8u\x03hnT\x5c\x00\xde\xbb\xffkL\x1b\xdbJ|\xeba,\xe0EB\xd8\x81A2\xca\xc6IC\xe1X\xf5\xaf\xbf\xfc\xe3\xce\x0dpl\xe4?s>\xeb\x05\xe4t,&\xfb\x0f\x1f\x91g\xa7W\xd8\xc6\xb5\xcb\xa4\x09\xa4\x19\xe4\xa3G\xbfj\x9fxh\xa1\xb4\x17\xdfn\x9bA\xda\xa6L\xb0\xf5K\xabe=\xcc\x19ND%%%-J\xa9'\x87.(\xff\xa3\xd9e\x9a\x076\xaf_{\xcf\xb8\x17\xb7 @\xdf\x13\xab\xe5\xa1wv5Gzz\xbf\xb7qE\xed\xef^\xd1\x9e\xf4}\x14S\xab\xac\x96\xbc\xef~\xfbX\xe6\x96m\x85W\x9cb?\xab=3>\xbfl\xcb]\x95\xbe\xdc5\xd7v\xaf]c\x17fe5I\xddj\x00\x16\xc2\xe2\x8d+\x97\x8b\x14\x8d\xb7\xadO?%\xcf)\xb5\xe7s\xbf\x05?\x07l\x80\x09\xaf-_$\x8f\x17\xe4},\x8f?\xa2V\x8d\xe8_\x0e\x8b\xf6>Y'\x1ba\x0f\xc0\xc6/\xe2*\xfe\xf4\x94R~\x94\x9b9nG(\xe4\xd4]\xa3\xff\xc0\xfc\xf9%\x00\xcf\xa6\xde\xff\x0b\xebg!\xe8\xca\xd1m\x8e\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x04\x8d\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x04\x00\x00\x00\xd9s\xb2\x7f\x00\x00\x00\x02sBIT\x08\x08U\xecF\x04\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x04\x0cIDATx\xda}UMh\x5cU\x14\xfe\xee}o\xfe^2Q\xe3\x0f30V\x05A\x07u#\xd4\x11D\xacmV]\xa4\x05\xc1_\xdcw\xe3B\x5c\x882D\x0cQq\xa1\xd9+\xb82\x98V\x14\xdbE7\x9aP\xbb\x10B6J)\xb3\x89\xb8\xb0\xf0\x02\xf1\xaf\xced\xe6\xbd\x99\xf7\xee\xf1\xf3\xbcKB\x1d\xc9\xfb8\xcc\xbds\xcf\xf9\xcew\xff\xce5\x82\xe9\xef\x83\x8a;e\x16\xd1F\x93\x00b\xa2'\x97\xec\xc6[\xe9\xb4\xef\x14\xc1JS\x96\xf0\x8a\xab\x0b\x1c\x04\xde\x09\x16\xb4>\xd6\xccr7>\x82`\xb9*]\xbc\xee\xa2\x0c\x13\xa4\x18#'\x09\x18\x1c\xa0\x8c\x0aJ\x08a\x87X5+K\xc9\xff\x12\xbc\xd3\x90o\xa4\x93#\xc1>,\xb5\xb7P\xc7\x0c\xc0^\x1f7\x10\xc3\xb1W%\x99\xd92g\xdf\xdd\x9d\x22\xe8>&\x97]k\x8c!\x09\x8e\xe3!\xdc\x86\xaa\xe6\x04u\x8c\x89\x11~\xc26\xff\x8b\xa8\xc6\xde0\xa7W\xae\xddB\xf0vC\xb6]+\xc1\xdf\x98\xc7\x09\x1c\xc3\xed\x94l\x14\x80 '2*\xd9\xc5e\xfc\x819\xd2\x90\xe2\xf8\xfb\xbb\x07\x04oV\xe5\x8a\xebP:\x1eg\xf6\x06j\xcc\x1c\x10\x86\x10\x00\x8e\x98PE\xca\x04?`[\xa7b\xb7\xcc\x89\x0f\x13\xc0\x02\x14\xd9u\x9d\x147\x19\xde\xc1\xbd\x98\xe5p\x85(\xab\xb1\xadV\xc3\x0cq\x07\x9e\xc1\x13\xf4L\xe1:y\xd7+x\xa3);Y4@\x1d/\xe1\x98\x9fy\x00\xab\xf9\x8bO\x14\x99\xaa\x18Q\xc5\xa7\xb4Y\x84C\xf3\xe0G\xb1e\xfe\xa5,J\x90`\x01-\x86\x97=\x01)\x0e\x10(J\xaa\xab\xc6\xd0\xe7\x90\x10Y\x94/\x01\xe6\xb5\x8a\xecM\xea7\xf1\x14\x9eE\x83\xe1!\x1d?c\xb6T1f+\x07T\x8d\xc5{\xfa\xff\x08\xfb\xd8\xc4\x06\xf7\xa9\xd47w\x87\xee\xa4\xab\x8f\x19\xd4\xc6\xbc\xcfd\x00\x95:\xa4\x8d\x94B\x8a\xe3\xa4V\x22\xe1\x04O\xe2*Gl\xdd\x9e\x0c\xf33\x8e\xac\xf7\xeb\xc6\x85\xde-\xe3`\xe2)\xc6\x98x\x82\x92\xaa\x08\x892\x22<\x80\x1d\xfe#g\xack\xe7J 
\xd8\xa7Y\xcdO\x02\x22U\x8a\x01\xf6\x89\x91\xce\xba\x98J\xa0k\xf40\xc7s\xb8v\xe8\x9a9\x03\xe6\x94?\x87\x00\xfa[\xdc\x86D5\xe4\x80\x06\xc9\x01\x81\xa5\xdd\xa5\xabc\x9a$pl\xcc\xaa\xfc\x22?\xf0#C'^\xc5\x90f8ZE\x0d\xf0\x14\x96\x98\xd7\xab\xe6\x9a\xa1\x80\xf0\x03P\x03%\xff\x8e\xbe\xc7\x80\xe4u\xc2\xa0\x0cx\x0a\x1a!\x0a+\xb1\xb03(h\xd4\xe0s\x04\xba\x5cu\x9a\x1e,B?\xef\xf7WA\x11[\xf7/\x01\xbb\x19(\xc8\xd3\x94t\xbbx\x8c\x99}\x8eV\x1c\xaf\xc0\x87\x8bz\xee)\x81\x8b\xad\xf4\xc0\xa1_0\xd19\xe5\xea\xc0[\xe0O?\xe5\xd3f\xd8\xae\x10\x1a\xee\xfdzJ(=+\x17\x85\xcd\x1d\xdd\x94\xcc\x13Tt\xc9\x22\x0d\x9fS\x8a\x88\xfdjA\xe0\xfd\xae3\x8a\xbd\x8bV6\xd1\x0f0\xc26\x86\xe0\xa9#\x1c\xaa>;\x09\x14\xb3^\x05%\xab\xc7\x18W\xe9M\x05}\xd9\x0c?O_^\xb3\xe7*\xf8\x0e\x8f\x22\xd2\x99Z|L3\xf8\xcf\xa7\xe1\xb9n\xefo\xf8\x8a*-dm-\xb5\x80,\xcb\x90lX\xe7%M0\xf6\x17\xc8A\xa6\x823\x1dM\xf0\x09\x84\x89d(\xcbZP\xbe\x88\xb1\x1a\x90\xf1W\x5c\xc1\x00#\xa4\x04\x97T\xe1\x14E{\xa2##j\xfd\x99\xdeL\xb9\xcaH\x84\xc5c\x80\x85\xa0S\xc3\xb70X(\x02n)j\xbe*\x82\xd9\x19\xfe5j`\xf8\x16\xa3\x0e\x8b\xea\x8b,\xaa\xa25\xf9>\xbc\x8a\x16\xcaDxpx\xa8B\xc3\xf7X\x8bv\xb4.\x1b\x16\xd5\xf5\xc3\xa2\xaa\x14,\xeb\xd2\xca\x90\xd2y\x11O#\xf2U\x89\x0a\xbc\xa6\xef\xf1%\x8c^z\x86\x9f^\xbf6\xf5\xb0\xbc\xd0\x00\x1f\x16\xa7Bgx]\x1f\xc1=\xb8\x93!\x7f2\xf3ub\xa0'\x92\x94[8{~\xfaaQ\x0a}\xda$*\x96,+v\xc2\x97\x91R\xb1&\xfa\xb4\x9dO\x8ex\x5c\x9f\xd7\xc7U\xea\x87\x97\xcb\xa8\x11\xfa\xb8^8\xe2q=$\xa9\xc8),\x9a\xb6\xf8\xe7\xdd\xc4\xd2\xc3%\xb3q!\x9d\xf6\xfd\x07C\x0c\xf2?K\xf9`\x0d\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x05\xcf\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x05LIDATx\xda\xc5\x97\xdboTE\x1c\xc7\xbf3s\xce\x9e\xeeni\x0b\xbd@\x01C1\x05B\x10\x0d\xf4\x22\x18\x95D0i\xc4\x17|21\xc6\x17\xc3\x93\x89\xd1\xc8\x8b\xfaP#oj\x8c\x89O\xfc\x03\xc6K\x0c\xf1\x01!B\xa8\xc1j\x88D\x90@)\x14\xac\xb5\x86KK\xaf\xdb\xdd={\xce\xcc\xef\xe7\xec\x9cficb\xd2\x98\x13f\xf3\xdd93\xbf\x93\x9d\xcf\xfengW03\x1e\xe6\xf0\xb0\xc2\xf1\xca\xe9\x17wok\xda~lh\xe6\xe2\xd6\xbf\x0a#\xab\x94\x92h\xcb\xad\x9d{z\xdd\xfe\x9b\xf9 \xf7\xce\xe1\xad\xef\x0d`\x05Cb\x05\xa3\xe7\xcb\xce\xc7\x0emz\xf9\xfc\xab\x9d\x87\xbb\xf6\xb4\xec[\x15k\x82\x8e\x09\x9d\xf9\xed\x8d\x07\xda\x0fu=\xd5\xd2w\xe6\x83K\x87{S\xf3@N\xe4>\x9d)M{>\xd5\xa1\xab\xad\x17\x1b\x1a\xda\xdd~V\xe6\xb1)\xb7\x05\xe7\xee\x9e\x94\xf3\xa5\xe2'\x00\x9eI\x05\xe0^a\xa2\xe7\xdc\xdf\x03\xe8\x5c\xb3\x05\xf3\xb9k\x10A\xc5\xed\x87\x98\xc4\x99\xdb\xdf\xe1\xd4\xd8\xb7\x18-\x0e\xefL\xcd\x03aTi\x1c\x1c\xff\x11\xdd\x1bw\xa3I\x99\xe5F3\x87+\xf7/\xc2W\xb215\x00\x133 \x08\x11E(\xc7\xcb\x01<\xa3\x9d]2#5\x00\xd2\x04!$\x8c!\x94u\xb4\xccVG1\x8c6\x10@\x9a\x00\x0c\x08\x86!B\x18G\x80X\xea\x01\x03c\xed\x02\x94\x1e\x80Y\x04\xd0d`,\x80\x00j\x10\x01i\x18\xc3\x10\xd6\x9ej\x0e\x08\xc5 bD:^\xe6\x01M\xfa\xffy\xe0\xe8\xe0\xd1\x17\xaeN\x5c\xed\x1f\x18\x1d\xd869w\xafA\x92\x84d\xc0\xbeC\xb1\x80\x94\x02JHw81!2\x0e\xa0\xc6\xa0\x8dv9b 
\xd0ul\x03{\xbe\x84\xaf\x14<_\xc1\xf7\xecuF\xa15\xd76\xff\xc4\xda\x9e\xeb\x1d\xab\xb6\xf7\xbf\xb4\xf9\xf5\x13\xb5N\xd8\xf7E_\xefTi\xea\xf8\xc6\xfa\x8d=mA[\x03\x99$\xdel\xd8\xcd\xb48\x1bwM\xd0\xdaX\x00\x8d\xd8\x98%\x8a@D\xee^c\xc8\xddk(\xb9\xdfp\xb2W\xef564\xaa\xd6\x9e\xf9h\xea\xf8G\xc3o\xb8\x8e\xe9\x1eF\x1d\x9fw\xfc\xb0\xabm\xd7\x81\xbd\xeb\xf7\x02\xc2`.\x9aAu\x9f\x85\x15\x5c\xdc\x935\x0c\x84\x00r\xb9,\xee\x9a?!\x93$p{\x1b\xbcGQ\xa9\x84P\xd2\x83'\xfd\x9a\x94\xf2\x90\xb1{\x81\xca\xc2g\xcf\xda\x0d\xc6\xc2!\xdc\x89n\x9e\xf9\xec\xc9\xef\x0f\xb8\x10\x8c\xcf\x8dw#\x06z\xda\xbb0T\xb8\x00-4 Q\x93\x84\x80\x14\x02JJ7\xcb(Y\x0b'\xc7\x87\xb1\xe8\x06X\x00L\x9cx\x81\xaa\x22\x042\x8b\xbcX\x8d\x91\x89Q\x9c\x1a:\x8d\xf7\xf7\xbd\x85\xdb4\x8cyLv\xd5r\x80\x88\x9a\xc6\xa6\xc602u\x1dEQN\x0eVp\xb3\x10H\x0e\x96\x02\xcc\xb2\x06\xc2\xe2\x01Dm0\x83\x00x\xc2\xc3\xba\xecz\xcc\x95\x8b\xb81}\x0bgG\xbfF\x1c\x11\x00\xd8\xf50\x8a\xc1md|\xd9T\x03\x00[\x91k\xb5\xa8\xa88\x01 \x07\x91$\x1fK+\x07\x90\x1c\x0e\x86\xaa\x02A8\xefdT\x1d\x9a\x83V\x17\x8e0\x0a1\xb10\x81o\xfe\xf8\x0a\x85J\x11\xa5(\xb6\xa0\x12\x90\xe42V\xb3v\xf9@\x9e\xc0R\x80Y\x10\x9af\x0a\xb3\xb8^\xbc\x09?\xf0\x9c\x82L\x80l\x10\xc0>\xe7\x91\xf7\xb2\x8eI2\x01,]G\x0cD\x00\x22\x8d\xf9h\x1a\xc3\x93\xbf!\xd4\x15h\x22D\xc6\x00\xd2\xc0S\xca\xca@\x19W.N\x19\xcf\xc3\x02\xbb\xf0\xcc.-\xc3A\x10\x0e\x0e\x0e\xff\x82\x1d\xebw .\xc5@9\x09A$\x04H\x85(\x8b\x18\x9e'\xdc\x87\xd65H\xdc\xc1-\x04\x9erkea\x1e\xf1;Q\xd1\x06\xbc\x987\xe43L\x96\xa09)[cU\xefe1e\x93\x97du\x8f\x7f~\x00\xa0q\x04\xc0\xfe\x85\xf2B\xdd\xf9k\xe7\x81x1\x04\x9cP\x0b+)\x92^ 
\x94\xc0\xb3{z\xe1\xafV\xc8T3\xbc\x0a!%\xc2B\x8c\x9f.\xffj!%\xaa=@YyK\xe4\xfb\x0a\x85\x8cB&c\xd7\x19\xaf\xacY\x1f\xa9\xf5\x01\xfe\x90\xaf\x81\xd0\x03\xe0,\x08\x0b0@M\x1a`;SU\x94\x5ck\x13\xbbC}%\x1d@\xe0{\xaeD\x99\x01\x02\xbb\xba\xa7\x7f\x89\xacx\xce\xdaN\xda\xb9\xfbD\xdf\xc8\xd0\xb2N\xc8\x1f\xf3\x15\x00\xcf\xfdg\xdb|M2\x0b\xd7\xf5\x9cG\xbc*\x80\xb2\x00\x9e\x87Jr\x00$\x0b\x8c\xbd;#R\xf9MH&\xa9\xf3\x98b\xf0b|\x94L\xbc\x00h\x07@)\xfe\x1ep\x873\x0bT\xe2\x18\xc2\x18\x18J\xbe\xb5X\xecF\xc9+E\x80\x04\x02(\xeb\x0a8\xd2P2\xb2J\x1aR1\xa2\xa4\x9d\xa4\x0e \x18\xa5(D\x14F\xae\xe6+\xb1F!\xac ,\xf9 k\xb3<)\x02\x88D\x0bq\x08*VP\xac\xc4\xae\x17\xb8\xf2,f\xc0\x92\xacdz\x00^F\x15\x1b\xb2A\xbeE\x05\xb8Q\x9aK\xda\xb4U\xf5\xb5)\xee@=1B.\x15R\xfbg\x94\xad\xf7\x7f\xdf\xd9\xde\x82\x83\xcdk\xb03n\x86\x9cR\xc0\xb4D\xb7\xea\xc5V\xde\x82<\xea!H^H\x0d \xdf\x1c\xbc\xb9ym\xa0W7*\xb4\xe6\xf3 c\xc0Dx\xa2\xf9q\xc8@Add$`\xdeN\x0d\xe0N\xff\xec\x05\xd9\x1c=?\xee\xf3\xe5\xfb\x22Z\xa0,\x83\xeb\x18*\xab\xc2 \x9b\xb9\x12\xa0n\xffD\x7f\xf1\x12V0\x1e\xfa\xdf\xf3\x7f\x00j\xf0\xda\xe8\xbc\xba\xd0\x0a\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x03\xb2\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x03/IDATX\x85\xbd\xd7O\x88\x95U\x18\xc7\xf1\xcf\x13\xd3h\xf8\xa7\xd1f\x143\xb1H\xa1?CHX\x9b\x88\x0a\xda\x95A\x0b\xc9e\xb9h\x11\x82D\x8b6\x81\xfd\xa1]\xd1\xa2U\x9b\xca\x8d\xb5\x08\xda\xb4\x88\xb0E\x13I\x90P\xd0\xa2i\x92\xcaF\xb1\x1auf,)\xcd\xe6iq\xce\xc5\xd7;s\xe7\xcf\x9d\x9b\x0f\xbc\xdcs\xce{\x9es\xbe\xe7\xf7<\xef\xf3\xbe72S\xb7\x16\x11}\x90\x99\x97\xba]\xe3\x9a\xaew/\xf6*^Z\xce\x02\xcb\x05\xd8\x80\xd5=\x07\x88\x88\x8d\x11\x11\xb5=\xd0j\xcfaCX\xb5\x1c\x80\xbe\xb6\x8d\xf7\xe1\x09\x9c\xc2pD\x8cc;\xee\xabcs\x01\xfc\xb9\x1c\x00\x99\xa9&\xe2n\xcc`{\xed\xf7c/\xfe\xc1=\xady\xcd\x0b?\xe2\xa3\xda\xbe\x13\xab\xe7\x9a7\xdf\xd5\x0c\xc1.L\xe2x\x05\xbb\x98\x99o\xe3\x1dl\xee\xc0\xdf\x0c\xc1!\xdc\xbbT\x01\x9a\x00\xc7\xb1\x1e\xfb\xda\xe6\xbc\x8c?\xda\x1d#b\xa5\x92\x80\xad$\x5c\x83\x81\xa5\x024\xe5\xbc\x03\x97\x90\xca\xa9\xd7\xcc'\x1d\xb6\xd4\xb9\xdf\xd5\xfei<\xa5(\xf2\xedbC\xd0\xbe\xe8\xfd\x18\xaf\x0b\x1f\xc3\xcey\x00\xee\xc6\x04\xc6k\xff\x22\x9em\x80\xad]j\x0e\xc8\xcc\xcf\xb1\x03\x1f\xe3V\x1c\x89\x88\xe7:\x88\xb7\x01?cU\x0d\xc7\xb5\xb8\x1e\xeb\xea\xfd\xa1\xc5D`V\x1d\xc8\xcc3xD\xa9r}x-\x22\x1e\x9a\xc3w\x08?)9\xb0\xb6\x8e\x0d(y\x04C\x111\x18\x11\xc3\x0b\x02D\xc4\xd6\x88\xd8\xd9\x80\x98\xc9\xcc\x17\xf0d\x1dz\xba\x03\xc0x]c\xb0\x01\xd0T\xe0Q\xbc\xb5 
\x00.\xe0`D\xf4\xb7\xdd\xff\xb0\xfe\xde\xd0\x01\xe0\xb4R\x886\xd5\xb1f\x08\x06\x95\xc7\xf7\xe6\x05\x012\xf3Wl\xc5\x81\xb6\xb2\xfbx\xfd=\xd4\x01`\xa2\x02\xdc\xa8\x14\xb1+B\x80\x9b\xb0)\x22Vt\x02h\x96\xe2\x1f\x94Br4\x22>\xa9@\x0f\xe3\xf9\xcc|\xb7\x03@S\x81\xdf\x5cV`\xb2\xde\xdf\x8c\xa8k\x8d-\x04\xb0+3OPr\xa2\x9efof\xfe\xdd\x01\xbe]\x81\xf1:\xb6\x0e\xdf7\x00\xce\xe0\x96\x88\xd8\x81\xb3\x99y\xf8\x0a\x80\x88\xd8\x83=u\xe3Y\xbbt~\x11\x1a\xae\xa7n)0\x8emJ\x08\xc6\x1a\x00_(y\xf0\x0c\xfe\xc2\xe1\x88x]y\xcc\xdf\xef\xc37\xe8t\xca\xf9\xec\x8d\xcc<\x16\x11-\x05\x8e*!X\x8f\xcfp\x97\x92\xbc_\xd6\xf66LE\xc4\x8bx\x10\xaf`\xb4/3G1\xda\x05@\xcb\xce\xe36\x9c\xad\x07\xd9\xa2(p;~W*\xea~|\x8d\x11\x1c\xc0\x03\x999B\xdb\xf7@\x97\xd6R\xe0\x1c\xa6\x14\xb9\xc7\xb0\x02'\x95j\xb9\x11\x07\xf1\xa6\xf2\x9e\x18i9/\xf7\x93\xac\x05p]\x05\x98\xc6J\xfc\xa2\xd4\x96\x13\x15\x00\x8ed\xe6\xa9\xcc|\xaf\xe9\xdc+\x00\xca+{J\xa9\x07\xd3\xca\x13r23'\xf0\x81\x92\x8c\xb3\xacW!\xe0\xb2\x02\xd3\x999\x13\x11\x13J\x08d\xe6\xeeN\xce\xbdT\xa0\x95\x03\x93\xb5?\xa1\x84`^\xfb?\x14h\x01<\x86\x7f\xaf&@+\x07&!3/,\xc6\xb9\xd7!h*\xb0(\xeb\xb5\x02_)\xe5\xf9\xaa\x03\x9c\xcf\xcc\x19|\xbaT\xe7^\x85\xe0\x5c\xb7\xce\xb1\x9c\xbf\xe7P?`\xfa\x17\x9bt\xed\xf6\x1f\x88\xe7\xad\x89\x9c\xb9E\x08\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x03;\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x02\xb8IDATX\x85\xbd\xd7\xcb\x8b\x5cE\x14\xc7\xf1\xcf\x91\xd1DF\xf3r:c\xa2\xf1\x81Y\x18\x1d\xc4\x95.\xf5\x0f\x10wb\x16\xe2\x1f\xe0F\xd0\x9d\x0b\xe3\x83,\x02J\x16\xd9\xe8\xc6\xc7F]\xf8'\xc4\x85\x01EPP\xc8f\x0c\x01%c\xf0\xd1:3\x89\x194`\xe6\xb8\xa8\xbaL\xe7\x8e3\xe9\xb9\xb7\xc7\x03M\xdf[U\xe7\xd4\xb7~\xbf\xea\xa2:2S\xd7\x88\x88)\xc8\xcc\x7f\xba\xd6\xb8\xa9\xf3\xec%\x8e\xe3\xf5>\x05\xfa\x02\xec\xc7m}\x0aL\xf5\x04\x18\xe0Zo\x80\x88x\x0b\xcf!q\x01;1\x8b_p\x15\xf7 \xf0^f\xbe\xd2\x02\xb8\xd2\x07\xa0\xb1`\x16\x1f\xe0Hf>\x8e\xf7k\xdb;\xf5\xfd\xa1\xda\x7fg+\x7f\xa0Z\x10\x11\x0fG\xc4\x96\xedh\x00\xae\xe1Df.\xff\xd7\xa0\xcc\x5c\xc2\x09E\x856\xc0t}\xfe\x08\x8fu\x05x\xa9N\xb2a\xd4\xfe\x97\x9b\xf7\x88\xd8\xa9\xac\xbeY\xf5\xed\xd8\xd3\x09 3\x17\xc7\x19\xdc\x1a7\xa8\xdf\x0d\xc0.\xec\x8e\x88\xe9\x888\xbb%\x80\x8e1\xc0\xef-\x80=\xd8\x87\xb9\x88\xd8\xb5\xdd\x00\xfb\xf1#\xa6\xab\x1d7c7\xf6\x8e\x00n+\xc0\x00?(\x0a4\xabm\x14\x80AD\xccD\xc4\xdcv\x02,\xd4\x1a3#\x00\xa3\x0a<\x85w7+\xd2\xe7$l\xf6\xc0\x15\x1c\xa8m\xa3\x16\xcc\xe0 \xee\xdb\xac\xc8F\x0a4I\x87n\x000\xac\x00\x07\xb1\xaae\x01\xee\xc6\x81\x88\xd81\x16@\xfd\x09\xbd\x81gk\xd3\xf3\x11q,\x22\xa6\xd7\xa7\xaeS\xe0Wk\x0a,\xd5\xfe\xbb\x94\xc3\xeb\xde\x8d\x00\xae\xb3 3W\xf0j\xfd\xdc(\xda\x0a,\xd4\xb6\xbd\xf8~\x04\xe0\x0f\xdc\x1f\x11\x8fb13O_\x07\x10\x11Gqt\x8c\x09\xdb1\xa7\xac\xbaQ`\x01\x87\x15\x0b\xce\x8d\x00|\xa1X\xfa\x02\xfe\xc2\xe9\x88x\x1b\x0f\xe0\x93)|\x87\xbf;\x00\x9c\xcc\xcc\xf3\x11\xd1(\xf0\x8db\xc1>|\x8eGp\x07\xbe\xaa\xcf\x87\xb1\x1c\x11\xaf\xe1I\xbc\x89\xf9\xa9\xcc\x9c\xc7|\x07\x80&V\xf0 
\x16\xebB\x0e)\x0a\x1c\xc1o8\x8f\x17\xf1-\xce\xe0\x18\x9e\xc8\xcc3\xf4\xbf\x90\xb0\xb6\x07.cY\x91\xfb\x1cv\xe0\xa2rZ\xce\xe2C\x9c\xc2\xd9fr\xfa_\xc9\x1a\x80[+\xc0%\xe52sA\xb9\xc8\xfcT\x01\xe0\xcb\xcc\xfc93?\x1eM\x9e\x14\x00\xfc\xa9(\xb0ZA\x86\xb8\x98\x99C|\xaal\xc6u1)\x0bXS\xe0Rf\xaeF\xc4P\xb1@f>\xb3Q\xf2$\x15h\xf6@s\xb1\x19*\x16l\x1a\xdb\xa1@\x03\xf0\xb41n\xcc\x93\x04h\xf6\xc0\x12d\xe6\xd5q\x92'm\xc1\xa8\x02c\xc5\xa4\x15\xf8Z9\x9e\xffw\x80\x95\xcc\x5c\xc5g[M\x9e\x94\x05\x97\xbb&G\x9f\xbf\xe7\x10\x11\x81[\xc6\xddt\xed\xf8\x17\x8f\x90\xe0a\x11\x99\x18?\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x04\xca\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x007]\x00\x007]\x01\x19\x80F]\x00\x00\x00\x07tIME\x07\xd9\x01\x01\x13\x09\x18\xa0\xf7\x97\xa4\x00\x00\x04JIDATx\xda\xed\x95[h\x1ce\x18\x86\xdf\xff0\xb3\xb3\x9bts\xc441\x89\x8445H0\xd6^\xb5\xf1\xd0h\xcd\x95\xda*U\x1b\x10<\x80\x17z#\x88\x17\xd6\x8b\xda\xde\x88x\xb8(\x82^hKm\x1aEiUD\x02\xd6\x82\xe8Z\xadh\x0a1J\x0cU<\xb4\xb1\x0d\xc9&\xdbf3\xb3\xb33\xf3\xff~\xfflh@\x8cb\x92\xedU\xdf\x99\x97o\x17\x06\xde\xe7\xff\xbe\x8f\x19\x5cQ9\xf5\xe4\xbe/\x1b\x86\xbe\xfe\xad'32ac\x09I\x94A\xeb\xfb\xdf\x86\x86~xb*\xffjk\xc3\x9a\xca7>\x1a=\xa1\xb5\xbe\x891V~\x80\x8e\xfe\xc1*\xad\xd5\xebmM\xe9\xfe\x87\xee\xedF\xe48\xf8j\xe4L\xcfe\xe9@\xc7\xce\xc1ML\xeb\xc1;oik\xbb\xbb\xef:(\xc6\x90\xf5\x02L\xcf\xce\x81N_>\x00:5\x87\xd6\xbbj\xd3\xd6\x9eG\xef\xe9\x92\xcd-5\x98\xf3<@#\x96V!\xca\xd5\x01:\xf5\xe1\xab)a`cg}\xef\xf6\xad\xeb\xc0$G>\xef\x82\x80\xe2|VF\x00\x0a\x1f\xd8\xe6\xd8l\xff\xb6-mu\x9d\xed5\xf0\x83\x22\x100,j\x01 Ze\x00\x0aNB\xeb\x97[\x1bRO\xdcuk\x0b\x12\x8e@\xde\xf5\xe39\xc7\xf1\x7f\x9b\xb7Z\xcd\x0et<0\xd0\xc5\x99~g\xf3\xf5\xf5]\xdd\xd7V\x13G\x88\xa2\xafK\xe1\x9c\xc7\x15(\x81h\x18\xe9\xd5\xe9\x00\x05\x03\xd0\x8fWU\xf0Wzo\xacMV\xa5-\xb8\x9e\x0f!\x048\xd7d\x01f*\xe3 
\x0as\x13\xdc\x02\x80\x0aV\x04@\xe1\x87\xea4\xd4\x9b\xedk\xed\xed\xdd\x1d)H\x19\xc1\xf7\x19U R\x8c \x188Yh\x01\x15\xe7_\x9a>A\xb0\x95-\xe1\xba\xfb\xdf\xdab\x095\xd0\xd5*\x9a\xaf\xaaV\xf0\x0b>\x22\xcb\x8a\xc3\x95&\x08\xcd\xa1\xc9\x02%q\xb0\xd2\x18\x8c\x11\xb7ay\x00\xed\xf7\xed\x97\xd0\xd8S\xe5\x14\x9fY\xdf\x04a\xcb\x00^A\xc2\xb2\x19T\x1c*\xa0\x99\x06b\xd3\xadJ;\x00ps-nA\x5c\xa2\xff\x0f\xc0Tt\xac\xb1:\xec]\x9b\x0e\x11\xf8\x1aQ\xc0)<\x01e\x82!\xcc\x03\x00\xd7\x88G.8\xfd,\x19\xc6\x8c\x0c\x06#\xb6\x5c\x00\x15\x15\xe3\xed\xcd\xe5\xe6)XB\x08\x19\x87\x03!\xc0$e(\x0a\xd6\xe0f\x07\xc8f\x1c\x84S\x1a\x02#\x18\xce\xa1\xcd?\xd3\x9d\xe5\x00Da\xb1\xef\xecT\xb8[\xea\xe0Y/\xf7\x87H%\x1d\xd4\xd6\xd5\xa2\xba\xba\x1a\xe94\x00V\xda\x03\xce\x04,\x82KXd\xdb\x86EUJ\x01a\x00t\xa9\x03z9\x00\xbf\x7f\xfcTHew\xf3\xd6\xe7>u\xaa\x1a\x07\x04\xf3\xaf\xc9\xce\xe6\xe1\xfa\xc0\x85|\x84Te\x84\x8a\xca\x10\xa9\x8a\x08\xc9d\x00\x03\xe88\x0aN\xc2F\x82,ei\x0b\xc0\xc9Za)q\xfc\x87\xce\x1e\xdf\x9bq\xe7\xdd\x1b\xf2^\xf8n*]\x09'\x99\x00\x17\x1c\x82\xcc\xb9\xa4* \x17lI\x09\xdb\xb6\x09\x22\x11\x03\xa5RI\x82J\x98\x1dX>\x80\xd1\xb9/^\xcc\x05\xc5\xe2\xce\xc9\x89\xc9G.^\x9c\xcd;\x156\x187\x00,\x9e73\x95\x97\xa0\xa4\xb1\x94\x06\x22\x0e\x97\x96\x84^)\x80\xd1\xe4\x89}\x98<\xf9\xda\xc1\xec\xe4\xd4\x86\xf1\xd1\x91o\xfdp\x0e\xdc\x12\x00\xf4\xe2gW\xebK\xaf_\xa5\x15\x94\x22\x83\xb4r\x80EM\x0f\x1f\xf8\xd9\xcb\xe7{~\xf8\xe6\xe4\x0b\xa7\xc7\xbfS\xcaR\x14F\x81\x91BD\x0e\xc3\x10\xc5b\x00\xcf\xf3\xc9\x05\xf8A`pV\xf7k832\x18P\xd9\x15\x05;\x8e\xcd\x9c?\x7fh\xe3mw479-\x08(\x8cs\x1ew$ 
\x10FW\x94\xe0P+\xeb\xc0\xd2\xca\xfdx\xe4\xb3\xb9l\xb6\xfb\xf3\xf7\xdf;:\xfc}\x06.\x0fQ\xf0}\xb8\x85\x02\x5c\x97\xecyp\xfd\x22\xb4V\xe5\x010\xba0\xf6\xe1\x8c\x0a\xc3\x1dc\x99\xcccCG\x0e\xce\xff\xe9g\xe1\x11\x84\x09\x9fw=\x82\xf1\xffu\x09\x05VA\xfe\xf48\x8a\xd9\xd3\xa7\x22^\x7f\xf4\xd7\xd1S\x9b\xc3\xba\x8a\xc6\xaa\x9a\x06\x84\xae\x8f@2\x8c\x0dgP8\xf7\xd3\xde%:\xb0z\x9a\xff\xe5\x93\xf1\xc0u7\x8d|p\xf4\xa5\xe3C\x87\xd5\x14wQ\xb0\x19\xac\xf8\xad\xa8qY\xe5\xb4\xdc\xbcuM\xc7\xed\x13}\xcf\x1f\xd0\x1b\xfa\x9f\x9e[\x0a\x80\xa3L*\x9c\xc9\x1c\x17@gnl\xfc\xc1\x5c\xde\xedc\x8c\xe1\x8a\xfeI\x7f\x01T\x83\xf2\xaa\xc9\x97\xeb;\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x06\xae\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x06+IDATx\xda\xc5WKo\x1cE\x10\xfez\xa6g\xbd\xbbv6\xc6qL\x80\x0b\x0f\x13H\x90\x80K\x10\x8f(\x84\x80\x04\x07\xc4\x09\x94\x03g~\x03\x07\x0e\x5c\xe0\x17\xf0'\x90\x90\x90\x10\x8a\x84\xc4\x11\xa4\x88\xc7\xc1Q\xc0B\x10\x05\x91\x10D\x8c\xed\xb0\xb6c\xef\xeeLw\xf3uu\xd1;\xda\x08q\x83\x96+U\xd5\xd3\xf5\xe8z\xf5\xc6\x84\x10\xf0\x7f.\xf3\xec\xd9W\x1ey\xe1\xcc\xe9\xf7\xfa\xfd\xde\x11\x18#\x7f\x80\xfcC\x14\x97\xd2\x89GAP\x86\xabM\x18\x08Ri\xa3\x84\xc8\x00\x08\xa4\x9a\xba\xf1\xa3\xd1\xa8\x01\xd7\xc1h<\xfc\xe4\xd3\x0b\xef\x9a\x97_}\xfd\xcb\xdd\xbd\xbd\xe7\xc6\xe3\x09\x8a\xa2@\x19\xa1,PU\x15\xaaN\x85\x0e\xb1\xb5\xb6\x05%\xcf\x94\x91\x16gLYR\xce\x88l\xcb\xb5h\x12!\xda\x0fA\x9c\xda\xd8\xd8\xc4\xd7_]\xc4\xe2\xd2\x0av\x86\xdb8\xbc\xb8\x88\xa5\xa5\xe5\x1f\xed\xb5\xeb\xbf\xaeB\x17\x95\xa8\xb22;S\x94\x84L\xb7\xf6\x15J\xfd\xae2\xcag\x19\x89\xda\x8d\x1b\xbf\x11\xae\xe3\xf1\xc7\x8e\xe3\xf2\xfa\x15,\x1d\xeea{k\x03\xe3\x89;n\xd1Z\xac\x07\x82x/\x10t\x8fD\x8b\x0e\xad\xaf\xc2\x0ad\x99\x90 
\xae\xfd\x83\x03\x5c\xbb~\x03\x0c;:s}\xac\xfft\x0d\xdd\xf9\x01\x86\xfb#t\x17\xee\x923\xe2\x80\xf7\x1e\xae\xa9s\x14\xa6`2m\x08\xa5\x91\x88(mH\xeb9\xee\xb7\xcfQ\x0c\x7f\x0ew\xb0\xb5\xbd\x9d\xfc\xca\xa9)\xd0\xd4\x13\xc1\xdey\xf4\xfa]\xd8\xf9~\xdf\x9c\x7f\xe35\xbct\xee,lYN\x8d)\x96<\x9bH\x13G\xa3\xed\x82k\x15\xa8\x0f\x81\xb9\x1dbm\xed\x12\xb6o\xddB\xf0\xbem9GE\xe3\x8b\x83\xfd\x03|{i\x1d\x96\xd5o\xce\x9c>\x8d\xa3GWP\x88\x115nLv\x00&yO\xd42l4\x05\x9e\xb9\xac\xb1\xb5\xb5\x85\xe1\xeem\xdc\xff\xe0Cx\xa0U\x8caJ\x0b.y\xc9{\x8f\x1d\x93\xa8\x7f\xb3\xf6NJA\xe3\x9cl\xfc\xb2\x1dp\xe1\xd2\x9e\x84\x90\xf6P\xc2\x08&+\xe1\xa65q\x92\xe1\x97\x10:\xf2\xe3\xdaaw\xbfAM\xde\x14s(\x92W\xa4\xa3\xf1 \xbc\x89\x17\xa2\xa3\x87\xfa\x15\x1e\xbdo\x80\xd5\xf9\x1e\x86\xc3]p\xa9\x03\x8d\x83s\x8e7\x01n\x8f\x1c\xbd\x0d\x92\x0ab1\x86\x10\xf3\xed\xe4\xd6>\x88\xc3\x92\xf7\xd1\x842\x94\xe5\xed\x92\xd1@\x19\x04\xe1\x8d\x03(\x05\x1fuQf\xbek1`\xce\xeb&\xc0\xfb\x08\x8e\x22\xd9\x81F\x22\xd0)\x0d\x96\x07\xa5(g\xa4b\x0f\x0b\xcdOp\x81N\xfa\xc47\xc4\xfb\xfb5\xba\x15\xd0\xb54\x13-\x04\xb9;\xbf\xa7B\xe6\x86\xd0\x80\xc1\xa1^\xc5\xd6\xeb\xcae\x06=\x8d\x1e\x01\x08\xd9\x01\xd9\xbcgP\xe0\xcdS=\xeeG\x83\x1e\x81\x86\x82\x86\xb30V\xea`2\x99`<\xaeI\xdb\xdc5\xbar\xae\xb5\xd2d\xcd\xcdu\xd0\xeb\xf6d\xb0\x05\xa4\x9e\xf5\xc1\xd3\x9e\x9b\xa6\xa0\xae\x99C\xb6a\xa8!\x1f\x0dd\xa0\xa8ab\xa4b\x1a\x8fF\x12)km6\xa4\xc58\xcbg\xe7:\x9d\x0e1\xe5\x83#\x10\xeb\xacp\xad\x14H\xbf\xae\xac,\xe3\xcaf\x81/~\x06\x10\xbc\x8eY\x82(2\x92\xb7B,h\x8e\xa5\x15}jO*\xe6Q\x9eO\x17/\xb4x{U\x01[\x8cQY\x83\xaa\xb0\xa0Y,\xcd[\x9c;\xd1\xd7\x14\x00V[#\x82(\xab\xbd\x17\x03\x92\xd6:\x80$A\x8dJ:\xc0\x151!\x16\xa77B7\x1e\x896I\xa6\x02u\xd5\xdc/\x81I\x13P\x95\x8d8}P\xb9\x14\x01\xe7r\x0dH\xdfW\xd6b\xa1\x1bp\xb8\x17\x15Cn_\xe8\xc0)\x8d&5\xd2\x85\xd1y\x90\xe4 g)\x832u\x8d\xb4\xac\x81\xd5\xe8QY\x8a\x10y\x8a\xb2\x13\x0aq\x80\xa9\xd4\x14\xa4\x1e\x97\x08\xac\xae\x14x\xf8n\xca\xb4F\xabD#ODB\xa1X\x81+g=(M\xac\x1b\xad\xf7Dq\x06 
G@\xdb.\x1a\xcb\xf3_\xe96\x9f\xf1\x14\xf2\x84\xcb\x96U\xb1\x1aU\xc3\xbee\x18\x8a\x0b\x95\xb3\x84\xfc\x9c^\xdd\xf4\xf8\xec\xfb=\xe9\x02\xb2\xc4\xf1 \x09x~\xd7\x8e\xd04 \x855MJ\x1d\xd5^\x1f*\x89\xbc\x8e\xf6\xa8#\x1aMg\x8f,X\xbcxrA\xcee\x07\x18V\x89@C\xcf\x1a\x07\xcd\x1b\xb4\x03\xa2\x02b\xaf\x1d@\x10g\x10\xcf{\x1a\xd3q\x1d\x5c\xba\x88wpF[\xd8\xb9\x5c'\xe2\x04\x0c\x1a\xff\xf7eL\xae\x01\x0di\x81C]\x8f%zX \xa4\x9bho[K\xc5:nm\xfe\xc1\x81\x1c\x11\xea\xcb\x91Q\x11@\x0c\x11kK\xf2Ot,\xf6\xc5\x96\xa6.\xa8\x03Z\xed\xf7/w\xf0\xd6\xf3])H\x1e\xd6_@\x86\x8a\xcb\xfcD\x93\xbf\xf3\xb9\xd6\xe7V\xab\x9b@\x9c&\xa9V{\x8b'\xdd~\xaa\x93\x8f\xb9\x8e\x0c\xfey\x05\xc5\xffv\xce\x10f&\xe4\xac\x8c\xeei\x0a\xa6\xeb\xea\xc6\x18\x9f\xff\xb0\x9f\xbd*\xb5\x97\xe5\xe6 ]R Vg\xf0)\x15\x04\xa3}\xaf!&\x0e\xc2\xa7/^\x9fqbD}\xc0b\x17xju\x90mZ\xf5D\xa0!=\xae\xa5\x92\x93B\x9f\x8a\xac\x96g\xd9\x93g\xe1\x99\x9ai v\xe4\xad\xd71\x9c\x94\x17Z\x90\xa5C*Bx\xd1\xc1])B\x8a\xa0\xe9\x94\x08)-\xb9\x06\xf2\x03\xd1%\xb7\xbc`\xb5\xa8\x8c\xdc\x92X+\x9dX:Bk\x81\xb41Z\x0f\x90\xee\xd0Z\x01\x0a\xe1A 
\xef\xb5@EW\xc0\xa1\x8eIu\xd0N\x81w\x9eL|\x8eK\x9c?\xb5\xd0\x1e83\x13\xf1\xce)H\x9c3\x9c\xd4\x0a\xa1\xd3.d\xda\xcfLC\xdaL\x11\x18\xee\xec\x1c|\xf8\xd1\xc7\xf8cs3\xa9QC\xaa\x5c\xc3J'\xb4\xb7\xf5\xb7\xfe\xccd\x84b-5\xc1\xa6=%U7\xf2\xf7\xcb\xdf\xad\xa3i\xdc\xc4\x9cx\xe2\xe9\xb7\x01\xbcO(\xd5\xa8\xb4\xa1\xb5\x16\x15\x81\xb4\xfc\x0f\xa9\xaa\x22XXy9\x89m\xa9a\x9dFh\xba\xf2\xbcO\xd8\x09\x05\xe7\xe3\xbe\x97\xf3\x93I\xed\x7f\xbfy\xf3\x03\x13\x0f\x9c|\xf2\x99%\x00\x03\xfc\xb7ko}\xed\xe2\xa680mO\x19\xfc\x95\x82U\x5c*m\x95.Z`f\x06\x85o\x81#4\x11\x94\xae\x95\xae#\xd0\xae\x9b\xbe\x05\x5c\xaa\xd0\xb6\x8d+t\x14\xe6f\xb1~7-\x03\x13\xc2Xa2\x03A\xc1\xa5\x91a\x02\x97\xff\x0bS\xfb\xebh;+pl\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x1f\xee\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x01\x00\x00\x00\x00\xac\x08\x06\x00\x00\x00\x94\xdbrG\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x1fkIDATx\x9c\xed\x9diTTG\x16\xc7\xffM\x03b# n\x90\xe0\x82Hb0\x88\x0a\x18\x15\x8c\x0b\xae\x89B\x00uT\xa2\x19\x1dw\x83Qg\x92\x89K\xce1~\xc8$\x131\xce1\x9a\x88\x12\x12\x8dQ$**\xb8\xe1.\xee\x8a\xb8\xa0\xa8\xd1\x88\xb2\x18V\x1b\x824\x0dt\xf7|\xe0\xbc\xe7k\xfaU/\xac\xd1w\x7f\x9f\xa0\xea\xbdz\xf5\xa0\xeb_\xf7\xde\xbaU-\x8b\x8e\x8e\xd6\x01\xc0\xeb\xaf\xbf\x0e\x82 \xa4\xc1\xbd{\xf7\x00\x00\xd6\x5cA\xe7\xce\x9d\x9b\xad3\x04A4-\x9c\x00X5s?\x08\x82hFH\x00\x08B\xc2\x90\x00\x10\x84\x84!\x01 \x08\x09C\x02@\x10\x12\x86\x04\x80 $\x0c\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00\x08B\xc2\x90\x00\x10\x84\x84!\x01 \x08\x09C\x02@\x10\x12\x86\x04\x80 $\x0c\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00\x08B\xc2\x90\x00\x10\x84\x84!\x01 \x08\x09C\x02@\x10\x12\x86\x04\x80 $\x0c\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00\x08B\xc2\x90\x00\x10\x84\x84!\x01 \x08\x09C\x02@\x10\x12\x86\x04\x80 $\x0c\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00\x08B\xc2\x90\x00\x10\x84\x84!\x01 
\x08\x09C\x02@\x10\x12\x86\x04\x80 $\x0c\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00$\xcc\x96-[\xb0u\xeb\xd6\xe6\xee\x06\xd1\x8cX7w\x07\x88\xe6A\xa5Ra\xe5\xca\x95\x90\xcb\xe5\x980a\x02Z\xb4h\xd1\xdc]\x22\x9a\x01\xb2\x00$Jqq1\x00@\xa3\xd1\xa0\xbc\xbc\xbc\x99{C4\x17$\x00\x12\x85\x13\x00\x00x\xf6\xecY3\xf6\x84hNH\x00$\x8aP\x00\xc8\x02\x90.$\x00\x12\x85\x04\x80\x00H\x00$KQQ\x11\xff3\x09\x80t!\x01\x90(d\x01\x10\x00\x09\x80d\xa1 \x01\x90\x00H\x16\xa1\x00\xa8T\xaaf\xec\x09\xd1\x9c\x90\x00H\x14\x96\x05PPP\x80y\xf3\xe6\xe1\xe2\xc5\x8b\xcd\xd1-\xa2\x89\xa1L@\x89\xc2\x0a\x02\x1e>|\x18\xc9\xc9\xc9\xb0\xb6\xb6F\xbf~\xfd\x9a\xa3kD\x13B\x16\x80Da\x05\x01\xcb\xca\xca\x00\x00%%%M\xde'\xa2\xe9!\x01\x90 UUU\xfc@\x07\xc4\x05\xe0\xcf?\xffl\xf2~\x11M\x0f\x09\x80\x04\x11\xce\xfe\x80~\x0c\x80\x13\x80\xd2\xd2\xd2&\xed\x13\xd1<\x90\x00H\x10\xa1\xff\x0f\xe8\xaf\x02\xb0,\x80\x84\x84\x04L\x9b6\x8dr\x06^2H\x00$Hm\x0b@8\xa8\xb9\x81_[\x00~\xfa\xe9'\xa4\xa4\xa4\xe0\xdc\xb9s\x8d\xdfA\xa2\xc9 \x01\x90 \x9c\x00\xc8d2\x00\xe2.@ee%\xd4j5_\xce\x05\x05\x9f>}\xdaT\xdd$\x9a\x00\x12\x00\x09\xc2\x09@\x87\x0e\x1d\x00\x88\x07\x01\x01}+\x80\x8b\x09\xd4\xb6\x1e\x88\x17\x1b\x12\x00\x09\xc2\x0d\xe2\x8e\x1d;\x02`\x0b\x007\xe8\xb5Z-/\x06$\x00/\x17$\x00\x12\x84\x0b\x02v\xea\xd4\x09\x80\xbe\x00\x08\xdd\x01a<@\xab\xd5\xea\xddK\xbc\x1c\x90\x00H\x10n\x16wss\x03 \x1e\x03\x00\x9e[\x00\xc2%A\x8a\x01\xbc\x5c\x90\x00H\x10N\x008\x0b\xa0\xa2\xa2\x02@\xcd\xf9\x80\xc2%A\xce\x02\x10f\x05\xd6v\x01.^\xbc\x88\xfb\xf7\xef7j\x7f\x89\xc6\x83\xf6\x02H\x90\xda1\x00\xadV\x0b\x95J\x85\xca\xcaJ\xbd\xebL\x09@^^\x1e\xa6N\x9d\x0a777\x9c8q\xa2\xb1\xbbM4\x02d\x01H\x10\xe1*\x80\xad\xad-\x80\x9a8\x80\xd0\xfc\x07\x9e\x9b\xfe,\x01\xc8\xce\xce\x86F\xa3\xc1\xe3\xc7\x8f)A\xe8\x05\x85\x04@bh4\x1a~@\xb7i\xd3\x06\x0a\x85\x02\x80\xb8\x00\x88Y\x00\xe5\xe5\xe5\xbc\xcb\xf0\xc7\x1f\x7f\xf0\xe5999\x8d\xdao\xa2q 
\x01\x90\x18O\x9f>\x85N\xa7\x83\x5c.\x87\x93\x93\x93\xc5\x02\xc0\xb5\x01\x00\xf9\xf9\xf9|Yvvvcv\x9bh$H\x00$\x06g\xc2;99\xc1\xca\xca\xaaN\x02\xc0\xb5\x91\x97\x97\xc7\x97\x91\x00\xbc\x98\x90\x00H\x0cn\xf0\xb6i\xd3\x06\x00`oo\x0f\xa0f)\x90\x15\x03\xa8\xbd3\x90\xcb\x05\x10\xba\x00$\x00/&$\x00\x12\x83\x1b\xbcm\xdb\xb6\x05\x00\xb4l\xd9\x12@\xcd\x8e@N\x00lll\x00\x98\xb6\x00\xc8\x05x\xf1!\x01\x90\x18\xe6X\x00\xae\xae\xae\x00\x0c\x05\xc0\xc1\xc1\x01\xc0\xf3\x18\x80\xd0\x02\xc8\xca\xcaj\xec\xae\x13\x8d\x00\x09\x80\xc4\xa8-\x00\x9c\x05 \x8c\x01\xbc\xfa\xea\xab\x00\x9e\x9b\xfeJ\xa5\x12\x00\xe0\xe1\xe1\xa1\xd7\x86\xd0\x02\xa0U\x80\x17\x13\x12\x00\x89a\x8e\x05\xf0\xca+\xaf\x00xn\x01pB\xd0\xb5kW\xbe\x8d\xd2\xd2R\xa8T*XYYA.\x97C\xa9T\xf2\xf7+\x95J\xfc\xf0\xc3\x0f\xbcp\x10\x7f]H\x00$Fm\x01\xe0V\x01\x841\x00N\x00\xca\xca\xca\xa0\xd3\xe9x\x17\x80\xb3\x00\x8a\x8a\x8a\xf8\x15\x80v\xed\xda\xf1.\x03\x17\x07\x88\x8d\x8d\xc5\x7f\xfe\xf3\x1f|\xfe\xf9\xe7M\xf0FD}\xa0T\xe0f\xa6\xa0\xa0\xa0I\x9f\xc7\x0d\x5c.\x08(\xb6\x0c\xe8\xea\xea\x0a\x99L\x06\xadV\x8b\xd2\xd2R\xbe\xdc\xdd\xdd\x1d@\x8d\x88p\xed\xb8\xb8\xb8@\xa1P ''\x07\xd9\xd9\xd9x\xe3\x8d7p\xf9\xf2e\x00@rr2JKK\xe1\xe8\xe8\xc8?\xbf\xa8\xa8\x88\xdfY\x08\x00\xed\xdb\xb7o\xc4\xb7%LA\x02\xd0\xcc\xf4\xef\xdf\xbfY\x9e\xeb\xec\xec\x0c\xe0\xb9\x00\x08]\x00'''\xd8\xdb\xdb\xa3\xac\xac\x0c999\xd0\xe9t\xb0\xb6\xb6F\xe7\xce\x9d\x01\xd4\x04\x01\x85\x02\xc0\x0d\xf0\xec\xeclTWW\xe3\xc6\x8d\x1b\x00\x00\xb5Z\x8d\xa4\xa4$DDD@\xa7\xd3!22\x12\x87\x0e\x1d\xd2\xeb\xc7\x83\x07\x0f\x1a\xffe\x09&$\x00\xcd\x0c\x97\x8b\xdf\x94\x0c\x1b6\x0c\xfe\xfe\xfe\x00\xf4]\x00\xce\xe7o\xd5\xaa\x15\x1c\x1c\x1cPVV\xc6\x9b\xf5\x8e\x8e\x8e\xbc\xdbP\x5c\x5c\xcc\xaf\x00\xb8\xb8\xb8\xf0\xd6DVV\x16n\xdd\xba\xc5\xa7\x0a\x03\xc0\xce\x9d;\x11\x11\x11\x81\xc4\xc4D\x1c:t\x08VVV\xb0\xb6\xa6\x8f\xdd_\x05\xfaO43\x19\x19\x19\xcd\xfa|\xa1\x05\xc0\x9d\x0b\xc0\x09\xc0\x93'O\xf8\xe5='''~\xa0\x97\x94\x94 
77\x17@\x8d\x00\x08c\x00\xa9\xa9\xa9\x00j,\x9b\xabW\xaf\xe2\xfa\xf5\xebHOO\xc7\xd7_\x7f\x0d\x00X\xb2d\x09f\xcc\x98\xd1t/H\x18\x85\x04@\xe2\x88\xc5\x008\x01\x00\x9e\x07\xf6\x9c\x9c\x9c`gg\x07\x85B\x81\xf2\xf2r\xdc\xbd{\x17@\x8d\x00p\xdb\x8a\xb3\xb3\xb3\xf9$\xa2\xa0\xa0 \xb4m\xdb\x16\xfb\xf7\xef\xc7\xec\xd9\xb3\x91\x97\x97\x07www\xfc\xfd\xef\x7fo\xd2\xf7#\x8cC\xab\x00\x12\x87%\x00B\xbf\x1e\xa8\x11\x00\xe0\xf9\xea\x01K\x008\x0b\xc0\xcf\xcf\x0f\xe3\xc6\x8d\x03\xf0<\xf0\xb8t\xe9R2\xff\xffb\xd0\x7fC\xe2\x88\xb9\x00\x0e\x0e\x0e\xbc\x05 t\x01\x80\x9a\xd5\x83\xec\xecl~\xff?\xe7\x02\xc8\xe5r\x94\x95\x95\xa1\xac\xac\x0c-Z\xb4\x80\xb7\xb77d2\x19\x5c\x5c\x5c\x90\x97\x97\x87\x80\x80\x00\x0c\x1f>\xbc\xa9_\x8f0\x01Y\x00\x12\x87K\x04***\x82F\xa3\xe1\xcb8\x01\xe02\xfcj[\x00\x1c...\x90\xcb\xe5|\xee\x00\x00\xf8\xf8\xf8\xc0\xda\xda\x1ar\xb9\x1c\xcb\x96-\x83\xb7\xb77V\xacX\xd1\xe8\xefBX\x0eY\x00\x12\x87K\x05\xe6\x12\x84lmmacc\xc3\xbb\x00\xdcL\xcf\xfd\xce\x05\x02\x01\xc0\xce\xce\x8e\x17\x06777\xde]\xf0\xf5\xf5\xe5\xaf\x19;v,\xc6\x8e\x1d\xdb\xc8oA\xd4\x15\xb2\x00$\x0eg\x01p\xc99\xadZ\xb5\x02\xf0|\xe3\x0f\x87\x98\x05\xc0}\xb1\x08\xf0\xfc|A\xa0\xc6\xff'^\x0cH\x00$\x0eg\x01pX\x22\x00\xdc\xf2\x1f\xa0/\x00B\x0b\x80\xf8kC\x02 q8\x0b\x80\xc3\x94\x00\xb4k\xd7\x8e/sqq\xe1\x7f\xe6\x04\xc0\xc3\xc3\x83\xcf2$\xfe\xfaP\x0c@\xe2\xc8\xe5r\xd8\xda\xda\xf2G\x82s\x03_\x98\xbf/\xfc]h\x01\x08\x05`\xf8\xf0\xe1\x18<x0\xc2\xc2\xc2\x1a\xbb\xcbD\x03B\x02@@\xa1P\xf0\x02\xc0\xb2\x00Z\xb7n\x0d\x80-\x00\x8e\x8e\x8e\x88\x8d\x8dm\xec\xae\x12\x0d\x0c\xb9\x00\x04\x9f\x0b\x00\x98v\x01\x84\xab\x00B\x01 ^LH\x00\x08Q\x01\xb0\xd4\x05 ^LH\x00\x08\x93\x16\x80\xb5\xb55\x7f\x0d\xb7\x1f\x00 \x01x\x19\xa0\x18\x00!*\x00\x0a\x85\x02r\xb9\x1c\x1a\x8d\x867\xff9\x16-Z\x84\xdc\xdc\x5c\xbd\xa5?\xe2\xc5\x84\x04\x80\x10\x15\x00\xa0\xc6\x0aP*\x95\x06\x02@\xdby_\x1e\xc8\x05 \xf4\x04@h\xfas?\xd7\x16\x00\xe2\xe5\x81\x04\x80\xd0K\x06\xaam\x01\x00\x86\x01A\xe2\xe5\x81\x04\x80\xd0K\x07\x16\x13\x00.\x07\x80x\xf9 \x01 
LZ\x00\xe4\x02\xbc\xbc\x90\x00\x10&-\x00r\x01^^H\x00\x08\xa6\x05\xd0\xb3gO\x00\x80\x97\x97W\x93\xf7\x89h\x1ah\x19\x90`.\x03N\x9d:\x15\xa3G\x8f\xa6\x84\x9f\x97\x18\xb2\x00\x08\xde\x05\x90\xcb\xe5z\xee\x80\x95\x95\x15\x0d\xfe\x97\x1c\x12\x00\x82w\x01\x84\xb3?!\x0dH\x00\x08\xde\x05 \x01\x90\x1e$\x00\x04<==\xa1P(\xd0\xabW\xaf\xe6\xee\x0a\xd1\xc4P\x10\x90@\x9b6mp\xe9\xd2%\xfa\xd2\x0e\x09B\xffq\x02\x80\xe1\xe1\xa0\x844 \x17\x80 $\x0c\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00\x08B\xc2\x90\x00\x10\x84\x84!\x01 \x08\x09C\x02@\x10\x12\x86\x04\x80 $\x0c\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00\x08B\xc2\x90\x00\x10\x84\x84i\xd2\xed\xc0\x19\x19\x19\xa8\xa8\xa80(\xef\xde\xbd\xbb\xde\xc1\x94\x96RQQ\x81\xec\xecldee\xa1\xa0\xa0\x00\x15\x15\x15P\xa9TP\xa9T\xd0j\xb5P(\x14P(\x14h\xdf\xbe=\xbau\xeb\x06www\xd8\xda\xda\xd6\xe7U\x00\x00\xd7\xaf_\x87V\xab5(\xef\xd9\xb3g\x83\xed\xad///\xc7\xa9S\xa7p\xe5\xca\x15\xdc\xbd{\x17%%%x\xf6\xec\x19\xec\xed\xed\xe1\xe0\xe0\x80N\x9d:\xa1W\xaf^\x08\x08\x08\x80\xbb\xbb\xbb\xc1\xfd\x85\x85\x85\xc8\xca\xca\x12m\xdb\xcd\xcd\x0d\x1d:t\xb0\xb8O\x1a\x8d\x067n\xdc\x10\xad\xeb\xd0\xa1\x03\xdc\xdc\xdcL\xb6\xa1\xd3\xe9\x90\x91\x91\x81\xb3g\xcf\xe2\xe6\xcd\x9b(,,Daa!lmm\xd1\xae];\xb8\xb8\xb8\xc0\xdf\xdf\x1f\x81\x81\x81x\xf5\xd5W-\xee#a\x1eM&\x00\x8f\x1e=BHH\x88\xe8\x80Y\xb4h\x11\x16,X`q\x9b\x19\x19\x19\x98>}:\x0a\x0a\x0a,\xba\xcf\xc6\xc6\x06\x01\x01\x01\x189r$BCCaggg\xf1\xb3/\x5c\xb8\x80\xf7\xdf\x7f_\xb4.**\x0aaaa\x16\xb7)\xa4\xb4\xb4\x14\xff\xfb\xdf\xff\xb0k\xd7.\x94\x95\x951\xaf\xbbx\xf1\x22v\xee\xdc\x09\x00\xf0\xf7\xf7\xc7\xc4\x89\x13\xf1\xce;\xef\xf0\xfb\xfbsss1~\xfcx\xd1{\xc7\x8c\x19\x83\xb5k\xd7Z\xdc\xb7K\x97.a\xca\x94)\xa2uk\xd6\xac1*\x00\x95\x95\x95\x88\x8f\x8fGtt4rss\x8d>\x87{\xaf>}\xfa\xe0\xc3\x0f?\xc4\xd0\xa1C-\xee+a\x9c&s\x01bbbD\x07?\x00l\xd9\xb2\x05j\xb5\xda\xe26+**,\x1e\xfc\x00PUU\x85S\xa7Na\xf9\xf2\xe5\x08\x0a\x0a\xc2\xaf\xbf\xfejq\x1b\xd1\xd1\xd1\xcc\xbaM\x9b6Y\xdc\x9e\x90\x1b7n`\xe4\xc8\x91\xd8\xbcy\xb3\xd1\xc1_\x9b+W\xae\xe0\x93O>\xc1\
xd6\xad[\xf92\x1f\x1f\x1ft\xea\xd4I\xf4\xfa\x13'N\x88Zd\xa6HJJ\x12-o\xd9\xb2%\x86\x0f\x1f\xce\xbc\xef\xd2\xa5K\x08\x0a\x0a\xc2\x8a\x15+L\x0e~!iii\x989s&\xc6\x8f\x1fo\xd1}\x84i\x9aD\x00\x8a\x8a\x8a\xb0k\xd7.f}qq1v\xef\xde\xdd\x14]1 //\x0fK\x96,\xc1g\x9f}\x06\x8dFc\xd6=w\xee\xdc\xc1\xe9\xd3\xa7\x99\xf5w\xef\xde5Zo\x8c\xcc\xcc\xcc:Y5\x1cr\xb9\x1c!!!ze\xc1\xc1\xc1\xa2\xd7\x96\x97\x97[\xdcO\x8dF\x83\xc3\x87\x0f\x8b\xd6\x05\x05\x051]\xb9\x98\x98\x18L\x992\x05O\x9e<\xb1\xe8yB\xd2\xd2\xd2\x10\x1c\x1c\x8cs\xe7\xce\xd5\xb9\x0dB\x9f&\x11\x80\xcd\x9b7\x9b\x9c\xe1\x8dY\x08M\xc1\xf6\xed\xdb\x11\x13\x13c\xd6\xb5\xe6\xcc\xf0u\xb5\x02>\xfb\xec3(\x95\xca:\xdd\x0b\x00\x81\x81\x81\x06g\xf9\x8f\x1d;\x96y\xfd\x81\x03\x07,j\xff\xdc\xb9sx\xfa\xf4\xa9h]m\xe1\xe1\xd8\xb6m\x1b\xbe\xfc\xf2K\xb3\x05\xd6\x18J\xa5\x12\xb3g\xcf\xc6\xed\xdb\xb7\xeb\xdd\x16\xd1\x041\x80\xf2\xf2r\xfc\xf2\xcb/&\xaf\xcb\xcc\xcc\xc4\xb1c\xc70b\xc4\x88\x06yn\xa7N\x9d\xe0\xe0\xe0\x00\xb9\x5c\x8e\xd2\xd2R(\x95J\x94\x94\x94\x18\xbdg\xed\xda\xb5\x08\x0d\x0d5\xfae\x18O\x9e<Abb\xa2\xc9\xe7\x9f;w\x0e\xb7o\xdfF\x8f\x1e=\xcc\xeesjj*\xce\x9f?/Z'\x93\xc9\xe0\xef\xef\x0f///8;;\xa3\xa4\xa4\x04\x8f\x1f?\xc6\xe3\xc7\x8f\x91\x99\x99\x89\xea\xeaj\x00\x10\x8d=t\xef\xde\x1d\x9e\x9e\x9e\xb8\x7f\xff\xbeA\xdd\xf1\xe3\xc7\xa1V\xab\xd1\xa2E\x0b\xb3\xfa\xc8\x12\x0cGGG\x0c\x1e<\xd8\xa0\xfc\xd2\xa5KX\xb9r\xa5\xd16\xed\xec\xec\xd0\xabW/\xb4k\xd7\x0ej\xb5\x1a\xd9\xd9\xd9\xb8s\xe7\x0e\xf3z\x95J\x859s\xe6`\xef\xde\xbdh\xd3\xa6\x8dY\xfd&\xc4it\x01\xf8\xf5\xd7_\xcd\x9e\xd16m\xda\xd4`\x02\x90\x9c\x9cl\x10\xe9\xcf\xcc\xcc\xc4\xde\xbd{\xb1a\xc3\x06TVV\x1a\xdcSQQ\x81\x83\x07\x0fb\xda\xb4i\xccvccc\xcd\x9e\xc9bbb\xf0\xcd7\xdf\x98\xddg\xd6\xe0\xb2\xb3\xb3\xc3\xe6\xcd\x9b\xe1\xef\xef/Z\xff\xf4\xe9S\xec\xde\xbd\x1bIII\x189r\xa4\xe85\xc1\xc1\xc1X\xb3f\x8dA\xf9\xb3g\xcfp\xea\xd4)\xe6}B\xaa\xaa\xaa\x98\xe6\xff\xe8\xd1\xa3acc\xa3W\xa6\xd1h\xb0l\xd92^\x9cjcoo\x8f\x05\x0b\x16`\xf2\xe4\xc9\x06\xdfI\x90\x93\x93\x83o\xbf\xfd\x16;w\xee\x84N\xa73\xb8777\x17\xabW\xaf\xc6\x1
7_|a\xb2\xdf\x04\x9bFu\x01t:\x1d~\xfe\xf9g\x83r777\xf8\xf8\xf8\x18\x94\xa7\xa6\xa66\xaai\xe7\xee\xee\x8e\x85\x0b\x17\x22::\x1ar\xb9\x5c\xf4\x9a\x93'O2\xefW\xa9T|dZ\x88\xaf\xaf\xaf\xe8r\xda\x81\x03\x07PTTdv\xff\x1e?~,Z\xfe\xd6[o1\x07?\x008;;c\xc6\x8c\x19HHH`\xaeh\x84\x86\x86B&\x93\x89\xd6\x99c\xd1\x00\xc0\xa9S\xa7\x98VTxx\xb8A\xd9\xae]\xbb\xf0\xf0\xe1C\xd1\xeb]]]\x91\x98\x98\x88Y\xb3f\x89~!\x89\x9b\x9b\x1b\xbe\xfa\xea+\xacZ\xb5\x0aVV\xe2\x1f\xd3\x9d;w2\x978\x09\xf3hT\x018\x7f\xfe\xbc\xe8\x07 44\x14\x7f\xfb\xdb\xdfD\xef1\xc7]\xa8/\x83\x06\x0d\x82\xb7\xb7\xb7h]NN\x0e\xf3\xbe\xc4\xc4D\x94\x96\x96\x1a\x94\x8f\x1f?^t\x00TUUa\xc7\x8e\x1df\xf7K\xacm\xa0\xc6\x8d\xaa/\x1d;v\xc4\x80\x01\x03D\xeb\x8e\x1f?n\xd63XB\xd1\xa5K\x17\xf4\xed\xdb\xd7\xa0\xfc\xbb\xef\xbec\xb6\xb5~\xfdzt\xe9\xd2\xc5\xe43\xc3\xc2\xc20o\xde<\xd1\xba\xea\xeaj\xc4\xc6\xc6\x9al\x83`\xd3\xa8\x02 \x5c\x8e\xe2\x90\xc9d\x18?~<\x82\x83\x83E\xcf\xa2\xdf\xb7o\x1f\xfe\xfc\xf3\xcf\xc6\xec\x16\x000\xbf\x05\xa7\xb0\xb0\x90y\x8f\xd8\xfb(\x14\x0a\x8c\x1d;\x96)hqqqf\x077[\xb7n-Z\x9e\x96\x96\xd6 \x91\xef\x09\x13&\x88\x96WTT\xe0\xe8\xd1\xa3F\xef-//\xc7\xb1c\xc7D\xeb\xc6\x8d\x1bgPv\xef\xde=\xe6\xec<|\xf8p\xf4\xee\xdd\xdbDo\x9f\xb3`\xc1\x02\xbc\xf2\xca+\xa2u\xc7\x8f\x1f7\xbb\x1d\xc2\x90F\x13\x80\xbc\xbc<\xd1\x0fU\xbf~\xfd\xd0\xb9sg\xb4j\xd5\x0a\xef\xbc\xf3\x8eA}yy9\x12\x12\x12\x1a\xab[<\xc5\xc5\xc5\xa2\xe5\xac/\xc8\xb8~\xfd:n\xdd\xbaeP\xfe\xee\xbb\xef\xc2\xde\xde\x1e]\xbatA\xbf~\xfd\x0c\xeasrrp\xe2\xc4\x09\xb3\xfa\xe4\xe1\xe1!Z\xae\xd1h0s\xe6L|\xf3\xcd7\xf5Z!\x185j\x14\x1c\x1c\x1cD\xebXk\xfb\x1cG\x8e\x1c\x81J\xa52(\xb7\xb2\xb2\x12\xb5~X\xae\x94L&\xc3\xe2\xc5\x8bMwV\x80\x8d\x8d\x8d\xe83\x00 ;;\x1b\xbf\xff\xfe\xbbE\xed\x11\xcfi4\x01\x88\x8b\x8b\x13\x0d\x96\x09g\xca\x89\x13'\x8a\xde\xdb\xd8n\x80V\xab\xc5\xf5\xeb\xd7E\xeb\xda\xb5k'Z.6\xfb\x03\xfa\xefS_\xb7f\xcc\x981\xcc:\xb5Z\x8d\xf5\xeb\xd7# \x00\x91\x91\x918~\xfc\xb8\xc5\xcb\xa6-Z\xb4`\xe6\x04\x9c>}\xda\xa8\xb8\xec\xdb\xb7O\xb4< 
@tv\xbe|\xf9\xb2\xe8\xf5\x1d;v\xc4\x1bo\xbcaFo\xf5\x09\x0a\x0ab\xd6]\xbdz\xd5\xe2\xf6\x88\x1a\x1aE\x004\x1a\x8d\xa8\xef\xeb\xe8\xe8\x88\xd1\xa3G\xf3\xbf\xfb\xfb\xfb\xa3k\xd7\xae\x06\xd7\xdd\xbf\x7f\x1f\x17/^l\x8c\xaeA\xa7\xd3a\xe9\xd2\xa5L\xf3\xd4\xcb\xcb\xcb\xa0L\xa9Tb\xff\xfe\xfd\x06\xe5\xdd\xbau\x83\x9f\x9f\x1f\xff\xfb\xe8\xd1\xa3Eg\xd8\x94\x94\x14\xb3\x82U\xde\xde\xde\x985k\x96\xd1k\xd4j5\x0e\x1e<\x88Y\xb3f!((\x08111&\x977\x85\xb0\xdc\x80\xaa\xaa*\xd1w\x04j\xac\xa5\x94\x94\x14\xd1:1\xf3\x1f\x003\x91\x89\x95\x95h\x0aooo\xe6\xfe\x0d\x965G\x98\xa6Q\x04\xe0\xc4\x89\x13\xc8\xcb\xcb3(\x0f\x09\x091Xof} \xb7o\xdf^\xaf>\x14\x16\x16\xa2\xa0\xa0\x00YYY\xb8q\xe3\x06N\x9e<\x89\xa8\xa8(\x8c\x181B4\x92\xcf!\xb6\x0c\xb9g\xcf\x1e\xd1D\xa6\xda}\xb7\xb3\xb3\x13\x9da\xb5Z\xad\xd9\xc1\xc0O>\xf9D\xd4\x95\x10#++\x0b_~\xf9%\x02\x03\x03\xb1f\xcd\x1a\xe6r\x9b\x10\x1f\x1f\x1f\xbc\xfe\xfa\xeb\xa2u,\xd7+))I\xd4\x9aspp\xc0\xa8Q\xa3D\xefa\x0d\xca\x8e\x1d;\x9a\xec\xa3\x18\xd6\xd6\xd6\xcc5\x7fVb\x12a\x9aF\xc9\x03`\xe5\xd6\x8b\xf9q\xa1\xa1\xa1\x88\x8a\x8a20g\x93\x93\x93QZZ\x0aGG\xc7:\xf5\xe1\xed\xb7\xdf\xb6\xf8\x9e\xd7^{M4\x99EL0\xac\xac\xac\xf0\xde{\xef\x19\x94\x8f\x1b7\x0e\xdb\xb6m3(\xdf\xbd{7\x16/^\xcc\x5c~\xe4\x90\xcb\xe5\x88\x89\x89\xc1\xaaU\xab\xb0u\xebV\xb3\xcc|\x95J\x85u\xeb\xd6\xe1\xf8\xf1\xe3X\xbdz5s\x80sL\x9e<Y49'--\x0d\x99\x99\x99\x06\xbb\x0aY\xc2\x10\x16\x16\xc6L 
b\x0dJV0\xcf\x1c\xc4\x96\x0b\x01\xf6\xeaICRVV\x86\x13'N\xe0\xd2\xa5KP(\x14\xf0\xf3\xf3\xc3\xb0a\xc3L\xfe?9t:\x1d\xce\x9c9\x83\x8b\x17/\xa2\xb8\xb8\x18}\xfa\xf4\xc1\xb0a\xc3\xea\x9c\xc8TTT\x84\xb4\xb44\x14\x16\x16\xc2\xd7\xd7\xd7\xe4\xff\x9cE\x83[\x00EEE\xa2A\xaf.]\xba\x88F\xde]\x5c\x5c\x10\x18\x18hP\xaeV\xab\x99~gc`kk\x8b\xff\xfe\xf7\xbf\x06\xc9,\xb7o\xdfFFF\x86\xc1\xf5\x03\x06\x0c\x10]\xfb\xef\xdd\xbb\xb7\xa8[\x93\x97\x97\xc74\xa3k\xa3P(\xb0b\xc5\x0a\xc4\xc7\xc7\x9bm\x0dp}\x0d\x0f\x0f7\xe9n\x84\x85\x851\x83\x9d\xb5\x07\xfb\xc3\x87\x0f\x99[\x7f#\x22\x22\x98\xcf`\x0d\xd6\xfa\xcc\xd6\xac\x8dK\xf5\xd9Jn\x0eYYY\x08\x0f\x0f\xc7\xa2E\x8b\xb0m\xdb6\xc4\xc4\xc4`\xde\xbcy\x989s\xa6Y\xcb\xa7\x95\x95\x95X\xb4h\x11\xa6M\x9b\x86\xef\xbf\xff\x1e;v\xec\xc0\x92%K\x10\x12\x12\x82\xdf~\xfb\xcd\xec~\xe8t:\xec\xdb\xb7\x0f\xe1\xe1\xe1x\xeb\xad\xb70g\xce\x1c,_\xbe\x1c\x17.\x5c\xa8\xf3\xbb5\xb8\x00$$$\x88\x9a\x8bb\xb3%Ghh\xa8hy|||\x83\xf5\xcb\x18\x8e\x8e\x8e\xf8\xe9\xa7\x9fD\x05\x8ae\xcd\x18{\x1f\xd6V`K\xdf\xa7O\x9f>\xd8\xb6m\x1b\x8e\x1d;\x86\xd9\xb3g\x9b\xb5w_\xa5Ra\xc9\x92%\xa2\xd9s\x1c\x0e\x0e\x0e\xcc`\xe0\x9e={\xf4\xeee\xcd\xfe}\xfb\xf6\xc5k\xaf\xbd\xc6|F\xdb\xb6mE\xcb\xeb\x93\xb8\xc3\x12\x0f\xd6\xf2iC\xa0\xd1h\xf0\xfe\xfb\xef\xe3\xc1\x83\x07\x00j\x02\xa9\x5cb\xd2\xe9\xd3\xa7\xf1\xef\x7f\xff\xdbd\x1b+W\xae\xe4WYd2\x19\x9f\xac\xf5\xe4\xc9\x13DDD\x88\xae\xae\xd4\xa6\xb4\xb4\x143f\xcc\xc0\xe2\xc5\x8b\x0d\x02\xd8\xf5Y6op\x17\x80\xe5_\x17\x16\x16b\xc3\x86\x0d\xa2u,\x15\xbdu\xeb\x16222D\x03s\x0d\x81\xad\xad-&M\x9a\x84\xb9s\xe7\x8a\xe6\xffWVV2\xad\x90\x87\x0f\x1f2\xdf\x87\x15\x00;v\xec\x18\x8a\x8b\x8b-6\xfb\xdc\xdd\xdd\xf1\xe9\xa7\x9f\xe2\xe3\x8f?FJJ\x0a\xb6o\xdfnt\xdd\xfe\xc2\x85\x0bHLLdn\xce\x01jfo1A\xca\xce\xce\xc6\xf9\xf3\xe7\x11\x10\x10\x00\xadV\xcb\x14\x00c\xb3?\xc0\x16\x80\xec\xecl\xa3\xf7\xb1x\xf0\xe0\x01\x9e={&Z\xe7\xec\xec\xcc\xbcO\xa3\xd1\xf0\xb1\x11+++\x03\x0b\xaf\xb2\xb2\x92\x17<[[[\x83l\xc9\x83\x07\x0f\xf2\xc9a\xfe\xfe\xfe\xf8\xe1\x87\x1f\xa0T*\xf1\xde{\xefA\xa9T\xe2\xf0\xe1\xc
3\xc8\xca\xcab\x067\x8b\x8a\x8a\xf8\x9d\xae\xf6\xf6\xf6\xd8\xbd{7\xdc\xdc\xdc0\x7f\xfe|\x9c>}\x1a\xc5\xc5\xc5HHH0\xfa\xf7\xac\xae\xae\xc6\xdc\xb9s\xf9\xc0x\xeb\xd6\xad\x11\x11\x11\x81!C\x86\xa0k\xd7\xae\xf5\xda\x0f\xd1\xa0\x02p\xe3\xc6\x0d\xa6I#\xe6\x17\x9bC||<V\xacXQ\x9fn\xf1X[[\xc3\xc3\xc3\x03=z\xf4\xc0\xd0\xa1C1d\xc8\x10\xa6\xa9\x0a\x00G\x8f\x1ee.\x8d}\xff\xfd\xf7\x16?\xbf\xba\xba\x1a\x09\x09\x09\x981c\x86\xc5\xf7\x025\xf1\x81!C\x86`\xc8\x90!HMM\xc5\xd2\xa5K\xf9\x99\xa96G\x8f\x1e5*\x00={\xf6D\xcf\x9e=q\xf3\xe6M\x83\xba\xb8\xb88\x04\x04\x04\xe0\xcc\x993\xa2\xfb\xef\x9d\x9d\x9dEs8\x84xyy\xe1\xcc\x993\x06\xe5\x8f\x1e=\xaa\x93\x08\x1aK\xf816A\xa4\xa5\xa5\xf1\xcb\xcd\xed\xdb\xb7\xd73\x97\x9f={\x06___TWW\xc3\xd6\xd6\x16\x97/_6\xf8<\x08\x83\xb7\x1f}\xf4\x11Z\xb5j\x85V\xadZ!\x22\x22\x02\xdf}\xf7\x1d\xb4Z-v\xee\xdc\xc9\xccm\xd8\xb3g\x0f\xbf\xefd\xe2\xc4\x89\xf0\xf4\xf4\x04\x00DFF\xf2[\xb1w\xec\xd8aT\x00\xb6m\xdb\xc6\x0f\xfe\xee\xdd\xbb#66\x16\xae\xae\xae\xcc\xeb-\xa1A]\x00c\xd1\xf5\xba\xb2o\xdf>\xd1\x8d;\xa6\x88\x8b\x8b\xc3\xae]\xbb\xb0o\xdf>\x1c9r\x04g\xcf\x9e\xc5\xad[\xb7p\xf0\xe0A\xac^\xbd\x1ac\xc7\x8e5:\xf8\x81\xc6y\x9f\xba\x1c>\x22\x86\x9f\x9f\x1f\xb6n\xdd\xca\x9ci\xef\xde\xbdk\xb2\x0d\xd6\x87.99\x19O\x9f>e\xba,\x13&L0\x98Ik\xc3\x0a\xc2VVV2-'\x16\xe5\xe5\xe5\xcc\xed\xd5\x8e\x8e\x8e\xa2\xfbJ8\xfc\xfc\xfc\xf8\xc1RPP\xa0\xb7#\xf2\xfc\xf9\xf3\xbcu\xc0\x9a\x0c\x84\xa9\xecB\x17Q\xf83k\xbf\x83\xb1\xfb\xbd\xbd\xbdyW\xc2\xd8\xfd\x00\xb0q\xe3F\x005\x16\xcc\x9c9s\x90\x9e\x9e\x8e[\xb7n\xd5\xe90\x97\xda4\x98\x00TVV\x9a\xbd\xa9\xc4\x12\x94J%3\x05\xd5\x18\xbdz\xf5B\xef\xde\xbd\xf1\xe6\x9bo\xc2\xc3\xc3\x03\xae\xae\xae\x16\x9d\xd3\x97\x9f\x9f/:\x83\xd5\x97\xdf~\xfbMt\xd6\xad\x0b\x1d:t\xc0\xb0a\xc3D\xeb\xcc\xc9\x0d\x08\x0e\x0e\x16\xcd[\xa8\xaa\xaa\xc2\xa6M\x9bD\xdd\x0c\x99L\x86\xc9\x93'\x9bl\xbbo\xdf\xbe\xcc@\xe3\xd6\xad[E\x97\x89Y\xfc\xfc\xf3\xcf\xccMU\x81\x81\x81F#\xf12\x99L/\xc1J\x18\xa0\x16\xfe,vf\x82F\xa3A~~>\x80\x1a\xf7@(\x10B\x0b\xc6\xd8!'\xc2:\xa1X\xb7h\xd1\x02\xf6\xf6\xf6\x00
j,\x11\x96\x1f\x7f\xe7\xce\x1d\xbe\x0d\xadV\x8b\x7f\xfe\xf3\x9f\x983g\x0eBBB\xe0\xe3\xe3\x83/\xbe\xf8\xa2^{E\x1aL\x00N\x9c8\xd1h\xcb1{\xf6\xeci\x94v\x8d\x91\x98\x98\xd8 \x07X\x88\xd1\x90\xef\xc3\xea\xa39\xfb\xfb[\xb6l\xc9</p\xe3\xc6\x8d\xa8\xaa\xaa2(\x1f4h\x10:w\xeel\xb2m[[[ff\xa4Z\xad\xc6\xfc\xf9\xf3\xcd\x12\x81k\xd7\xaea\xfd\xfa\xf5\xccz\xd6\xb9\x8cB\x84\x83\x9b\x135\xadV\xcbO,\x0a\x85B4\xd3\xb0\xa4\xa4\x84\xff\xfb\xd6\x163\xe1\xaeKc;>\x85u\xb5wj\x0a\xdbd\xb5!v.\x02\x17\xa7\xd0h4\x88\x8d\x8d\xc5\x92%K\x98\xcf7E\x83\xc5\x00X\x1fj\x1f\x1f\x1f\x0c\x192\xc4\xac6\xae]\xbb&zD\xd5\xa9S\xa7\xa0T*\x1b5\xda[\x1b\xd6\xfb\x0c\x1c8\x10\xbe\xbe\xbef\xb5\x91\x92\x92\x82\xb4\xb44\x83\xf2\xa4\xa4$,[\xb6Lt\xe6Z\xb8p!\xec\xec\xec0e\xca\x14\xf4\xec\xd9\xd3h\xfb\xbf\xff\xfe;s\x7f\xbe\xb9\xeb\xed\xd3\xa6M\xc3\x96-[\x0c\x84\x84\xb5\x8a0s\xe6L\xb3\xda\x05\x80\xf9\xf3\xe7#>>^4\xca}\xed\xda5\x84\x84\x84`\xed\xda\xb5\xa2K\x9d\x1a\x8d\x06\xbf\xfc\xf2\x0b\xa2\xa2\xa2\x98\xc1\xbf~\xfd\xfa1w8\x0a\xe1\xceE\xcc\xca\xca\xc2\xd5\xabW\x91\x9f\x9f\x8fG\x8f\x1e\xf1\xc1\xda\xa1C\x87\x8aZ+\x8e\x8e\x8e\x90\xc9d\xd0\xe9t\x06\xe6\xb601\xccX<C\xf8\x99\xad\x9dL&l\x93\x15\xc8\x14\xae|\x84\x85\x85a\xe5\xca\x95\xa8\xaa\xaa\xc2\xd1\xa3G\xb1|\xf9rTWWc\xff\xfe\xfd\x88\x8c\x8c\xacS.@\x83\x08@II\x09s\xf3\xc7\xbcy\xf3\xcc:l\x02`\x9f\xa5WUU\x85\x03\x07\x0e\x98\x8c<7\x14\xf7\xef\xdfg\x9eK\xf0\xe9\xa7\x9f\x9a}\xca\x8f\xb7\xb77f\xcf\x9emP^XX\x88\xb3g\xcfb\xd0\xa0Az\xe5J\xa5\x12\x87\x0e\x1dBuu5v\xee\xdc\x09///\xf8\xfa\xfa\xa2G\x8f\x1eh\xdd\xba5\xec\xed\xedQYY\x89\xfc\xfc|\x5c\xbbv\x0dIIIL?\xb0\x7f\xff\xfef\xf5\xb1c\xc7\x8e\x185j\x94YG\x83yyy! 
\xc0\xacv\x81\x9a}\x15\x1f~\xf8!\xa2\xa2\xa2D\xeb\x0b\x0b\x0b\x11\x11\x11\x81\x1e=z\xa0\x7f\xff\xfepuuEEE\x05233q\xf2\xe4I\xa3)\xbe666\x16\xcd|\xc1\xc1\xc1|\xd0\xee\xe0\xc1\x83z~7kI\x94\xcb>,**\x82Z\xad\x86J\xa5\xe2\x85B80\x8d\x05\xe4\x84K\xb7\xc2{\xaa\xab\xabyaS(\x14prr\x12\xbd\x9fs\x13\x80\x9a\xe5[\xee\xf7\xf1\xe3\xc7#))\x89\xcf-\xb9{\xf7n\xf3\x09\xc0\xc1\x83\x07E\x03u\x0e\x0e\x0ef\xcf\xfe\x80\xf1\xa3\xab\xf6\xec\xd9\xd3d\x02\xb0w\xef^\xd1\xf2n\xdd\xbaYt\xc4\xd7\xa0A\x83\xe0\xe8\xe8(\xea\x1a\xed\xd9\xb3\xc7@\x00\xf6\xef\xdf\xaf\x97\xce\x9b\x91\x91!\x9a\x84d\x0a\xb1\x83A\x8d1c\xc6\x0c\xb3\x04\xa0.\xab\x17s\xe7\xceEzz:\x0e\x1d:\xc4\xbc\xe6\xf6\xed\xdb\x16\x1f\x04\xb3r\xe5J\xa3\xc1\xbf\xdaL\x9c8\x11\xd1\xd1\xd1\xd0h4HHH\xe0\x97\xf6\x5c]]\x8dn4rss\xe3\xcd\xf3\xf4\xf4t\xfe\xdc\x83\xf4\xf4t\xfe\x1ac.\x91\xf0\x88\xf4\xf4\xf4t~\xf5\xe4\xf6\xed\xdb\xbc\xd5el\x7f\x840u\xba\xf6\xaeG\xe1s\xebz\x9ef\x83\xc4\x00X\x03f\xd4\xa8Q\x16\x7f\x01\x07kG\xdc\xd5\xabW\xeb\xbc\x86l\x09:\x9d\x8e\xf9>\xac\x99\x82\x85\x8d\x8d\x0d\xf3\x88\xb3#G\x8e\x18\x04oX\xcf\xb5\x94\x7f\xfc\xe3\x1f\xcc\xad\xc5b\xf4\xee\xdd\xdb\xa4[\xe3\xe2\xe2b\xf1\xfb\x035\xfe\xea\xaaU\xab,\xb2\x1cL\xf1\xd1G\x1f1w\x92\xb2\xe8\xd8\xb1#\xff\xbf\xb8y\xf3&o]|\xf0\xc1\x07\xbc+\x96\x94\x94\x84\xd0\xd0P|\xfc\xf1\xc7\xfc\xec,\xdc\xec\xb4a\xc3\x06h4\x1a\x94\x94\x94 
..\x0e@\xcd\xfbq)\xee\xf7\xee\xdd\xc3\xf4\xe9\xd31u\xeaT~\xebxXX\x18\x1f\xed\x8f\x8f\x8fGAA\x01\xb4Z\xad\xde\x01\xb4\xdc3\xd4j5\x96.]\x8a\x90\x90\x10>w\xc0\xcb\xcb\x8b\x8f\x1d\x5c\xb8p\x81\x17\xca\x82\x82\x02$''\xf3m\xbc\xf9\xe6\x9b\x16\xfd=8\xea-\x00\xb9\xb9\xb9\xcc\xad\x9f\xa6\xd6\x8a\xc5x\xf7\xddwE\xcb\x8d\x0d\xcc\x86\xe4\xca\x95+\xccS\x81\xea\xf2>,A+//\xc7\x91#G\xf8\xdf\xb3\xb3\xb3\x91\x9a\x9ajq\xfb\xb5\x09\x0d\x0d5+;\xad6\xa6|\xfb\x0f>\xf8\xa0\xce\xdfv\xa4P(\xb0y\xf3f\xcc\x9f?\x9fy,\x99\xb9\xed\xac[\xb7\x0e\x0b\x17.\xac\xd3\xfd\xd3\xa7O7ho\xd2\xa4I\x00jV\xb1V\xacX\x81\x9b7o\x22!!\x81O\x80\x0a\x0f\x0f\xe7\xcd\xf3\x93'Ob\xc0\x80\x01\x08\x0c\x0c\xe4\x03\x98o\xbf\xfd6\xbf\xb6\xff\xed\xb7\xdf\xe2\xf4\xe9\xd38w\xee\x1c\x7f\xfeb\xe7\xce\x9d\xf9\x95\x9a\xe2\xe2b\x0c\x1e<\x18\x81\x81\x81\xfc\xceK{{{>X\x9a\x94\x94\x84\xf8\xf8x\xdc\xbau\x0b\x9f\x7f\xfe9\xca\xcb\xcb\xe1\xec\xec\x8c\xa9S\xa7\x02\xa8q\x1b&L\x98\x80I\x93&a\xd4\xa8Q|\x0cc\xf4\xe8\xd1|\x1f,\xa5\xde\x02\xb0w\xef^\xd1\x80\x91\x83\x83\x83h\x8e\xbf)<==\x99\xbeLS\x08\x00+\xf8\xe7\xe9\xe9Y\xa7?r`` 3x)|\x96\x9b\x9b\x1b\xd6\xad[\x07??\xbf:\x0d\x92v\xed\xda\xe1\xeb\xaf\xbfFTT\x14\xf3\x0c=c\x8c\x181\x82i\xca*\x14\x0a\xb3\x96\xfe\x8caee\x85\x7f\xfd\xeb_HLL\xc4\x981c,\xea\xa3B\xa1\xc0\xacY\xb3p\xec\xd8\xb1:\x890\x87\xbf\xbf\xbf^`U8\xb8\xad\xad\xad\xf5\xf6\x14p\x9b\xd0\x14\x0a\x05~\xfc\xf1G>\xd0WTT\xc4\x075{\xf5\xea\x85\xd5\xabW\x1b\xdcS\xfb\xe7\xaf\xbe\xfa\x8a\xdf6\xaeV\xab\xf9\xa5\xc5\xd6\xad[\xe3\xc7\x1f\x7f\xe4\x97b\x85\xf7\xb4l\xd9\x92\xb7\x9e###y\x11\xa9\xa8\xa8\xc0\xe5\xcb\x97\xf9e^\x7f\x7f\x7f\x93\xa7.\x1bC\x16\x1d\x1d\xad\x03`\xf4\x1b]\x8c\x91\x90\x90 
z\x98e\x97.]\x989\xfe\xa68u\xea\x14\xae]\xbb&Z7c\xc6\x0c~=\xf6\x8f?\xfe`n\xb3\x8d\x8c\x8c4{\xa7\x96\x90\xcd\x9b7\x8bf\xff\xf5\xec\xd9\xd3\xa8\xafh\x8c\xc4\xc4D\xd1Sk\xac\xad\xadEg\xc5\x9c\x9c\x1c\xa4\xa4\xa4\xf0\x1b\x91\xf2\xf3\xf3Q\x5c\x5c\xcc\xbb\x0cr\xb9\x1c\x0e\x0e\x0e\xe8\xd6\xad\x1b\xbc\xbc\xbc0t\xe8P\xbc\xfd\xf6\xdbuz_!g\xcf\x9e\xc5\x95+W\x0c\xca===\x8d\x1eVR\x17rrrp\xf2\xe4I\x9c>}\x1a\x19\x19\x19(..\x86J\xa5\x82L&\x83\xa3\xa3#\xda\xb6m\x0b\x7f\x7f\x7f\x0c\x1c8\x10\x83\x06\x0db\x9edd)\xa9\xa9\xa9|~\xc7\xb8q\xe3\xf4|\xec\x8b\x17/\x22..\x0e\x1e\x1e\x1e\x983g\x8e\x9e\xfb\x9a\x93\x93\x83\xc4\xc4D\xa4\xa5\xa5\xc1\xde\xde\x1e~~~\x08\x0f\x0f\xd7[=\xc8\xcd\xcd\xc5\xa6M\x9bP]]\x8d\xd9\xb3g\xeb\xf9\xf6j\xb5\x1a\xbbw\xefFjj*JJJ\xd0\xa7O\x1f\x04\x07\x07\xeb]\xa3\xd1h\xb0q\xe3F\xdc\xb9s\x07\x13&L\xc0\xc0\x81\x03\xf5\xfa\x9e\x94\x94\x84\xd4\xd4T<x\xf0\x00\x9d:u\x82\xaf\xaf/BCC\xeb\xf4\x7f\xe7\x96C\xeb-\x00D\xd3QUU\x05\x8dFS\xa7\xef2|\x11P\xab\xd5\xb0\xb6\xb6\xae\xb7\x90\x11\xa6\xe1\x04\xa0I\xbf\x1d\x98\xa8\x1f666&Sp_d\xcc\xfdr\x12\xa2\xe1h\xb2/\x07%\x08\xe2\xaf\x07\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00\x08B\xc2\x90\x00\x10\x84\x84!\x01 \x08\x09C\x02@\x10\x12\x86\x04\x80 $\x0c\x09\x00AH\x18\x12\x00\x82\x900$\x00\x04!aH\x00\x08B\xc2\x90\x00\x10\x84\x84!\x01 \x08\x09C\x02@\x10\x12\x86\x04\x80 $\x0c\x09\x00AH\x98\xff\x03tx\xba\xd0\xfd\xc4\xac\xe9\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02\x0d\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x01\xbb\x00\x00\x01\xbb\x01:\xec\xe3\xe2\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01\x8aIDATx\xda\xdd\x931K\x5cA\x14\x85\xcf\xbc7\xc3nPW\xc5d\xc5?\x91\xcaVH\x13B\x04\x8d\x8bU\xfe\x80\xe8?\x90 
ha\x93\xc2\xc2Z\xd16e\x92&\x16F\xc5\x22\xe5j#\x08\x8b\x98\x14\xba\xb3\xef\xe5\xed*ogW\x9d\xf7\xf68<\xd0\xca(\x92B\xf0\xbb\x5c\xb80p8\x9c;W\x90\xc4\xff\xe0\xb9~Z\x01\x09\x87p`\x02y<\x86\xaf\xb8\xa0#\x13\xc0\x18\x16G\x87G?Yaa\x5c\xc5~\x0c\xe3\xbbI\x1a\xd4D\x00\x14\x00t\x89\xcco\x91E\xf4^\xf6\xa12RY\x010\x95\x09\xc0`I\xff\xd5\xd3\xa5R\xa9_\x9fkQ5Uh\xab\x11z!j\xac\x01\xaf\x00\xbcd\xe6\xb7\xa1#\xe2\xb7gp\x95\xce\xdff\xc0\x9f\xac\x97\x0f\xcbo\xb7v\xb6\xac\x92\x0a\xcaS\x90BBu\x14p\x05\xa0\xe5\xba\xee:\x00\x06j\xc54\x08\x83\x12gY\xbd\xc9 \x83?X\x16\x1f\xc4two\xf7Ja\xa8\xe0\xabT\xc1O}xM\x0f\x1dWh;#v0\x0d\x8e\xc39.\xd8\xcd;\xb7\xc0o\x5c\xdf\xd8\xddX\x8d\xebq*\x13\x09\xaf\xedA\x9eK \x02z\xa2B\xd2\xfcc\xbe\xa7\x0b\x97\x9f\xef]\xa3}mg\xb6\x7fm\xef\xb1\xc5T](\xb0N\xe4\xcerI'\xe2Q\x9b\xcd\xc9\x07\xff\x01\xe7\xc9\xf8$~\xb7w\xb0\xdf\x90-IF\xa4Jr\xc64\xe27to\x0f\x0a\xdc\x84\x1a\x06\xc1\xfb\x13}js*\x9f4\x1b\xadq.S\xe3\x0e\xee\xbd\x05\xf1QL\xe2\x85\xaf\xb8\x96|\xc1?x\x06\xc7t\x0d\x90$\xc3\xdbmt\x09\xd1\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x02\x0d\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x01\xbb\x00\x00\x01\xbb\x01:\xec\xe3\xe2\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x01\x8aIDATx\xda\xdd\x931K\x5cA\x14\x85\xcf\xbc7\xc3nPW\xc5d\xc5?\x91\xcaVH\x13B\x04\x8d\x8bU\xfe\x80\xe8?\x90 ha\x93\xc2\xc2Z\xd16e\x92&\x16F\xc5\x22\xe5j#\x08\x8b\x98\x14\xba\xb3\xef\xe5\xed*ogW\x9d\xf7\xf68<\xd0\xca(\x92B\xf0\xbb\x5c\xb80p8\x9c;W\x90\xc4\xff\xe0\xb9~Z\x01\x09\x87p`\x02y<\x86\xaf\xb8\xa0#\x13\xc0\x18\x16G\x87G?Yaa\x5c\xc5~\x0c\xe3\xbbI\x1a\xd4D\x00\x14\x00t\x89\xcco\x91E\xf4^\xf6\xa12RY\x010\x95\x09\xc0`I\xff\xd5\xd3\xa5R\xa9_\x9fkQ5Uh\xab\x11z!j\xac\x01\xaf\x00\xbcd\xe6\xb7\xa1#\xe2\xb7gp\x95\xce\xdff\xc0\x9f\xac\x97\x0f\xcbo\xb7v\xb6\xac\x92\x0a\xcaS\x90BBu\x14p\x05\xa0\xe5\xba\xee:\x00\x06j\xc54\x08\x83\x12gY\xbd\xc9 
\x83?X\x16\x1f\xc4two\xf7Ja\xa8\xe0\xabT\xc1O}xM\x0f\x1dWh;#v0\x0d\x8e\xc39.\xd8\xcd;\xb7\xc0o\x5c\xdf\xd8\xddX\x8d\xebq*\x13\x09\xaf\xedA\x9eK \x02z\xa2B\xd2\xfcc\xbe\xa7\x0b\x97\x9f\xef]\xa3}mg\xb6\x7fm\xef\xb1\xc5T](\xb0N\xe4\xcerI'\xe2Q\x9b\xcd\xc9\x07\xff\x01\xe7\xc9\xf8$~\xb7w\xb0\xdf\x90-IF\xa4Jr\xc64\xe27to\x0f\x0a\xdc\x84\x1a\x06\xc1\xfb\x13}js*\x9f4\x1b\xadq.S\xe3\x0e\xee\xbd\x05\xf1QL\xe2\x85\xaf\xb8\x96|\xc1?x\x06\xc7t\x0d\x90$\xc3\xdbmt\x09\xd1\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x03d\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x02\xe1IDATX\x85\xbd\xd7\xcf\x8bVU\x1c\xc7\xf1\xd7W\xa644\x1d'\xc7t2*t\x91$\xe5\xaa\x16\x05\xb5\x8e\x886\x91\xd0\x7f\xe0\xa6E-\xdae\x84\x8b\x82h\xd1\xa66Q\x84\xd5\xa2e\x9b\xc0\x16\x09ET\xa0\xa0\x8bI\xa4\xacI\xa2\x9er\x1c\x1b)!\xe7\xdb\xe2\x9c\xdb\xdc\x1e\xef\xcc<\xf3<O}\xe1r\xcf\xfd\x9e_\xef\xf3\xf9~\xef\xb9\xe7Ff\x1a\xd6\x22b\x022\xf3\xafa\xc7\xd80\xf4\xec\xc5\x8e\xe2\xc5Q\x06\x18\x15`'\xb6\x8c2\xc0\xc4\x88\x00\xd3\xb862@D<\x8d\xc3\xd8\x8bE\x9c\xa9\xf5\x1bp\x87\xb2\xd2\x93\x99\xf9h\x07\xc0\xe2(\x002SM\xc4\xa7\x90x\xb7\xf1\xb5\xea\x0e\xe2\xcb\x0e\xff\xb7\xf8\xa8\x96\xef\xc1\x96\xfe6k]\xed\x1c\xf8\xbe\xde\xaf\x9343O\xe1\x8d\x0e\xfeil\xae\xe5\xf7p\xffz\x05\x18(\x09#b\x7ff\xbe\xd5\xe7\xdb\xa4$`\x93\x847cr\xec\x00\x11\xb1\x07\xcfuTM\xd7{\x03\xb0\x15\xdb\x22bsD\x9c\x1e\x14\xa0\xeb-x(\x22\xde\xae\xe5I<\x80\x8fW\x00\xf8\xb5\x0f`\x12S8\x10\x11[3\xf3\xf20\x00\xa7p\xa4\x05\xf0\x04\xee\xech\xb7\x13\xe7\xb1\xb7\x86\xe3\x06l\xc3\xf6\x16\xe0P\x00\x8b\x99y\xbey\xa8r>\xdb\xd1n\x1a\xdf\xe1>e\xf5\x0d\xf0TS\x1f\x11\x0b\xd8\x95\x99g:\xfac\x80\x1c\xc8\xcck\x99\xf9\xca\x0a\x00su\x8c\x1d-\x80\xb6\x02\x8f\xe1\xcd\xd5\xc6\x1fe'lr`\x11\xbb\xab\xaf\x1d\x82\x1d\x98\xd1\x1d\xbe\x7f\xac\xad@\xd3pf\x1d\x00\xbd\x0a0\x83%}!\xc0\x1e\xe
c\x8e\x88\x8d\xab\x02\xd4\xad\xf8\xf9\xea{0\x22^\x8e\x88]\x03\x00\xb4\x15\xf8\xd9\xb2\x02\xf3\xb5\xfe6\x84\xb2\x9dwZ\xf3=?\x86ckL\xd8\x05\xd0V`\xae\xfa\xb6\xe3\x9b\x16\xc0o\xb8+\x22\x0e\xe2bf\x1e\xff\x17@D\x1c\xc2\xa1uN\x0e\x07\x94U7\x0a\xcca\x9f\x12\x82\xb3-\x80\xcf\x94\xf0\x1e\xc6\x1f8\x1e\x11\xaf*\x1f\xbe\x0f&\x94\xf7\xfe\xcf!\x00^\xcb\xccs\x11\xd1(\xf0\xb5\x12\x82)|\x8a{q\x0b\xbe\xa8\xe5}\xb8\x14\x11G\xf0\x08^\xc2\xecDf\xcebv\x08\x80\xc6\xae\xe0n\x5c\xac\x0b\xb9]Q`?~\xc19<\x83\x938\x81\x17\xf0pf\x9e`\xf4\x03\x09\xcb9p\x19\x97\x14\xb9\xcfb#.(\xbb\xe5\xadx\x07\xaf\xe3t39\xa3\x1f\xc9\x1a\x80\x9b*\xc0\x026\xe1\x07\x5c\xc5\x8f\x15\x00>\xcf\xcc\x9f2\xf3\xfdv\xe7q\x01\xc0\xef\x8a\x02K\x15\xa4\x87\x0b\x99\xd9\xc3\x87J2^g\xe3\x0a\x01\xcb\x0a,d\xe6RD\xf4\x94\x10\xc8\xcc'W\xea<N\x05\x9a\x1c\x98\xaf\xcf=%\x04\xab\xda\x7f\xa1@\x03\xf0\xb8\x01N\xcc\xe3\x04hr`\x1e2\xf3\xea \x9d\xc7\x1d\x82\xb6\x02\x03\xd9\xb8\x15\xf8J\xd9\x9e\xffw\x80+\x99\xb9\x84O\xd6\xdby\x5c!X\xf3\xec\xb7\x92\xc5(\xbf\xe7\x10\x11\x81\x1b\x07M\xba~\xfb\x1b\x0f\x7f(iE\xd8\xf4\x5c\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x04\xef\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x04lIDATx\xda\xc5V\xdfo\x1bE\x10\x9e\xfbm\xc7vb\xb7v\x13\x0a\x11\x22\x05\xa9\xa2\xef\xbc 
\xf1\x14\xde\xf87\x80g\xd4\x97\x0a\x89\xf2\x04\xadT\xf8\x7f\x90\x8a\x04\xaa*\xb5oE4\x84\x22\x88\xd4\x22L\x8c\x93\xdaq\xb1\xef\xec\xbb\xdb[fF\x1e\xfbzw\xe6B\xa4\xd2M\xd63\xbbw7\xf3\xed7\xdf\xee\x9d\xad\xb5\x86\x97\xd9\xec\xb95\xb0\xeb\x97\x09@_\xfd\xe4\xea\xedD\xab]\xb9\xf0\xce\xbb\xef\xc1\xce\x9b\x97\xc1\xb1,\x80\x0cK\x86\x81x\xb1\x9b\xa6\x01\xfcgH\xe7\x8bl\xf1\x12\xcf\xc5Q\x0c{?\xfd\x08\xf7\xef\xde\x01i\xad\xd6\xb9o\xaf\x7f~\xfd\xfd4\x00\xf0\xfd\xc9\xee\xc3\xbd\x87\xb0\xb9\xb9\x05\x17:\x1d8\xdf\xb9\x00\xdb\xdb\xaf\x83\xe7:\x85\xc8\x8d\xf9\x0fYN\x9e\xf5\x19\x84\x01\xfe,\x84~\xff/\x98\x06S\x906H\x9e\xeef\x19\x80$I\xa0V\xab\x81c\xdb\xec\x9b\xa0y\x85\xbf\xf7O\x8a\x92g\x18y~`\xa4\x00\xbe\xd6n\xa0\xd1\x1c3\x9d\xab\x10@s\xa3\x09\x9e\xe7-n\xb0,\x03\xdez\xb5\xc5\xbe\xce&\xe7D\xe2\x8a\xe59a\x80\xc7\xc14\xa4K\xe5\x00h74\x1a\x0d\xf1\xf9a\x0b\x19pm\x0bV\xd5`5\x00\x89\x89\xc9\xa8C\xc21S\xb9\x0a\x19\xc8\xc4\xd7\x08\x00o\xb0%\x9a\x5c(.\x83\x9e\x07\xa6\x84\x1a\x7f\xd0\xe5q\xac\x14\x8f\xd3\xf1\x95R\xe5\x00\x12\x8d\xb8\xf1\xc1\x9f\x1f\x1fCQ\xd3\x99\x91\xd6\xe21\x12\xb2<\xd8:W\x03\x95\xe4JP\x0e\x80\x86\x11>\xb9\xbd\xb9\xfe\xbc\xc84\xff\xe7\xe8$\xa3\x19\xc8\x82\x0d\xee\xb3P\xd1\x8aO\xa7\x81tSq\x021u+\x0d\xac\x00\x88\xac^\x92\xb2]\xfa1.Bi\xfd\xdf5\x10'\x0a\x22\xda\x8e\x91Z]\x02Y\xbd\xb89\x06\x805\xa0\x12}\x86\x12\xe0\xd3Q\xac\xa0w<\x96\xa92-H\xed\xc5\xe5\xd6Z\xf7\x90MU.B\x9d\x01\xa0pL\xf45\x1b\x15\xd9W\xf9D\x99d<\xbb`\x80\x1d\x8a\xc1\xc9\xd3\xf1u!\x039\x0dh\x16\xa1Q\xb2t\x9d\x06%\xe3\xa5\x06\xc8a\x0d\xa4\xe3'\xa7\xd1\xc0\xbd\xbb\xdfC\xaf\xd7\x83\xc0\xf7K\xe8/\x16\xa60S][\x83'\xbf=\xcaj\xa0\xbc\x04O~\xfd\x85z\xfa\x0d\x98\xb5\xc5\xa0V+\xbe\xa4\x048\xf9\x82[\xc9I\xf8\xff}\x19\x15k@\xbfX\x06\xce^\x82\x0f?\xfe\x08\xde\xd8\xd9)\xaa\xe5\xca:\x0f\x06\x03\xf8\xf4\xda50M\x13\xe3j\xde\xd2\x9b\x9d\xce\xbf\x8b0\x8e\xe3\x5c`\xaf\xe2\xb1\x8a\xc7\xe3\xb1\x08/+0\x9a\x93.s\xfcaS\xa9T\xc0\xb2,\xde\x09\xb3\xd9,\x1f_\x00\x14\x09CZ\xfa\x04\xbb\xf1\xc5\x97t\xac\xca\xe7\x1b\xdc\xb8y\x93V(\xa0\xe8>\xb2\xcb\x17\
x0f\x832\x89o\xfa\xb2\xa2\xf93\x00\x98\x07\xa5\xf6\xf4\xf8\x18\xa4\x0dON(\x09\x01\xa0\x95\x91\xcf\xcfK\xd7\xcb\x17\x82\x98S\x00(\xa0(\x9cN)x\x16 QK\xf3D-\xdb(\x8ad\xe5\xc2\x02g\x150\x9a\x80!#\xd5j\x15j\xf5:\x0c\x07\x83\x02\x0d\x14 \xb4l\xbb\x10@\x14G\x10\x04\x81$ +L\x90\xe5\x1e\x22({\x0e4\xa6\x18t\xaf\x01\x10\xe0\xa2\xa28>\x1d\x03\xe0\xba\x1c|\x8a\x0f\xc5q$\xe5 +\xc2\xa2\xd5K\x09$9_\x8fBd\x87E\xa8\xf9\x85d\xa0\xb5M\x0b\x9e\x8dF\xd0X_/\x02\xa0z`\xc0V\xf6\xc4\x0a\xfc\x00\x1c\xc7\x811Z\x7f2a]4\x9bM\x14\xa2/I\x85\xfe\x85\x0d\xc3\x10\x81\xd1|\xb2\x10\xa89\xdf\x9e\xedv\x1b\xfa\xfd>\x18\xd846\x02\xc0\x83\xcb\x97.}P\xa9\xd6\xbe\xc1R\xb5\xd3\xfbU\xe8\xbe\xf5\xd5-\x01E\x9d\xb6&\x05\xcf\x89P\xfc(\x0cY+\x02\xc02M\x9e\xefv\xbb,^lUL\x1b\x08\x03\xc6\xa3\x83\x83?766\xbe\xde~\xe5\xe2gx\xc3\x1a\x8b0\x0a9\x91=\xd7Bj\xc5D\xbd\xd0\x9f>\x8c(\xb8h*]2\xd1\x90\x8cC4\xe7\xb1w\x05\x00\xdd\xf9\xf7h4\xfa\x0e\x95\xdf\xf2<\xef\x22\x11\xf0\xf6\x95+\xd6\x0f\x0f\x1e\xec\xb8\xae\xeb\xb9\x8e\xe3\xa2(]\x04\xe3a\x12\x87\xca'\x87\x80\xe6/8\x15\x22\x980\xc26\x1c\x0e}\xdb\xb1\x0f\x0e\x0f\x0fgx\xd5X\xe6`\xba\x15:\xf7\xd0\x9ba\xe7\x12p\x0c\x8c\xe5\xa3\xbb\x17\xccf\x8f\xb1;4\xfdG\xb7\x9bT\xaa\xd5\x10O\xb5D\xd0#K\x80@\x89\x15\x8d\xc04\xd5\xb5^\xaf\x1bx\xfa\x19\x84\xe7\x04\xcf\x88\xfd\xfd\xfd\xe4\xe8\xe8\xc8\x9dL&\x14\xc7\xcc\x7f5p\xf2g\x94\xf7\x1f\xdf\x9a\xd2\x93\xfbC\xb7\xa7\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x07r\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x09pHYs\x00\x00\xddu\x00\x00\xddu\x01\xac\x87\xc3\x83\x00\x00\x00\x07tIME\x07\xd9\x0a\x1f\x0c47\xb1\xa2\x17\xb3\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x06\xf2IDATX\xc3\xbdW\x09L\x94W\x10v\xb1\xd8V+-WWm\xb1RK\xa3\x824\x96\xc3\xe5\x06QQSQ)\x8a\xf1l\xac5\xd6\x00\xc6\xa3E\x05%\x02A\x13H\xf1@\x01\xeb\xc1\xb1\xab\xa0 \x94\xb5\xa0\x5c\x82\xa0\xcb\x15\x10\xb0\x12\x17\xd8e\x97\x95E9\x14pq-\xf2\xa63[ 
\xadb\xa3f\xe9\x9f|\xf9\xdf\xce\xcc\xdby\xef\xcd\x9bo\xe6\x1f3\xe6\x7f|\x5c\x5c\x5cF\xdf\xc9\xb4i\xd38\xf8z\xff\xdc\xb9s\xbfTVVt466Bss\xb3Z,\xbe\xf7{dd\x84%\xea8\xa3\xbd\x86\x89\x17/^\x94\xb6\xb6\xb6\xc2\xc3\xf6\x87\xd0\xd5\xd5\xa5\xc1S\xf5S\xa6T\xb6\xc2\x8a\x15+\x9cGs\x11\xef\x04\x04\x04\x1ckQ\xb4@G{'\xf4>y\x02j\xb5\x1a\x8a\x8b\x8b\xc1\xcf\xcf\xaf\xd9\xc4\xc4$\x0em\xcc\x10:\xa3\xb5\x80\x0f\xe2\xe2\xe2\x1a\xbb\x1euAoo\x0f<\x7f\xde\x0f\x0a\x85\xe2\x09\xcaw!h\xe7&\x88q\xa3y\xfcS\xae\x5c\xb9\x82G\xde\x09\xdd\xdd\xdd\xa0\xeaS\xc1\xdd\xbbw\xc5(\x9fJ\xa73\xda\xb1\xe7\xb8\xbb\xbb{\x15\xdd(\x84\xda\xdaZ\xe8\xe8h\x87v\x04\xc6}\x80v\xee\xe0`\xaf1200\x18\xb5\x05\x8c\xdb\xbd{\xf7\xf1\x9c\xdckP^^\x0e\xf2\x169H\x9b\xa5 \x95IYr\xca\x85\x07nn.3(\xf6k\xd7\xaf\xb1LHLH\xdb\xb5k\xa7?\xfe\x1e\xab\xcd\x05L8\x19s\xf2\x8f\xa2\xa2B\xd6\xd6\xd6\x06\xa2R\x11`\xeaA]]\x0d\xdc\xbcY\xcc\xa2O\x1c\x1f())\xbeUQQ\x09\xb1\xb1\xb10g\xce\x9c\x0d\xda\x0e\xcb\x8c\xf4\x8c4H\x12\x5c\x80\x0b)\x17\xe0\x87\xad[ \xfc\xd0!(-\x17\x01\x85%>\xe1\x1cTUU\xc1\xa5K\x97\xd8\x8e\x9d;:\xd1~\xa66\xb3A7<<\xfc\x88T\xda\x04\x1e\x8b\x97\xc0\x8d\x92B\xc8\xbb\x9e\x033f\xcd\x84\xb2\xb2[\x90\x90\x18\x0f1\xb11\x90\x97\x97\xc7\x22\x22\x22\xc0\xcc\xec\x8b`\x9c\xf3\x91\xb6\x9c\xeb\xac^\xbdzA}}=0|RSS\xe1\xdb\x95^Ps\xe76\xf8\xac]\x03\xc9)\xc9p \xf8\x00\xf8o\xf7\x87\xe0\xe0`\xb0\xb2\xb2J\xc19_jk\xf7:\x9b6mZ\x22\x97\xcbA \x10\xb0\x9a\x9a\x1a\x88\x89\x89\x81\x85\x8b\x16\xb1}A\xfb\xe1\xe8\xd1\xa3\xb0m\xdb\x8f\xcf\xbd\xbd\xbd\xe5X\x07\x8a\xb9\x5cn(\xce\xb1\xa0\x13\xd3\x8as\xbc\xf5>D\xb9\xf8f\xd66_C`` 
\x8b\x8e\x8e\x86\xad[\xb7*\xec\xed\xed\x7f\xd3\xd3\xd3\xdb\x8fv\xde\x08\xc7\xc1\x98\x1bi\xeb\xe2q\xc2\xc2\xc2\xb6)\x95J\xd8\xb0a\x03srq\x04W7\x17&\x14\x0a\x81\xc7\xe3\x09P?\x1f1}0\xce\xefi\x9bz9\xc7\x8e\x1d\x0b\xbe\x7f\xff>,[\xee\xc9\x5c\xe79\xc3\xec\xd9\x16,77\x17\x5c]]\xf9\xa87'N8}:ax\x02\x00h\xcf\xf9\xd9\xb3gOby\x05\x8fE\x0b\xd1\xb9\x0b|f:\x15rrr\xc0\xd3\xd3S8\x18\xdf\xe1\xdb-\x91\xc8*\x9b\x9a\x9a+\xff\xc9\x15o\xe5\xd5\xcb\xcb\x8b^c\xb1\xcc\xa6\x89\xc5b<nWpC\xe7\xdcI\x5c\xc8\xcc\xcc\x84\xcd\x9b7\x8bPo\xc5\xe7\xa7\xac\xc1\xddBh\xe8!/\x13\x93)\xa6\x0a\x85\xb2\x9b@c\x94\xad ]F\x86p\xe5\x9b6\x18\xf4z\x17\x8bL\x11q\xbc\x93\xb3#\xb8\xb89\x83\xa1\xa1\x01\xf0\xf9I\xb0w\xef\x1e*6\x0ed\x83\xf8\xaa\xb7W\x85%\xf8\x19\x83\x17\x1e\x92\x91\x8el^\xdb\xb9\xa9\xa9)5\x0ez\x05\x05\x05\xb5\x22\x91\x08\x1c\x9c\x1c\xc0\x19/\x9c\xbe\x81>D\x1d\x89bQQQ\xed\xa8_\x9c\x92\x92\xbaE 8\xef\xc1\xe7'\xfb\xaaTj,\xc3\x0c\xea\xeb\x1b -M\xa8\x01\x8dIF:\x81 \xd9\x8flss\x0b\xd7\xff\xa7s\xe4jr\xceE\xc7\xb2\xdc\xdc<pp\xb4\x07G'{00\xd4\x07l:Xbb\xe2S\xd4\xafA\x18\xca\xe5m\x7f\xf6\xf5\xa9\xe1\xd1\xa3^\xe8\xe9\xe9cG\x8e\x9cb\xbe\xbe\xfb\xc0\xd7\xf7g9BFc\x92\x91\x8elh!2Y[\xff+S\xd2\xc9\xc9\x89\x839<\xad\xba\xba\xba###\x83\xd9;\xda\x01\xc1\xc8\xc8\x106\xae\xdfH\xe9\xc6\xb0\xacR53\xb6\xb5\xe5Q\x8a9\xd6\xd4\xdcS\xb5\xb7wCRR*\xdb\xbb7\xb4\xdb\xdb{\xb9\xd9\xd0\xff\xd1\x98d\xa4#\x1b\xb2\xa59>>k9#\xc5\x5c\xc7\xc2\xc2\xc2\x1c\x1b\x09\x15\x9f\xcfg\xb4k;\x07\x1e\x18\x1b\x1b\xc1\xc2\x85\x1e\xac\xa8\xa8\x08\xcc\xcd\xcd\xc3\xd0\xf4S\x1b\x1b\xde\xd0\x1f\x18\xc8d\x0fA*U@dd,l\xdf\xee7\xd7\xcf\xcf\x7f\xf8?iL2\xd2\x91\x0d\xd9\xd2\x9c\x97\x9c\x8f\x1f?^\x07\x9b\x8a\xb9MMM\xfdH\xa9\xcc\xd1\xd9\x01xv\xb6`\xc45&\x0e\x87\xd2\xd2RX\xb0`\xc1\xaf\x83$\xa3!\x97\xabWo\x96\x8b\xc5- \x91(\xd9\x9d;\x0d\x10\x17\x17\xff\xca\xa4'\x1d\xd9H\xa5JFs\x84\xc2\x82\x1b\xff\xa2V\xe4lW\xa9T\x0a\x87\x0f\x873'tn\xcb\xb3\xd1\xec|\xfa\xe7\xd35M\x06\xf2\xfe\x95\xc1\x5c\x1f\x8e\xdd\xbe}\x11\x17CB\xa2\xe5\x02A\x1ede\xdd\x82\xf4\xf4\xecW. 
##[c\xc3\xe7\xe7AH\xc8\x09\xf9\x9e=\x87\x934\x0a}}}**\xee\xc4n\x7f\xd7\xecT\x98e>K\x13s\xee\xc7\x5c,\xa9e\x10\x14\x14T\x81\xa66#4\x95\x14\x06C\x81 \x07o|!~\x03(\x01+\xe2\xf7/:'\x19\xee\x5cc\xc3\xe7_\xa3E\x1a\x0e\xb7\xe7\x93'O6!\x82\xb1\xb3\xb3\xa3R\x19\xe4\xef\xef_\x8b\xf1\x07]]]())\xa1\xaa&\xa1\x0f\x9bA^\x7f\xe99u*~]zz\xe1\xb3\xec\xecR\xec\x82\xe4L\xa1xH\xad\xf8\x8e\xe0\xe0\x80\x09\x04\x1a\x93\xec\xde=9\xcb\xca\x12\xe1\x22\xae?\x8b\x8bKX74\x7f\xec\x993g~\xc2\x9b-\xa5\xec\x1b\xacXK\xb2\xb2\xb2\x88b)\xdd(\xd7\xbf\xa1\x96\xfbU\xcd\xc8\xf9\xf3\xd9=Ba\x09\x14\x14T\xf4\x97\x96\xd6\xb1\x96\x96\x07H:j \xa7\x84\x9e\x1e5\x90L$\xaac\xf9\xf9e\xfd\x99\x99\xc5\xc8\x07Y=CeY7$$$0??\x9f\x1c\x19L\x9c8\x91\x8e\xc5\x9cv\x9e\x96\x96\xd6\x8bc\x1f\x0e\x87\xf3\xe1\x7fq\xc6\xaaU\xeb\xe7\xe1\xcb\xd2\xda\x9a\xe7\x85\x97l\xa0\xb1Q\x8e7\xfd>tvvj@\xe3\xc6F\x19\xf6\x87\xe2\x01++\x1e\xf1\xba\xa5\xb7\xf7\xbay\xc3'@9I\x05%;;\x9b\x8f\xdc>\x1fQ\x89E\xa7\x0fC\xf0\x1d:7|]\xe6\x0c\x0b\x0bw\xc5\x9dBuu\xcdc\x89D\xd2\xa7R\xa9\x80\xd0\xd4$\xe9\xab\xaa\xba\xfd\x18\x09\x0b\xc8f\xa4\xb9\x93<<<\x0e`\xb1\xe9\xc3]\xc3\xc1\x83\x07\x1b\xd0\xf9:tn\xfc&u\xc3\xd7w\xc7\xfc\xca\xca\xdbJkkk7\xbad\x0d\x0d\x0d\x9d\x04\x1a\x93\xac\xa2\xa2JI6#v7\xe8\xec\x13|\xbb#\x96!l\xf1\xf7\xdb6\x8d\xe3<=\x97in\xf7\xe5\xcb\x97\x93\x084^\xba\xd4\x933\xd2g\xd9_\xb3\x07E\x0c}\xcc\x1c\xcc\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x06d\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x05\xe1IDATx\xda\xc5W[lTE\x18\xfe\xe6\xdcv\xb7\xbb\xddv\xb1-\xb4\x05\xa5\x11\xd1\x8a\x82HI\x04\x1e\x00\xc5H4\x12\x89\x11\x02<`\x8c\x06\xf1\x12\xa5\xf2\xe0\x0bb\x13\x8d\x09\x09\x98(1\xe1\x92(>\x11\x8c\xe2-\x12.\xb14b#\xa1\x11\x12.*\x88\x94;\x94\x16\xe9\xeevw\xcfm\xc6\x7fz&)\xcb\xeev\x0b/~\xc9\xb73;;s\xbe\xef\xbf\xe4\x9c\xb3L\x08\x81\xdb\x00#j\x8aLQB(\xf2\xa1qd00\x02(A\x93\x18!V\x12c\xc4\x0a\xb5\xc6\x88\x1e1KL\x13\x93\xc4\x0c\xd1\x19\x89\x11c\x04\x11[J\xb0f\xeb\xee\xa3\xb3O\x9c\xbb\xbeP\xd3\xf4\xc9\x95\x15V\xbc2\x12\x0ak\x1aC\xc6v\xedT\xd6M\x09\xc1O47&v.\x9f\xd7\xbc\x1f@\x8f2c\x13K\xa6y\xb8\x12h*\xe2\x9a\xd6M\xed/\xf5\xa5\x9d\x95\x93\x9a\xeaj\xeb\x13U0t\x9d\xa8\xc144h`\xe0\x22\xa8\xc0@\xceEO2\x8d\x93\xe7\xfb\xaeWG\xcd\xcd\x1f\xbd8k\xab2\x92!\xfa#6\xa0\xc4+\xd6\x7f}p\xda\xbe\xc3\xe7\xb7L\xbc\xbb\xf6\xbe\xa9\x13\xc6\x81\xfb\x0c}i\x1b\xa5<KC\x89\x98\x09&8N\x5c\xba\x86S\xe7z\xcf<\xf1p\xe3\xab\xaf/\x98zP\x95\xc7\x1f\x89\x01F\x8c,i\xfb\xe6\xd9\x8b\xc9\xdc\xe69\x8fN\x8c\x8f\xabM\xe0\xea\xbf6n\x07c\x12\x16\xae\xf6\x0f\xe0\xd7c\xdd\xe9\xc6x\xe8\xb5-\xef\xcc\xdf\xa92\xc1\xcb\x190\xe7\xbc\xf5\xc5\xd3\x0e\x8c\xaf\x9e\x9b=\xc5\xf4\xb9A5\xf6p'\xa8\xb0\x0cD\xc3\xc0\xb7\x9d'\xdc\xa8\xc6\x97\xfd\xf0\xe1\x0b\xdf\x03\xb0\x873\xc0\xa6\xbd\xbc\xb1\xf9r\xd2;\xb0l\xfe\xcc\x84\xa6Y(\x85\x09\xf5\x95\x98\xf7\xc8\x18H\xec;r\x05\x7f_N\xa1\x14b\x11\xe0\xcb\xdd\x87n4\xd5D\xe7\xecY\xb7\xf4\xe8\xcdY\xd0p\x13\xda\xda\xda\x18u\xf9\xb6\xc9\x13\xc7'\x98f\xc2\xf1\xfc\xa2\xcc9\x0e\x9e\x9f9\x16M\xa3\xa3\x92r.\xd7J\xee\x1f\xc8\x09\xcc\x98\xd4T\xddu\xf2\xf2\xe7\x1d\x1d\x1dJ\xb3\x88\x81\xf7\xf7d\x97TVV\xb6475\xd2!\x8f\x0e\xf3\xa2\x14\x5c\xa0::\x94\x1d9\xe7\x9c\x97\xdc\x9f\xcazh\xa8\x1d\x85\x9aD\xd5\xd4\xa7>\xe8XZ\xd4\x00#@\xd3\xd6\xcemy\x08\xa9\x8c\x0f\xdb-\xcd\xfe\x8c\x8b\x03\xc7/\x03\x0a\x07\x8e]B\x92\xd6
\x86;\xd3\xdb\xef\xe0\x89\x96\x07`\xfbXCJ\xac\xa0\x07\xd8\xf4\xd53j\x1a\xea:\x9fy|\x168GY\xd8\x94\xf2\xa6\x9a0$\xce\x5c\xcb!\x14\xb2P\x0e\xf1\x88\x89\x1f\x7f\xe9\xc2\xd9\xd3\xdd\xb3\xc4o\xeb:\xf3\xef\x84\x9a\xbfxtm\x1d\xd5\xd2\xc7p\x10\x82\x03\x22x\x0c\xfcs\xcd\x96!(C\x1e\x98\xc60\x1czS>\xee\x1f\xdf\x88\xb3\xdd\xe7\x17\x03\xb8\xc5\x00c\xb3\xe2\xf18\xa5\xcbC)p\x9f\xe3\xbb5\xf3P\x0c\x0b\xda\xf6B7u\x94C\xac\x22\x22\xb5f\x16{\x16\xd4\x99\xa6>\xd8\xb5\xa5\xe0{\x02\xa5\x90\xf38L\xdd\x90)\x22\x96~ Z\xda`\xe1\xeb\xf2\x9a\x90\x11\xc00J\xd3\xc8\x80\xcb\x0b\xe9\x11}\x06\x17\x0cO\xbe\xb7\x17\xb3\xdf\xddU\xd8\x13\x1e\x06\xf78\x5cR\xcd\xbd\xbc\xeb\x908\xc91\x00\x02w1\xc2\xad\x19`9G\xc0\x12>\xf20xH\x12D]~'\x01\xbf\xd0\x00\x17\x00\x89\x13\x82Q\x08D-\x13a\x13\xb8\x91\xce\x12mt]M\xe1\xc6\x80\x0d\x0c\x9d\x0f\x0c\x08\x02{\xac5\x99\xcaf\xa31\x16F\xa0\x86@XH\x13,0\x02\x22\xd7H\xacHj-\x1d\xe1\x88\x05.\x8dP\x10\xe9\xac\x8d\xd3\x17\xfaq\xb17\x09\xf8.\x94\xa8*\x91\x97\x14\x84\xfc\x0c\x08q\xe3xwO=3L\xe8\x8c\xc1\xb4,\x98f\x08:Ea\x994\xa71\x12\x0aA3\x01;\xcba\xdb\xb64N\x82|p\xec<\x9d\x86oy\xa0\x85@\x8c\xd3\xdc'\xea\x06\x14\x82u!\x7f\x17\xfd\xc5\x9a\xf0w\xf8v\xb3\xd0M\xc8^\xf3\xe8#+7\xfbD\xc7\x03r\x0c\xd0\x01h\xf4a\xbb\xc8d2\xf9\x0d*KjXJ\x5c\x93\x0b\x18\x0a\x0eJ\x98\xe8\xe5\x02\xad\x82[\xb1'\xb6\xc3N\x01\xcc !Yk#\x10\xd3\x0d\xa2y\xd3h\x05\xeb\xf9P{-E3\xff\x9c\xa6\xaeg\x84\x00;\x09pl/4p\xef\xa5]\xc8%\xfb(\xd7\xaa\xd9\xf2Xp\xd1\x02H\xd3F\x9ex\xe15\xa8\xbc\x83\x1aM\x17\x7f*0 v\xec\xf0!\xf8'\xe8;\x05\x84cA\x03\xaa\x83*\x1bA\x0a\xed\x81\xc1\xda\x16\xc0u\x81l2\xa8\xbdf\xe6\x9f\x934+\x80\xde?\x01!>\x95ZE\xdf\x07\xd8\xa2E:\xce\x8e=\x8f\xba\x09\xf5\x88\xd5\x03:\x0bRj(\xba6\xae\xafo\xc1p\x18\xb5\x9a\xca\x1b\x8a\x02\xbe\x03x\x8a\xb2\xfe\xfd\x17\x80+'\xaf\xe0\x9e\x0bc\x95\x81\xa1\x0c\xe4e\x81\x8bE\xe8=e\x83q\x95\x05\x22\x93T\x11\x95\x83\xaa\xb9\xdc\xaf\xce\x07\x06z\xfer\xe0\xfb\x8b\xa4F\xd9\x97R6}\xd5JD*6\x22\xde\xa0\xa1\xbe9\xf0iX\xca%\x94\x08QWB\xdc'\xba 
\x01e\x98\x0dE\xde\x99\x05\xaa\xf7sd\xb3o\x8aC\x1b>\x1b\xe9[1\x99h}\x03\x86\xd8\x80\x86)&j'\x02\x0cA\x17\xabr\x0c\xcdM\x12sU\xbam5:\x81\xa9\xabT\xf3\x8b\x87]xh\x15\x87>\xdeX\xf6\xb5\xbc\xd0\xc4\xdbs\x98\xa6\xed\x14\xd1\xaaj\x8c\x1a\x0f\x8c\x9d\x1aD\xa7\x9bR|(\xddR\xcc\xf7\xa4\x91 \xea\xfd\xd7\x80\xaa\x9f\xa1eS\xfd\xdc\x15\x0bE\xd7\x86\xf6\xdb\xffc\xa2\xc0\xe6\xb6\x19F.\xb9\x1e:[\xe1\xc5F\x87\xd00\x19p\xb2@\xa4\x0a\xa8\x88\x07\x86ri \xd3\x1fd\xe5\xc2\x11\x98\xb9^\x87q\xb1\xd91c\xabD\xfbZ\x0f\x842\x06\xca\x83\xb5\xac0\xadpb\xb9\x1eb\xafp\xe0A\x1f\x08s\xcf\xd7\x85l5\xd3\xf0u\xc6r\x1a\xf0\x07w\xf8&g\xe0\xfa6\xd1\xb5\xc9\x85BY\x03w\x0a\xc6\xe6\xaa\x87Y\xfbpQ\x967\xf0\x7f\xe2?\x0c\x086\xefe\xe9O6\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x09\xdc\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x09pHYs\x00\x00\x1b\xaf\x00\x00\x1b\xaf\x01^\x1a\x91\x1c\x00\x00\x00\x07tIME\x07\xd8\x04\x15\x13\x15;8 '\x96\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x09\x5cIDATx\xda\xadV\x0b8\x94\xe9\x1ew'\x8a\xa8\xa8-\xa2(\xb5\xdbm\xb7-\xb5\xdb\x9e\xce\xa9v\xd3])GW\x94\xd2\xb6\xc5VRT$\xd5)\xa1\x96\xc6\xd4\xd1\xb8DC\xca\x1e\x16\x0d!d\x98$\x8cK24\x0c&\x97\xca\xe4\x9e\xdb\xfc\xf6\xfd\xde\xe79{N\xe7\xd9\xdd.\xce\xfb<\xffg\xbe\xef\x9d\xf7\xfd\xff\x7f\xff\xeb\xefS\xf8\xb3\x95\x97\x97\xa7,\x16\x8b]\xea\xea\xeaBkjj.<{\xf6\xcc\x99\xbc\xdb\xd5\xd6\xd6:\x92\xf7\x93U\x0d\xb2\xdbD\xe2\xc8~\x14\xd9\xe7\x12\x89!\xfb7\xa4R\xe9\xf9\xe6\xe6\xe6\x05\x00\x14\x86\xb4\xca\xcb\xcb\xbf\xcf\xca\xcab\xf9\xfa\xfa.\x88\x08\x0b\xb3\xca\x0a<\x11\x9e{~os\xee\x9d\x1b\xc8\x16J\x90]\xde\x82\x9c'/\x90SR\xd7\x9f\x1f\xc1\xaa\xcc\xdb\xf3MT\xcaAk\xa7\xe4\xc4\xc4\xb5\xe4nZCC\xc3\x86!\x01HOOO8\xb8s\xdb\xa2\x84U\x93\xce\x94\xed\x9c#}~b\x05Z\x03\x1d\xd0\xf5\xf8.z\xfb\x07\x89\xc8\xe9\xef\x9b\xbeA\xba\xd7ze\x17\x1a\xbd\xd6\xa0\xd2\xe9\xcbZ\xbe\xeb\x86\xa8\x22\x01\xff\x0cQ\xa3\xf8Q\xc6G\x8c\x18\xa1\x1cz=$\xff\xc9\xae95-\x
e76\xa2#\xd2\x15ox~\xe8\xcf\xbb\x8e\xce\xdab4\xb7\x03\x8dm\xa0\xd2B\x9e\xbb\xc8^\xbf\x80\x837\xa9\x01\xe8\xe4\x1e\xc5\x8b\x8b\x9bQ\xb9\xcfB\x92\xb4j\xe2J555\xe5\x0f2>FOO5\xdb\xca\xc4\x87{\xe9\xcc`{\xa8\x0b\xde\xdc\xbb\x84\xc1\x820\x80\xef\x07$\xeeCS\xee-\x14\xd5\x01\x8f%\xa0R\x5c\x0f4\xe7F\x03\x09{\x80\x07\xe7\xc9\xd9P\xf4f\x04\xa1\xe3\xc6aH\x8f[\x0e\xf27M\xf1at\xbe\x97q]]]5\xfe\x86\xc9\xecF\xaf\xd5\xc8\xb8\x1dN\xbdBn\x00\x10k\x0b\xf9\xa5)\xe8;\xa1\x89\x92\x08/D\x0b\xbaq\x83\xdfI%&\xbf\x1b\xa5d\xaf\xcfC\x03r?\x13\x80kE\x81\x0c\xe4\x87\xa1\xfbgo4\x9f\xb3\x86\xc0v\x1a\x9b\xd1\xfd\xa7\xc6544\x94\xd3\xd7\x1a{?\xf7ZM.\x9eBAz\x02p\xef(\xe4\xac9\xe8uW\xc7\xcb\xfd\x9a\xa8t\x18\x0f\xeeIw\xec\xbdR\xd8c\xef{\xff\xa5\x83o\xe6\xcb}\xac\xa2\x9ehO\x0f<u\x98\x80\x17?h\xe2\xcdQU\xc8/O\x03\x92\x9di\xe4z~9GAdn\x9c\xea\xcd\xd8\xf8#\xfb\x8aQK\xc6[J\x0e\xffu\xa0\xeb\xd6q\xf4\x14p!L\x0e\x81\xfc\xe2Dt\x1eT!\x86G!\xcd\xc6\xb4\xf7\x8a\xe5\x94\xf4\x8d\xf3\xcc\x0f\x18\x19\x19-\x1a7n\xdcg\x8c0\xcf\xd6_N;\x10d9%-\xcd\xc6\xec\xcdS\xfb\xd1hwQ\xc5\xe0\xb9\xb1@\xbc#\x03\x82F\xa2\xe1\xb8\xe5\x00w\xb9\xf1\x8a\xdf-\xccE&ctJ\xecfV\xb6]\xdfO\x0b\xad^\x98\x89\x86\x80\xc5TQ\xe1\xd6\xd1\xb8\xbe\xca\xa4\xd4r\xa6\xf1\xfa\xe1\xc3\xb5F\x93\xe3\xeaD\x18O\x94\xa8\x90gf\x8f\xf9o\xf9\x0c\xe3\xf5\xa1\xab'\x95?&w^\x1f\xa0 h$H:hM<\xd9;_\xb4\xc8D_\xe7\x7f\xed+\xc5[\x1a:7\x9d\xb6BO\x8a?\xa4%\x99\xa8\x08w\xa1\x9e\x0b\xb7i#\xe0\xdb\x09\x19\x93'\x8c5WTTT{G[)2g<\xdc\x5c\xad\xb8\xcek{\x8a\xb6\xe8\xa0\xe3G\x15\x9a\x0eR\x13La\xd2\xeeH\x5cg\xe6l``\xa0\xf4\xdb\xadQ\xa3Fi\x15\xed\x98Y!\x8bpEy\x0e\x0f\xa5a\x07\xd1\xe3\xae\x81zG\x0d\x5c]>\xa6\xd4t\x82\x81\x199\xa6\xf2\xbe]\x14\x1b\x1b;+\x85w7\xfb\xear\xfd2\x89\xc30Z\x13\xa40iwtr\x8f\xa1\xfc\xc8\x8a6\xa1PX\xd0\xd4\xd4\xf49E\xfd\x8f\x05\xe3\x16\xd5\x9c\xb6\x85\xe8Q&\xa4\xbf\x9c\xc1\xe0O\x9f\xa2\xcdY\x15\xf7\xad\x87\xf7\xad\x9b9v\x159\xa3\xfa!m\xccf\xb3\x15SSS\xb91\x9c`i\xc6&]\xc8\xf6\xab\xd1\xee Q 
-\x1d\x80\xc6Sk\xc0\xf5>\xe4)\x91H\xfc\x99\xf3\xca\x09\x9e\xdf\x9f\x11\xe5\xa6B\x96\x1e\x04\xa4\x1cF\xbf\x976\x9awk h\x99v\xcap-\xcd\x11\x1f9\xcd\x14\x0d\xf4\xf5\xb5\x03\x97\xea\xa44\xee\xd2\xa0-\x8a\x84\xdd\xb4\xadedb&\xed\x5c\x1c\x22\x12\x89\xe2\x15\xfc\xfd\xfd\xf5\x8b\x85\xc2\x966\xd1c\xc8\xea\xaaP[\x94\x03\xfe\xad\x10\xb0\xcf\x05`\xcb\x92\x85\x0e\xb4\xc0>~)o]\xf6\x95C\xe8\xc5\x9f\xf0(\x8e\x83\x86\xc2\x0ctH\xab\xd1\xf5T\x80\xac(\xce\xfd\xd2\xd2\xd2,\x05\x81@\xf0\xc3\xd3j\x09\x9d\xebR\x19\xf0\xa8\x16\xe0d\xb7\xc3\x9eU\xd1?q\xb2\xb99\xf5\xfe\xe3\x97\xa2\xa9\xa9\xf94\x17\x8eh\xe0V~\x0f\x99\x98tlS\xee\xc8\x17VW\x14\x15\x15=P\x10I\xdb\xef\x16\x8a_S\x00u\xad\x80@\x0c\x04\xa7\xbf\xc6\x16\xff\xa2VR\xa9c\x15\x86\xb8\x18\x1dN\xc1\xa5\xb2\xc8\xdc.\x14J\x80\xa66P\x02+\x16\xcb\x06D\xf5\xad9\x0a\x05eb\xaf\x92\xea\xa6\xdf\x00\xe4Q\x002l\x0e(\x96\xe9\xeb\xeb\x8f\x19*\x00F\x87Sp\xd9\xeb\xc8\xdcN<\xfe/\x00\xc2\xaa\xe6\xde\x8a\xea\xfa\x14\x85\xd8\x98h\x9b\xd2\x92'\x14@\x83\x0c\xc8\xaf\x01B\xb2\xda`w\xa5r`\xea\x17KL\x87\x9a\x82\xd9\xf3\x96\x98\x1e\xe0T\xcbc\x1ev\x13\x02\xfbO\x0a\x9e\x88\xea\xfa233\xfd\x14\x92\x92\x92.\xf3\x03O\xd4ud\x84\xa3\xa3\x92\x8f\x17\xd9\x1cT\xb0v\x22\xf1\xd8V\x5c\x5c=w\xf3P\x01\x5cX=\xcf\x96\xe7\xbe\x1d\xa2\xe0\x9dxE\x06Qw\x15\x1fm\x8fy(,(\xe8\xe0\x84\x5c\x9b\xaa\x90\x9c\x9c\x9c}g\xc3\xb4K//\xd8\xa0/\x9bM\xc9g\xc0[\x17/\x9c\xd4!X\xaf\xf3\xf3\xb0a\xc3\xd4>\xd6:s7o\xdd\xc8\xf8&G\x0d\xca\xa2\xfdI?\xa2\xa5$\x03\x0f\x93b\x10\xb9o\xcd5]\x0deU&\x02\x0f\x8e}3iU\xc3\x91\xbf\xc8{\x12\xceB\xfe\xf0\x0ae\xbfv\x17\x15\xd4n\xd3\xeag-\x1ee\xf1!Q\xa8\xaa\xaaR\xc9\xc9\xc9\xd9E\xc2\x1bt\xff\xee/\xa7*O[\xf5U\x07m\x85 
\xca\x07\x82\xa4\x9b\x10r\x8eCth\x91<x\xa5\x99\x0d\xe5\x11\x1e\x8f\x17y\xe2\xa8\xab\xc5\xc3\x1d\x9f\x89\xdaB\xf61\xa4A\x19\xac\xff\xa4\x16\x8dB\xd9&\xed\x22\xfb\xcf\xf4t\xdf\xc78\x00f\x0c\x07q8\x1c\xefk\x17\xcf.\x7f\xe8\xb1\xf6U\x89\xdf\xdfQ\xed\xff-Z#\xb7\xa1';\x10\x1d\x91\x87!\xdc\xf3E\x9d\xcd\xacq\x13\xa8c\x04\x80}||\xfcA\xd6\xf2\xc9'\x1a=\x96\xa1'\xf9\x02\x89\x02\x0b\x08\xf9\x1a\xdd\xae*h\xdc5\x0cBkm\x9e\xf3\x1c\x02\xe2\x1d\x91HKKS!\xc6\x05\x87\xe6\xe8\xe9\x15\xaf\xd7I\x95:hRB\x93_\x99\x0d\xe4\xfa\xa37\xed\x12Z|\xd6\x22\xd6j\xca\xf9\xe1j\xcaj\xff&\x8e\xe1\xf7\xee\xdd\xcb\xfbv\xfe,3\xfef\xf3\xaa\xd6@{\xf4\xe5\x5c\x03\xee\x9f\x22,f\xce(\xa0 *6\x8d(\xf9\xd7w\xba\x0b\xd5\xd5\xd5\xffp2\xde\xbcy\xd3\xe8\xfa\xf9\x93\xc5\xa5\xd6\xdaeRBB\x1d$\x8d\x83\xbe\x13\x81T7:\x82\x99\x08\x17\xda\xcf\xa8\xb3\x9d=\xde\xec-gx\xbc\xbb\xfb\xe3\xe2\xe2|\x5c\xbf2\xde^\xb9{Vo{\xf8\x8f$\x15\xa1\xb4 \x09\x08&\x124\x1d\x92mZ\x83Dy\xd2O;\xac\xed\xe6;\x86\x99\x19\xcdX\xac\xc5\x88\xc5\x9ep\xb3\xcb;6\xee\xc8w\x9c\xc9\x8b\xf2p\x80\x94\xcc\xfe\xaeC\xc4\xf3\x8b\xc6\xa0_E\x8f\xc2\xd0\x19\xe3\x0e\xf1\xfe\xf9\xfd\xfeKM\x9cT\x95\x14\xdf&7777\x05\x92\x06N0\x9b\x15\xc1\xb2Y\x98(\xde\xf7\x85\x9c|\x05S\x10$\x124\x1d\xa4&\x98\xc2\xa4@nz\xb9\xc2.\xb8\x0a[\x02\x9f\x0e2\xe2\xc0\xaeB\xf4\xa9#\xa8w1D\x8c\xe7\x0et\x1f\x1b\x06\x04\xcf%\x0e\x1c\xa3\xc6\xbbn\x9fD\xdd\xc1\xaf\xe5\x91+'\xfd\xd3DOK\x9bz\xff{\x05\x14\x16\x16\xfa7w\xf7\xa3\xdc\xab\xf6\xdf\xc9\xaaw\xcf\xc6k\x122\x92\x0eZ\x13\xa40iw\x0cx\x8fDa\x98'\xaeg\xb5\x83MF6;\xe35B\x09w\x14\x85{\xa2\xc3\xc7\x08q~.\xe8\xbf\xb5\x05\xc8\xbbL\xbf\xac\xda\xc9WP\xcd\xfeyr\xee\x0a\x93\xdb\x9f\x1b\xea\x8e\xa5\x95\xff\x8e\xa54w\xc6\xf4\xf1~KM\xd9\xc2\xcdf\xbd\x8d\xa7\xd7\xa2;\xfe\x0c\x8d\x06iQ\x9a\x96\xe7\x82x:1\x05bF(\x81\x91\xbd\x9fI\x01\xbb!=\x9a\x85^\xfeUZ\xcc-\xbe\xb6(\xdf>\xad\xef\xea\xd2\x89!3?\xd1\x1b\xf7\x96\xf1w\x81\xd0\xd1\xd2\x1c\xb9g\xae\xd1\xae\xe4UFb\x91\xddt4\xfa\xacGG\x8c\x07z3Yh{VL\xc7v}+\x15\xca\xa2\xed\xe2btd^\x83\xe0N\x08\x9a\xceoD\xb5\xe3,\xa4\xad\x99(q\x
b30\xfc\xde@Gk$5\xfe\xa1cTIII\xfdSC\x03\xf3\xe3\x0b\x8d\xce%X\x1a\x8a\x85\x1b\x8d\x07\xab\x1cg\xe3\x15/\x8c\xe1\x8e\xb7\xe4Uj\x04*\xbd7C\x10\xe81xw\xa5\xa1\xe4\xec\xd7\x86\xbe\x16&\x06\xd3\x95\x89\x8e\xa1\x8ds\x82\x5cYYY\xd3d\xech\x93=\xcb,\xf6F\x1c\xb0\xadO\xf2\xf7\xec\x7f\x90\x9a\xd2%|R;P.j\x18(x\xf0@\xc6\xe7\x04\xc8\xf2\x13c\xbb\xddwo\xdf;\xfd\x93\xd1\x93T\xc8\x1d\xea\xf5\xffoQe\xaa666zQQ\x91vwn\xc7\xb2\x09\x95\xc7\x95?{\x9ePPP\x10\x9d\x9b\x9b\xeb\x17\x1e\x1e>\x879\xf3!\x86\x7f\x05R\xffY\xc6P\xfb5\xbd\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x07\xe4\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x07aIDATx\xda\xc5\x97kl\x14\xd7\x15\xc7\xff\xe7\xce\xcc\xee\xec\xae\x1f\xeb\xc7b0&<dg!$\xd0\x00\x01\x85*\x12R$T\xe5[\xd5\x82\x22\xd46Tj\x9b\xb6(\xad\x08}$j%D\xd5\xa4\x1f\x92\xd0*i(\x8a\x92>\x92\xb6HEM\xdb|H\xa5VQ\x9b\x84\x846<\x02\xa6\x84\x87\x036`\x9b5\xbbk\xf65\xbb;3\xf7\xde\xd3\x1bwea\x84\x8d\x1b>\xe4'\xfdt\xe6\xcb\xcc\xf9\xdfs\xe6\xc3\x0c13n\x0d\x22\xe0\xe3?d\xd6\x01\xf6\xef\xdfl\xf5\xa5\x06\xee\x8b\xda-\x9b\x1cw\xfez\xd7\xed\xe9\x8e\xc4\x17t\xd8v\x1b\x05\xfe\x95b\xe8]\xcaH\xef\xd2{~yp_\xc6myc\xc3\x86\x7f\xc8Y\x07\x18\x1d\xfd\xc5]~\xf5Lyq\xef\xcf\x86p\x1dG\x8e\xac\x89;U\xf5m0\xedH\xba=\x1d\x89\xb8\x83X2\x0a;\x96\x82\x15o\x07!\x0a\xe5\x87\xd0\x81BP\xae\xc1\x1b\xbf\x82|\xe6\xc0U\x84\xea\xa7\xca\xab\xee^\xf1\xc5\x8cw\xd3\x00\xe7\x07\xb6\x0fF\xb5X\x94+\x1e\xd8\xbdr\xed\xbfw\xa0A\xff\xdb\xab\x1e$\xf0\xee\xb6hj^\xe7\xc2;a\xb9\x0a`\x1f3B\x11\x13\xa6\x03\xc5\x8b\xe7\x90\x1b|k\x8cX~w\xd9\xa6\xe1Wf\x0cp\xec\xf0\xbd\xc5\xa5=\x1b[FG\x8e\xa2\x223O\xadXw\xf8\xb1\xe3\xef\xae~\xd2\xd5\xf6\xf7\xe7u-F\xb45\x09\x80\xf1\xff! 
\xab\xcd\x18;}\x00\xc5\xec\xd0\xb3w\xbb\xa3\x8fb\x13\xab\x1b\x068~h]1=o}\x8b\xd6\xc0\xe5\xcc\x19x\xe1\xd8\x99\xa4\xdb\x9b\xee\xec\xec\x06Y5\xdc\x12\x9c\xc0\xc8\xc9S\x188u\xf8o\xb1R\xf6\x81\x0d;\xa7\x86\xb0\x01\x03\x03\xdaW`\xf217\xb5\x0ca\xb04\x1d\x89\x02\xac\xabF`\x12\x86\x81\x1b\x95\x00\xe5\x80\x1c\x0b\xac\xeb3L\xa8\x82\xeet\x1f\x8a\xa5\xda\xc6S\xf9c/\x01\xd8:5@\x03\xa5\xd4\xe4\x0d\x96\x00TH\x00Mm\xae\xc3\x00\xb5\xd1\x8b\x08\xc73\x90\xf9,d9\x0b\xb2,D\xda{`\xb7\xce\x81\xdb\xb5\x08\x91\x8e6\xe0\xbaI3\xaa\xe8[~\x07\x06\x07\xc7\x1ez\xe1\xd1\x96\x13_\xdb]zf\xea\x0a\xfe\xb5\xb6\xb8\xa8yU\x0bC\x81D\xa3\xab)\xdc\xc8\xc0\xc6`|\x04\x85s\xefA\xd7\xab@\xa8AF\x0e\x19\xac\x196K\xa0A\xacg9\x9a\xd2kA\xf6\x0dfQ\x92x\xe1\x97\x7f.\xcf\x8d\x89\xdb\xbe\xb1\xa7P\xb8f\x05\x0c%%\x88\x18\x9a\xc9T\x00D\x13\x01\x98%*#\xef\xa3\x9e?\x0fX\x80o\xdb81b\xe1|\x86\x90)\xa0Z\xab+P\x18\xc6\xbb\x13\x0a\x9fI\x87\xe8\x1a>\x09?;\x84\x96\xe5\x1b\xe1\xb46\xe3Z\x12.!\xdd\x97n>r\xf4\xe4^\x00\x0f\xc2 \xd0\x08\xa0\x95\x842\xb2\x0a\xa1\xb5\x91C\xb0\xb1<v\x14~m\x08\x14\x17\xb8\x5c\x14x\xf5\x90[\x1f\x1c\x8f\xee&\xc7M\xef\xd9\x97O\xfc\xeaO\x85D\xcf\xdc\xc4\xa7J\x22\xf6\xdc\xee7]\xff\x9f\xe7l\xa8\xba\x87R\xff_!=\x1f\xda\x0f\xa6x\xdf\xda^\x5c\xcc\xf2gw}\x99\xdc\xc9\x09\x10\xf3\xc4~\xc9&0\xd1\xe4\xfb\x14V/#\x0c.\x98\xe6\x04\xdfc\xf4\x8f4\x15\xbb{\xac\xf5\x0f?~\xe1\x03\x5c\xc3\x8f^\xbcr\x1c\xc0\xb7\x9e\xdf\x91\xda54\xee\xbc\xd1\x9e\xab-_\x91\xaa\x8b\xca\xc0Aj\xea[\xc7`-&Wd\x01w,\x99\x1b\x09j\x99/\x00xQ\xc0P\xf75\x94\x0a\x1a\x9a\xd4\x1c\x80\x8d\xb0\x08vS\x0b\x84\xcb\x88\xa5\x18K\x17\xaa\xd6dLo\xc74l{&\x9b_\xbe\xc8\xfdt\xd1I\x5c\xf0\x94U\x0c\xaf^\xbcT\xcf\x9c\xcb\xe9\xc0/\x1bC\xa36\xaa\xbb\xd3\xed\xba\xea\xf3\xe7'WP\xae(\xf2F\xfb\x11\x14\x86\xa1\x83:\xb4\x09\xa2u\x00\x11mB\x22\xb5\x0a\x89\x8e;\xe1\xb44\xa1\xef\x9e*\x96t\xab\xaf\xbc\xf3\xbb\x85?\xc64l\xd9\x99\xf1z{\x22\xdb\xcav\xec 
\x80sAvp\xd04\x1d5\xe6\x8d%c}A\xca\xad\x14=^<\x19\xa0V\x95\xc3~9\x83j\xee$\xca\xa3o\xa1\x9a5{/_\x80\xf2\xc7\xa1\x82\x02\xd8&D\x9a\x17!\xd6\xd6\x85\xee5\x15\xb4\xb6\xe8\x1f|\xf8\xf6\xfd}\x98\x86yI\xbck\xbbN\x06\xc0\x98\xaa\x153\xca\xaf\x8f\xea\xff9f\xcc\xc6,])y\xcc\x93\xef\x80_\xd7[\x8ex]\xaf\xfa\xe3ja\xe8+H\xc9\xd0*\x07B\x96-bv,\x13\xc0\x01\xc5\x1c\x12n\xa4\x95;\xe7\xa4\x8a\xa9V\x9e\x07`\x007\xa0oK\xa6<\xfa\x5c\xe7\x98dJ\xda\xa4k\xca\xbb*\x85\xe3\x06\x00<\xa3\x9b\xb09Z\xf2\xb4\xbfk\x17\x09\x1b\x86\xcfm=}\x0c\xc0\x12L\xc3\xcb?\xe9\xc1\x97\x1e\x1b\xc6\xec!\xb0j/\x13&\x88\xc9JF\x0a\xcb\x0d\x98U\x8d\xb5t\xa5T\x81\xd4\x88\xee\xdc\xc9\xda\xc6\x0c\xfcp[\x17I\xc9B\x10h\xcf\xae\x1e\xf9\xcd\x9d\xb3\x0b\xf1\xc1oS\xe6\x90\xb2f\x11k\x00u\xe5\xe5\x02\x05\x04\x00\x5ccx\xa5,\xa2\xf1\x88h\xac\xe0:\xf6~'I1H4[\x92\xee\xefP\x94\x8c\x83\x0e\xe7\xdbD\xbe\x18\x0a\x00\x1a\xb3@\x91\xb5 \x1aA\x05\x92j`v\x01\x04\x0d\xa3\xc6\xb0P\xb3#\xf1\x88\xf2a\x10\xb8\x8e\xaf?]\xe0\x87\x9e\xae\xf0]\x1d>\xf7$\x99\x131\x81\x95\xf3|\x1a+Z\xd1\x9f\xef\xea\xc6\xcd8\xb1o\xbe\xa3-\xd1\xdbl\xeb\x12;v\x15B\x14\x00\x14\x8dW\x8d\xc30u g\x17:\x9a\xe9\xc3F\x80\x1b\xd3\xf7=f\xb6-\xc0\xb6\xa85\xae\xc5\xbdK,kx\x8c\xdd\x97\x9f\xec\xc2t\x1c\xda\xdf#\x02\xc7Z\x16A\xb4N\xba^1\xf7\x96\xcc3\xaa\xb0D\x09\xc0\xe5F\x90\xe2\xe9\x8c]hk\xa6C0\xd8\x98\x86\xb3\xcfF\x89-\x1b0!\xd8\x12zEwU\x89h\x92\x8f\x0f\xd7\xad\xbdOt\xeb\xf6\x16\x877?r\x010\xfc\xfd7\xb7\x91\xeb\x88\x88\x9b\x88\xa5\x1c\xaf\x02\x11\xd4=s_\xc8\x80C@`*\x13\xc3\x81\xd6\xceU\xdfu\xf2\x9e\xb4V/\x0cO\xcd\xf8Qz\xf6\xf98\xb1m\x1b-\xc0\xfa(\x84\xc5\xb0\x04\xaa\x1c\xa7\x13\xc3U(\xa7](\xd4ES\x9cDg2f%P\xb2\xdb\xa31\x17\xd5B+\x94\x8a\x93R\x0eI\xe5@*ej\x84\xa4\x8cBJz\xed\xec\x1c;s%\x9fy\xea/\xf2\xfd\x19'`\x1a\x22\xfdpI\x9f\xfeu'-\xdd\x9a\xe3\xc97\xfc\x8f\x0bq\xcf\xed. 
\xea\x9a\xac\xa8\xa9\x168\xf0\x00&\xb0\xaa\x86\xb0-\x9f\x88\xc0 \x07\x0cE\x0c\x9b\x99C\xb0\x08\xf3\xf56\xfb\xfce\x8fV\xceW\xfd3~\x96\x1f\x7fi\x11\xb9\xfa*\xd2_-2\xa6\xe1?\xaf\xdfN \x12`\x10\x98\x05i-\xa0t\x84\x94\x8aA\xa9(I\xa9!\x95E\xa1\xb4\xcc\xb5\xa3\xa4\x15y\xbd?j\x85\xe5\xf1\xcc\xe3\xbf\x0f30L;\x813%\x81\xcd\xdbM\xf3\x19`\xc7\x06\x884\x18\x02\xcc\x1aZ\x03BI\x10B\x02\x87lFH\x9a\x05,a\x81\x1dy&\x97\x94\xa2>\xecM6\x9f2\x81\x8fI\xff\x9b+\x09\x131\x98\xa0\x14\x9191\xa4\x12$\xe5G'\x17\x13\xc2\x16cy\xdb\x1e85\xa2Zsa~\xd3\x1f\x18\xb7\x1a`j\x88wV\x13\xb4\x86iN0^\xd3\x9c\x22N\x8c\xbclU\x8c\x9c\xbf\x22\x1fx\xc2\xaf\x03<\xcd\xaf\xd9-r\xfc\xe0\x1a\xa2p\xe2\xd3\xcc\xa8D\xcc\x8e\x8a\xe0\xe2%\x8aH\xd6\x15\xcf\xd3+\xb7\xfbz\x96\xff\x86\xb7\xce\xd0ki\x92\xd9\x1cI\xdfg[y\xdc\xfb\x08\xdf\xfc\xdf\xf0\x93D\xe0\x13\xe6\xbf\x92r\x1f*A9\x0fr\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x0a\xf0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00\x80\x00\x00\x00\x80\x08\x06\x00\x00\x00\xc3>a\xcb\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x007]\x00\x007]\x01\x19\x80F]\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x0amIDATx\x9c\xed\x9dM\xa8$W\x15\xc7\x7fg23\x89\x93\xccWf\x12\x9c\xc4\x98\x0eF\x04A\x9dD\x10\x8d\x0b]\xa9+!\x22!\xa0Dp\xe7\xc2\x85_\xe0\x22H@\x0dd\xa1n$.\x5c$A0\x8b,\x041H\xfc\x0a.f\x22\x0a&\x82\xf8\x11\x9d\xe4\xf91\x13\xcd|\xda3\x19'3\xe3\x1c\x17U=}\xeb\xd4\xad\xea\xbe\xaf\xbb\xba\xea\xbd{~\xf0xuoUw\xdd\xf7\xea_\xffs\xee\xad[U\xa2\xaa8\xf9\xb2\xa5\xef\x068\xfd\x92\xad\x00D\xe4\xdd\x22\xf2\x82\x88<+\x22\xf7\xf4\xdd\x9e\xbe\x90\x5cC\x80\x88\xfc\x1d\xb8\xad,\xfeFU\xdf\xd3g{\xfa\x22K\x01\x88\xc8N`l\xaa\xb7\xa9\xea\xe5>\xda\xd3'\xb9\x86\x80\x9b\x22u\xd7\xaf\xbc\x15\x03\xc0\x050\xe5\x86\x95\xb7b\x00\xe4*\x80\xfd\x91:w\x80\x8cp\x07(q\x01Lq\x01d\x84\x87\x80\x92\x5c\x05\xe0\x0eP\xe2\x02\x98\xe2\x02\xc8\x08\x1f\x07(\xc9U\x00\xb1\x1c\xc0\x1d 
#\xdc\x01J\xb2\x13\x80\x88l\x07vEV\xb9\x03dB\xcc\xfe\xc1\x05\x90\x0d1\xfb\x07\x0f\x01\xd9\xd0$\x00w\x80Lp\x01\x04\xe4(\x80\xa6\x1c\xa01\x04\x88\xc8-\x22\xf2\xb8\x88|]Dn\xee\xa8]\xbd\xb0\xb5\xef\x06\xf4@\x92\x03\x88\x88\x00\xbfb:}\xecF\xe03\x1d\xb4\xab\x17rt\x80\xd4\x10p\x80\xe9\xc1\x07\xf8\xf8r\x9b\xd3/9\x0a 5\x04\xd81\x83\xddKlK\xef\xe4(\x80T\x07\xb0\x02\xd8&\x22oXb{z\xc5\x050\xe5Z\x11\x89\xe5D\xb1Q\xc3M\xe3\x02.\x80*\xb10\x10\x13\xc0\x9e%\xb5\xa5w\xb2\x12\x80\x88l\xa1\xc8\xe2\x9b\x88\x85\x81\x9d\x91:w\x80\x0d\xca^\xe0\x9a\x96\xf5\xee\x00\x9b\x9c6\xfb\x87\xb8\x03$\xe7\x00\x22\xd2\xe62\x83\x227\x014u\x01',,\x00\x11y\x108.\x22\xaf\x8a\xc8\xbbR\x1a\xd7\x07\xb9\x09`\x96\x03,\x14\x02Dd\x04|\x95\xe2\xffz\x13\xf0pB\xdbz\xc1\x05PeQ\x07x\xb3)\xbfcf\x8bz\xc6\x05PeQ\x01\xecM\xdc_\xef\xe4&\x80Y9@,\x04\xc4\xba\x81M\xbd\x00+\x80\xebDd\xd0\x97\x99s\x13\x80=#\xffm\xca\xcbv\x00\x98-\xba^\xc9]\x00k\xa6<o\x12\x98\x22\x80A\x87\x81\xdc\x04`\xcf\xc65S\x9e\xd7\x01\xe6\x0d\x01\xe0\x02\x18\x14\xf6`\xbcl\xca\x8b\x86\x80\xd8\x00\x90\x0b`@\xcc\x12@%\x04\x94\xf7\x10\x5c\x1b\xf9\x1ew\x80\x8d\x86\x88\x5c\x0f\x5c\x17T]\x04^1\x9bY\x07\x88\x9d\xfd\x00;\xcb\xa9b\x16\x17\xc0\x80\xb1\x07\xe28p\xd6\xd4Y\x01\xc4\xba\x80P\xfc\xdfb\xeb\x5c\x00\x03&&\x80\xd7L\x9d\xed\x0549\x00\xc4\xc3\x80\x0b`\xc0\xd8\x1e\xc0q\xe0\x9c\xa9\x9b7\x04@<\x11t\x01\x0c\x98\x98\x03,M\x00e\x8e\xb1m\x8e\xfd\x86\x9fy\xa7\x88<)\x22\x0f\x89H\xdb<\x85\xce\xc8\xe9\xbe\x80\xaeC@\xec\xec\x87\x86\x91@\x11\xb9\x15x\x0e\xd8QV\x8d\x81o\xb6\xec\xaf\x13rr\x80\xaeC@\x93\x00v\x89H\xac+y7\xd3\x83\x0fp\x7f\xcb\xbe:#'\x01\xd4\x1c@U/\x02\x97\x82:;38E\x00m\xb3\x80b.p\x8b)\x8fZ>\xdf\x19Y\x0b\xa0\xfc\xdd\x16\x06\x96\x11\x02b\xfb\x06\xb8\xd5nS\xe6\x11+%'\x01\xd8\xb3\xf0D\xf9\xbb-\x0c4\x8d\x03\xc0\xfc!\x00\xe2\x02\xb0\x0e\x00=\xb8@N\x02hr\x00+\x80y\x1d\xc0\x05\xb0\xc1\x98W\x00\xa1\x03\xac2\x04\x80\x0b\xa0\x1b\xca\xc4.<c\xff\x07\x9c*\x97m\x0e0\xaf\x00\xdc\x016\x10\xfb\x81\xf0\xe2\xcdI\x9d\xbe*e\xbd!`\xdd\x0ePv\x0bc=\x83;Z\xbe\xa3\x13r\x11@\x93\xfd\xc3\xfaC@
\x8a\x03\xd8\x83}\xa0a\xbbQ\xcbwtB.\x02\x88\x0d\x02M\xe8#\x04\xc4\xec\x1f\x5c\x00\x9d\x91\xe2\x00a\x08h\xeb\x06\xda\x10\xd06\x10d\xf7\x1fK\x00\x01\xf6\x95/\xb4Z\x19.\x80\x86\x10PN\xf8\x08\x0f\xc6\x15\x8a\xe4q\xc2\x0e3j\xb8\x0c\x07\x80\x15\xbb@.\x02h\x0b\x01M\x0ep\x03\xd5\xc4qL\xfdUsa\x18h\xbbc\xf8Fs\xb5\xaf\xc9\x01\xc0\x05\xd0\x09m\x0e\xd0\x94\x03\xd8\xf8\x7f\x168c\xea\xf6\x00\x947\x7f\x84\x97\x82\xcfS\x15\x96\x00\xfb\x82\xb2;\xc0\x8aYO/\xc0\x0a`\x0c\xfc\xc7\xd4M\x1c\xc0\xda\xffi\xb3\x0f\xdb\x866\x07XiW0\x17\x01\xac'\x04\xc4\x04`\x1d`\xbd\x02\x18\x8c\x03\xe42!d\x19!`\x0c\x5c0u\x93\xb8\x1f\x13\x80\x9dp\xea\x02\xe8\x91\xf5\x84\x00\xdb\x1d\x1bS\xc4\xf6\x906\x078\x1dk\x83\x88\xec\xa2:\xd6p\x85\xaa\x13\x8f\x88 \x22o\x03>\x08\x1cQ\xd5\x9f\xc5\xb6Y\x0f\xb9\x08 L\xc0\x148\x19\x94SB\x80u\x8b6\x07\xb0!`\x12\x86\xec\xd9\xff\x12EN0y\xf6\xe0^\x11\xd9\xad\xaaW\xf3\x8d\xf2\x913?\xa1x\xfe\xc0e\x11\xb9WU\x7f\xc4\x12\xd8\xf49\x80\x88\xec\xa1\x9a\xa1\x9f1o\x09O\x09\x01MI\xa0\x1d\x04j\xcb\x01l\x02x\x94\xfa=\x8a#S\xfe$\xd3\x87Ol\xa5x\x0a\xc9R\xd8\xf4\x02\xa0\xdd\xfe!\xad\x17\xb0\x8c$\xd0:\xc0<\x02x\xbf)\x1f\x14\x91\x83,\x81\x1c\x04\xd0\xd6\x03\x80\xb4\x10`\x1d\xa0)\x04\x9c\x8a\xec\xa7\xc9\x01\x8eQ\x17\x80\xed\x0a\xdeC\x9dOG\xea\x92\xb9\x9a\x03\x88\xc8\x0b\xcb\xf8\xc2\x01b\x93\xb9!:\xc0vS7\x9a,\x88\xc8m\xc0\x9b\xa8\xf3\x09\x11\xf9\x92\xaa\xbenW\x88\xc8}\x14\x8f\xb4o\x1c\x9eV\xd5\x83PM\x02\x07\xffH\xb3%Q90\xaazYD.2=\x08\x93\x99\xc1]\x0d\x04\xc5\x04`o4\x1d\x05\xcb\xb1\xb3\x1f\x8a\xbc\xe3\xa3\xc0Sa\xa5\x88\xdc\x0f<\xd9\xf0\x99\x1a9\x84\x00\xcb\x8b\x91\xbaX\x18\x98\xc7\x01\xdaz\x01'L\xdd\xfe\xf2\x02\xd3<!`\x14,7\x09\x00L\x18(\x9fb\xfeH\xcb\xf65r\x13\xc0/\x81'\x22\xf5\xb10`C\xc7Y\x12\x1c@U\xc7@h\xcf[)\x043O\x12\x18\xe6\x00m\x02\xf8\x90\x88\x84\xe1\xe1\x0b\xd4\x1fU\xd7J\x18\x02\xeeJ\xf9\xe0\x06\xe4\xac\xaa\x1eiX\x17\xbb7`\xd1\x1c\x00\x0a\x17\x08\xcf\xf8\x9b\xa9\xce\x06R\xe0\x15U}]D\xce3\xbdSh\x97\x88\xec\xa5\x10\x90\xcd\xf6\xff\x0a\xdcY.o\x01\x1e\x00\x1e\x16\x91\x03\xc0\x97\xcd\xb6\x8f\x02\xdf
\xa5\x0dU\xcd\xfe\x07\xf8uy0&?w\x03GL\xdd\xed\x14yBXw\xb1\xfc\xfc%S\xbf\xbd\xac\x7f\xde\xd4\xdfk\xca\xc7\x836\xfc\xc1\xac\xbb\x0b\xf8\x80\xa9{\x11\xf8\xac\xa9\xfbK\xf9\xf9\xc7L\xfd\xab\xc0\xeeY\x7f{n!\xa0\x89X\x08\xa89@y+Yx=`[\xf9\x16\xb1\xd0I\xcf\x97\xdbA=\x11\xb4\x89\xf6\xd1`y\xcd\xac\xbb\x83z\xff\xff0\xf0}\x8a\xa7\x9bL\xb8SD>\x07|\xcal\xfb\x95p4\xb1\x09\x17@\xc1\xbcI \xd4\xc3\x80\xed\xb3\x87\xd7\x00\xac\x00\xac\x9d\x1f\x0b\x96\xd7\xcc\xba\x11\xf5\xf8\x7fHUO\x02?4\xf5\xdf\xa0\xda\x93\xf8=\xb3\xac\xbf\xc4\x05P`s\x80}T\xfb\xe6\xe7Uu2\x1d\xcc\x9eU#Sn\x13@\xaa\x03\xbc\xcf\xd4\x1d.\x7f?f\xeam7\xf2\xf3A{[q\x01\x14X\x07\xb0\x99z8\x15l\x96\x03\x9c\x0a\x96\xad\x00F\xa6\x1c\x0a\xc0>\xb1\xec\xc3T\xaf1\x9c\xa1\xc8\x13\x00\x9e\xa1\xfe\x80\xab\x09O\xab\xeaO\x1b\xd6\xd5p\x01\x14X\x01\xd8y\xfb\xa1\x00R\x1c\xc0\x8e\x05X\xdaB\xc0[M\xf9\xb9\xc9\xcd,\xe5\xd9\xfd\xbd\xc8\xf7]\x06\xbe8c\x9f\x15\x5c\x00\x056\x04\xa48\xc0\xc8\x94\xdbB\x80\xa5-\x04X\x0e\x9b\xf2\xe3\x91m\xbe\xa3\xaa\x7f\x9a\xf1=\x15\x5c\x00\x05\x8b8@J\x12h\xb9*\x00U=N}\xc2I\xc8\xa1\xb0\xa0\xaa\x7f\x04\x9e\x0e\xaaN\x00\x0f\xcd\xd8_\x8d\x5c&\x84\xccb\x91\x1c\xe0vSN\x11\xc01S^\x03\xde\x1e\xd9\xee2\xc5X\x85\xe5>\x8a\x1e\xc05\xc0\xb7U\xf5Td\x9bV\x5c\x00\x056\x04X\x07\x08\xe7\xf7Y\x07\xb0\xcf\xff\x99W\x00\x97\x22\xeb\xd7\x88\x0b\xe0w\xaaj\xdb\x88\xaa\x9eg\xc1\x17Y{\x08(\xb0\x0e\xb0\xc3\x94\xdb\x1c\xc0r\xda,7u\xc7\x8e\x05w(OXk\xd8\xd6\xc6\xff\xa5\xe1\x02(\xb0\x02\xb0\xb4\xe5\x00\x96\xab\x02P\xd5+T\xe7\x1f\x86X\xfb\x07\x17@ot\x22\x80\x92\xa6\xae\xe0\xd1H\xddZ\xc3\xb6\x87\x1a\xea\x17\xc6\x05PP\x8b\xaf\x86\x94\x10`\x13\xb1\xa6<`^\x01\xfcSU\xff1c\x9f\xeb\xc6\x05P\xd0\xa5\x034\x09`\xde\x10\xd0\xd9\xd9\x0f.\x80\x09)\x02HI\x02!\xc1\x01\xca\xb1\x00\xebF\x9d\xc5\x7fp\x01LH\x09\x01m\x0e\xf0\x9a\xaa^2u)!\x00\xe0\xcf\xa6\xec\x0e\xb0\x02R\x1c`L1\xe1\x22\x86=\xfb!-\x04\x00<\x18,?\x05\xfc\xb6\xbdi\x8b\xe1\x03A\x14\x17WD\xe4\x02\xd5W\xca\x84\x8c\x83mUD\xc6\xc4\xdf\x17\x90\x22\x80\xa8\x03\xa8\xea\x8fE\xe4-\x14C\xcc\xbf\x88\x8c\x15,\x15\x17\xc0\x9
4s\xcc!\x80\x923,&\x80qldo\x82\xaa\xbeDq\xcf`\xe7x\x08\x98\xd2\x96\x07\xd8[\xbd\x9b\xf2\x80\x98\x00b\xe3\x00M\xf1\x7f\xe5\xb8\x00\xa64\xe5\x01\x97T\xf5\xbf\xa6.E\x001\x07p\x01\x0c\x90&\x01\xd8\xb3\x1f\x9a\xbb\x82\xf3:@S\x02\xb8r\x5c\x00S\x9aB\x80\x8d\xff\xd0\xec\x00\xb5\xcb\xb1e\xb7\xd0\x0a\xc6\x1d`\x8049@L\x00)\x0e\x00\xf50\xe0\x02\x18 )\x02H\xc9\x01\xa0.\x00\x0f\x01\x03$%\x04\xa4:\xc0\xb3\xa6\x9c4o\xafK\x5c\x00S\xbat\x80G\x80\x1f\x00\x7f\x03\x1e(\xe7\xf3\x0d\x02\x1f\x08\x9a\xd2Y\x0e\xa0\xaag\x81\x8f\xad\xa7Q]\xe3\x0e0\xa5K\x07\x18,.\x80)\xcb\xe8\x06\xba\x0060\x8b\x0e\x04\xc5.\x05\x0f\x1e\x17\xc0\x94EC@\xf2\x9c\xfc!\xe0\x02\x98\xb2h7p\xc3\xd9?\xb8\x00B\xe6v\x00U\xbd@\xf5!\x0d\xe0\x02\xd8\xf0\xa4\x84\x00\xa8\xbb\x80\x0b`\x83\x93*\x00\x9b\x07\xb8\x0068)9\x00\xb8\x03l:\xdc\x012'\xe6\x00J\xb30\x5c\x00\x9b\x89\xf2FN;\xf5\xeb\x5cY\x1f\xc3^\xd3\xff\xd7\xf2[\xd5=.\x80*\xf6lo\xb2\x7f\x80o1}\x14\xec\xf3\xc0\xcf;iQ\xc7\xb8\x00\xaaX\x01\xc4\x86\x81\x01P\xd55\xe0\x8d\xc0G\x80\xf7\xaa\xea\xac[\xc6\x06\x89_\x0e\xae\x92\xe2\x00\x94\x07\xfd\x99\xee\x9a\xd3=\xee\x00Ul\x22\xd8*\x80\xcd\x80\x0b\xa0J\x92\x03l\x06\x5c\x00U\x5c\x00\x99\xe3! 
s\xdc\x012\xc7\x05\x909\xf6q,+\xb9E\xbbO\xa4\xe3\xe7\x0fl(\xca\xb7z=J1\xb8\xf3\x04\xf05\xf3\x9a\xd9M\x87\x0b s<\x04d\xce\xff\x01\x04S\xd2\x87\xf9\x0d\xcb\xd3\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x04\xa3\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x07tIME\x07\xd9\x01\x01\x13\x09,\x81Cc\x11\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x04#IDATx\xda\xed\x97_h\x1cE\x1c\xc7\xbf3\xbb\xb7\xbdKzI\xdabHI\x82\xc6&\x15\xa1yP,\x1a\xb5&\xd4\x16\x14\x0a*X\x93\xd2R\xa3 \xa5>\x08\x16!\x82\xa2\xa2RP\x9f-B\xa9\xd5\xa6\xa9\x7f\x0a\x86\xfa\xa0XSK\xaa\xa0\xa8\xb1\xc1\xc64\xb6\x89`\x9b\x1a\x1b\x9bk.\xb7w\xb7{\xbb3\xe3ov\xb7\xe7\x8byHr\xd7\xa7~\xb9/\xbf\xb9\xbb\x87\xef\x87\xef\xcc\xedp\xb8\xa1\xf9\xe4\x0by\xdb\xc4_\xe9\xee\xa7\xf6~S\x872\x8a\xe3\x7f\xa4\x94\xe2\xfb\x8f\x9d\xf98\x9dq\x0f\x9e\x99\xb8r~\xed\xb6\xben\x5cO\x11\x00\xee|\xf2\x90\x1a\xfd\xdbV\xc7~\x99R\x0f\xbf\xf0\xb9j\xe9:\xfcQ\xcb\xb6\xbe\xea\xeb\xd2\x00c\x0cW\xd36rB!Y\x93@\xcf\xee\x0dxl\xe3\x9a.\x06u\xba\xa5\xab\xaf\xad\xec\x00ZJ\xfa\xc8\xe5]\xe4r\x0el\x9a\xed\xf75\xa1\xe7\xe9\xf5M7\xd5X\xa7\xa8\x8d\x97\xa8\x0d^v\x80l\xce\x85\x93\x0f\x9d\xa7uuM\x1c{\xba\xef2\xdbZk\xdf\x84\x92'\x08\xa4\xa1|\x00\x22l\xc0u\x0b\x81\x1d'\xb2\xe7\xe1\xa1\xf6&\xec\xdc\xb2\xb6\xa32n\x0c\xb7t\xf5>R\x16\x00\x19l\x81\x0e\xf7\xe0\x90\xdd\x02\xd9\x0d\xad\xc1\xeaj+\xb0\xeb\xf1\xdbW57$\xfb[:{\xdf\xa56\x12\xa5o\xc0)P\xb0\x8f\x82\xb6K\x0e \xfc\xa2})\xb0\xa5\xbd\x91m\xbe{\xf5\xb3&W?\x12\xc8\xba\x92\x9e\x01\x1d\xe2y\xa2\xe8B05H\x04C\xd6m\xdc\xbc:\x8e'65\xae\xab]a\x11\xc4\xa1\xdd\xb4-\xa5\x00\xf0\xe0\xf9\x92\x02#\xfb\xd7 \x8a\x93 \x84n(0\xe0c\xd3\xfa\x95\x89\xd6[\x93\xfb\x94\x92\xfd\x04\xb2j\x89\x00\x82B\x15|_j\xeb\xb5v\x04C\x8e\x00\x8a\x10nxho\xa9\xe3\xe8hM>Z\x19\xe7\xc3\xcd\x9d\x1fv,\x1a\x803\x09!\x01_\xb0\xc8\x80 
\xd3\x8c@\x94\x06!\x00Y\x04a\x8a\xc1\xc9:H\xcf\x5c\x06\x9b\x1bo\x10\xb9\xd4@\xf3\xd6\x03o\xac\xd9\xfa\xbe\x89yd\xceO&!%\x83R\xd0\xa2\x19ZJm\xfd\x86`\xa8\x8d,\x05\xdav\x0e\x99\xb9,\xa6\xa6g`g\xb2\xd4F\x1e\xae\x93\x87\xe3L\x19V\xb2\xee\xe5XU\xfd=\x006/\x0c\x80Ids\x1e\x84P:\x90\xa6\x84O\xf6\xbcp\xcf\xe9\x09IA.\xbc\x82\x8b\x826\xaduC\xdc\xe0`\x9c\xcc8\x96Y\x16\x18$\xa4\xefb\xc1\x0d0\xa60\x9b\xf1\xa0dx9iK)\x03 \x9f\xcc\x0d\x03\xa6i\xd2g\x02\x5c\x90\xb9\x17\x86S\xb0\xc1\x0d,\xaf\xaaD\xb6\x10\x13>\x96\xed\xe5\xc2{}Q\x00V\xcc\x0aBIz\x06fL\x14\xb7D\x9a2\x087t8\x852\xc6\x10OX\xe0&\xc3\xf4L\xee\x02!\xef\x98\x1c\xe8\xf9\x16\xa4\x05\x03@\x03X\xb1 T\xa9k\xdb\xa0\xc3Y\x08\xa4t\x1b\x06\x85G\x95\x1b\x1c\xcb\xab+qi\xf2\x12ff2\x9f2n\xee\x9a\x1a|k\x16\xa4E\x03\xc4\x08@\x09\x09\xa9\x01h2\xf6_\xb8!8x\xb4\xd7V\x9c\x9ab.F\x87Gl\xba/\x9e\x9b\xfea\xdfADZ:\x80\x1fV/\xb8\x04Xx\x1e\x0c} \xb9\x08\x80\x125\x15\x988\xfb+\xc6GF\x7fR\xe0\xdb\xaf\x0c\x1d8\x0fRi\x00b1\x9a\x14\xae\xf7\xde\xf7\x83pA\xe1\x9c3\x18\x96\x09p\x17\xdf\x1f\xffR^\xfdg\xfamp\xf3\x95\xd4\xe9^\x0f\xa4\x92\x00H\x86\xa0\x01\xb0\xf0\x94\x03\xe1\xaf\x803\x0aO&pal\x18C_\x1f\x9f\xf4}\xb1sv\xe4\xe8ID*\x19\x80\x0a\x1a\xd0_3\xfd\x0aZ\x80\xc1\x91\xaf`\x18\xec\xff\x04\x17\xc7\xc6>c\x9c?\x93\x1e\xedOa\x1e-\xb9\x013\x16\xd3\x8b\xf0!Ta\xe1\xdc\xf89\x0c\x1e9\x9cul\xfbyf\xc4\xf6S8\x8a*y\x03J\xea\x07Mp\x0b\xa6\x12\xc0\xc0\xd1#8{jp\x08\x8cm\xcf\x8e\x7f\xf5{\xb9\xff\x17`\xc5\xca*\x5c6<\xfc\xfc\xe7(>x\xedE\xf9\xdb\xc9\x81w\x94\x14\xf7\x06\xe1\xe5\xd6\x1fs\x8ao|\xf5=\xfb\x8e\xce=*\xdex\xffd\xbcq\xc3\x83(\x93\x18\xe6Q\xfd\x03;\xda\xdcT\xaa\xdeN\xcf}\xe1\x5c\xfc.\x87\x1b*\x93\xfe\x05I\x09\xc1O\x1b \x86\xa8\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x03w\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x02\xf4IDATX\x85\xc5\xd7\xddk\x5cE\x18\xc7\xf1\xcf\xb3\x9b6M\xfa\x92\xed\x96\xb41bM\x8a\xb1\x96\x80J\xa0\x85 B\xb5*\x8a/\xe0\x85\x8a\x82\xad\xe0\x0b\x8a \x82 \xa8\xf4/\x10\xf1\xa67\xad7\x05\xc1\x0b\xdb\xaaW\x22\x22\x22F\xbc)\x14\xe9\x8dU\xc4\x1a\x8am\xaa46\x15\x9b\xbe\xd0\x1d/\xce\x116q\xcff\xb3\xae\xc9\xc0\xc0\xcc3\xcf\xcc|\xcf\xef<\xcf\xcc9\x91R\xb2\x9c\xa5\xd4l0\x22\xf6F\xc4\x81e\x03X\xc5\xce`wD\xacY\x16\x80\x1e\xc6z3\x9f\x91%\x07\x88\x88\xdeYz\xc7\xb9\x8a\xa1%\x07\xc0\x96\x8d\xcc\xdeL\xf7r\x01\xf4U\xb0\x85r\x95m\x10\x11\xb7DD\xffR\x01\xac\xabP\x1aB7[#\xa2\xbf\xcc\xb1\xd5\xbc\xd3I\x80\xaef\x00U\xban$]cs\x99G6\xb3\xf2\x12wv\x12`!\x05\xca7\x91.0\xb0\x9e\x97\xf6R\x9ef0\x22\x9afO\xc7\x00\xd6\xb3\xa2Bi\x17\xb5`\xf41\xf4eYq\xdd\xff\x0e\xd0\xcb\x86>\x02\x0e\x11\xdfc\x0d\xaa$l\xe8\x14@W\x84\x0an\xcf\xfb\xbf\xc8S\xae\xc7\xd8\x0dg\xadK_*]\x1ev2\x9d4\x1c!]\xed1\x89W\xef\x8eP\xad\xf7o\xa3\xfd]J\xce\x17\x06\xe1\xac\xda\xcaOm=\xd8e\xf4\xf8vG\xa7\x8e\xda>\x00'\xec\x7f\x88\x99\xb5m=n\xa3\x92RjX\xf1!\x1eo`\xdf\x8f\xe7\x8a\xe6-\xb66\x0b\xc2\xb5\xf8\xb3\x81}Z\x07c\xa0\x1d\x80?P]\xcc&\x11\xf1ZD\xbc\xd1h\xac\xe9A\x84\x0b\x05\x00-\xdf\x8e\x111\x84\xd7Q\x8b\x88\x8fSJ'Z\x05(R\xe07ll\x15\x00\xf7\xe03\x5c\xc2}\x98\x030\xe7\x15\xcc;\xe1\x8a\x00\xce.\x12\xe0~|\x91\xd7\xbb\xfe5Z\x17\xddo\xe1<\xf6\xe5\xfd\xcb\xe8n\x90\x05C\xf8\xb9\x95\x08G\x1ffP\xc9\xeb9\xac\x98\xe3\x93;\x8e\xc8\xa4\xbd\x15\xbf\xe7\xfd+\x05\x8b\xf6\xe0\xaf\x167\xff\x04\x1f\xd4\xd9\xbe\xc2\xaeFi\xf84\x0e\xa6\x94\x8e\xe3s<*\x0b\xb6F\x8a\xcd\xe2ZD\xac.\xd2<\x22\x02Gp\x06\xcf\xd4\x0d}\x84g\xeb}\xff\x01\x18\xc4\x8fy\xfbk\xec\xc6\xa9\xa2\x0d0\x85Mu\x1bV\xe7}\xb8> 
\x93\xfc\xe5\x94\xd2\x95:\xfb{\xb8#\x22v\xcc\x01\xc6O\x18\xcbe\x1aD\x0dG\x9a\xc8;\x81\xf1\xba\xd77-\x8b\xee\xee\xdcv\x18{\x0a\xe6\xee\xc1D\xfd+\xb8\x173)\xa5c\x90R:\x8do1\xd9D\x813\x18\xc8\xdb/b\x1fN\xe3\xc1\x88\xd8\x84\x9d8T0\xf7}\xac\x8a\x88\x03\x11q}\x09\x17\xf1\xe6<\xa7\xe7\xf1n\x13\x80\x1f0\x9a\xb7w\xe0\x1b\xd9\xdd\xf1$^\xc0\xe1\x94\xd2\xc5F\x13SJ5<\x8c_1\xdc\xd6\x05\x82'd2\x87\xec\xac\xe8\x97\xc5\xc49Y6mky\xad6\x01F\xf2'\xb8\x0d\x93u\xf6qyl\xb4Z#\x9f\xb8\xe8\x12\x11\x13\xb2\xcf\xb3\xa9\x94\xd2Sm-b\x81_\xb3\x05\xca+\xb2\xb3\xe2\xed\xff\xb0F\xfb\x0at\xaa\xfc\x0d\x82\xe2:\xdb\x0f\x89d\x18\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x08h\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x07\xe5IDATx\xda\xc5WkpU\xd5\x15\xfe\xce\xf3\xbeonn\x12\x12C\x12\x02I\x0cv4\x12E\xb4Z\xf11\xe8L[\x19\xabV[0\xb5\xc2\x80\xb4\xc5JC\xcb\x80\x19m;@\xcb mGej\x09-M\x7ft\x1c*\xb6\xe3\xa8\xa5*L\xb5\x9a\x0c\x0de\x1aZ0$\x12%\x0fBH\xcc\xe3>rO\xee=\xe7\x9e\xb3\xbb\xf6\xb9w\x0e&\xb96\xf8\xcb5\xb3\xb2Ov\xce\xde\xdf\xb7\xd6\xfa\xd6\xd9;2c\x0c\x9f\xa7\xc9\x98n\x029\xfb<\x09\xb0\xcd\x8d\x8dG\x98\xc5V k\xcbnY\x8eE5\x8b\xa1H\xe2,j\x82\x90\xe1,\x8a\x02\xff\x09\xf0\xd1\xb2 
\xc8\x12\xf8\x0c\xff\xbbH\x0e\xf2t\xda\xc4\xfb\xa7\xff\x8b\x7f\xb6\xbe\xeb\xac\x0f\x87\xc3Gg\x12\xc0\x946\xb5\xe2\xd4\xe9S(..\xc1\xbc\xa2\x22\x14\xcc\x9b\x87\xf2\xf2\x05p\xa9\x0arY\x86Cf\x8c\x1f:\x88\xf1\xdf\xed\x83Z]\x83\xa2\xef>\x0e\xcf\x92\xfa,\x09\x01ZJ\xc7\xc7#\xc3H&\x93\xce\xda\xb1\xb1\xb1\x15\xb3\x08X\x14\x81\xcf\xe7\x83\x22\xcb\xf6\xb3\x08fG\xd8?\x12\xc9\x05\xeeX\xb4\xff(\x16L\xfc\x15RA\x01\x94\xbe^t\xddu\x07\x02?j\x82\xf4\xc8z\xfb\xc5\xb2\xc2\x00\x0d\x8c\xef9\x0d+'\x81P^\x08.\x97\xcbyY\x92\x04\xd4\xcc\xcf\xcf\xd4h&8E\x17\xb9x\x12\xe5\xd6\x16\xf8nRq\xb6\xbd\x0a\x0b;\xcfS\xc9$L\xec\xfc1\xbeP\x7f5\xc2\xf7\xdc\x83\xa9\xa4\x0e\xb2\xb9\x09\xf0\xae\x08\x04\x02\xce\xb3@\x00\x12e@\xa5\xba\xe62-:\x00w\xcf7\x10\xf4{\xb1\xff5\x15\xcfv\xf4a-\xad\xb95\x1eGBU\xf1\xc1\x0f6\xe1\x96\x95+a1\x02\x84\xc5\xf7\x9c\x86\x95+\x033\xd2\xcc\xc0\xf5'\xcb\x02f\xa6@OFa\x9c\xfa\x1a\x0aB.\xbcu\x9c\xe1\xb9C\x06\x22\x91\x08\x9aFGq\xf0\xdeJ\xc4\xdf\x1c\xc2d\x22\x01\x83\x04\x986M0kz\x09L\xcb\x9c\x9b\x80\xc5\x887-<\xd3;:=S\x96\x01\xffG\xabP\x91o\xe0\xc4\x19\x03\x9b\xf7\x0a\x98\x9c\x9c\xb4\x09l\xdf\xb8\x10\xd7\xdc8\x80\xe4\xad\x95\x88\xd5\xb5\xa0\xab\x7f\x0c%a\x1f\x01\xce(\x01M\xccM\xc0\x02\x0cz\xb1\xbc8\xc8\xcb\xed\xd8`\xdbz\xcc\x0f\xeb\xe8\x1b4\xb0a\x8f\x80\x84\x96\xb2\xc1\xd7\xdeW\x82\xfb\x09<\x10\xca\x07\xab{\x0e\x15\x05\xd5TR?R\xba\x09\xd34/O\x03\x9f43m!\xcd]\xba\xb4\xf0\xc3c\xbbPW\xd0\x8b\x89\x98\x81\xd5\xdb\xd3\x88k\x82\x0d~\xdbu><\xber\x14yy^\x8c\x17\xed\x02\x5c\xb5\x10E\x89\xd2\xcf\xc8-\x98\x8c}v\x0d\xa4\xa9N\x06oG\xc3\x04\xb7\x81\xd3/\xe2\xeap\x1bRi7\xd6\xec\xfc\x18c1\x89\xc0\xc7QY\xc2\xb0k}\x0a\xe1\xa0\x88a\xff\x0f\x91\x94o@@\x94\x01r\xdd\xc8h\xc0\xb4r\xb4\xe1\xdc\x1a`\xb6\x88.\x8eNB\xd4\xbap\x95\xf7U0\xa5\x10\x9b\x9e\xe9\xc6\xd9\x01\x13qR\xbbKJ`\xdf\x16\x0f\x0a\x82)\xf4\xe3!\x9c\x8b\x7f\x09~AG\x8a<\xae'\xc0-?\xe8\xa2l^N\x09f\x100-\xcbN\x9fj\xf4\xa2J\xd8\x0f\xd5W\x89\x9f\xef\xeb@\xdbI\xcd\xfe\xaa\xe9\xc9\x18\x0e\xee\x0c\xa2\xac 
\x86\x88r'\xce\xa3\x81\xb2\x10\xa0\xba\x07\x91\x97\x1fr:\x87\xf6\xb0\x01\xd94\x11\xe6\xea\x82Y\x1a`H\xc4\x87\xb0\xd0<\x00ox1\x0e\xbd\xd6\x81\x13=\xf9\xb4\xd90b\xb1\x08\x9a\xb7\x06\xb1\xb84\x02M\xadCGb\x1d\xfc\x01\x09\xa2$\xc3\xed\xf3\x92\xe82}\xcf2\x05\xe7\x1a\x98\xb6\xbfu9\x1aho{\x0b7\x86G\x11Z\xb4\x14\xed\xed\xff\xc2\xcf\xf6\x7f\x08]\xcf\xf6{\x83\x84/\xd6N a\x15\xe3\xd7o\xd4\x83\xc9\xc7\xe0\xf1x\xed\xe8U\x97;\x13z\x06\x1b\x1e\xaf\x17}=]\x9f\xad\x04\x02\x18\xbe~\xfd)T\x5c\xd9\x80\xee\xce\x0e\xbc\xf4\x8f\x10\x07\xb7\xeb\xfe\xe0r\x0d\xdf\xbc\x93\x94\x0d\x1f\x9a\x0e\x14c\x22q\x06\x8a\xa2\xc2\xedv\xc3\xedr\x81\xe5P\xfc,\xac\xb9D\xb8\xea\xf6Q,\xbb\xe3\x09\x8cE\x93\xf8\xceO\xdb08\x14\xb5\xeb~\xd3\x95Qlk\xb0\xe0\xa2(\x1b\x9b\xe7\xe3\xe2\xb8D\xe0\x1c s\xf2\x19\x86\x81\xcb1\xd3\xfa?\x1a\xb8\xa1z\x18\x0f|\xebI\xc4\xa3CxxC3\x81k\xf6\x87\xa4\xb2p\x02{62\x04\xfd.4\xb5\x5c\x81\x9e!7\x81\xf3;\x81\x08\x97\x9b\x22w\xa2\x9d\xdb,\x8b\xe5.AYx\x04[\xb7l\xb4#Y\xd7\xb8\x0f\x1f\x9d\x9f\x02\xb7\x802\x8e\x17\x1a\xd3\x98\x17V\xf0\xcb\x97\x0bq\xbc\xcbC\xe0\x80\x08\x01^\x8f\x07\x02\xe3\xe0\xd6\x1c\xb0s\x94 \xe4\x19\xc3\x8eG\x87 
\xb8+\xb1y\xd3f\xfc\xa7;\x03.ZQ\x1cx\x92\xa1\xa2TA{\xdfR\x08%\xcb\xb1\xba!`\x9f\x9c555\xfcv3+\xf2\xf1\xf1q4m\xdb\xc6\xb3\xc3\xa3\xb5[\xba\xb8\xa8\xe8\xd3E\xf8\xfb\xa7\xdd\xb5\xcf<\xb1\x1c\xf9K\xfe\x82\xedO}\x1fG\xdb\xe3\xd9V\xd4(r\x035\x15\x16:\x87\xaa\xf0\xc6\xfb\xf5\xf0\xfbY\xe6\xee\x10\x0aAUU$\x12\x09~tswDG\x17\x1b[\x94\x92$\x81sK\xa5RH\xa7\xd3\x0e\x1e#w\x08\xbc\xbe\xf7\x11\xd7\x8a\xa5\xfe\xbf\x15V\xde\x85gw\xacBkg%\x8f\x01\x8c\x84\xf2\xd4\xea\x09,\xbbJ\xc2\xf9\x89\x12l\xd8\x9d\x84\xc9\xde\x84\x22+\xf6M\xe9\xf9\xbd{y\x8460c\x19R|t\x0e\x1e\x9b\x94\xc8\xf3\xcd\xdf\xe7\xf3\xb9/\xa5\xa1\x92\xaa\x87'\xf3\x1b+\xffx\xe0W\xf8\xed\xeby\xf4b?\xb8\xad\xbd{\x18\xf7\xdf&\x22\x92\xccC\xf3\xdb7#\x16\x7f\x07\xb2$\x83\xa9\x16\xdf\xcci7\x8a\x8c\x00\xed9\xc7\xf9|V\x95\xc8\x0e\x9fN\xc0\x1b*Y\x1f,]\x22\x84ki\x8a\xb5\x80\xdb\x97\xebG\xf0\xbd\xfb\x04$\x0d\x15\xbb_\xa9'P/\xc0\xec\xa0l\xd1M%\x93\x1c\x94\xa7\x96\x8f\x5c\xb4\x1c`Z\x168\xaaC\x94\x13\xa3\xc5\x1eZ\xeb\xf3\xfb1A\x1a\x91\xb3\xe9\x97\xaa\x97\xdd{\xbd\x16\xe9AE\xb1\x81\xad\x8f]\x83W\x0f\xb7\xe2'kMH\x94\xea5;\x5c\x08\x95\xf9Q\xe4S\x00pp/\x98}H\x19\x98\x9a\x9ar\x00ht2A\xa3\xed:\x91\x92%)3G\x0e\xfe\xae\x00N\xde>!\xe5L\xf4\xc5_\x81\xa8)\x89h/\xb4\xe89\xcc\xcf\x1f\xc5\x1a\xbaX\x0cG\x06\xb0\xbd\x85\xa1{\xc0\x85\x07o\xae@UU\x15\xfe~\xe4\x08\x81\xf1hu\x0e\xea\x08\x8b\xa2wJ\x90\x05\xcf\x90\xd4);\xb6\x083w\x02\x81FY\x94\x10\x8bF\x11\x08\x063\x04N\x9c\x8d\xed\xbe\xce\xd3\x05\xaf4\x82\xd4\xe4\x05$4R\xfb\xb9\x02<\x7fP\xc0X\xd4DYi\x00\xd7\xd6]\x8b`^\x10S)\x1d\x1a)\xde\xccv\x80\xa6i\x0eh6\xfd\xce\xa8\xeb:\x11\xe3\xf3\x96#P1\xdb\x9e\x85\x85\x85\x18\x19\x19\xc9\x10x\xa7\xfd\x83\xc5/\x1f>\x86\xc7\x1e\x98\xcf\x0a\x02\xbe\x7f\xff\xe6\xa5\xf1E\xa3\x91H\xfe'\xfa\xd5I\xf7\x9e_\xec\x01\x99#4\xba\x07\xe6\x12\xa1\xf3l\xe8:oC\x87\x80$\x8a\xf6\xfc\xe0\xe0\xa0\xdd=\xf2\xe6u_}\xf4\xc2\xd0\xb0P[\xee\xee|\xbb\xf5\xe2C/\x1e>9\x9e\x17\x0c~\xbb\xac\xb4\xf4iI\x94\xbc 
\xd3\x0d\xdd\x06\x92eyZ\x9ai\xb4S\x9fM\xff\xb4C\x87oN\xc6\xeb\xec\xcc9\xc2\xbc\x14\x98.k\x93\x93\xe7\xea\xab\x02\xc5\x7fx\xe5x\x04\x80\x9b</\x1a\x8b\x9dL\xa6R-.E-\x81\x00VU]-\xb6\xbe\xf7\xde\x025c\x8a$\xcb\x8a\x22+\xaa$\x89\x0a\xf5\xbf$\x88\xfc\x08\x02\x83 \xf0\xc2\xeb\x96\xc5\xf4t\xda0\x22\xd1\xa8&\xabJ\xcf\xd0\x85\xa1\x14\xb3\xa5w\xe9bO\xebL\x22tLn\xfe\xd3\xbb\xad\x00\xae _H\xaed[3B\xe2\xfa3\xb9\x04\xc0\xec\xee\xee6\xfb\xfb\xfbS\x14\x95I\x0b-\xbe\x98\x88X\x14\x8d\xc5\xe7(\xc5&e\xc1\xa2\xaf\x1e\xf3z\xbd\x16e\xca\xe2\xbf\x0f\x0f\x0f\xa7)sR\xf6\xffA\x01\x8e9DR\xff\x03\x8c\xc5\xeaCX+lK\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x06\xe0\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x09pHYs\x00\x00\x1b\xaf\x00\x00\x1b\xaf\x01^\x1a\x91\x1c\x00\x00\x00\x07tIME\x07\xd7\x0c\x1b\x16\x05\x11\x8ek\xb0\xdd\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x06mIDATx\xda\xcdW[lTU\x14]\xf7\xde\x99\xb9\xf3\xe8\xf4\x01\x08\x04\x8aR>JUb\xa6\x10\xc4\x86\xa2\x12\x22\x90HH0\x8d\xa1\xbc\x0a\x82<R\xa8\x06\x13\xd0\x0f\xe5\xcb\x98\x88\x06C\x0c\xef\xc4/\xf8 $\x02\xf1\xc3G\x08*J0\x80\x96\xd2\x82\xb4\xb4\xbc\x9f\xe9\xcbN\x87\x99{\xe7>\xdc\xfb\xf4\xe6\xdc\x19[\x22\x1f\x10\xdd\xc9\xea9=g\x9f\xbd\xd6\xde\xfb\xcc\x99\x0c\xfekS\xe0Y+\xcd\x8b\xc3\xe1\x90\x13\x8f\xebAMS\x95\xc7L\xe4\x12\xach\xd4U:;3\xbd}}f9-I\x01\xbd4Zuu\x09e\xd4\xa8}\xbd'O&\xb2\xf7\xef\xab\xde\xa1\xfc r\x1czM\xce\x1f\xb2\x17\x9b0\xc1\x1d^]\xddh65\xad\x8c\x1e<\xd8XL\xcb\x01vH\xeb\xba\x1e(-\xdd\x17\x98={\xb2VU\x05UQ\x06g\xf08\xca\xadR\xe4\xc2\xc2\xca\xcc\xb5k{\x15]\xaf\x86ad\x84\x80\xac\xaaFn\x1d9\x92\x88WT\xc04M<Is(\xb9\xd6c\xc7*\x9f'N\x00\x03\x02l\xd7\xd5z\x9a\x9bU=\x99\x84\x8d'k\xb6a\xa0\x8fZ\xcc\x9c \x1b\xa8\x80\xeb\xaa\x0e\x00+\x93\xc9\x13\xd0M\xcem\xe10\x82\xe94\x12\xba\xce\xady\xb4,]\x17\x8dt6\x1b\x89\xa0\x9cb\x96\xe8\xba/ 
\x9b\xe5\x84\x05\xa7\x14@\xe4\xaa\xcd\x9b9\x02\xfe\xb2m8\x89\x04\x16UW#M\x02~<t\x08e\xc9$\x8b\xf8W\xf2+\xf18\xe6\xd5\xd6\x22B\x02~;q\x02=\xe7\xce\xa1P\xd3\x06\xf6M\x93\xdb 8\x01\xef\x8f\xe38,\x80+ \x11\xa5\xcb8\x8d\xc8\x15r\x8eF\xa3x\xb5\xa6\x06\xb7JJ\x90\xe5\xfd\xa1!\xf6n\x16\x17\xb3/\x9f\x11g\xa7\xcd\x98\x81HU\x95\xf4q\xb8\x02\x1e\xa7\x14`;\x8e\xdf\x02\x0feEEPU\xb1\x0d\x97\xb2\xd2\xa9\x8c/\xcd\x9f\x8f\xae\xd2R\xe9'\xe1\x91w\x8e\x1d+|B\xa1\x10\x13\x80\x8dc\x94\x91(\xe9\xcb\x15\xf08\xa5\x00\xcbo\x81T\xdaz\xe0\x00\x1e\xb4\xb7\xc3\xab\x90@0\x18D\xe5\xdc\xb9HM\x9c\x08\xcb0dP\x9e\xa7\xca\xcb\x91\x983\x07\x81@\x00\xb6m3\x84\xf0\x07\x1d\x1dh\xdb\xbf?\xaf\x02\xdc\x02\xe6\xf4+\xe0_B\x89l*\x85\x96\xbd{\x91jk\x93\x228 \x13<7k\x16\x9c)\x95PlC\xc0\x9e\x9c@\xc5\xcc\x99\xd04-\x9f\xfc\xf2e\xb4\xec\xd9\x033\x95\x1a$\xc0\xce\xbd\x846\xa0\x88E\xc3\xe0\x0d\xff\xc6\x12\xce\xef\xd8\x81Ik\xd6 XV&+!\xca:}\x06n\x93\x18\xc0E\xe9\x8bU\xbc\xc6\xc4<\x8a\xde\x1b\x1d\xed\xb8\xb0{\x0f\x1cZ\xcb5\xdb\x13\xc0\x9cB\x80\xac\x00\x070M\x11D\x1a\xb9\xd02.\xee\xdc\x8eg\xd7\xd6C}\xa6LV\x82\x89FO\x9d\x06\xcfd\xcf\x99\xdc\xb9v\x05\x97v\xef\x80cZ\xd4\xeb\x9cg\x94\xf6\xf4\xe2\x22\xe2s\x04\xa7\x7f\x07|\x01~\x1b\x8c\x0c\x5c3\x035\x9b\x81\x92\xe9G\xfb\xce\xcf\xe1^mc\x02Y\x09\xef\x82\xf2\x5c\x0a\xd7n^A\xc7\xaem\x80\xd1\x0f\xc5\xcaP\xcb4\xc4\xc7\x8dCd\xc4\x08\xb8\xe4wf\xebV\x98\x19\x03\xd6?\xdf\x01WU\xd0}\xa1\x05J8\x82P<\x8e`8\x84\x80N\xe5t\x07T*\x06p}\xd7\xa7xz\xf5F\xb8cd;X\x00C\x08\x8b'\xbbq\xf7\xf0~\xe81\x1d\x0aGM\x1b\xe8\xbb~\x19\x1dG\x0f#\x9dL\x83-\x18\x8b\x82\xb8x7O\x80\xc2\x15H\xde\xb8\x8eL\xff\x03h\x9a\x82H,\x84Xa\x04\x91\x02\x1d\xe1h\x08z8\x08'\x12\xc2\xb1\x1f\xbeG\xd5\x92U\x928W\xc4\xaf\xdf~\x83\xe8\xd7_Q\xf5,\x98Y\xc0\xf0\xa0\x104\xa2\xb3\x99\x88\xc9]UpJ\x01B\x84\xea5\x1c\xecL=\xb6\x0d\xb8)\x036\x1d\xb6\xfa\xc9[\xd7\xd0\xf6\xda\x22T\xd7,\x91m\xc8\x05[\xd9\xdc\x05h\xee\xba\x8f\x91G\xf7\x01`a\x10\xe0m\xee\x10\x8f\x8aF\xe49\x17]\xcd\xfd\x96bu\xac!\xa0\xf9\x08\xf2\x18\xd2p\xe7\xf5\xe5x\xa5\xe1\x03\xc4b1x&o<g
\xcfw\x80?\x86/,]\x87\xee7\xd6\x8a3\xc1@~,r\x17\x1c\xae\xa2\x0c\x16\xc0}au\x1a\xedi9\x224\x0a\xd4\xb5`5^\xde\xf0>\x0a\x0a\x0a\x98P\x92\x0f\xbf\xd1\x88\xa7n6\xf2\x5c\xb6\x83\xdf\x89D]=R\xb5\x0d|\xd6\x8f\xc3P!|\x1duh\x01\xb2\x02\xc2\x91A\xe9\xa7\x16\xae'\xf2\xcd\x9c9\x9338S\x22?\x0b\xfb\x8bep\xb6\xaf\xc0\xe8;M\xfcJ29\xef\x89g{\xca[\x1b\xe0\xae\xd8\xc41\xf2b*\x94\xa1;\x94\x00V\xc5\x9b\x14_@\x0dh\x14`#\x91obrV\xce\x10$\xc5WO\x0b\xf2\xb0j\x0a(_\xae\xc0\x98{\xe7\x11\x0e\x87\xa5\x10\x9eO}\xbb\x01\x91\xfa\x0fE,\x11\x93\xa1)\xc4\x85!\xef\x00\x11\xf8\x02b\xeb6\xa3\xba\xfe=I\xce\x99q\xe0X\xdb)d>[\x82\xa0kB\x0f\x01aB\x10&\xb2\xdb\x96SU\xfe\x18,b\xd5z\x0c{w\x8b\x88\x09\x16\xf0\xf0;\xc0\xff\xf9\x1bU\x8b\xebD\x002I\x1el\xf9\x05\xbd\x1f/\x02\xb2&\x06\x99e\x22\xf9\xc9RD[Oq\x0bX\x04\xb7K\x9c\xab\xacY(\xdd\x14U\xe0a\x97\x10\xf2\xa3sze\x0d\xd0\xdf'\x9fW\xfb\xf7\xe3\xb8\xfdQ-l\xc3\x14\x9f\xe7\xac\x05\xd0\x14\x19\x02\xcfm\x1b\xe2%\xbd\xbbe1p\xeeg\xf9U\xce1\xceP,\x8e\x09\x86w\xd7\x86h\x81 \x92\x02zZ\xce\xe3\xcc\xb2yP\x93\xbdH\x9f\xfc\x0e\xed\x9b\x16\xc2\x22\xb6\xac\x0d\xf9\xc8\xa4\x0d!@\xc0\xb4@{\x04R\xd5A\xbe|\x86\xcf\x9e\xad\x9bG\xb1\x9aDL\xc7\xf5\xb2\xf7\xf9\x11\xc8o\x81p\x12\x19:\x04>\xf8\xd3\xf4\xf1\x88\x84 \xfa\xad\x12\xbc\xf6\x09?\xcd\x93\xcf\xfe\x96\xc5\x22\xbc\x91*\xd1\xdc\xf0\xe6\x808\x83c\xf91!\x12\xcdo\x01\xaf\xd9j\xc107:\xb2\x90\x1d\x05,\xdbG\xd6b\x88\xcc\x05\x0c/p\xdaC\x86\xc1\xeb\xde>\xfbZ\x022\x86\x8c\x1b\x1dYD\xeb\x8a\xc3\x9c\xb2\x02.\xd0\xad\xc7\xe2\x97\xe2\x93\xc6Wh\xd1\x104\xc7\xe2W\xcc{\xc9\xfcQ#(\x04\xae\x96E\x90\x99p\xdb\xf8\x99%\xa8\x84\xa0Ms\x8b\xce\xd0\xa8\xd3\x18\xf5\x12\xb0\xb5 \xacx\x09\x94\x0b\xf7.1\xa7\x14\xa0\x00=\xa1\x8b\x7f6t\xf4\xf6l\xef\xe9\xea,\xcf\xa6\xd3\xea\xa3\xff\x0c{t\x0bE\xa2NX\xbdqit\xb2\xff\x1d\xe6D\xeeu8>\xd0\x8e\x12\xc20n/\x9e\x80ye\xe7\xcc{f\x02\x0e\xfe\x0f\xf67\x83v\xd2D\xe2\x1dh\x05\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x04\x99\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x04\x00\x00\x00\xd9s\xb2\x7f\x00\x00\x00\x02sBIT\x08\x08U\xecF\x04\x00\x00\x00\x09pHYs\x00\x00\x03v\x00\x00\x03v\x01}\xd5\x82\xcc\x00\x00\x00\x19tEXtSoftware\x00www.inkscape.org\x9b\xee<\x1a\x00\x00\x04\x18IDATx\xda}\x95\xcdk\x9cU\x14\xc6\x7f\xf7}\xa73\x93I\xa6Mk\x91\x0cL[\x15\xa5\x0d\xe2B\xa1F+\xd5\xd2fU4-\x08~P\x5c\x89\x7f\x80\xba\x90JHi\x08\x16\xc16(\xb8\xb1K\x13M[\x14\xdbE\x11jk\xedB\x8d\x11D\xa4D\xa4\xc5E\x8b\x13\x89\xd46\x1f3\xf3N\xe6\xbd\xd7\x87\xc30C\x88\xe4\xfe\xb8$/\x9c\xe7\xb9\xe7$\xe7\x9e\xeb\x02k\xd7\x89\x9c?\xe0\x86\xe8\xa7$\xa0\x22f\xc3\x85\xe8\xf2\xd1\x845k\x8d\xc1X)\x8cp\xc4\x17\x03\x9e@+\x88\x08\xedE&\xdd\xe8pe\x1d\x83\xd1|\x18\xe6M_h\xb2BB\x83\x14\x0f\x12\xc7d\xc9\xb1\x81\x0cQ\x95q76R\xff_\x83c}\xe1\xab0\x90RgY\xa2\x12e\x8at\x83\xbe\x16\xb9M\x05\xaf\xaf<1n\xda\x1d>>\xb7\xc6`\xf8\xb1p\xd1\x97\x1bTe\xb0\x9b\x9dl\x22ogBJC\xd4\xf8\x95\x19\xf2\x14\xc8\x12\xddv\x07\xc7~[e\xf0n_\x98\xf1\xe5:\x0bla\x1f\xdb\xe9%\x873 \x90\x8a&\xcb\xccq\x91;l\x94\x8d,v\xbf7\xd76x'\x1f\xae\xfa\x01\xa5\xce\x13:\xbd\x8f.2\xc4\xc2\x89\x00x\xb1B\x83\x84\x05\xbeg\xc6J\x89\xa6\xdd\xbe\xf7\xeb\x10\x01\xa4\xc3~ 
\xe1\x9e\xe4\x03l\xa3\x87<9\x91\xb5\xad\xdfmw\xd1-6\xf3\x1cO*2\xc1\x0f\xa4\xc3\xad\x0c\xde.\x85\x1b\xcd\xc2\x12E^e{\xab\xf2\x98K<+\x01\xb6\x82\xd1\xb4,j,pZ\xbb\x87L\xd5=|\xb2\x12A:\xd2,\xd4\xa93HY\xf2l\xcb\xe0\x06\x13\xfcAd\xc4\xc6\x06\xcb\xabK\xd2\x17\xa9\x8bf!\x1d\x81\xf8N\xce\x7f\xba\x92[\xe0\x19\x15\xb0Y\xf2\x8c\x02c\x89~d\x89_\xf8\x8b\x87\xc8\x02\xae\x0d@\x1e\xf8]qn\xe7\xd7\xa7\x22\xbf?-6\xf4\xd1\xcf\x16b\xc3\x19ujT\xf9\x81c\xfc\xdc\xe9G\xcb#+\x9e\xd2O5Z\xd1\xef\x8f\xd2C)\x09\xdb\xec\x1f\x97\xb1\x90\x08\x07$\xc8@\xfc\xcd)>b\xa1m\x91\x11Y\x0a<HBJz(\xf2\xfd2\xe0\x01\x02\xcb\xda&\xb6\xd5\xb0\x1c\x96\x8doy\x8b\x9f0\x0b\x11\x93\xd1\xdee\x06\xbe?\xf2%5\x09\x1bQE\xa4\x04\x0b2\x03\xa1\x1c$\xaf\x89y>\xe0c\x161\x03\xcbs\xabT2(e|\xc9\x93\xd2c\xe9w\xce\x87?I\xa8[\xf3xbk\xe1k\xdc\xe4\x08\x8f\xe3,r\x0b\x92#\x83\x80h%\x07\x1d\x83[\xdc\xe3\xaev`\x93\x911\x01\x80\xc5\x89`dB%<\xe2X\xc2ll\xd3\xae6Ra\x81\xa8e\xbe\x877(\xe2\xb18\x99\x9bE%\xe3\xcd\xe0.M\xbc\x08\xc2\x96\xf5C\x1e\x08\xd6\x5c[9\xca\xdevWz1o\x06\xbe\x12\x85Y\x88U\xf1\x8a\xa5\x98\xb6-\xb2\x12vS\x14\xdd<\xcfg\x1dy+nV*\x08\xb3Q8\x1f\xacq\x13R\x9a\x1d\x03\xbb<=\x92\xef\xe0CN\xd0\x0b\x1d\x03\x8b\xbb.\x95\xbe\xceG\xe1\x0a\x8b15f\xa8\xd2\xa0)\xacJ\x0af\xf0\x02\xe78\x00m\xb9\xb7\x88\x06\xd7\x14\x1d\xc3b\xb8\x12M$a2\x22\xc77\xcc\xd1\x10V\x0aA\x06%\x8es\x8c\xdeU\xf2\xd4n\xe4?|!ED\x98\x9cH\x22\x08\xa3\xa1\x1a\x03S,PG&\x96\xe2\xd3\x9cd\x0f\xab\xc5:[\xd4\xf9\x84\x80\x0a\xa8\x86Q\x88\xe0\xf3\x0a\xe3\xb1\x1coq\x95%j$b\x85\xd7)\xa0\x5c\x8c\xd4X!\x115\xe5zS\xd11\x8cKI\x06\x801\x06\xe3\x81..\xe1\x18\xc4\x04\xab\x86Zk*\xa2\xd3%\xff\x92.$\x9f\x96\xaa3T_\xd1P\x0d6\x93w\xf0\x1ae\xb2\xc2\x9a\x1b\x00/$g\x9e\xd3\xdc\xb0\xb9\xec4T\xa7:C\xd5,4\xd6C\xb9i\xbd?\xc4^\x85\xc5\xad\xabm\xf5\x8b\xef8\x87\xb3K/\xf9\xc1\xa9\xd5c\x1d\xe0\xe5>\xf4\xb0xK\xb4\x9b]<\xca\xfd\xdc\x87\xe3_\xe6\xb9.\x96\xac'e9\xcd\xe13k\x1f\x16\xb3\xb0\xa7-\x14\xecO\xd6\xee\x08'\x91\x0d:\xe1\xeci;S_\xe7q}\xc9\x1e\xd7P\xec\x5c.g[\xd8\xe3zv\x9d\xc7\xb5c\x92\x0b\x07\x18r\xfd\xa1\xf5\x
bc\xbbJ\x98\xe5\x82\xbb|6a\xcd\xfa\x0f\xcb\x16\xf49\xc3\x9f\xd1\x8a\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x06(\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x09pHYs\x00\x00\x0d\xd7\x00\x00\x0d\xd7\x01B(\x9bx\x00\x00\x00\x07tIME\x07\xd8\x01\x03\x14#,\x8a2\xa2\x92\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x05\xb5IDATx\xda\xbd\x96kl\x14U\x14\xc7\x7f3;\xb3\xefv\xfb\x80B\xcb\xc2\x16\xca#B A\xc1\xa2`\xc0\x221DL\xf4\x1311\xc4\x0f\x86D\xbe\x98\xa0$`x\xc5\xf8B@E\xc2\x07\x83\xbc\x03\x84/\x22Fy\x04\xac\xc8\x1b\x04\x92\x12\x1e\xf2\xb4\xd8--\x96v\xdb\xee{wv\xc6\xd9\x9bl\xb22\xbbY \xc4\xb3{r\xe7\xde\xdc\xdd\xffo\xce9\xf7\xcc(<}\x93\x84\xff\xd7\x0c\xe1\x05\xeca\x80z \x00H<\x89\x8d\x9e\xe3g\xc8\x94Wqx\x1b\xb1\xd9\x02\x18\x86\x81\xae\xb7\x91\x8a\x9e\xe1\xee\xf1C\xdc>\x18\x04Z\x85\x17\x01\x08$\x93\xc9\xa3\xa6\x83$\x91\xfd \xbe\x88y\xfe\x98o\x91x\x8a\xc5\x9bO\xb1\xfdX+3\x1bG2{R\x80x\xf0\x0a\x00.\xff\xb8\x91\x07\xce\xdf\x1dy\xe4\x5c\xe5\xdb\xef\xbc\xf7>\x9bV\xcc\x9bM\xb4\xab(\x80\x94L\xa6\x88D\xa2H\xb2dj\xe5y>@\x1eD\x7f,\xc9\xdcU\xbfr\xb5[g\xe5\xfc&f\x8e\xadaP\xb9\x1d\xfb\xe4:\x0c\xdd 
\x91\x81I#\xaa\x996\xde\xcf\xaa=\x7f@\xe3\xc2\x8f\xe9\xbez\x9a\x96\x1d}\x85\x00@\xe2!q\xd9t\xac\x10\x80\x96\xc9\xf0\xd1\xd63\x5c\xe8\xd4X\xf4V#o<[\xc7\xb0J\x07\x8ad\x90\xd13\xe8\x19\x1d\xaf\xae\xe3\xb3\xab\xb8&\xd4\x10I=\xc7\x9a=\xfa\xe4\x8c$\xad\x03\xde\x054\x0b@NT\xce\x83\x00+\x00\x86\xc1\x85\xeb\xf7\xd8}\xb6\x83\xc6\xe7\xc7\xf2\xe6\xc4\xc1\xd4W9\x90\xd0I\xa55~\xfei\x9f\xd8?\xa3\xa9\x09\x9bl\xc3_\xae\xf0\xda\xf8\x1a~\xbb6\x9csgc\xf3x\xe1\x83M\x9c^{\xc2\x0a\x009q\x8bh\xfeuZ\xd3\xd8|\xf8:J\x99\x8fY\x13\xeah\x18\xe0\x04#+\x9e&\x99Lp\xff\xfe}\xecv;}}}8\x9dN\x5c.7#\xaaT\x9a\xc6\xd5p\xf1ZP\xd2\xe2U\xf3\x81\xb3\x05RP0\xf7b$/\x22\x89x\x9c3\xb7B\x94W\x0cf\xfc\x10/\xaal\x08\xa8x\x22a\xd6PX\x00\xb8\x5c.zzCT\xf8*PU;\x0e\xa7\xca\xd8Z\x0fe\x15\xe5\x84z\xddS\x01\xf7\xc3\x00\x86T\x1c@x.\xfcq\x13\xa0#\xaa3\xa4\xceI\x9d\xcf\x8e\xae\xeb\xa4R)s=F(\x14\xe2\xd0\xa1C\xb8\xddn\x9a\x9b\x9b\x09\x87\xc3ttt\x88\xf5\x83-m\x94y\x1c\x84\x1cn?\xe0)\x94\x82\x92\x00\xe6\xf1\x16b\x86\xc3\x91\xcdWv.\x004-C\xd2\x5c\x8f\x99\x10---(\x8a\x82\xc3\xe1\x10\xbf3\xf7\x0b\x97\xcdkl6\xb0\xab\x00N\x05\xab\x15\x15\xcf\xb9.\xcbb\x1cXa'\x9c\xd2\x08\xf6&\x183@!\x93\xc9\x90\xd14\x00~0\x8b\xd0\xe5tRS3\x88\xaa\xca*<\x1e\x0f\xaa\xdd\xc1\xfek\xbdDR\x19P\xe8\x02\x8c\xe25P8\x1a\xc2\xcd\xbb\x10\x855v\x90\x8dc])~\xbf\xdeM\xa3\xdf\x85b\x80,\xc98\x1dN\x06V\x0f\x145P\xe6-\x17QP\x14\x95X\xda\xe0\xd8\x8dn\xc2\xc9$${\xae\x00I\x0b\x80E\xb8\x90\x03^\xaf\x97\xd9\xc3U\x9aC\x12\x07\xaet25\xe0f\xdap\xaf(\xb62o\x19\x06\x98 \x8el\xf5\x8b\xd3\xa0#\x09\xd0\x03\x97;I+\x12\xdcl\xfe\x05\x88\x16\x8c\x00\x16Q\x8b\x8b\x90N\x1dSGck+\xe7\xfa||u\xe4/\xb4\xe9~&\x0f\xf3\x0aQ\x9bMFQU\x01\x14\xd7\xe0\xc4\xcd.V\x1f\xbeC0\x1b\xfe\xd0\xed\xd3\x5c\xda\x7f\x12\x88\x15)\xc2\xd2\x10\xaa\xaa\x12\x08\x04X0\xfao\xee]Oq)\xe6d\xe5\x81;\xbc\xd2\xe0\xe3\xc5\x86\x0a\xfcUY\x08\x8d\xce\xfe>\x8e\xdf\xec\xe1\xe0\x8d\x1e\xdaP\xd0\xb5\x9e 
?~\xb2\x01h\x05\xd2\x02\xe0q!\x001VVV2e\xd2D\x16GO\xf0\xdd=\x17\x97\x18\xc2\xf6[a\xf6\xfe\xd9\x83\xd7\x86\xd8\x13\xd6MWT4\x9f\x8f\x09z\x90\x96\xf5\x0bv\x12\xef?\x05\xf4\x96.Bks\xca?)\xe2\x98\xf9\xfd~f\xcdx\x89\xca\xf3\xe79z\xe3\x1c'=\x83h\xa3\x82v\x5c\x00x\xa5$c\x8c\x07L\xd5\xae\xd0\xf4\xccP\xe6\xc6\xfb/\x02\xed\x80\x06P(\x05\xd6;\xb7\x1e\xd3\xdc(\x0al\xe8\xd0\xa1\x94\x97\x97\xd3\xd0p\x97\x97\xef\xdc\xa1\xb3\xf36\xd1h\x14@\xd4Jmm-#FL\xa7\xbe\xbe\x1e\xe0\x01\x90\xb2>\x8e\xad\x10\x16\x10\xc02\x02\xa2\x1e\xaa\xab\xab\xf1\xf9|\x8c\x1a5J\x88\x8bw\x0a\x10G\xd0\x84\xc8\x1eI\x01\x0b\xe8\xe4\xd9\xa3\xa4\xa0\xa8x\xfe\xb5,\xcbB\xc0t\x11\x0d]\xd7E\x87\xcc\xdf\xa3\xebF\x89W\xb2by\xb7ZQ\xa0\x5c[6\xbb\xa2\xf0\x1c\x88\xf8/\x9bE\x8e\xe2\xad\xd8*R2\x1a9\xe1\x5c\xdfO$\x12\xd9Q\xac\xdbl6\x12\x9a\x11\x02\xa2%\x01\xf2AJ\x98E<+\x1a\x8eD\xe8\x0d\xf5f\x9f\x82bnS\x14\xdc\x1eOr\xdd7_/\x05.?\x0a\xc0\xe3\x84>\x17vQx}}\xfd\x04\x83mt\xf7\xf4\xe0v\xb9\xa9\xab\xabE\x96\xa5\xcc\xc6\x8d\x1b\x97\xef\xd8\xbe}\x0b\x10\x7f\x94\x14\x14\x17\xb4\x9a\x00\xd04\x8dX,\xc6\x83\xee\x07\xfc\xd3\xd5\xc5\xebs\xe6\x90N\xa7\x09\xb6\x07\x8d]\xbbv~\xb6f\xcd\xda\xf59\xf1\xa7\x9c\x02\x11~!\x16\x0e\x8b\xd0g\x9f\x94b\xde\xdenv\xdd}{\xbf]\xbel\xc5\xe7\x16\xf1\xe2\x00\xa5\xcd\x1a~\x01 \x22\x10\x89F\x19^\x1f\x10w\xbe\xcf\x14\xffp\xe1\xa2%\x16\xf1'\x06(\x1d\x05\xd1\x0fTU\xcd\xec\xde\xbd\xeb\xd3eK\x97\x7fa\x11\xb7\x00<e\xab\xa8\xaaJn\xda\xfc\xfd\xd2\xd5_\xae\xd9`\x15/\x0d \xcek^\x03*]\x84\xe6\x9a!\xc9H\x8a\x1d]u\x866nX\xbfd\xdb\x96\xad\xdb\x80\x04\xc0\xe3\x02\xb4\x9a\x003\x00\x89'\xb3\x08p\xb9\xb8xi\x80\xbb\xc2\xffG\xfb\x17\x94<v9\x5c\xadN2\x00\x00\x00\x00IEND\xaeB`\x82\x00\x00\x04)\x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\x00\x00 \x00\x00\x00 
\x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x06bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\x00\x09pHYs\x00\x007\x5c\x00\x007\x5c\x01\xcb\xc7\xa4\xb9\x00\x00\x03\x9bIDATx\xda\xedT=o\x1bG\x10}\xbb\xb7w$\xf5A\x9elJEB\x18\xb1\x0a\x1bv\x13\xdb\x85\xaa\x18P$\xa4\x89\x81\x94\xd6_\x08`\xb8r\x93 \x85\x02\x01\xa9\x9c\x1f\xe0>\x85\x81\x94F\x00\xa7\x11\xd88\x85\x1bK6@)\x94-\xc8\x96\x10\x18\x81\x22\xf3C\x22i\xf2\xf6v\xf3B.s\xd1\xb1H\x80\xb83\x07x\x98\xbd\xdd\x99y3\xb3\xb7\x83\xb1\x8ce,\xef\xbd\x88\x1f\x97\x97\xe1\x05\x01\x820D~~\x1e\xdb\xf7\xee\xa1st\x94\x188H\x07/\x05\xe5\xe0\x0f\xb5C\xf0\xb7N\xd6\xb9\xb3g\x11\xde\xba\x85\xf8\xd5+\xc4\xb5\x1a`\x0c\xc4\x83B\x01\x82\xe4\xe1\xcd\x9b\xf2\x8f'Ol.\x9f\xb7\x96\x07\xd1\xc9\x09\xbeX_\xc7\xbb\x90\xd7\xd7\xafCNM\x01RB7\x9bbraA4\xee\xdf7q\xa3\x01\xf1\xc3\xf9\xf3\xf0\xc3P\xac\x90\xfc\xd1\xda\x9a|v\xf7\xee4\xc9\xbdw\xda\x01\x87\xec\xf4t<\xb7\xba\xda\xcc\xdc\xb9c\xebW\xae\x88\xb8^\xb7\xa2|\xfb\xb6\xf8\xe0\xeaU\xdbl4>?\xda\xd8\xf8\xf6\xa0\x5c\xfe\xa8yp \xff=\x01G\x9eN\x22!\x1fM\xa0T2S\x8b\x8b{\xb9\x85\x85U\xe4r?G\x9b\x9bB\xd5\xaaU[\x7f\xfe|)\xbcp\xe1Avv\xd6\x93ah\xdb\x07\x07B\xfc\xe7\x04\x122\xe3`\x07p~\x09\x04c\xa3X\x9c\xd5;;?\x89\xdd\xdd\xcf\xa41ee\x94B\xb3Z\xfd\xca\xfa\xbeWZZ\x8a>^YQ\xf3\xec\x88\xa7\x94#'\xac\x85L\x05\x1c\x09\x9e\xe8S~\x88cxR\x22\xc8\xe7\xa1\xe7\xe6`\x84\x88\xf4\xe3\xc7\xbe\xa8V\xbf6\x17/\x96\xd5\xeb\x87\x0f\x0bZ\xa9K\xad(\xc2\x87\xd7\xaey\xc1\x8b\x17b\xe6\xf8\x18\xca\xf3\xe0Y\x9bT<B\x98\x00B\x0c\xaa\xa6\xbd!bc\xfa\xdad2\x00\x89\xdbL`\xfb\xe9S\xfc\xba\xb5\x85Oo\xdc\xf0J\x95\x0a\xec\xfe\xfe\xa5\xdc\xcb\x97\x05\x15[\xabzQ\xe4\xd7\xf6\xf7qD\x833$\xb7LF1\xa8JZ\x9dJ\xc2\x81$\x18VJX\x12\x81\xa4rr\x12&\x9bE\xa7\xd3\xc1\xef\x9b\x9b\xf8m{\x1b\xf5\xb7o\xd1e\xcc\xe3j\x15zo\x0f\xf4\xf1\xd9\x0d\xa5\x0c\x00\x87~\xe6\xacf\x00\x0a\x83\x9e\xd6\xffl=\xc9$\xbb$\x95\x02\x82\x00\xa0\x8e\xe9\xd7k\xb7\xd1\x22A\x83o\xbd~x\x88\xb6\xf3\xcb\xf2\xacEm\xc8A\xe2~\xd2\xc6\x15\xa7\xb9\x88b\x00o8\x1c\xbat\x0a\xba]L\xb1\x82\xac\xef#\xabT\x9f\xcc#\x04\x
1d\xe5\x10\x0c \xb4\x86\xe9\xf5`834\xdft\xaf^G\xf7\xafJ\x1di\x86v\x9a~]\xdaj\x12cb\x02\xbe\xbb\x22IN^\x99V\x87B4&\xac\xad\x00(\xedll\xe8\xe2\xb9s\xbeGbEC\x9fW\xe1\xc71|!\x86W\xd2\xdfW\x0c\xe0q_\x11\xc3\xffD\xce\xcc@\x16\x8b\x10\x1e\xbf\x5c\x85Y\x22\x00P\xa0\xd6\xd4>\x8b9\xb3\xbb\xab\xa3A\xcf*5r\x8b5\xf4\xe5\x93\x0e\xb0~\xc2\x03\x1eZ#%\xfd\x93\xb6\xb3\xda\xd3O\x8f\x04\xbe\xd3\xa7\xde<\xed2\xc0\x08\xb2\x0e\x19\x06\xa5\xbf\xe0\xbaG\xbfe\x00\x8f\xd4174\x17\x0c\xb0\x04\xe0;\x03\x5cf\x8b\xa4M?\xad\xd4\xcfG\x0dV\x9f\x9e\x0f\xce~ v\xf8o\x11zpn\x14\xb0\xc5\xbdo\xda\xc0/\x92\xa6j\xd2\xcd\x8c<7\x9e\x01\x8b4\xca\xd3X%\x09$\xc3%\x91\xd1\xa4b\x87\xd40J\xdbk\xee5\xc9\x09=\xd8\xb2\xea\x8dK\xb6\x05\xc8\x90Z\xd0\x00\xceY\x11\xdf\xe3\xffI%\xf5mI\xdc\x224`\xfaE~\xe9\x0e\xa4\xd3=B\xa7F\xe9\xe8\xe8\x1dE\x90B&\xd1)\x0c\xc4`,c\x19\xcbX\x06\xf2'\xe3\xdf\x9d\x06\x06\xf6\x92\x0e\x00\x00\x00\x22zTXtSoftware\x00\x00x\xda+//\xd7\xcb\xcc\xcb.NN,H\xd5\xcb/J\x07\x006\xd8\x06X\x10S\xca\x5c\x00\x00\x00\x00IEND\xaeB`\x82"
qt_resource_name = "\x00\x08\x06\xc1Y\x87\x00o\x00p\x00e\x00n\x00.\x00p\x00n\x00g\x00\x09\x06\x98\x83'\x00c\x00l\x00o\x00s\x00e\x00.\x00p\x00n\x00g\x00\x08\x04dZG\x00a\x00m\x00p\x00a\x00.\x00p\x00n\x00g\x00\x08\x0b\xb2XG\x00r\x00e\x00d\x00o\x00.\x00p\x00n\x00g\x00\x09\x0c\x98\xbaG\x00p\x00a\x00u\x00s\x00e\x00.\x00p\x00n\x00g\x00\x0e\x05\xd7\x95\xa7\x00c\x00l\x00e\x00a\x00r\x00-\x00l\x00i\x00s\x00t\x00.\x00p\x00n\x00g\x00\x08\x0bcX\x07\x00s\x00t\x00o\x00p\x00.\x00p\x00n\x00g\x00\x07\x07\xa7W\x87\x00a\x00d\x00d\x00.\x00p\x00n\x00g\x00\x0a\x03\xaf\x02G\x00s\x00t\x00a\x00l\x00t\x00a\x00.\x00p\x00n\x00g\x00\x0c\x0blg\xc7\x00t\x00a\x00k\x00a\x00n\x00a\x00m\x00i\x00.\x00p\x00n\x00g\x00\x14\x06\x10\x9a\xe7\x00g\x00o\x00-\x00p\x00r\x00e\x00v\x00i\x00o\x00u\x00s\x00-\x00v\x00i\x00e\x00w\x00.\x00p\x00n\x00g\x00\x0a\x0b\x88O\x87\x00r\x00e\x00p\x00e\x00a\x00t\x00.\x00p\x00n\x00g\x00\x0a\x08\x94\x19\x07\x00s\x00p\x00l\x00a\x00s\x00h\x00.\x00p\x00n\x00g\x00\x0a\x01\xcb&g\x00m\x00a\x00r\x00k\x00e\x00r\x00.\x00p\x00n\x00g\x00\x0a\x0a\xc8\xf7'\x00f\x00i\x00l\x00t\x00e\x00r\x00.\x00p\x00n\x00g\x00\x08\x08\xc8Xg\x00s\x00a\x00v\x00e\x00.\x00p\x00n\x00g\x00\x0c\x0b\xdf!G\x00s\x00e\x00t\x00t\x00i\x00n\x00g\x00s\x00.\x00p\x00n\x00g\x00\x09\x06\xc7\x98g\x00a\x00b\x00o\x00u\x00t\x00.\x00p\x00n\x00g\x00\x0f\x0e:\x09\xc7\x00o\x00n\x00l\x00i\x00n\x00e\x00-\x00h\x00e\x00l\x00p\x00.\x00p\x00n\x00g\x00\x08\x04\xb2X\xc7\x00u\x00n\x00d\x00o\x00.\x00p\x00n\x00g\x00\x07\x08sW\x87\x00a\x00p\x00p\x00.\x00p\x00n\x00g\x00\x10\x06O7\xc7\x00g\x00o\x00-\x00n\x00e\x00x\x00t\x00-\x00v\x00i\x00e\x00w\x00.\x00p\x00n\x00g\x00\x0d\x03\xd6\xc5G\x00t\x00h\x00r\x00e\x00s\x00h\x00o\x00l\x00d\x00.\x00p\x00n\x00g\x00\x0b\x03yNG\x00s\x00a\x00v\x00e\x00-\x00a\x00s\x00.\x00p\x00n\x00g\x00\x08\x0f\x07Z\xc7\x00e\x00x\x00i\x00t\x00.\x00p\x00n\x00g\x00\x08\x02\x8cY\xa7\x00p\x00l\x00a\x00y\x00.\x00p\x00n\x00g\x00\x0f\x04\x7f\x99\xc7\x00o\x00p\x00e\x00n\x00-\x00r\x00e\x00c\x00e\x00n\x00t\x00.\x00p\x00n\x00g\x00\x0a\x06\xcbO\xc7\
x00r\x00e\x00m\x00o\x00v\x00e\x00.\x00p\x00n\x00g"
qt_resource_struct = "\x00\x00\x00\x00\x00\x02\x00\x00\x00\x1d\x00\x00\x00\x01\x00\x00\x01X\x00\x00\x00\x00\x00\x01\x00\x00as\x00\x00\x01X\x00\x00\x00\x00\x00\x01\x00\x00c\x84\x00\x00\x02\x9e\x00\x00\x00\x00\x00\x01\x00\x00\xaf\xfc\x00\x00\x02l\x00\x00\x00\x00\x00\x01\x00\x00\xa0\xac\x00\x00\x00\xbe\x00\x00\x00\x00\x00\x01\x00\x00/\x0c\x00\x00\x02L\x00\x00\x00\x00\x00\x01\x00\x00\x9d1\x00\x00\x00.\x00\x00\x00\x00\x00\x01\x00\x00\x0d\x08\x00\x00\x02\xb4\x00\x00\x00\x00\x00\x01\x00\x00\xb4\x99\x00\x00\x01\xfc\x00\x00\x00\x00\x00\x01\x00\x00\x85\xae\x00\x00\x00r\x00\x00\x00\x00\x00\x01\x00\x00\x1c\xe3\x00\x00\x00\xf6\x00\x00\x00\x00\x00\x01\x00\x006\x01\x00\x00\x02&\x00\x00\x00\x00\x00\x01\x00\x00\x98\x8a\x00\x00\x00\x16\x00\x00\x00\x00\x00\x01\x00\x00\x07\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x01\xc0\x00\x00\x00\x00\x00\x01\x00\x00uf\x00\x00\x02\xd8\x00\x00\x00\x00\x00\x01\x00\x00\xba\xc5\x00\x00\x00\xaa\x00\x00\x00\x00\x00\x01\x00\x00)9\x00\x00\x02\x12\x00\x00\x00\x00\x00\x01\x00\x00\x8d\x96\x00\x00\x01>\x00\x00\x00\x00\x00\x01\x00\x00A\x81\x00\x00\x01\x8c\x00\x00\x00\x00\x00\x01\x00\x00h\xfd\x00\x00\x01r\x00\x00\x00\x00\x00\x01\x00\x00e\x95\x00\x00\x00\x94\x00\x00\x00\x00\x00\x01\x00\x00$\xa8\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x01\x00\x002\xc2\x00\x00\x01$\x00\x00\x00\x00\x00\x01\x00\x00:\xcf\x00\x00\x00D\x00\x00\x00\x00\x00\x01\x00\x00\x10\x94\x00\x00\x01\xa2\x00\x00\x00\x00\x00\x01\x00\x00m\xf0\x00\x00\x00Z\x00\x00\x00\x00\x00\x01\x00\x00\x18f\x00\x00\x01\xd8\x00\x00\x00\x00\x00\x01\x00\x00{\xce\x00\x00\x02\x88\x00\x00\x00\x00\x00\x01\x00\x00\xa9\x18"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources() | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/gui/views/generated/qrc_icons.py | qrc_icons.py |
# Resource object code
#
# Created: dom feb 7 18:18:05 2016
# by: The Resource Compiler for PySide (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore
qt_resource_data = "\x00\x00\x09^<!DOCTYPE html>\x0a\x0a<html>\x0a<head>\x0a\x09<meta charset=\x22utf-8\x22 />\x0a\x09<title>Application Info</title>\x0a\x09<style type=\x22text/css\x22>\x0a\x09body {\x0a\x09 text-align:center}\x0a\x09.reference {\x0a\x09 font-style: italic}\x0a\x09</style>\x0a</head>\x0a\x0a<body>\x0a<h1>APASVO v.0.0.6</h1>\x0a\x0a<p>A graphical tool to perform event detection/picking over a seismic trace.</p>\x0a\x0a<p>Three different picking algorithms are available: STA-LTA, AMPA and Takanami</p>\x0a\x0a<p>\x0a STA-LTA algorithm processes seismic signals by using two moving time\x0a windows, a short time average window (STA), which measures the instant\x0a amplitude of the signal and watches for earthquakes, and a long time\x0a average windows, which takes care of the current average of the signal\x0a amplitude.\x0a\x0a <div class=reference>\x0a See:\x0a Trnkoczy, A. (2002). Understanding and parameter setting of STA/LTA trigger\x0a algorithm. IASPEI New Manual of Seismological Observatory Practice, 2, 1-19.\x0a </div>\x0a</p>\x0a\x0a<p>\x0a Adaptive Multi-Band Picking Algorithm (AMPA) method consists on an\x0a adaptive multi-band analysis that includes envelope detection, noise\x0a reduction for each band, and finally a filter stage that enhances the\x0a response to an earthquake arrival. This approach provides accurate\x0a estimation of phase arrivals in seismic signals strongly affected by\x0a background and non-stationary noises.\x0a \x0a <div class=reference>\x0a See:\x0a Álvarez, I., García, L., Mota, S., Cortés, G., Benítez, C.,\x0a & De la Torre, A. (2013).\x0a An Automatic P-Phase Picking Algorithm Based on Adaptive Multiband Processing.\x0a Geoscience and Remote Sensing Letters, IEEE,\x0a Volume: 10, Issue: 6, pp. 1488 - 1492\x0a </div>\x0a</p>\x0a\x0a<p>\x0a Takanami algorithm estimates the arrival time of a seismic signal\x0a by using two autoregressive models: a model that fits the earthquake and\x0a a noise model. 
Assuming that the characteristics before and after the\x0a arrival of the earthquake are quite different, the arrival time is\x0a estimated by searching the time point where the minimum value of the\x0a Akaike's Information Criterion is reached.\x0a\x0a <div class=reference>\x0a See:\x0a Takanami, T., & Kitagawa, G. (1988).\x0a A new efficient procedure for the estimation of onset times of seismic\x0a waves. Journal of Physics of the Earth, 36(6), 267-290.\x0a </div>\x0a</p>\x0a\x0a\x0a<p>Created by Jose Emilio Romero Lopez.</p>\x0a</body>\x0a\x0a</html>\x00\x00\x89K GNU GENERAL PUBLIC LICENSE\x0a Version 3, 29 June 2007\x0a\x0a Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\x0a Everyone is permitted to copy and distribute verbatim copies\x0a of this license document, but changing it is not allowed.\x0a\x0a Preamble\x0a\x0a The GNU General Public License is a free, copyleft license for\x0asoftware and other kinds of works.\x0a\x0a The licenses for most software and other practical works are designed\x0ato take away your freedom to share and change the works. By contrast,\x0athe GNU General Public License is intended to guarantee your freedom to\x0ashare and change all versions of a program--to make sure it remains free\x0asoftware for all its users. We, the Free Software Foundation, use the\x0aGNU General Public License for most of our software; it applies also to\x0aany other work released this way by its authors. You can apply it to\x0ayour programs, too.\x0a\x0a When we speak of free software, we are referring to freedom, not\x0aprice. 
Our General Public Licenses are designed to make sure that you\x0ahave the freedom to distribute copies of free software (and charge for\x0athem if you wish), that you receive source code or can get it if you\x0awant it, that you can change the software or use pieces of it in new\x0afree programs, and that you know you can do these things.\x0a\x0a To protect your rights, we need to prevent others from denying you\x0athese rights or asking you to surrender the rights. Therefore, you have\x0acertain responsibilities if you distribute copies of the software, or if\x0ayou modify it: responsibilities to respect the freedom of others.\x0a\x0a For example, if you distribute copies of such a program, whether\x0agratis or for a fee, you must pass on to the recipients the same\x0afreedoms that you received. You must make sure that they, too, receive\x0aor can get the source code. And you must show them these terms so they\x0aknow their rights.\x0a\x0a Developers that use the GNU GPL protect your rights with two steps:\x0a(1) assert copyright on the software, and (2) offer you this License\x0agiving you legal permission to copy, distribute and/or modify it.\x0a\x0a For the developers' and authors' protection, the GPL clearly explains\x0athat there is no warranty for this free software. For both users' and\x0aauthors' sake, the GPL requires that modified versions be marked as\x0achanged, so that their problems will not be attributed erroneously to\x0aauthors of previous versions.\x0a\x0a Some devices are designed to deny users access to install or run\x0amodified versions of the software inside them, although the manufacturer\x0acan do so. This is fundamentally incompatible with the aim of\x0aprotecting users' freedom to change the software. The systematic\x0apattern of such abuse occurs in the area of products for individuals to\x0ause, which is precisely where it is most unacceptable. 
Therefore, we\x0ahave designed this version of the GPL to prohibit the practice for those\x0aproducts. If such problems arise substantially in other domains, we\x0astand ready to extend this provision to those domains in future versions\x0aof the GPL, as needed to protect the freedom of users.\x0a\x0a Finally, every program is threatened constantly by software patents.\x0aStates should not allow patents to restrict development and use of\x0asoftware on general-purpose computers, but in those that do, we wish to\x0aavoid the special danger that patents applied to a free program could\x0amake it effectively proprietary. To prevent this, the GPL assures that\x0apatents cannot be used to render the program non-free.\x0a\x0a The precise terms and conditions for copying, distribution and\x0amodification follow.\x0a\x0a TERMS AND CONDITIONS\x0a\x0a 0. Definitions.\x0a\x0a \x22This License\x22 refers to version 3 of the GNU General Public License.\x0a\x0a \x22Copyright\x22 also means copyright-like laws that apply to other kinds of\x0aworks, such as semiconductor masks.\x0a\x0a \x22The Program\x22 refers to any copyrightable work licensed under this\x0aLicense. Each licensee is addressed as \x22you\x22. \x22Licensees\x22 and\x0a\x22recipients\x22 may be individuals or organizations.\x0a\x0a To \x22modify\x22 a work means to copy from or adapt all or part of the work\x0ain a fashion requiring copyright permission, other than the making of an\x0aexact copy. The resulting work is called a \x22modified version\x22 of the\x0aearlier work or a work \x22based on\x22 the earlier work.\x0a\x0a A \x22covered work\x22 means either the unmodified Program or a work based\x0aon the Program.\x0a\x0a To \x22propagate\x22 a work means to do anything with it that, without\x0apermission, would make you directly or secondarily liable for\x0ainfringement under applicable copyright law, except executing it on a\x0acomputer or modifying a private copy. 
Propagation includes copying,\x0adistribution (with or without modification), making available to the\x0apublic, and in some countries other activities as well.\x0a\x0a To \x22convey\x22 a work means any kind of propagation that enables other\x0aparties to make or receive copies. Mere interaction with a user through\x0aa computer network, with no transfer of a copy, is not conveying.\x0a\x0a An interactive user interface displays \x22Appropriate Legal Notices\x22\x0ato the extent that it includes a convenient and prominently visible\x0afeature that (1) displays an appropriate copyright notice, and (2)\x0atells the user that there is no warranty for the work (except to the\x0aextent that warranties are provided), that licensees may convey the\x0awork under this License, and how to view a copy of this License. If\x0athe interface presents a list of user commands or options, such as a\x0amenu, a prominent item in the list meets this criterion.\x0a\x0a 1. Source Code.\x0a\x0a The \x22source code\x22 for a work means the preferred form of the work\x0afor making modifications to it. \x22Object code\x22 means any non-source\x0aform of a work.\x0a\x0a A \x22Standard Interface\x22 means an interface that either is an official\x0astandard defined by a recognized standards body, or, in the case of\x0ainterfaces specified for a particular programming language, one that\x0ais widely used among developers working in that language.\x0a\x0a The \x22System Libraries\x22 of an executable work include anything, other\x0athan the work as a whole, that (a) is included in the normal form of\x0apackaging a Major Component, but which is not part of that Major\x0aComponent, and (b) serves only to enable use of the work with that\x0aMajor Component, or to implement a Standard Interface for which an\x0aimplementation is available to the public in source code form. 
A\x0a\x22Major Component\x22, in this context, means a major essential component\x0a(kernel, window system, and so on) of the specific operating system\x0a(if any) on which the executable work runs, or a compiler used to\x0aproduce the work, or an object code interpreter used to run it.\x0a\x0a The \x22Corresponding Source\x22 for a work in object code form means all\x0athe source code needed to generate, install, and (for an executable\x0awork) run the object code and to modify the work, including scripts to\x0acontrol those activities. However, it does not include the work's\x0aSystem Libraries, or general-purpose tools or generally available free\x0aprograms which are used unmodified in performing those activities but\x0awhich are not part of the work. For example, Corresponding Source\x0aincludes interface definition files associated with source files for\x0athe work, and the source code for shared libraries and dynamically\x0alinked subprograms that the work is specifically designed to require,\x0asuch as by intimate data communication or control flow between those\x0asubprograms and other parts of the work.\x0a\x0a The Corresponding Source need not include anything that users\x0acan regenerate automatically from other parts of the Corresponding\x0aSource.\x0a\x0a The Corresponding Source for a work in source code form is that\x0asame work.\x0a\x0a 2. Basic Permissions.\x0a\x0a All rights granted under this License are granted for the term of\x0acopyright on the Program, and are irrevocable provided the stated\x0aconditions are met. This License explicitly affirms your unlimited\x0apermission to run the unmodified Program. The output from running a\x0acovered work is covered by this License only if the output, given its\x0acontent, constitutes a covered work. 
This License acknowledges your\x0arights of fair use or other equivalent, as provided by copyright law.\x0a\x0a You may make, run and propagate covered works that you do not\x0aconvey, without conditions so long as your license otherwise remains\x0ain force. You may convey covered works to others for the sole purpose\x0aof having them make modifications exclusively for you, or provide you\x0awith facilities for running those works, provided that you comply with\x0athe terms of this License in conveying all material for which you do\x0anot control copyright. Those thus making or running the covered works\x0afor you must do so exclusively on your behalf, under your direction\x0aand control, on terms that prohibit them from making any copies of\x0ayour copyrighted material outside their relationship with you.\x0a\x0a Conveying under any other circumstances is permitted solely under\x0athe conditions stated below. Sublicensing is not allowed; section 10\x0amakes it unnecessary.\x0a\x0a 3. Protecting Users' Legal Rights From Anti-Circumvention Law.\x0a\x0a No covered work shall be deemed part of an effective technological\x0ameasure under any applicable law fulfilling obligations under article\x0a11 of the WIPO copyright treaty adopted on 20 December 1996, or\x0asimilar laws prohibiting or restricting circumvention of such\x0ameasures.\x0a\x0a When you convey a covered work, you waive any legal power to forbid\x0acircumvention of technological measures to the extent such circumvention\x0ais effected by exercising rights under this License with respect to\x0athe covered work, and you disclaim any intention to limit operation or\x0amodification of the work as a means of enforcing, against the work's\x0ausers, your or third parties' legal rights to forbid circumvention of\x0atechnological measures.\x0a\x0a 4. 
Conveying Verbatim Copies.\x0a\x0a You may convey verbatim copies of the Program's source code as you\x0areceive it, in any medium, provided that you conspicuously and\x0aappropriately publish on each copy an appropriate copyright notice;\x0akeep intact all notices stating that this License and any\x0anon-permissive terms added in accord with section 7 apply to the code;\x0akeep intact all notices of the absence of any warranty; and give all\x0arecipients a copy of this License along with the Program.\x0a\x0a You may charge any price or no price for each copy that you convey,\x0aand you may offer support or warranty protection for a fee.\x0a\x0a 5. Conveying Modified Source Versions.\x0a\x0a You may convey a work based on the Program, or the modifications to\x0aproduce it from the Program, in the form of source code under the\x0aterms of section 4, provided that you also meet all of these conditions:\x0a\x0a a) The work must carry prominent notices stating that you modified\x0a it, and giving a relevant date.\x0a\x0a b) The work must carry prominent notices stating that it is\x0a released under this License and any conditions added under section\x0a 7. This requirement modifies the requirement in section 4 to\x0a \x22keep intact all notices\x22.\x0a\x0a c) You must license the entire work, as a whole, under this\x0a License to anyone who comes into possession of a copy. This\x0a License will therefore apply, along with any applicable section 7\x0a additional terms, to the whole of the work, and all its parts,\x0a regardless of how they are packaged. 
This License gives no\x0a permission to license the work in any other way, but it does not\x0a invalidate such permission if you have separately received it.\x0a\x0a d) If the work has interactive user interfaces, each must display\x0a Appropriate Legal Notices; however, if the Program has interactive\x0a interfaces that do not display Appropriate Legal Notices, your\x0a work need not make them do so.\x0a\x0a A compilation of a covered work with other separate and independent\x0aworks, which are not by their nature extensions of the covered work,\x0aand which are not combined with it such as to form a larger program,\x0ain or on a volume of a storage or distribution medium, is called an\x0a\x22aggregate\x22 if the compilation and its resulting copyright are not\x0aused to limit the access or legal rights of the compilation's users\x0abeyond what the individual works permit. Inclusion of a covered work\x0ain an aggregate does not cause this License to apply to the other\x0aparts of the aggregate.\x0a\x0a 6. 
Conveying Non-Source Forms.\x0a\x0a You may convey a covered work in object code form under the terms\x0aof sections 4 and 5, provided that you also convey the\x0amachine-readable Corresponding Source under the terms of this License,\x0ain one of these ways:\x0a\x0a a) Convey the object code in, or embodied in, a physical product\x0a (including a physical distribution medium), accompanied by the\x0a Corresponding Source fixed on a durable physical medium\x0a customarily used for software interchange.\x0a\x0a b) Convey the object code in, or embodied in, a physical product\x0a (including a physical distribution medium), accompanied by a\x0a written offer, valid for at least three years and valid for as\x0a long as you offer spare parts or customer support for that product\x0a model, to give anyone who possesses the object code either (1) a\x0a copy of the Corresponding Source for all the software in the\x0a product that is covered by this License, on a durable physical\x0a medium customarily used for software interchange, for a price no\x0a more than your reasonable cost of physically performing this\x0a conveying of source, or (2) access to copy the\x0a Corresponding Source from a network server at no charge.\x0a\x0a c) Convey individual copies of the object code with a copy of the\x0a written offer to provide the Corresponding Source. This\x0a alternative is allowed only occasionally and noncommercially, and\x0a only if you received the object code with such an offer, in accord\x0a with subsection 6b.\x0a\x0a d) Convey the object code by offering access from a designated\x0a place (gratis or for a charge), and offer equivalent access to the\x0a Corresponding Source in the same way through the same place at no\x0a further charge. You need not require recipients to copy the\x0a Corresponding Source along with the object code. 
If the place to\x0a copy the object code is a network server, the Corresponding Source\x0a may be on a different server (operated by you or a third party)\x0a that supports equivalent copying facilities, provided you maintain\x0a clear directions next to the object code saying where to find the\x0a Corresponding Source. Regardless of what server hosts the\x0a Corresponding Source, you remain obligated to ensure that it is\x0a available for as long as needed to satisfy these requirements.\x0a\x0a e) Convey the object code using peer-to-peer transmission, provided\x0a you inform other peers where the object code and Corresponding\x0a Source of the work are being offered to the general public at no\x0a charge under subsection 6d.\x0a\x0a A separable portion of the object code, whose source code is excluded\x0afrom the Corresponding Source as a System Library, need not be\x0aincluded in conveying the object code work.\x0a\x0a A \x22User Product\x22 is either (1) a \x22consumer product\x22, which means any\x0atangible personal property which is normally used for personal, family,\x0aor household purposes, or (2) anything designed or sold for incorporation\x0ainto a dwelling. In determining whether a product is a consumer product,\x0adoubtful cases shall be resolved in favor of coverage. For a particular\x0aproduct received by a particular user, \x22normally used\x22 refers to a\x0atypical or common use of that class of product, regardless of the status\x0aof the particular user or of the way in which the particular user\x0aactually uses, or expects or is expected to use, the product. 
A product\x0ais a consumer product regardless of whether the product has substantial\x0acommercial, industrial or non-consumer uses, unless such uses represent\x0athe only significant mode of use of the product.\x0a\x0a \x22Installation Information\x22 for a User Product means any methods,\x0aprocedures, authorization keys, or other information required to install\x0aand execute modified versions of a covered work in that User Product from\x0aa modified version of its Corresponding Source. The information must\x0asuffice to ensure that the continued functioning of the modified object\x0acode is in no case prevented or interfered with solely because\x0amodification has been made.\x0a\x0a If you convey an object code work under this section in, or with, or\x0aspecifically for use in, a User Product, and the conveying occurs as\x0apart of a transaction in which the right of possession and use of the\x0aUser Product is transferred to the recipient in perpetuity or for a\x0afixed term (regardless of how the transaction is characterized), the\x0aCorresponding Source conveyed under this section must be accompanied\x0aby the Installation Information. But this requirement does not apply\x0aif neither you nor any third party retains the ability to install\x0amodified object code on the User Product (for example, the work has\x0abeen installed in ROM).\x0a\x0a The requirement to provide Installation Information does not include a\x0arequirement to continue to provide support service, warranty, or updates\x0afor a work that has been modified or installed by the recipient, or for\x0athe User Product in which it has been modified or installed. 
Access to a\x0anetwork may be denied when the modification itself materially and\x0aadversely affects the operation of the network or violates the rules and\x0aprotocols for communication across the network.\x0a\x0a Corresponding Source conveyed, and Installation Information provided,\x0ain accord with this section must be in a format that is publicly\x0adocumented (and with an implementation available to the public in\x0asource code form), and must require no special password or key for\x0aunpacking, reading or copying.\x0a\x0a 7. Additional Terms.\x0a\x0a \x22Additional permissions\x22 are terms that supplement the terms of this\x0aLicense by making exceptions from one or more of its conditions.\x0aAdditional permissions that are applicable to the entire Program shall\x0abe treated as though they were included in this License, to the extent\x0athat they are valid under applicable law. If additional permissions\x0aapply only to part of the Program, that part may be used separately\x0aunder those permissions, but the entire Program remains governed by\x0athis License without regard to the additional permissions.\x0a\x0a When you convey a copy of a covered work, you may at your option\x0aremove any additional permissions from that copy, or from any part of\x0ait. (Additional permissions may be written to require their own\x0aremoval in certain cases when you modify the work.) 
You may place\x0aadditional permissions on material, added by you to a covered work,\x0afor which you have or can give appropriate copyright permission.\x0a\x0a Notwithstanding any other provision of this License, for material you\x0aadd to a covered work, you may (if authorized by the copyright holders of\x0athat material) supplement the terms of this License with terms:\x0a\x0a a) Disclaiming warranty or limiting liability differently from the\x0a terms of sections 15 and 16 of this License; or\x0a\x0a b) Requiring preservation of specified reasonable legal notices or\x0a author attributions in that material or in the Appropriate Legal\x0a Notices displayed by works containing it; or\x0a\x0a c) Prohibiting misrepresentation of the origin of that material, or\x0a requiring that modified versions of such material be marked in\x0a reasonable ways as different from the original version; or\x0a\x0a d) Limiting the use for publicity purposes of names of licensors or\x0a authors of the material; or\x0a\x0a e) Declining to grant rights under trademark law for use of some\x0a trade names, trademarks, or service marks; or\x0a\x0a f) Requiring indemnification of licensors and authors of that\x0a material by anyone who conveys the material (or modified versions of\x0a it) with contractual assumptions of liability to the recipient, for\x0a any liability that these contractual assumptions directly impose on\x0a those licensors and authors.\x0a\x0a All other non-permissive additional terms are considered \x22further\x0arestrictions\x22 within the meaning of section 10. If the Program as you\x0areceived it, or any part of it, contains a notice stating that it is\x0agoverned by this License along with a term that is a further\x0arestriction, you may remove that term. 
If a license document contains\x0aa further restriction but permits relicensing or conveying under this\x0aLicense, you may add to a covered work material governed by the terms\x0aof that license document, provided that the further restriction does\x0anot survive such relicensing or conveying.\x0a\x0a If you add terms to a covered work in accord with this section, you\x0amust place, in the relevant source files, a statement of the\x0aadditional terms that apply to those files, or a notice indicating\x0awhere to find the applicable terms.\x0a\x0a Additional terms, permissive or non-permissive, may be stated in the\x0aform of a separately written license, or stated as exceptions;\x0athe above requirements apply either way.\x0a\x0a 8. Termination.\x0a\x0a You may not propagate or modify a covered work except as expressly\x0aprovided under this License. Any attempt otherwise to propagate or\x0amodify it is void, and will automatically terminate your rights under\x0athis License (including any patent licenses granted under the third\x0aparagraph of section 11).\x0a\x0a However, if you cease all violation of this License, then your\x0alicense from a particular copyright holder is reinstated (a)\x0aprovisionally, unless and until the copyright holder explicitly and\x0afinally terminates your license, and (b) permanently, if the copyright\x0aholder fails to notify you of the violation by some reasonable means\x0aprior to 60 days after the cessation.\x0a\x0a Moreover, your license from a particular copyright holder is\x0areinstated permanently if the copyright holder notifies you of the\x0aviolation by some reasonable means, this is the first time you have\x0areceived notice of violation of this License (for any work) from that\x0acopyright holder, and you cure the violation prior to 30 days after\x0ayour receipt of the notice.\x0a\x0a Termination of your rights under this section does not terminate the\x0alicenses of parties who have received copies or rights from you 
under\x0athis License. If your rights have been terminated and not permanently\x0areinstated, you do not qualify to receive new licenses for the same\x0amaterial under section 10.\x0a\x0a 9. Acceptance Not Required for Having Copies.\x0a\x0a You are not required to accept this License in order to receive or\x0arun a copy of the Program. Ancillary propagation of a covered work\x0aoccurring solely as a consequence of using peer-to-peer transmission\x0ato receive a copy likewise does not require acceptance. However,\x0anothing other than this License grants you permission to propagate or\x0amodify any covered work. These actions infringe copyright if you do\x0anot accept this License. Therefore, by modifying or propagating a\x0acovered work, you indicate your acceptance of this License to do so.\x0a\x0a 10. Automatic Licensing of Downstream Recipients.\x0a\x0a Each time you convey a covered work, the recipient automatically\x0areceives a license from the original licensors, to run, modify and\x0apropagate that work, subject to this License. You are not responsible\x0afor enforcing compliance by third parties with this License.\x0a\x0a An \x22entity transaction\x22 is a transaction transferring control of an\x0aorganization, or substantially all assets of one, or subdividing an\x0aorganization, or merging organizations. If propagation of a covered\x0awork results from an entity transaction, each party to that\x0atransaction who receives a copy of the work also receives whatever\x0alicenses to the work the party's predecessor in interest had or could\x0agive under the previous paragraph, plus a right to possession of the\x0aCorresponding Source of the work from the predecessor in interest, if\x0athe predecessor has it or can get it with reasonable efforts.\x0a\x0a You may not impose any further restrictions on the exercise of the\x0arights granted or affirmed under this License. 
For example, you may\x0anot impose a license fee, royalty, or other charge for exercise of\x0arights granted under this License, and you may not initiate litigation\x0a(including a cross-claim or counterclaim in a lawsuit) alleging that\x0aany patent claim is infringed by making, using, selling, offering for\x0asale, or importing the Program or any portion of it.\x0a\x0a 11. Patents.\x0a\x0a A \x22contributor\x22 is a copyright holder who authorizes use under this\x0aLicense of the Program or a work on which the Program is based. The\x0awork thus licensed is called the contributor's \x22contributor version\x22.\x0a\x0a A contributor's \x22essential patent claims\x22 are all patent claims\x0aowned or controlled by the contributor, whether already acquired or\x0ahereafter acquired, that would be infringed by some manner, permitted\x0aby this License, of making, using, or selling its contributor version,\x0abut do not include claims that would be infringed only as a\x0aconsequence of further modification of the contributor version. For\x0apurposes of this definition, \x22control\x22 includes the right to grant\x0apatent sublicenses in a manner consistent with the requirements of\x0athis License.\x0a\x0a Each contributor grants you a non-exclusive, worldwide, royalty-free\x0apatent license under the contributor's essential patent claims, to\x0amake, use, sell, offer for sale, import and otherwise run, modify and\x0apropagate the contents of its contributor version.\x0a\x0a In the following three paragraphs, a \x22patent license\x22 is any express\x0aagreement or commitment, however denominated, not to enforce a patent\x0a(such as an express permission to practice a patent or covenant not to\x0asue for patent infringement). 
To \x22grant\x22 such a patent license to a\x0aparty means to make such an agreement or commitment not to enforce a\x0apatent against the party.\x0a\x0a If you convey a covered work, knowingly relying on a patent license,\x0aand the Corresponding Source of the work is not available for anyone\x0ato copy, free of charge and under the terms of this License, through a\x0apublicly available network server or other readily accessible means,\x0athen you must either (1) cause the Corresponding Source to be so\x0aavailable, or (2) arrange to deprive yourself of the benefit of the\x0apatent license for this particular work, or (3) arrange, in a manner\x0aconsistent with the requirements of this License, to extend the patent\x0alicense to downstream recipients. \x22Knowingly relying\x22 means you have\x0aactual knowledge that, but for the patent license, your conveying the\x0acovered work in a country, or your recipient's use of the covered work\x0ain a country, would infringe one or more identifiable patents in that\x0acountry that you have reason to believe are valid.\x0a\x0a If, pursuant to or in connection with a single transaction or\x0aarrangement, you convey, or propagate by procuring conveyance of, a\x0acovered work, and grant a patent license to some of the parties\x0areceiving the covered work authorizing them to use, propagate, modify\x0aor convey a specific copy of the covered work, then the patent license\x0ayou grant is automatically extended to all recipients of the covered\x0awork and works based on it.\x0a\x0a A patent license is \x22discriminatory\x22 if it does not include within\x0athe scope of its coverage, prohibits the exercise of, or is\x0aconditioned on the non-exercise of one or more of the rights that are\x0aspecifically granted under this License. 
You may not convey a covered\x0awork if you are a party to an arrangement with a third party that is\x0ain the business of distributing software, under which you make payment\x0ato the third party based on the extent of your activity of conveying\x0athe work, and under which the third party grants, to any of the\x0aparties who would receive the covered work from you, a discriminatory\x0apatent license (a) in connection with copies of the covered work\x0aconveyed by you (or copies made from those copies), or (b) primarily\x0afor and in connection with specific products or compilations that\x0acontain the covered work, unless you entered into that arrangement,\x0aor that patent license was granted, prior to 28 March 2007.\x0a\x0a Nothing in this License shall be construed as excluding or limiting\x0aany implied license or other defenses to infringement that may\x0aotherwise be available to you under applicable patent law.\x0a\x0a 12. No Surrender of Others' Freedom.\x0a\x0a If conditions are imposed on you (whether by court order, agreement or\x0aotherwise) that contradict the conditions of this License, they do not\x0aexcuse you from the conditions of this License. If you cannot convey a\x0acovered work so as to satisfy simultaneously your obligations under this\x0aLicense and any other pertinent obligations, then as a consequence you may\x0anot convey it at all. For example, if you agree to terms that obligate you\x0ato collect a royalty for further conveying from those to whom you convey\x0athe Program, the only way you could satisfy both those terms and this\x0aLicense would be to refrain entirely from conveying the Program.\x0a\x0a 13. Use with the GNU Affero General Public License.\x0a\x0a Notwithstanding any other provision of this License, you have\x0apermission to link or combine any covered work with a work licensed\x0aunder version 3 of the GNU Affero General Public License into a single\x0acombined work, and to convey the resulting work. 
The terms of this\x0aLicense will continue to apply to the part which is the covered work,\x0abut the special requirements of the GNU Affero General Public License,\x0asection 13, concerning interaction through a network will apply to the\x0acombination as such.\x0a\x0a 14. Revised Versions of this License.\x0a\x0a The Free Software Foundation may publish revised and/or new versions of\x0athe GNU General Public License from time to time. Such new versions will\x0abe similar in spirit to the present version, but may differ in detail to\x0aaddress new problems or concerns.\x0a\x0a Each version is given a distinguishing version number. If the\x0aProgram specifies that a certain numbered version of the GNU General\x0aPublic License \x22or any later version\x22 applies to it, you have the\x0aoption of following the terms and conditions either of that numbered\x0aversion or of any later version published by the Free Software\x0aFoundation. If the Program does not specify a version number of the\x0aGNU General Public License, you may choose any version ever published\x0aby the Free Software Foundation.\x0a\x0a If the Program specifies that a proxy can decide which future\x0aversions of the GNU General Public License can be used, that proxy's\x0apublic statement of acceptance of a version permanently authorizes you\x0ato choose that version for the Program.\x0a\x0a Later license versions may give you additional or different\x0apermissions. However, no additional obligations are imposed on any\x0aauthor or copyright holder as a result of your choosing to follow a\x0alater version.\x0a\x0a 15. Disclaimer of Warranty.\x0a\x0a THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\x0aAPPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\x0aHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \x22AS IS\x22 WITHOUT WARRANTY\x0aOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\x0aTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\x0aPURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\x0aIS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\x0aALL NECESSARY SERVICING, REPAIR OR CORRECTION.\x0a\x0a 16. Limitation of Liability.\x0a\x0a IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\x0aWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\x0aTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\x0aGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\x0aUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\x0aDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\x0aPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\x0aEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\x0aSUCH DAMAGES.\x0a\x0a 17. Interpretation of Sections 15 and 16.\x0a\x0a If the disclaimer of warranty and limitation of liability provided\x0aabove cannot be given local legal effect according to their terms,\x0areviewing courts shall apply local law that most closely approximates\x0aan absolute waiver of all civil liability in connection with the\x0aProgram, unless a warranty or assumption of liability accompanies a\x0acopy of the Program in return for a fee.\x0a\x0a END OF TERMS AND CONDITIONS\x0a\x0a How to Apply These Terms to Your New Programs\x0a\x0a If you develop a new program, and you want it to be of the greatest\x0apossible use to the public, the best way to achieve this is to make it\x0afree software which everyone can redistribute and change under these terms.\x0a\x0a To do so, attach the following notices to the program. 
It is safest\x0ato attach them to the start of each source file to most effectively\x0astate the exclusion of warranty; and each file should have at least\x0athe \x22copyright\x22 line and a pointer to where the full notice is found.\x0a\x0a <one line to give the program's name and a brief idea of what it does.>\x0a Copyright (C) <year> <name of author>\x0a\x0a This program is free software: you can redistribute it and/or modify\x0a it under the terms of the GNU General Public License as published by\x0a the Free Software Foundation, either version 3 of the License, or\x0a (at your option) any later version.\x0a\x0a This program is distributed in the hope that it will be useful,\x0a but WITHOUT ANY WARRANTY; without even the implied warranty of\x0a MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\x0a GNU General Public License for more details.\x0a\x0a You should have received a copy of the GNU General Public License\x0a along with this program. If not, see <http://www.gnu.org/licenses/>.\x0a\x0aAlso add information on how to contact you by electronic and paper mail.\x0a\x0a If the program does terminal interaction, make it output a short\x0anotice like this when it starts in an interactive mode:\x0a\x0a <program> Copyright (C) <year> <name of author>\x0a This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\x0a This is free software, and you are welcome to redistribute it\x0a under certain conditions; type `show c' for details.\x0a\x0aThe hypothetical commands `show w' and `show c' should show the appropriate\x0aparts of the General Public License. 
Of course, your program's commands\x0amight be different; for a GUI interface, you would use an \x22about box\x22.\x0a\x0a You should also get your employer (if you work as a programmer) or school,\x0aif any, to sign a \x22copyright disclaimer\x22 for the program, if necessary.\x0aFor more information on this, and how to apply and follow the GNU GPL, see\x0a<http://www.gnu.org/licenses/>.\x0a\x0a The GNU General Public License does not permit incorporating your program\x0ainto proprietary programs. If your program is a subroutine library, you\x0amay consider it more useful to permit linking proprietary applications with\x0athe library. If this is what you want to do, use the GNU Lesser General\x0aPublic License instead of this License. But first, please read\x0a<http://www.gnu.org/philosophy/why-not-lgpl.html>.\x0a"
# Machine-generated PySide resource tables (pyside-rcc output): the encoded
# resource names and the resource-tree structure for the embedded files
# (version.html, license.txt). Do not edit by hand; regenerate instead.
qt_resource_name = "\x00\x0c\x0d\x8d\xcf<\x00v\x00e\x00r\x00s\x00i\x00o\x00n\x00.\x00h\x00t\x00m\x00l\x00\x0b\x05u\xa8t\x00l\x00i\x00c\x00e\x00n\x00s\x00e\x00.\x00t\x00x\x00t"
qt_resource_struct = "\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x09b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00"
def qInitResources():
    # Register the embedded resource data with Qt's resource system so the
    # files become reachable through the ":/..." path prefix.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Unregister the embedded resource data from Qt's resource system.
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources() | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/gui/views/generated/qrc_strings.py | qrc_strings.py |
import numpy as np
from apasvo.picking import findpeaks
from numpy.lib import stride_tricks
from scipy import signal
def sta_lta(x, fs, threshold=None, sta_length=5., lta_length=100.,
            peak_window=1., method='convolution'):
    """Event picking/detection using STA-LTA algorithm.

    The STA-LTA algorithm processes seismic signals by using two moving time
    windows, a short time average window (STA), which measures the instant
    amplitude of the signal and watches for earthquakes, and a long time
    average window (LTA), which takes care of the current average of the
    signal amplitude.

    See:
    Trnkoczy, A. (2002). Understanding and parameter setting of STA/LTA trigger
    algorithm. IASPEI New Manual of Seismological Observatory Practice, 2, 1-19.

    Args:
        x: Seismic data, numpy array type.
        fs: Sampling rate in Hz.
        threshold: Local maxima found in the characteristic function over
            this value will be returned by the function as possible events
            (detection mode). If threshold is None, the function will return
            only the global maximum (picking mode). Default value is None.
        sta_length: Length of STA window, in seconds. Default: 5.0 seconds.
        lta_length: Length of LTA window, in seconds. Default: 100.0 seconds.
        peak_window: How many seconds on each side of a point of the
            characteristic function to use for the comparison to consider the
            point to be a local maximum. If 'threshold' is None, this
            parameter has no effect. Default value is 1 s.
        method: 'strides', 'convolution' or 'iterative'.
            Warning: 'strides' method may throw an 'array too big' ValueError
            exception on 32 bit builds if x is large enough.
            Default: 'convolution'

    Returns:
        event_t: A list of possible event locations, given in samples from
            the start of the signal, that correspond to the local maxima of
            the characteristic function. If threshold is None, the list
            contains only the global maximum of the function.
        cf: Characteristic function, numpy array type.

    Raises:
        ValueError: If fs, sta_length or lta_length are not positive, if
            sta_length >= lta_length, or if method is not supported.
    """
    # Check arguments
    if fs <= 0:
        raise ValueError("fs must be a positive value")
    if sta_length <= 0:
        raise ValueError("sta_length must be a positive value")
    if lta_length <= 0:
        raise ValueError("lta_length must be a positive value")
    if sta_length >= lta_length:
        raise ValueError("lta_length must be greater than sta_length")
    if method not in ('convolution', 'strides', 'iterative'):
        raise ValueError("method not supported")
    fs = float(fs)
    # Window lengths in samples. They are used below as array sizes, stride
    # shapes and slice bounds, so they must be integers: float sizes raise
    # TypeError on Python 3 / modern NumPy.
    sta = int(min(len(x), sta_length * fs + 1))
    lta = int(min(len(x), lta_length * fs + 1))
    peak_window = int(peak_window * fs / 2.)
    # Demean and rectify the signal before averaging.
    x_norm = np.abs(x - np.mean(x))
    cf = np.zeros(len(x))
    if len(cf) > 0:
        if method == 'strides':
            # Build overlapping sliding-window views over the zero-padded
            # signal; no data is copied until the sums below.
            sta_win = stride_tricks.as_strided(np.concatenate((x_norm, np.zeros(sta))),
                                               shape=(len(x), sta),
                                               strides=(1 * x_norm.dtype.itemsize, 1 * x_norm.dtype.itemsize))
            lta_win = stride_tricks.as_strided(np.concatenate((x_norm, np.zeros(lta))),
                                               shape=(len(x), lta),
                                               strides=(1 * x_norm.dtype.itemsize, 1 * x_norm.dtype.itemsize))
            # Effective window length shrinks at the tail of the signal.
            sta_win_len = np.concatenate((np.ones(len(x) - sta) * sta,
                                          np.arange(sta, 0, -1)))
            lta_win_len = np.concatenate((np.ones(len(x) - lta) * lta,
                                          np.arange(lta, 0, -1)))
            cf = (sta_win.sum(axis=1) / sta_win_len) / (lta_win.sum(axis=1) / lta_win_len)
        elif method == 'convolution':
            # Moving sums computed as FFT convolutions with a boxcar kernel.
            sta_win = signal.fftconvolve(np.ones(sta), x_norm)[sta - 1:]
            lta_win = signal.fftconvolve(np.ones(lta), x_norm)[lta - 1:]
            sta_win_len = np.concatenate((np.ones(len(x) - sta) * sta,
                                          np.arange(sta, 0, -1)))
            lta_win_len = np.concatenate((np.ones(len(x) - lta) * lta,
                                          np.arange(lta, 0, -1)))
            cf = (sta_win / sta_win_len) / (lta_win / lta_win_len)
        elif method == 'iterative':
            # Straightforward (slow) reference implementation.
            for i in range(len(x)):
                cf[i] = np.mean(x_norm[i:i + sta]) / np.mean(x_norm[i:i + lta])
    # NOTE(review): 'peak_window' already holds samples (it was multiplied by
    # fs above), so multiplying by fs again here looks like a double scaling.
    # Confirm the units expected by findpeaks.find_peaks' 'order' argument
    # before changing it; the same pattern appears in the AMPA module.
    event_t = findpeaks.find_peaks(cf, threshold, order=peak_window * fs)
    return event_t, cf
class StaLta(object):
    """Configurable wrapper around the STA-LTA picking algorithm.

    Stores the window lengths once and applies the algorithm to any
    seismic signal through :meth:`run`.

    Attributes:
        sta_length: Length of the STA window, in seconds. Default: 5.0.
        lta_length: Length of the LTA window, in seconds. Default: 100.0.
    """

    def __init__(self, sta_length=5.0, lta_length=100.0, **kwargs):
        # Extra keyword arguments are accepted (and ignored) so that a
        # generic settings dict can be passed through unchanged.
        super(StaLta, self).__init__()
        self.sta_length = sta_length
        self.lta_length = lta_length

    def run(self, x, fs, threshold=None, peak_window=1.0):
        """Applies the STA-LTA algorithm to an array of seismic data.

        Args:
            x: Seismic data, numpy array type.
            fs: Sample rate in Hz.
            threshold: Local maxima of the characteristic function above
                this value are reported as possible events (detection
                mode). When None, only the global maximum is reported
                (picking mode). Default: None.
            peak_window: Seconds on each side of a sample of the
                characteristic function used to decide whether it is a
                local maximum. Ignored when 'threshold' is None.
                Default: 1 s.

        Returns:
            A tuple (event_t, cf) where event_t lists candidate event
            positions in samples from the start of the signal, and cf is
            the characteristic function as a numpy array.
        """
        return sta_lta(x, fs,
                       threshold=threshold,
                       sta_length=self.sta_length,
                       lta_length=self.lta_length,
                       peak_window=peak_window)

    @property
    def name(self):
        """Upper-case algorithm name used to label this picker."""
        return self.__class__.__name__.upper()
import numpy as np
from scipy import signal
from apasvo.utils.formats import rawfile
def gutenberg_richter(b=1.0, size=1, m_min=2.0, m_max=None):
    """Generates a random sequence of earthquake magnitudes
    according to Gutenberg-Richter law.

    See:
    BAKER, Jack W. An introduction to probabilistic seismic hazard analysis
    (PSHA). White paper, version, 2008, vol. 1.

    Args:
        b: A parameter that measures the relative ratio between small and
            large magnitudes in a region. Default value is 1.0.
        size: Size of the generated sequence. Default value is 1.
        m_min: Minimum magnitude considered. Default: 2.0.
        m_max: Upper bound of earthquake magnitudes.
            Default value is None, which means no upper limit is considered.

    Returns:
        out: A numpy array of random earthquake magnitudes in the range
            [m_min, m_max] (or [m_min, inf) when m_max is None).
    """
    # Test explicitly against None: the original truthiness test would also
    # treat m_max == 0.0 as "no upper bound".
    if m_max is None:
        bound_term = 1.0
    else:
        # CDF normalization factor of the bounded Gutenberg-Richter law.
        bound_term = 1.0 - 10 ** (-b * (m_max - m_min))
    # Inverse-transform sampling of the (possibly truncated) distribution.
    return m_min - np.log10(-np.random.rand(size) * bound_term + 1.0) / b
def generate_artificial_earthquake(tmax, t0, fs, P_signal_db, P_noise_db,
                                   bfirls=None, low_period=50., high_period=10.,
                                   bandwidth=4., overlap=1., f_low=2.,
                                   f_high=18., low_amp=.2, high_amp=.1):
    """Generates a synthetic earthquake signal with background noise.

    An artificial earthquake is synthesized at the requested onset time from
    band-filtered white noise modulated by per-band envelope functions.
    Background noise, also modeled from white noise, is then added on top.

    Args:
        tmax: Length of the generated signal in seconds.
        t0: Start time of the earthquake in seconds from the beginning
            of the signal.
        fs: Sample rate in Hz.
        P_signal_db: Earthquake power in dB.
        P_noise_db: Background noise power in dB.
        bfirls: Coefficients of a FIR filter that models the background
            noise (see Peterson, J. (1993). Observations and modeling of
            seismic background noise). None means plain white noise is used.
        low_period: First noise-envelope length (seconds) of the multi-band
            synthesis. Default: 50 s.
        high_period: Last noise-envelope length (seconds). Default: 10 s.
        bandwidth: Bandwidth (Hz) of each synthesis band. Default: 4 Hz.
        overlap: Overlap (Hz) between consecutive bands. Default: 1 Hz.
        f_low: First band frequency in Hz. Default: 2 Hz.
        f_high: Last band frequency in Hz. Default: 18 Hz.
        low_amp: First noise-envelope amplitude. Default: 0.2.
        high_amp: Last noise-envelope amplitude. Default: 0.1.

    Returns:
        out: A numpy array containing the generated signal.
    """
    # Synthesize the event itself, then add the modeled background noise.
    quake = generate_seismic_earthquake(tmax, t0, fs, P_signal_db,
                                        low_period, high_period,
                                        bandwidth, overlap,
                                        f_low, f_high,
                                        low_amp, high_amp)
    noise = generate_seismic_noise(tmax, fs, P_noise_db, bfirls)
    return quake + noise
def generate_seismic_earthquake(tmax, t0, fs, P_signal_db, low_period,
                                high_period, bandwidth, overlap,
                                f_low, f_high, low_amp, high_amp):
    """Generates a synthetic earthquake signal.

    An artificial earthquake is generated at the desired start point from
    white noise band-filtered and modulated by using different envelope
    functions for each band.

    Args:
        tmax: Length of the generated signal in seconds.
        t0: Start time of the earthquake in seconds from the beginning
            of the signal.
        fs: Sample rate in Hz.
        P_signal_db: Earthquake power in dB.
        low_period: First noise-envelope length (seconds) of the multi-band
            synthesis.
        high_period: Last noise-envelope length (seconds).
        bandwidth: Bandwidth (Hz) of each band at the multi-band synthesis.
        overlap: Overlap (Hz) between bands.
        f_low: Start frequency of the multi-band synthesis.
        f_high: End frequency of the multi-band synthesis.
        low_amp: First noise-envelope amplitude.
        high_amp: Last noise-envelope amplitude.

    Returns:
        out: A numpy array containing the generated signal. Samples before
            t0 are exactly zero.

    Raises:
        ValueError: If fs is not positive or t0 falls beyond tmax.
    """
    if fs <= 0:
        raise ValueError("fs must be a positive value")
    # Signal length in the range 0:1/fs:tmax
    L = int(tmax * fs) + 1
    # First earthquake sample
    n0 = int(t0 * fs)
    if n0 >= L:
        raise ValueError("Generated earthquake must start before the end of the signal.")
    # Value at which the exponential envelope decay is truncated
    betta = high_amp / 100.
    # The artificial earthquake is built from white noise band-filtered
    # and modulated by a different envelope function on each band.
    w_noise = np.random.randn(L)
    f_filt_low = np.arange(f_low, f_high - bandwidth, bandwidth - overlap)
    f_filt_high = f_filt_low + bandwidth
    N_filt = len(f_filt_low)  # N. of applied filters
    # Matrix where each row holds one band of filtered noise
    noise_band = np.zeros((N_filt, L))
    for i in range(N_filt):
        b, a = signal.butter(2,
                             [f_filt_low[i] / (fs / 2.),
                              f_filt_high[i] / (fs / 2.)], btype='bandpass')
        noise_band[i, :] = signal.lfilter(b, a, w_noise)
    # Length of noise envelope for the different bands.
    filt_len = np.linspace(low_period, high_period, N_filt)
    # Last envelope sample per band. Cast to int: np.round returns floats,
    # and these values are used as slice bounds below (float indices raise
    # TypeError on Python 3 / modern NumPy).
    n1 = np.round(n0 + filt_len * fs).astype(int)
    # Amplitude of noise envelope for the different bands
    filt_amp = np.linspace(low_amp, high_amp, N_filt)
    # From the amplitude and length of the envelopes we obtain the alpha
    # decay constant. The exponential form is A0*exp(-alpha*(n-n0)), whose
    # value at n=n0 is A0 and decays to 'betta' at n=n1.
    alpha = -np.log(betta / filt_amp) / (n1 - n0)
    # Generate the signal envelope (zero before the onset sample n0)
    noise_env = np.zeros((N_filt, L))
    for i in range(N_filt):
        end = min(L, n1[i])
        noise_env[i, n0:end] = (filt_amp[i] *
                                np.exp(-alpha[i] *
                                       (np.arange(n0, end) - n0)))
    # Apply each envelope to its noise band and sum the contributions
    noise_band_envelope = noise_band * noise_env
    artificial_earthquake = np.sum(noise_band_envelope, 0)
    # Power is measured over the first 5 seconds after onset; the slice
    # bound must be an integer since fs may be a float.
    eq_pw_db = 10 * np.log10(np.var(artificial_earthquake[n0:n0 + int(5 * fs)]))
    # Scale so that the earthquake power in dB equals P_signal_db
    gamma_signal = 10 ** ((P_signal_db - eq_pw_db) / 20)
    return gamma_signal * artificial_earthquake
def generate_seismic_noise(tmax, fs, P_noise_db, bfirls=None):
    """Generates a seismic background noise signal.

    White noise is shaped by an optional FIR noise model and scaled so
    that its power matches the requested level.

    Args:
        tmax: Length of the generated signal in seconds.
        fs: Sample rate in Hz.
        P_noise_db: Background noise power in dB.
        bfirls: Coefficients of a FIR filter that models the background
            noise (see Peterson, J. (1993). Observations and modeling of
            seismic background noise). None means plain (unfiltered)
            white noise is used.

    Returns:
        out: A numpy array containing the generated signal.

    Raises:
        ValueError: If fs is not positive.
    """
    if fs <= 0:
        raise ValueError("fs must be a positive value")
    # A single-coefficient all-pass filter stands in for a missing model.
    coefficients = np.array([1]) if bfirls is None else bfirls
    # Number of samples over the range 0:1/fs:tmax
    n_samples = int(tmax * fs) + 1
    # Shape white noise according to the (Peterson-style) noise model
    white = np.random.randn(n_samples)
    colored = signal.lfilter(coefficients, 1, white)
    # Rescale so the output power in dB equals P_noise_db
    current_power_db = 10 * np.log10(np.var(colored))
    gain = 10 ** ((P_noise_db - current_power_db) / 20)
    return gain * colored
class EarthquakeGenerator(object):
    """Generator of synthetic earthquake signals.

    Bundles the synthesis parameters once so that several signals can be
    produced with the same configuration.

    Attributes:
        bfirls: Coefficients of a FIR filter that models the background
            noise (see Peterson, J. (1993). Observations and modeling of
            seismic background noise). None means plain white noise.
        fs: Sample rate in Hz.
        P_noise_db: Background noise power in dB.
        low_period: First noise-envelope length (seconds) of the multi-band
            synthesis. Default: 50.0.
        high_period: Last noise-envelope length (seconds). Default: 10.0.
        bandwidth: Bandwidth (Hz) of each synthesis band. Default: 4.0.
        overlap: Overlap (Hz) between consecutive bands. Default: 1.0.
        f_low: First band frequency in Hz. Default: 2.0.
        f_high: Last band frequency in Hz. Default: 18.0.
        low_amp: First noise-envelope amplitude. Default: 0.2.
        high_amp: Last noise-envelope amplitude. Default: 0.1.
    """

    def __init__(self, bfirls=None, fs=50.0, P_noise_db=0.0,
                 low_period=50.0, high_period=10.0, bandwidth=4.0,
                 overlap=1.0, f_low=2.0, f_high=18.0,
                 low_amp=0.2, high_amp=0.1, **kwargs):
        # Extra keyword arguments are accepted (and ignored) so a generic
        # settings dict can be passed through unchanged.
        super(EarthquakeGenerator, self).__init__()
        self.bfirls = bfirls
        self.fs = fs
        self.P_noise_db = P_noise_db
        self.low_period = low_period
        self.high_period = high_period
        self.bandwidth = bandwidth
        self.overlap = overlap
        self.f_low = f_low
        self.f_high = f_high
        self.low_amp = low_amp
        self.high_amp = high_amp

    def load_noise_coefficients(self, fileobj, dtype='float64',
                                byteorder='native'):
        """Reads the 'bfirls' noise-model coefficients from a file.

        The file may be in binary or plain text format.

        Args:
            fileobj: Binary or text file object containing a list of
                numeric coefficients.
            dtype: Data-type of the numeric data stored in the file.
            byteorder: Byte-order of the numeric data stored in the file.
        """
        handler = rawfile.get_file_handler(fileobj, dtype=dtype,
                                           byteorder=byteorder)
        self.bfirls = handler.read()

    def generate_events(self, t_average, t_max, b=1.0,
                        m_min=2.0, m_max=7.0):
        """Generates a random sequence of seismic events up to a maximum time.

        Inter-event times are drawn from a Poisson distribution and
        magnitudes follow Gutenberg-Richter's law, so the number of events
        may vary between calls even with identical parameters.

        Args:
            t_average: Average time between two consecutive events.
            t_max: Maximum time.
            b: Relative ratio between small and large magnitudes in a
                region. Default: 1.0.
            m_min: Minimum magnitude considered. Default: 2.0.
            m_max: Upper bound of earthquake magnitudes (None for no
                bound). Default: 7.0.

        Returns:
            event_t: Numpy array with the times of the generated events.
            event_m: Numpy array with the magnitudes of the events.
        """
        occurrence_times = []
        elapsed = np.random.poisson(t_average)
        while elapsed < t_max:
            occurrence_times.append(elapsed)
            elapsed += np.random.poisson(t_average)
        magnitudes = gutenberg_richter(b, len(occurrence_times), m_min, m_max)
        return np.array(occurrence_times), magnitudes

    def generate_nevents(self, t_average, event_n, b=1.0,
                         m_min=2.0, m_max=7.0):
        """Generates a random list of seismic events of a given size.

        Inter-event times are drawn from a Poisson distribution and
        magnitudes follow Gutenberg-Richter's law.

        Args:
            t_average: Average time between two consecutive events.
            event_n: Number of events generated.
            b: Relative ratio between small and large magnitudes in a
                region. Default: 1.0.
            m_min: Minimum magnitude considered. Default: 2.0.
            m_max: Upper bound of earthquake magnitudes (None for no
                bound). Default: 7.0.

        Returns:
            event_t: Numpy array with the times of the generated events.
            event_m: Numpy array with the magnitudes of the events.
        """
        occurrence_times = np.cumsum(np.random.poisson(t_average, event_n))
        return occurrence_times, gutenberg_richter(b, event_n, m_min, m_max)

    def generate_earthquake(self, t_max, t0, p_eq):
        """Generates a synthetic earthquake with background noise.

        Args:
            t_max: Length of the generated signal in seconds.
            t0: Start time of the earthquake in seconds from the beginning
                of the signal.
            p_eq: Earthquake power in dB.

        Returns:
            out: A numpy array containing the generated signal.
        """
        return generate_artificial_earthquake(t_max, t0, self.fs, p_eq,
                                              self.P_noise_db, self.bfirls,
                                              self.low_period,
                                              self.high_period,
                                              self.bandwidth, self.overlap,
                                              self.f_low, self.f_high,
                                              self.low_amp, self.high_amp)

    def generate_noise(self, eq):
        """Adds background noise to a given seismic signal.

        Args:
            eq: A seismic signal, numpy array type.

        Returns:
            out: Generated signal, numpy array type.
        """
        duration = len(eq) / self.fs
        return eq + generate_seismic_noise(duration, self.fs,
                                           self.P_noise_db, self.bfirls)
import numpy as np
from scipy import signal
import collections
from apasvo.picking import findpeaks
def prctile(x, p):
    """Computes a percentile of a vector.

    MATLAB like implementation of the percentile algorithm.

    Args:
        x: An unidimensional data array.
        p: A percentage, or an iterable of percentages, in the range
            [0, 100].

    Returns:
        If 'x' is not empty returns the 'p' percentile of x,
        else returns nan (one value per requested percentage when p is
        iterable).

    Raises:
        ValueError: If any requested percentage is outside [0, 100].
    """
    # np.iterable covers lists, tuples and numpy arrays alike and avoids
    # collections.Iterable, which was removed in Python 3.10 (moved to
    # collections.abc in 3.3).
    iterable = np.iterable(p)
    # Check range of p values
    if iterable:
        for v in p:
            if v < 0 or v > 100:
                raise ValueError("p should be in range [0, 100]")
    else:
        if p < 0 or p > 100:
            raise ValueError("p should be in range [0, 100]")
    # If x is empty return all NaNs
    if len(x) == 0:
        if iterable:
            return np.empty(len(p)) * np.nan
        return np.nan
    sorted_x = np.sort(x)
    # Fast path for the median. Restricted to scalar p: evaluating
    # 'p == 50' as a condition would be ambiguous for a numpy array.
    if not iterable and p == 50:
        return np.median(sorted_x)
    # Interpolate over sample midpoints, padded with the extreme values so
    # that 0 and 100 map to the minimum and maximum of x.
    q = np.hstack([0,
                   100 * np.linspace(0.5, len(x) - 0.5, len(x)) / len(x),
                   100])
    xx = np.hstack([sorted_x[0], sorted_x, sorted_x[-1]])
    return np.interp(p, q, xx)
def ampa(x, fs, threshold=None, L=None, L_coef=3.,
         noise_thr=90, bandwidth=3., overlap=1., f_start=2., max_f_end=12.,
         U=12., peak_window=1.):
    """Event picking/detection using AMPA algorithm.

    An implementation of the Adaptive Multi-Band Picking Algorithm (AMPA),
    as described in:

    Álvarez, I., García, L., Mota, S., Cortés, G., Benítez, C.,
    & De la Torre, A. (2013).
    An Automatic P-Phase Picking Algorithm Based on Adaptive Multiband
    Processing. Geoscience and Remote Sensing Letters, IEEE, 10(6), 1488-1492.

    The AMPA method consists on an adaptive multi-band analysis that includes
    envelope detection, noise reduction for each band, and finally a filter
    stage that enhances the response to an earthquake arrival. This approach
    provides accurate estimation of phase arrivals in seismic signals
    strongly affected by background and non-stationary noises.

    Args:
        x: Seismic data, numpy array type.
        fs: Sampling rate in Hz.
        threshold: Local maxima found in the characteristic function over
            this value will be returned by the function as possible events
            (detection mode). If threshold is None, the function will return
            only the global maximum (picking mode). Default value is None.
        L: A list of filter lengths (in seconds) used at the enhancement
            filter stage. A filter's length is related to the duration of the
            detected events; combining several lengths lets the algorithm
            handle both short- and long-duration events.
            Default: [30.0, 20.0, 10.0, 5.0, 2.5]
        L_coef: A parameter that measures the portion of negative response of
            an enhancement filter in order to minimize the response to
            emerging or impulsive noises. Default value is 3.0.
        noise_thr: A percentile of the amplitude of the envelope that
            measures the noise reduction level for each band at the noise
            reduction stage. Default value is 90.
        bandwidth: Bandwidth of each band at the adaptive multi-band
            analysis. Default: 3 Hz.
        overlap: Overlap between bands at the adaptive multi-band analysis.
            Default: 1 Hz.
        f_start: Start frequency at the adaptive multi-band analysis.
            Default: 2 Hz.
        max_f_end: End frequency at the adaptive multi-band analysis.
            Default: 12 Hz.
        U: A parameter used at the end of the enhancement filter stage to
            avoid logarithm of zero and to shift the characteristic function
            to zero. Given y(n) the product of the outputs of the different
            filters, the characteristic function is:
                cf(n) = U + log10(y(n) + 10 ** (-U))
            Default value is 12.
        peak_window: How many seconds on each side of a point of the
            characteristic function to use for the comparison to consider
            the point to be a local maximum. If 'threshold' is None, this
            parameter has no effect. Default value is 1 s.

    Returns:
        event_t: A list of possible event locations, given in samples from
            the start of the signal, that correspond to the local maxima of
            the characteristic function. If threshold is None, the list
            contains only the global maximum of the function.
        ZTOT: Characteristic function, numpy array type.

    Raises:
        ValueError: If any argument is outside its valid range (see checks
            below).
    """
    # Check arguments
    if fs <= 0:
        raise ValueError("fs must be a positive value")
    if bandwidth <= 0:
        raise ValueError("bandwidth must be a positive value")
    if overlap < 0:
        raise ValueError("overlap must be a non-negative value")
    if overlap >= bandwidth:
        raise ValueError("bandwidth must be greater than overlap")
    if f_start <= 0:
        raise ValueError("f_start must be a positive value")
    if max_f_end <= 0:
        raise ValueError("max_f_end must be a positive value")
    if f_start >= max_f_end:
        raise ValueError("max_f_end must be greater than f_start")
    if U <= 0:
        raise ValueError("U must be a positive value")
    if L is None:
        L = [30., 20., 10., 5., 2.5]
    for v in L:
        if v <= 0:
            raise ValueError("L should be a positive value")
        if v >= len(x) / fs:
            raise ValueError("Length of x must be greater than the longest "
                             "of the values of L")
    fs = float(fs)
    # Half-window in samples; cast to int since round() may return a float
    # and the value is passed on as a sample count.
    peak_window = int(round(peak_window * fs / 2.))
    x = x - np.mean(x)  # We remove the mean
    # The first configurable parameter is the bank of bandpass filters.
    # The top band must stay below the Nyquist frequency.
    f_end = min(fs / 2. - bandwidth, max_f_end)
    if f_end <= f_start:
        raise ValueError("The end frequency of the filter bank must be greater"
                         " than its start frequency")
    step = bandwidth - overlap
    flo = np.arange(f_start, f_end + step, step)
    fhi = flo + bandwidth
    # We obtain the analytic signal of each band using the Hilbert transform
    z = np.zeros((len(flo), len(x)))
    for i in range(len(flo)):
        # 32-tap decaying cosine centered on the band's mid frequency
        h_aux = 8 - (np.arange(32) / 4.)
        h0 = np.zeros(512)
        h0[0:32] = h_aux * np.cos(2. * np.pi * ((flo[i] + fhi[i]) / 2.) *
                                  np.arange(32) / fs)
        h0o = np.imag(signal.hilbert(h0))
        # Filtering the signal
        xa = signal.fftconvolve(x, h0)[:len(x)]  # Same as signal.lfilter(h0, 1, x)
        xao = signal.fftconvolve(x, h0o)[:len(x)]  # Same as signal.lfilter(h0o, 1, x)
        # Envelope of the analytic signal
        y0 = np.sqrt((xa ** 2) + (xao ** 2))
        # Fix a threshold to modify the energies in the channels
        thr = prctile(y0, noise_thr)
        # Amplitudes below the threshold are set to 1; amplitudes above it
        # are set to the number of times they exceed the threshold.
        z[i, :] = (y0 / thr) * (y0 > thr) + (y0 <= thr)
    # Sum the contribution of all frequency channels, then take logarithm
    ztot = np.sum(z, 0)
    # Compute the logarithm once; the original evaluated it twice.
    log_ztot = np.log10(ztot)
    lztot = log_ztot - np.min(log_ztot) + 1e-2
    # The summed signal is passed through a set of non-linear enhancement
    # filters of different lengths (fully configurable via L).
    Ztot = np.zeros((len(L), len(x)))
    for i in range(len(L)):
        l = int(L[i] * fs)
        # Ramp of positive weights followed by L_coef-scaled negative ones
        B = np.zeros(2 * l)
        B[0:l] = range(1, l + 1)
        B[l:2 * l] = L_coef * (np.arange(1, l + 1) - (l + 1))
        B = B / np.sum(np.abs(B))
        Zt = signal.fftconvolve(lztot, B)[:len(x)]  # Same as signal.lfilter(B, 1, lztot)
        Zt = Zt * (Zt > 0)
        Ztot[i, :-l] = np.roll(Zt, -l)[:-l]
    # Slice bound must be an integer: np.max(L) * fs is a float and float
    # indices raise TypeError on Python 3 / modern NumPy.
    ZTOT = np.prod(Ztot, 0)[:-int(np.max(L) * fs)]
    ZTOT = U + np.log10(np.abs(ZTOT) + (10 ** -U))
    # NOTE(review): 'peak_window' already holds samples, so multiplying by fs
    # again looks like a double scaling -- confirm the units expected by
    # findpeaks.find_peaks' 'order' argument; the same pattern appears in
    # the STA-LTA module.
    event_t = findpeaks.find_peaks(ZTOT, threshold, order=peak_window * fs)
    return event_t, ZTOT
class Ampa(object):
    """A class to configure an instance of the AMPA algorithm and
    apply it over a given array containing seismic data.

    Given some overlap and window sizes, this class applies the AMPA method
    by using a sliding window approach.

    Attributes:
        window: Size of the window in seconds. Default: 100 seconds.
        step: Step size. Default: 50 seconds.
        L: A list of filter lengths (in seconds) at the enhancement filter
            stage. Default: [30.0, 20.0, 10.0, 5.0, 2.5]
        L_coef: A parameter that measures the portion of negative response of
            an enhancement filter in order to minimize the response to emerging
            or impulsive noises.
            Default value is 3.0.
        noise_thr: A percentile of the amplitude of the envelope that measures
            the noise reduction level for each band at the noise reduction
            stage.
            Default value is 90.
        bandwidth: Bandwidth of each band of the adaptive multi-band analysis.
            Default: 3 Hz.
        overlap: Overlap between bands of the adaptive multi-band analysis.
            Default: 1 Hz.
        f_start: Start frequency of the adaptive multi-band analysis.
            Default: 2 Hz.
        max_f_end: End frequency of the adaptive multi-band analysis.
            Default: 12 Hz.
        U: A parameter used at the end of the enhancement filter stage to avoid
            logarithm of zero and to shift the characteristic function to zero.
            Default value is 12.
    """
    def __init__(self, window=100., step=50.,
                 L=None, L_coef=3., noise_thr=90.,
                 bandwidth=3., overlap=1., f_start=2.,
                 f_end=12., U=12., **kwargs):
        # See the class docstring for the meaning of each parameter.
        super(Ampa, self).__init__()
        self.window = window
        self.step = step
        self.L = L
        if self.L is None:
            # Mutable default handled here instead of in the signature
            self.L = [30., 20., 10., 5., 2.5]
        self.L_coef = L_coef
        self.noise_thr = noise_thr
        self.bandwidth = bandwidth
        self.overlap = overlap
        self.f_start = f_start
        self.max_f_end = f_end
        self.U = U
    def run(self, x, fs, threshold=None, peak_window=1.0):
        """Executes AMPA algorithm over a given array of data.

        Args:
            x: Seismic data, numpy array type.
            fs: Sample rate in Hz.
            threshold: Local maxima found in the characteristic function over
                this value will be returned by the function as possible events
                (detection mode).
                If threshold is None, the function will return only the global
                maximum (picking mode).
                Default value is None.
            peak_window: How many seconds on each side of a point of the
                characteristic function to use for the comparison to consider
                the point to be a local maximum.
                If 'threshold' is None, this parameter has no effect.
                Default value is 1 s.

        Returns:
            et: A list of possible event locations, given in samples from the
                start of the signal, that correspond to the local maxima of the
                characteristic function. If threshold is None, the list contains
                only the global maximum of the function.
            out: Characteristic function, numpy array type.
        """
        # The per-window CF returned by ampa() is max(L)*fs samples shorter
        # than its input, so the global CF loses that same tail.
        tail = int(np.max(self.L) * fs)
        out = np.zeros(len(x) - tail)
        step = int(self.step * fs)
        # Samples shared by two consecutive windows, once the tail lost by
        # the enhancement filtering is discounted
        overlapped = max(0, int((self.window - self.step) * fs) - tail)
        for i in xrange(0, len(out), step):
            size = min(self.window * fs, len(x) - i)
            _, cf = ampa(x[i:i + size], fs, L=self.L,
                         L_coef=self.L_coef, noise_thr=self.noise_thr,
                         bandwidth=self.bandwidth, overlap=self.overlap,
                         f_start=self.f_start, max_f_end=self.max_f_end,
                         U=self.U)
            # Samples already covered by the previous window are averaged
            # with the new CF values; the remainder is copied verbatim.
            out[i: i + overlapped] = ((out[i: i + overlapped] +
                                       cf[:overlapped]) / 2.)
            out[i + overlapped: i + size - tail] = cf[overlapped:]
        et = findpeaks.find_peaks(out, threshold, order=peak_window * fs)
        return et, out
    @property
    def name(self):
        # Upper-cased class name; used elsewhere to tag picks with the method
        return self.__class__.__name__.upper()
import numpy as np
def takanami(x, n0, n1, p=1, k=5):
    """Estimate the arrival time of a seismic event via Takanami's AR method.

    Two autoregressive models are fitted: one describing the signal before
    the arrival (noise model) and one describing it afterwards (earthquake
    model). Assuming both segments have quite different characteristics,
    the arrival is the point inside [n0, n1] minimizing the joint Akaike
    Information Criterion of the two models.

    See:
        Takanami, T., & Kitagawa, G. (1988).
        A new efficient procedure for the estimation of onset times of seismic
        waves. Journal of Physics of the Earth, 36(6), 267-290.

    Args:
        x: A seismic signal, numpy array type.
        n0: Start of the interval [n0, n1] assumed to contain the arrival,
            in samples from the beginning of 'x'.
        n1: End of the interval [n0, n1] assumed to contain the arrival,
            in samples from the beginning of 'x'.
        p: Step of the autoregressive model. Default: 1.
        k: Order of the autoregressive model. Default: 5.

    Returns:
        n_pick: Arrival time, in samples from the beginning of 'x'.
        total_aic: Joint AIC values from 'n0' to 'n1'.
    """
    n_models = (n1 - n0) / float(p)  # there are n_models + 1 candidate models
    # AIC curve of the AR model fitted to the leading (noise) segment
    aic_noise = _takanami_aic(x, n0, n_models, k, p)
    # For the earthquake model the signal is reversed, so the very same
    # forward procedure used for the noise model can be reused
    reversed_x = x[::-1]
    n0_rev = len(reversed_x) - (n1 + 1) + 1
    aic_quake = _takanami_aic(reversed_x, n0_rev, n_models, k, p)[::-1]
    # The pick is the point where the joint AIC reaches its minimum
    total_aic = aic_noise + aic_quake
    n_pick = n0 + np.argmin(total_aic) * p
    return n_pick, total_aic
def _takanami_aic(x, n0, l, k=5, p=1):
    """Computes AIC values of an autoregressive model fitted to 'x'.

    Args:
        x: A seismic signal, numpy array type.
        n0: Initial point of the interval [n0,n1] where the method assumes
            the arrival time is in.
            The value is given in samples from the beginning of 'x'.
        l: Number of possible models in the interval [n0,n1]. Callers may
            pass a float ratio; it is truncated to an integer.
        k: Order of the autoregressive model.
            Default: 5.
        p: Step of the autoregressive model.
            Default: 1.

    Returns:
        aic_values: Array of l + 1 AIC values, one per candidate model,
            covering n0 to n1.

    Raises:
        ValueError: If 'p', 'k' or 'l' is not positive, or if n0 <= k.
    """
    if p <= 0:
        raise ValueError("p should be a positive value")
    if k <= 0:
        raise ValueError("k should be a positive value")
    if n0 <= k:
        raise ValueError("n0 should be greater than k")
    if l <= 0:
        raise ValueError("l should be a positive value")
    # takanami() computes l as (n1 - n0) / float(p): coerce it to an int,
    # since float array sizes are rejected by modern NumPy
    l = int(l)
    aic_0_l = np.zeros(l + 1)
    sigma2 = np.zeros(k + 1)
    aic_i = np.zeros(k + 1)
    # Calculate AIC value for n0
    x_n0 = x[:n0]
    # Initialize X0 matrix: column i holds the signal delayed i + 1 samples,
    # the last column holds the undelayed samples
    X0 = np.zeros((n0 - k, k + 1))
    for i in range(k):
        X0[:, i] = x_n0[k - i - 1:-1 - i]
    X0[:, k] = x_n0[k:]
    # Householder transformation by QR decomposition
    R0 = np.linalg.qr(X0, mode='r')
    R0 = R0[:k + 1, :k + 1]
    # Calculate variances and AIC for every model order 0..k
    c1 = 1. / (n0 - k)
    c2 = n0 - k
    for j in range(k + 1):
        sigma2[j] = c1 * np.sum(R0[j:k + 1, k] ** 2)
        aic_i[j] = c2 * np.log(sigma2[j]) + 2 * (j + 1)
    aic_0_l[0] = np.min(aic_i)
    # Calculate AIC from n_1 to n_l by updating the previous QR factor with
    # the augmented data instead of refitting from scratch
    R = np.zeros((k + 1 + p, k + 1))
    S = R0
    for i in range(1, l + 1):
        aug_data = x[(n0 + i * p - k - 1):(n0 + i * p)]  # Augmented Data
        R[:k + 1, :k + 1] = S
        R[k + 1:k + 1 + p, :k] = aug_data[-2::-1]
        R[k + 1:k + 1 + p, k] = aug_data[-1]
        # QR decomposition
        S = np.linalg.qr(R, mode='r')
        S = S[:k + 1, :k + 1]
        # Calculate variances and AIC...
        c1 = 1. / (n0 + i * p - k)
        c2 = n0 + i * p - k
        for j in range(k + 1):
            sigma2[j] = c1 * np.sum(S[j:k + 1, k] ** 2)
            aic_i[j] = c2 * np.log(sigma2[j]) + 2 * (j + 1)
        aic_0_l[i] = np.min(aic_i)
    return aic_0_l
class Takanami(object):
    """Configurable wrapper around the Takanami AR picking algorithm.

    Attributes:
        p: Step of the autoregressive model.
            Default: 1.
        k: Order of the autoregressive model.
            Default: 5.
    """

    def __init__(self, p=1, k=5):
        super(Takanami, self).__init__()
        self.p = p
        self.k = k

    def run(self, x, fs, t_start=0.0, t_end=np.inf):
        """Apply the Takanami AR algorithm to a slice of 'x'.

        An arrival time is searched for between 't_start' and 't_end'.
        The method assumes the signal characteristics before and after the
        arrival are quite different, so a narrow interval should be chosen
        in order to get good results.

        Args:
            x: Seismic data, numpy array type.
            fs: Sample rate in Hz.
            t_start: Start time point (seconds) of the interval
                [t_start, t_end] where the arrival time is supposed to be.
            t_end: End time point (seconds) of the interval
                [t_start, t_end] where the arrival time is supposed to be.

        Return:
            et: Arrival time, given in samples from the beginning of 'x'.
            aic: List of AIC values.
            n0: Start time point of 'aic', in samples from the beginning
                of 'x'.
        """
        first = int(max(0, t_start * fs))
        last = int(min(len(x), (t_end * fs) + 1))
        # Leave room for the AR model order at both ends of the slice
        margin = (self.k + 1) * 2
        pt, aic = takanami(x[first:last], margin, (last - first) - margin,
                           p=self.p, k=self.k)
        return first + pt, aic, first + margin
import numpy as np
import obspy as op
import multiprocessing as mp
import itertools
from obspy.core.utcdatetime import UTCDateTime
from obspy.core.event import Pick
from obspy.core.event import ResourceIdentifier
from obspy.core.event import CreationInfo
from obspy.core.event import WaveformStreamID
from obspy.core.event import Comment
from obspy.core.event import Catalog
from obspy.core.event import Event
from obspy.signal import filter
import csv
import copy
import os
import uuid
import gc
from copy import deepcopy
from apasvo.picking import takanami
from apasvo.picking import envelope as env
from apasvo.utils.formats import rawfile
from apasvo.utils import collections
from apasvo.utils import clt
# Identifiers of the detection/picking algorithm that produced an event
method_other = 'other'
method_takanami = 'Takanami'
method_stalta = 'STALTA'
method_stalta_takanami = 'STALTA+Takanami'
method_ampa = 'AMPA'
method_ampa_takanami = 'AMPA+Takanami'
ALLOWED_METHODS = (
    method_other,
    method_takanami,
    method_stalta,
    method_stalta_takanami,
    method_ampa,
    method_ampa_takanami
)
# Seismic phases a pick can be tagged with (first value is the fallback)
PHASE_VALUES = (
    "P",
    "S",
    "Other",
)
# Pick evaluation modes (passed as 'evaluation_mode' to ApasvoEvent/Pick)
mode_manual = 'manual'
mode_automatic = 'automatic'
# Pick revision states (passed as 'evaluation_status' to ApasvoEvent/Pick)
status_preliminary = 'preliminary'
status_reviewed = 'reviewed'
status_confirmed = 'confirmed'
status_rejected = 'rejected'
status_final = 'final'
DEFAULT_DTYPE = '=f8'  # Default datatype: 8-byte (64-bit) float, native byte order
DEFAULT_DELTA = 0.02  # Default sampling period in seconds (i.e. 50 Hz)
def generate_csv(records, fout, delimiter=',', lineterminator='\n'):
    """Write a Comma Separated Value (CSV) summary of a list of records.

    One row is emitted per event of each record, with the columns:

        file_name: Name of the file (absolute path) that stores the data
            signal where the event was found.
        time: Event arrival time (record start time plus event offset).
        cf_value: Characteristic function value at the event arrival time.
        name: An arbitrary string that identifies the event.
        method: Algorithm used to find the event, e.g. 'STA-LTA',
            'STA-LTA+Takanami', 'AMPA', 'AMPA+Takanami' or 'other'.
        mode: Event picking mode: 'manual', 'automatic' or 'undefined'.
        status: Revision status of the event: 'preliminary', 'revised',
            'confirmed', 'rejected' or 'undefined'.
        comments: Additional comments.

    Args:
        records: A list of record objects.
        fout: Output file object.
        delimiter: Character separating fields/columns. Default: ','.
        lineterminator: Character separating records/rows. Default: '\\n'.
    """
    fieldnames = ['file_name', 'time', 'cf_value', 'name',
                  'method', 'mode', 'status', 'comments']
    writer = csv.DictWriter(fout, fieldnames,
                            delimiter=delimiter, lineterminator=lineterminator)
    writer.writeheader()
    # Stream one row per (record, event) pair instead of materializing
    # the whole table first
    for record in records:
        for event in record.events:
            writer.writerow({'file_name': record.filename,
                             'time': record.starttime + event.time,
                             'cf_value': event.cf_value,
                             'name': event.name,
                             'method': event.method,
                             'mode': event.evaluation_mode,
                             'status': event.evaluation_status,
                             'comments': event.comments})
class ApasvoEvent(Pick):
    """A seismic event found in a trace.

    This class stores several attributes used to describe a possible event
    found in a seismic signal, as well as data results from the computation
    of Takanami algorithm in order to refine the arrival time of the event.

    Attributes:
        trace: ApasvoTrace instance where the event was found.
        time: Event arrival time as an absolute timestamp. Kept in sync
            with 'stime' (see __setattr__).
        stime: Event arrival time, given in samples from the beginning of
            trace.signal. Kept in sync with 'time'.
        cf_value: Characteristic function value at the event arrival time.
        name: An arbitrary string that identifies the event.
            Default: ''.
        comments: Additional comments.
            Default: ''.
        method: A string indicating the algorithm used to find the event.
            Possible values are: 'STALTA', 'STALTA+Takanami', 'AMPA',
            'AMPA+Takanami' and 'other'.
            Default: 'other'.
        n0_aic: Start time point of computed AIC values. The value is given in
            samples from the beginning of trace.signal.
        aic: List of AIC values from n0_aic.
    """
    # Algorithm identifiers accepted for the 'method' attribute
    methods = (method_other, method_takanami, method_stalta,
               method_stalta_takanami, method_ampa, method_ampa_takanami)
    def __init__(self,
                 trace,
                 time,
                 name='',
                 comments='',
                 method=method_other,
                 phase_hint=None,
                 polarity='undecidable',
                 aic=None,
                 n0_aic=None,
                 *args, **kwargs):
        # 'trace' must be assigned first: assigning 'stime' below relies on
        # self.trace already being set (see __setattr__).
        self.trace = trace
        if time < 0 or time >= len(self.trace.signal):
            raise ValueError("Event position must be a value between 0 and %d"
                             % len(self.trace.signal))
        # Writing 'stime' (samples) also derives 'time' (absolute timestamp)
        self.stime = time
        self.name = name
        self.method = method
        self.aic = aic
        self.n0_aic = n0_aic
        # Unknown phase hints fall back to the first allowed phase ("P")
        phase_hint = phase_hint if phase_hint in PHASE_VALUES else PHASE_VALUES[0]
        super(ApasvoEvent, self).__init__(time=self.time,
                                          method_id=ResourceIdentifier(method),
                                          phase_hint=phase_hint,
                                          polarity=polarity,
                                          creation_info=CreationInfo(
                                              author=kwargs.get('author', ''),
                                              agency_id=kwargs.get('agency', ''),
                                              creation_time=UTCDateTime.now(),
                                          ),
                                          waveform_id=WaveformStreamID(
                                              network_code=self.trace.stats.get('network', ''),
                                              station_code=self.trace.stats.get('station', ''),
                                              location_code=self.trace.stats.get('location', ''),
                                              channel_code=self.trace.stats.get('channel', ''),
                                          ),
                                          *args,
                                          **kwargs)
        self.comments = comments
    @property
    def cf_value(self):
        # CF value at the arrival sample, or NaN when the arrival lies
        # outside the computed characteristic function
        if 0 <= self.stime < len(self.trace.cf):
            return self.trace.cf[self.stime]
        else:
            return np.nan
    def _samples_to_seconds(self, value):
        # Convert a sample offset into an absolute timestamp
        return self.trace.starttime + (self.trace.delta * value)
    def _seconds_to_samples(self, value):
        # Convert an absolute timestamp into a sample offset
        return int((value - self.trace.starttime) / self.trace.delta)
    def __setattr__(self, key, value):
        # Keep 'stime' (samples) and 'time' (absolute timestamp) in sync:
        # writing either one silently updates the other. 'comments' is
        # wrapped in a Comment object (unwrapped again in __getattribute__).
        if key == 'stime':
            self.__dict__[key] = value
            self.__dict__['time'] = self._samples_to_seconds(value)
        elif key == 'time':
            self.__dict__[key] = value
            self.__dict__['stime'] = self._seconds_to_samples(value)
        elif key == 'comments':
            self.__dict__['comments'] = Comment(text=value)
        else:
            super(ApasvoEvent, self).__setattr__(key, value)
    def __getattribute__(self, item):
        # Unwrap the Comment object stored by __setattr__
        if item == 'comments':
            return self.__dict__['comments'].text
        else:
            return super(ApasvoEvent, self).__getattribute__(item)
    def plot_aic(self, show_envelope=True, num=None, **kwargs):
        """Plots AIC values for a given event object.

        Draws a figure with two axes: the first one plots magnitude and
        envelope of 'self.signal' and the second one plots AIC values computed
        after applying Takanami AR method to 'event'. Plotted data goes from
        'event.n0_aic' to 'event.n0_aic + len(event.aic)'.

        Args:
            show_envelope: Boolean value to specify whether to plot the
                envelope of 'signal' or not. This function will be drawn
                preferably on the first axis together with amplitude of
                'signal'.
                Default: True.
            num: Identifier of the returned MatplotLib figure, integer type.
                Default None, which means an identifier value will be
                automatically generated.

        Returns:
            fig: A MatplotLib Figure instance.

        Raises:
            ValueError: If the event has no AIC data ('aic'/'n0_aic' unset).
        """
        if self.aic is None or self.n0_aic is None:
            raise ValueError("Event doesn't have AIC data to plot")
        # Lazy matplotlib import
        import matplotlib.pyplot as pl
        from matplotlib import ticker
        # Set limits
        i_from = int(max(0, self.n0_aic))
        i_to = int(min(len(self.trace.signal), self.n0_aic + len(self.aic)))
        # Create time sequence
        t = np.arange(i_from, i_to) / float(self.trace.fs)
        # Create figure
        fig, _ = pl.subplots(2, 1, sharex='all', num=num)
        fig.canvas.set_window_title(self.trace.label)
        fig.set_tight_layout(True)
        # Configure axes
        for ax in fig.axes:
            ax.cla()
            ax.grid(True, which='both')
            formatter = ticker.FuncFormatter(lambda x, pos: clt.float_secs_2_string_date(x, self.trace.starttime))
            ax.xaxis.set_major_formatter(formatter)
            ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5, prune='lower'))
            ax.set_xlabel('Time (seconds)')
            pl.setp(ax.get_xticklabels(), visible=True)
        # Draw signal
        fig.axes[0].set_title('Signal Amplitude')
        fig.axes[0].set_ylabel('Amplitude')
        fig.axes[0].plot(t, self.trace.signal[i_from:i_to], color='black',
                         label='Signal')
        # Draw envelope
        if show_envelope:
            fig.axes[0].plot(t, env.envelope(self.trace.signal[i_from:i_to]),
                             color='r', label='Envelope')
            fig.axes[0].legend(loc=0, fontsize='small')
        # Draw AIC
        fig.axes[1].set_title('AIC')
        fig.axes[1].plot(t, self.aic)
        # Draw event
        for ax in fig.axes:
            vline = ax.axvline(self.stime / self.trace.fs, label="Event")
            vline.set(color='r', ls='--', lw=2)
        # Configure limits and draw legend
        for ax in fig.axes:
            ax.set_xlim(t[0], t[-1])
            ax.legend(loc=0, fontsize='small')
        return fig
class ApasvoTrace(op.Trace):
"""A seismic data trace.
The class contains a seismic data trace.
Attributes:
signal: Seismic data, numpy array type.
fs: Sample rate in Hz.
cf: Characteristic function, numpy array type, from the beginning
of signal.
events: A list of events.
label: A string that identifies the stored seismic data.
Default: ''.
description: Additional comments.
Default: ''.
"""
def __init__(self,
data=None,
header=None,
label='',
description='',
filename='',
normalize=True,
use_filtered=False,
**kwargs):
"""Initializes a Record instance.
Args:
label: A string that identifies the seismic record. Default: ''.
description: Additional comments.
"""
# Cast data to default datatype
if data is None:
data = np.ndarray((0,), dtype=DEFAULT_DTYPE)
super(ApasvoTrace, self).__init__(data, header)
self.cf = np.array([], dtype=DEFAULT_DTYPE)
if normalize:
self.data = self.data - np.mean(self.data)
#self.data = self.data/ np.max(np.abs(self.data))
self.filtered_signal = deepcopy(self.data)
self.events = []
self.label = label
self.description = description
self.filename = filename
self.use_filtered = False
# Get an uuid for each trace
self.uuid = unicode(uuid.uuid4())
@property
def fs(self):
return 1. / self.stats.delta
@property
def delta(self):
return self.stats.delta
@property
def signal(self):
return self.data if not self.use_filtered else self.filtered_signal
@property
def starttime(self):
return self.stats.starttime
@property
def endtime(self):
return self.stats.endtime
@property
def short_name(self):
return "{0} | {1}".format(os.path.basename(self.filename), self.id)
@property
def name(self):
return "{0} | {1}".format(os.path.basename(self.filename), str(self))
def detect(self, alg, threshold=None, peak_window=1.0,
takanami=False, takanami_margin=5.0, action='append', debug=False, **kwargs):
"""Computes a picking algorithm over self.signal.
Args:
alg: A detection/picking algorithm object, e. g. a
picking.ampa.Ampa or picking.stalta.StaLta instance.
threshold: Local maxima found in the characteristic function above
this value will be returned by the function as possible events
(detection mode).
If threshold is None, the function will return only the global
maximum (picking mode).
Default value is None.
peak_window: How many seconds on each side of a point of the
characteristic function to use for the comparison to consider
the point to be a local maximum.
If 'threshold' is None, this parameter has no effect.
Default value is 1 s.
takanami: A boolean parameter to specify whether Takanami AR method
will be applied over results or not.
Default: False, Takanami wont be applied over results.
takanami_margin: How many seconds on each side of an event time to
use for the application of Takanami method.
If 'takanami' is False, this parameter has no effect.
Default: 5.0 seconds.
action: Two valid choices: 'append' and 'clear'. 'append' adds the
events found to the end of the list of events, while 'clear'
removes the existing events of the list.
Default: 'append'.
Returns:
events: A resulting list of Event objects.
"""
et, self.cf = alg.run(self.signal, self.fs, threshold=threshold,
peak_window=peak_window)
# Build event list
events = []
for t in et:
# set method name
method_name = alg.__class__.__name__.upper()
if method_name not in ApasvoEvent.methods:
method_name = method_other
events.append(ApasvoEvent(self, t, method=method_name,
evaluation_mode=mode_automatic,
evaluation_status=status_preliminary))
# Refine arrival times
if takanami:
events = self.refine_events(events, takanami_margin=takanami_margin)
# Update event list
if action == 'append':
self.events.extend(events)
elif action == 'clear':
self.events = events
else:
raise ValueError("%s is not a valid value for 'action'" % action)
if debug:
print "{} event(s) found so far for trace {}:".format(len(self.events), self.getId())
for event in self.events:
print event.time
return self.events
def sort_events(self, key='time', reverse=False):
"""Sort event list.
Args:
key: Name of the attribute of Event class to use as sorting key.
Default: 'time'.
reverse: Determines whether to sort in reverse order or not.
Default: False.
Returns:
events: Sorted event list.
"""
if key == 'aic':
raise ValueError("Sorting not allowed using key 'aic'")
self.events = sorted(self.events,
key=lambda e: e.__dict__.get(key, None),
reverse=reverse)
return self.events
def refine_events(self, events, t_start=None, t_end=None, takanami_margin=5.0):
"""Computes Takanami AR method over self.events.
Args:
takanami_margin: How many seconds on each side of an event time to
use for the application of Takanami method.
If 'takanami' is False, this parameter has no effect.
Default: 5.0 seconds.
Returns:
events: A resulting list of Event objects.
"""
taka = takanami.Takanami()
for event in events:
t_start = (event.stime / self.fs) - takanami_margin
t_end = (event.stime / self.fs) + takanami_margin
et, event.aic, event.n0_aic = taka.run(self.signal, self.fs,
t_start, t_end)
event.stime = et
# set event method
if event.method == method_ampa:
event.method = method_ampa_takanami
elif event.method == method_stalta:
event.method = method_stalta_takanami
else:
event.method = method_takanami
return events
def bandpass_filter(self, freqmin, freqmax, *args, **kwargs):
self.filtered_signal = filter.bandpass(self.data, freqmin, freqmax, self.fs, *args, **kwargs)
return self.filtered_signal
def save_cf(self, fname, fmt=rawfile.format_text,
dtype=rawfile.datatype_float64,
byteorder=rawfile.byteorder_native):
"""Saves characteristic function in a file.
Args:
fname: Output file name.
fmt: A string indicating the format to store the CF.
Possible values are: 'binary' or 'text'.
Default value: 'binary'.
dtype: Data-type to represent characteristic function values.
Default: 'float64'.
byteorder: Byte-order to store characteristic function values.
Valid values are: 'little-endian', 'big-endian' or 'native'.
Default: 'native'.
"""
if fmt == 'binary':
fout_handler = rawfile.BinFile(fname, dtype=dtype,
byteorder=byteorder)
else:
fout_handler = rawfile.TextFile(fname, dtype=dtype,
byteorder=byteorder)
fout_handler.write(self.cf, header="Sample rate: %g Hz." % self.fs)
def plot_signal(self, t_start=0.0, t_end=np.inf, show_events=True,
show_x=True, show_cf=True, show_specgram=True,
show_envelope=True, threshold=None, num=None, **kwargs):
"""Plots record data.
Draws a figure containing several plots for data stored and computed
by a Record object: magnitude, envelope and spectrogram plots for
self.signal, as well as characteristic function if calculated.
Args:
t_start: Start time of the plotted data segment, in seconds.
Default: 0.0, that is the beginning of 'signal'.
t_end: End time of the plotted data segment, in seconds.
Default: numpy.inf, that is the end of 'signal'
show_events: Boolean value to specify whether to plot
event arrival times or not. Arrival times will be
indicated by using a vertical line.
Default: True.
show_x: Boolean value to specify whether to plot the
magnitude value of 'signal' or not. This function
will be drawn preferably on the first axis.
Default: True.
show_cf: Boolean value to specify whether to plot the
characteristic function or not. This function
will be drawn preferably on the second axis.
Default: True.
show_specgram: Boolean value to specify whether to plot the
spectrogram of 'signal' or not. It will be drawn preferably
on the third axis.
Default: True.
show_envelope: Boolean value to specify whether to plot the
envelope of 'signal' or not. This function will be drawn
preferably on the first axis together with amplitude of
'signal'.
Default: True.
threshold: Boolean value to specify whether to plot threshold
or not. Threshold will be drawn as an horizontal dashed line
together with characteristic function.
Default: True.
num: Identifier of the returned MatplotLib figure, integer type.
Default None, which means an identifier value will be
automatically generated.
Returns:
fig: A MatplotLib Figure instance.
"""
# Lazy matplotlib import
import matplotlib.pyplot as pl
from matplotlib import ticker
# Set limits
i_from = int(max(0.0, t_start * self.fs))
if show_cf:
i_to = int(min(len(self.cf), t_end * self.fs))
else:
i_to = int(min(len(self.signal), t_end * self.fs))
# Create time sequence
t = np.arange(i_from, i_to) / float(self.fs)
# Create figure
nplots = show_x + show_cf + show_specgram
fig, _ = pl.subplots(nplots, 1, sharex='all', num=num)
fig.canvas.set_window_title(self.label)
fig.set_tight_layout(True)
# Configure axes
for ax in fig.axes:
ax.cla()
ax.grid(True, which='both')
formatter = ticker.FuncFormatter(lambda x, pos: clt.float_secs_2_string_date(x, self.starttime))
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(ticker.MaxNLocator(nbins=5, prune='lower'))
ax.set_xlabel('Time (seconds)')
pl.setp(ax.get_xticklabels(), visible=True)
# Draw axes
ax_idx = 0
# Draw signal
if show_x:
fig.axes[ax_idx].set_title("Signal Amplitude (%gHz)" % self.fs)
fig.axes[ax_idx].set_ylabel('Amplitude')
fig.axes[ax_idx].plot(t, self.signal[i_from:i_to], color='black',
label='Signal')
#fig.axes[ax_idx].plot(t, signal_norm, color='black',
#label='Signal')
# Draw signal envelope
if show_envelope:
fig.axes[ax_idx].plot(t, env.envelope(self.signal[i_from:i_to]),
color='r', label='Envelope')
fig.axes[ax_idx].legend(loc=0, fontsize='small')
ax_idx += 1
# Draw Characteristic function
if show_cf:
fig.axes[ax_idx].set_title('Characteristic Function')
fig.axes[ax_idx].plot(t, self.cf[i_from:i_to])
# Draw threshold
if threshold:
hline = fig.axes[ax_idx].axhline(threshold, label="Threshold")
hline.set(color='b', ls='--', lw=2, alpha=0.8)
fig.axes[ax_idx].legend(loc=0, fontsize='small')
ax_idx += 1
# Draw spectrogram
if show_specgram:
fig.axes[ax_idx].set_title('Spectrogram')
fig.axes[ax_idx].set_ylabel('Frequency (Hz)')
fig.axes[ax_idx].specgram(self.signal[i_from:i_to], Fs=self.fs,
xextent=(i_from / self.fs, i_to / self.fs))
ax_idx += 1
# Draw event markers
if show_events:
for event in self.events:
arrival_time = event.stime / self.fs
for ax in fig.axes:
xmin, xmax = ax.get_xlim()
if arrival_time > xmin and arrival_time < xmax:
vline = ax.axvline(arrival_time, label="Event")
vline.set(color='r', ls='--', lw=2)
ax.legend(loc=0, fontsize='small')
# Configure limits and draw legend
for ax in fig.axes:
ax.set_xlim(t[0], t[-1])
return fig
def add_event_from_copy(self, event):
event = copy.copy(event)
event.trace = self
event.aic = None
event.n0_aic = None
self.events.append(event)
def _detect(parameters):
    """Worker helper: run a detection algorithm over a bundle of traces.

    'parameters' is a 3-tuple ``(alg, trace_list, kwargs)`` so the helper
    can be dispatched through ``multiprocessing.Pool.map``, which passes a
    single argument per call.

    Returns:
        The same list of traces, after calling ``trace.detect`` on each.
    """
    alg, traces, extra_kwargs = parameters
    for trace in traces:
        trace.detect(alg, **extra_kwargs)
    return traces
class ApasvoStream(op.Stream):
    """
    A list of multiple ApasvoTrace objects.

    Attributes:
        description: Additional comments. Default: ''.
        filename: Name of the file the stream was read from. Default: ''.
    """
    def __init__(self, traces, description='', filename='', **kwargs):
        super(ApasvoStream, self).__init__(traces)
        self.description = description
        self.filename = filename
    def detect(self, alg, trace_list=None, allow_multiprocessing=True, **kwargs):
        """
        Run a detection/picking algorithm over a subset of the traces.

        Args:
            alg: Detection/picking algorithm object (e.g. an Ampa instance).
            trace_list: Traces to process. Default: None, meaning all the
                traces of the stream.
            allow_multiprocessing: Whether to split the work across a
                multiprocessing pool, one chunk of traces per process.
                Default: True.
            **kwargs: Extra keyword arguments forwarded to
                ApasvoTrace.detect.
        """
        trace_list = self.traces if trace_list is None else trace_list[:]
        n_traces = len(trace_list)
        if allow_multiprocessing and n_traces > 1:
            processes = min(mp.cpu_count(), n_traces)
            p = mp.Pool(processes=processes)
            # NOTE: 'n_traces / processes' relies on Python 2 integer
            # division to size the chunks
            processed_traces = p.map(_detect, itertools.izip(itertools.repeat(alg),
                                                             collections.chunkify(trace_list, n_traces / processes),
                                                             itertools.repeat(kwargs)))
            processed_traces = collections.flatten_list(processed_traces)
            # Update existing traces w. new events and cf from processed events
            # (worker processes operate on copies, so results must be merged
            # back into the parent's trace objects)
            for trace, processed_trace in zip(trace_list, processed_traces):
                new_events = [event for event in processed_trace.events if event not in trace.events]
                for event in new_events:
                    trace.add_event_from_copy(event)
                trace.cf = processed_trace.cf[:]
            # Cleanup
            del processed_traces
            del trace_list
            p.close()
            p.join()
            gc.collect(2)
        else:
            # Serial fallback: traces are modified in place
            _detect((alg, trace_list, kwargs))
    def export_picks(self, filename, trace_list=None, format="NLLOC_OBS", debug=False, **kwargs):
        """
        Export the picks of the stream to an event file.

        Args:
            filename: Output file name. For 'NLLOC_OBS' it acts as a
                basename: one file per event is written, with the pick
                timestamp inserted before the extension.
            trace_list: Traces whose picks are exported. Default: None,
                meaning all the traces of the stream.
            format: Output catalog format. Default: 'NLLOC_OBS'.
            debug: If True, print the name of each generated file.
            **kwargs: Extra keyword arguments forwarded to Catalog.write.
        """
        trace_list = self.traces if trace_list is None else trace_list
        event_list = []
        for trace in trace_list:
            event_list.extend([Event(picks=[pick]) for pick in trace.events])
        # Export to desired format
        if format == 'NLLOC_OBS':
            basename, ext = os.path.splitext(filename)
            for event in event_list:
                ts = event.picks[0].time.strftime("%Y%m%d%H%M%S%f")
                event_filename = "%s_%s%s" % (basename, ts, ext)
                if debug:
                    print "Generating event file {}".format(event_filename)
                event.write(event_filename, format=format)
        else:
            event_catalog = Catalog(event_list)
            if debug:
                print "Generating event file {}".format(filename)
            event_catalog.write(filename, format=format, **kwargs)
def read(filename,
         format=None,
         dtype='float64',
         byteorder='native',
         description='',
         normalize=True,
         *args, **kwargs):
    """Read signal files into an ApasvoStream object.

    The file is first handed to ObsPy's generic reader; if that fails for
    any reason, it is read as a raw binary/text sample file instead.

    Args:
        filename: Name of the file to read.
        format: File format hint (ObsPy format name, or the raw-file
            format for the fallback path). Default: None (autodetect).
        dtype: Sample datatype for the raw-file fallback. Default: 'float64'.
        byteorder: Byte order for the raw-file fallback. Default: 'native'.
        description: Additional comments attached to the stream.
        normalize: Whether to remove the mean from each trace.
        *args, **kwargs: Extra arguments forwarded to the reader. The
            keyword 'fs' (sample rate in Hz) is honored on the fallback
            path; otherwise DEFAULT_DELTA is assumed.

    Returns:
        An ApasvoStream containing the traces read from the file.
    """
    try:
        # Preferred path: delegate parsing to ObsPy's generic reader
        op_stream = op.read(filename, format=format, *args, **kwargs)
        traces = [ApasvoTrace(copy.deepcopy(tr.data),
                              copy.deepcopy(tr.stats),
                              filename=filename,
                              normalize=normalize)
                  for tr in op_stream.traces]
    except Exception:
        # Fallback: interpret the file as raw binary or text samples
        handler = rawfile.get_file_handler(filename,
                                           format=format,
                                           dtype=dtype,
                                           byteorder=byteorder)
        trace = ApasvoTrace(handler.read().astype(DEFAULT_DTYPE, casting='safe'),
                            filename=filename)
        sample_fs = kwargs.get('fs')
        trace.stats.delta = DEFAULT_DELTA if sample_fs is None else 1. / sample_fs
        traces = [trace]
    return ApasvoStream(traces, description=description, filename=filename)
import sys
import datetime
def float_secs_2_string_date(x, starttime=datetime.datetime.utcfromtimestamp(0)):
    """Format *x* seconds past *starttime* as a wall-clock time string.

    Whole-second instants use the locale time representation ('%X');
    sub-second instants include the fractional part (HH:MM:SS.ffffff).
    """
    instant = starttime + datetime.timedelta(seconds=x)
    fmt = '%X' if instant.microsecond == 0 else '%H:%M:%S.%f'
    return instant.strftime(fmt)
def print_msg(msg):
    """Write *msg* to standard output and flush it immediately."""
    out = sys.stdout
    out.write(msg)
    out.flush()
def query_yes_no_all_quit(question, default="yes"):
    """Ask a yes/no/all/quit question via raw_input() and return their answer.

    Args:
        question: A string that is presented to the user.
        default: The presumed answer if the user just hits <Enter>.
            It must be "yes" (the default), "no", "all", "quit" or None
            (meaning an answer is required of the user).

    Returns:
        One of "yes", "no", "all" or "quit".

    Raises:
        ValueError: If 'default' is not one of the accepted values.
    """
    # Accepted spellings (including abbreviations) for each answer
    valid = {"yes": "yes", "y": "yes", "ye": "yes",
             "no": "no", "n": "no",
             "all": "all", "al": "all", "a": "all",
             "quit": "quit", "qui": "quit", "qu": "quit", "q": "quit"}
    # Prompt variants: the default answer is capitalized
    prompts = {None: " [y(yes)/n(no)/a(all)/q(quit)] ",
               "yes": " [Y(Yes)/n(no)/a(all)/q(quit)] ",
               "no": " [y(yes)/N(No)/a(all)/q(quit)] ",
               "all": " [y(yes)/n(no)/A(All)/q(quit)] ",
               "quit": " [y(yes)/n(no)/a(all)/Q(Quit)] "}
    try:
        prompt = prompts[default]
    except KeyError:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = raw_input().lower()
        if default is not None and choice == '':
            return default
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with "
                             "'yes', 'no', 'all' or 'quit'.\n")
def query_custom_answers(question, answers, default=None):
"""Ask a question via raw_input() and return the chosen answer.
@param question {str} Printed on stdout before querying the user.
@param answers {list} A list of acceptable string answers. Particular
answers can include '&' before one of its letters to allow a
single letter to indicate that answer. E.g., ["&yes", "&no",
"&quit"]. All answer strings should be lowercase.
@param default {str, optional} A default answer. If no default is
given, then the user must provide an answer. With a default,
just hitting <Enter> is sufficient to choose.
"""
prompt_bits = []
answer_from_valid_choice = {
# <valid-choice>: <answer-without-&>
}
clean_answers = []
for answer in answers:
if '&' in answer and not answer.index('&') == len(answer) - 1:
head, _, tail = answer.partition('&')
prompt_bits.append(head.lower() + '(' + tail[0].lower() + ')' + tail[1:].lower())
clean_answer = head + tail
shortcut = tail[0].lower()
else:
prompt_bits.append(answer.lower())
clean_answer = answer
shortcut = None
if default is not None and clean_answer.lower() == default.lower():
prompt_bits[-1] += " (default)"
answer_from_valid_choice[clean_answer.lower()] = clean_answer
if shortcut:
answer_from_valid_choice[shortcut] = clean_answer
clean_answers.append(clean_answer.lower())
# This is what it will look like:
# Frob nots the zids? [Yes (default), No, quit] _
# Possible alternatives:
# Frob nots the zids -- Yes, No, quit? [y] _
# Frob nots the zids? [*Yes*, No, quit] _
# Frob nots the zids? [_Yes_, No, quit] _
# Frob nots the zids -- (y)es, (n)o, quit? [y] _
prompt = " [%s] " % ", ".join(prompt_bits)
leader = question + prompt
if len(leader) + max(len(c) for c in answer_from_valid_choice.keys() + ['']) > 78:
leader = question + '\n' + prompt.lstrip()
leader = leader.lstrip()
valid_choices = answer_from_valid_choice.keys()
if clean_answers:
admonishment = "*** Please respond with '%s' or '%s'. ***" \
% ("', '".join(clean_answers[:-1]), clean_answers[-1])
while 1:
sys.stdout.write(leader)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in answer_from_valid_choice:
return answer_from_valid_choice[choice]
else:
sys.stdout.write("\n" + admonishment + "\n\n\n")
class ALIGN:
"""Use with Column class to specify the alignment mode of a column.
>>> tb = clt.Table(clt.Column('Column A', [1, 2, 3, 4, 5],
align=clt.ALIGN.LEFT),
clt.Column('Column B', [10, 20, 30, 40, 50],
align=clt.ALIGN.RIGHT))
>>> print tb
+----------+----------+
| Column A | Column B |
+----------+----------+
| 1 | 10 |
| 2 | 20 |
| 3 | 30 |
| 4 | 40 |
| 5 | 50 |
+----------+----------+
"""
LEFT, RIGHT = '-', ''
class Column():
"""A class that represents a column in a table.
Use with Table class to draw tables in a CLI.
Attributes:
data: List of numeric data stored in the column, where each element
corresponds to a row in the column.
name: Header of the column.
width: Column width in characters.
format: A specific format string for the elements of 'data'.
Default format is '%.6g'.
"""
def __init__(self, name, data, align=ALIGN.RIGHT, fmt='%.6g'):
self.data = [fmt % x for x in data]
self.name = name
self.width = max(len(x) for x in self.data + [name])
self.format = ' %%%s%ds ' % (align, self.width)
class Table:
"""A class for drawing tabular numeric data in a CLI application.
>>> tb = clt.Table(clt.Column('Column A', [1, 2, 3, 4, 5]),
clt.Column('Column B', [10, 20, 30, 40, 50]))
>>> print tb
+----------+----------+
| Column A | Column B |
+----------+----------+
| 1 | 10 |
| 2 | 20 |
| 3 | 30 |
| 4 | 40 |
| 5 | 50 |
+----------+----------+
Attributes:
columns: A list of column objects corresponding
to the columns of the table.
length: Number of rows of the table.
"""
def __init__(self, *columns):
self.columns = columns
self.length = max(len(column.data) for column in columns)
def get_row(self, i=None):
for x in self.columns:
if i is None:
yield x.format % x.name
else:
yield x.format % x.data[i]
def get_line(self):
for x in self.columns:
yield '-' * (x.width + 2)
def join_n_wrap(self, char, elements):
return ' ' + char + char.join(elements) + char
def get_rows(self):
yield self.join_n_wrap('+', self.get_line())
yield self.join_n_wrap('|', self.get_row(None))
yield self.join_n_wrap('+', self.get_line())
for i in range(0, self.length):
yield self.join_n_wrap('|', self.get_row(i))
yield self.join_n_wrap('+', self.get_line())
def __str__(self):
return '\n'.join(self.get_rows())
class ProgressBar:
"""A class to draw a command line progress bar.
>>> pbar = clt.ProgressBar(totalWidth=30)
>>> print pbar
[ 0% ]
>>> pbar.updateAmount(40)
>>> print pbar
[########### 40% ]
>>> pbar.updateAmount(80)
>>> print pbar
[############80%####### ]
>>> pbar.updateAmount(100)
>>> print pbar
[###########100%#############]
Attributes:
min: Initial value of the progress. Default value is 0.
max: Final value of the progress, which corresponds to a complete task.
Default value is 100.
span: Length of the range for the progress value. i.e. max - min.
width: The number of steps of the progress bar.
amount: A value in the range [min..max] indicating the current progress.
"""
def __init__(self, minValue=0, maxValue=100, totalWidth=12):
self.progBar = "[]" # This holds the progress bar string
self.min = minValue
self.max = maxValue
self.span = maxValue - minValue
self.width = totalWidth
self.amount = 0 # When amount == max, we are 100% done
self.updateAmount(0) # Build progress bar string
def updateAmount(self, newAmount=0):
"""Sets the value of the current progress."""
if newAmount < self.min:
newAmount = self.min
if newAmount > self.max:
newAmount = self.max
self.amount = newAmount
# Figure out the new percent done, round to an integer
diffFromMin = float(self.amount - self.min)
percentDone = (diffFromMin / float(self.span)) * 100.0
percentDone = round(percentDone)
percentDone = int(percentDone)
# Figure out how many hash bars the percentage should be
allFull = self.width - 2
numHashes = (percentDone / 100.0) * allFull
numHashes = int(round(numHashes))
# build a progress bar with hashes and spaces
self.progBar = "[" + '#' * numHashes + ' ' * (allFull - numHashes) + "]"
# figure out where to put the percentage, roughly centered
percentPlace = (len(self.progBar) / 2) - len(str(percentDone))
percentString = str(percentDone) + "%"
# slice the percentage into the bar
self.progBar = (self.progBar[0:percentPlace] + percentString +
self.progBar[percentPlace + len(percentString):])
def __str__(self):
return str(self.progBar) | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/utils/clt.py | clt.py |
import argparse
import os
import glob
from apasvo.utils import futils
def filein(arg):
"""Determines whether an argument is a regular file or not
(e.g. a directory)."""
if not os.path.isfile(arg):
msg = "%r is not a regular file" % arg
raise argparse.ArgumentTypeError(msg)
return arg
def positive_float(arg):
"""Checks whether an argument is a positive float number or not."""
value = float(arg)
if value <= 0:
msg = "%r is not a positive float number" % arg
raise argparse.ArgumentTypeError(msg)
return value
def positive_int(arg):
"""Checks whether an argument is a positive integer number or not."""
value = int(arg)
if value <= 0:
msg = "%r is not a positive integer number" % arg
raise argparse.ArgumentTypeError(msg)
return value
def non_negative_int(arg):
"""Checks whether an argument is a non negative integer number or not."""
value = int(arg)
if value < 0:
msg = "%r is a negative integer number" % arg
raise argparse.ArgumentTypeError(msg)
return value
def percentile(arg):
"""Checks if an argument is a valid percentile.
A correct percentile value must be an integer value in the range 0..100.
"""
value = float(arg)
if value < 0 or value > 100:
msg = "%r is not a percentile" % arg
raise argparse.ArgumentTypeError(msg)
return value
def fraction(arg):
"""Determines if an argument is a number in the range [0,1)."""
value = float(arg)
if value < 0 or value > 1:
msg = "%r must be a value between [0,1)" % arg
raise argparse.ArgumentTypeError(msg)
return value
class GlobInputFilenames(argparse.Action):
"""Finds all the pathnames according to the specified filename arguments.
Expands a list of string arguments that represent pathnames. They can be
either absolute (e.g. /usr/bin/example.txt ) or relative pathnames
(e.g. ./examples/*.bin).
Returns a list containing an argparse.FileType object for each filename
that matches the pattern list.
"""
def __call__(self, parser, namespace, values, option_string=None):
fnames = []
for pname in values:
if '*' in pname or '?' in pname:
fnames.extend(glob.glob(pname))
else:
fnames.append(pname)
setattr(namespace, self.dest, fnames)
def _fopen(self, fname):
if futils.istextfile(fname):
ft = argparse.FileType('r')
else:
ft = argparse.FileType('rb')
return ft(fname)
class CustomArgumentParser(argparse.ArgumentParser):
"""Custom implementation of ArgumentParser class that supports
comments in argument files.
Every sequence of characters preceded by '#' is treated as a comment
until the end of the line.
"""
def __init__(self, *args, **kwargs):
super(CustomArgumentParser, self).__init__(*args, **kwargs)
def convert_arg_line_to_args(self, line):
for arg in line.split():
if not arg.strip():
continue
if arg[0] == '#':
break
yield arg | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/utils/parse.py | parse.py |
import re
import shutil
import os
from struct import pack
# A function that takes an integer in the 8-bit range and returns
# a single-character byte object in py3 / a single-character string
# in py2.
#
_text_characters = (
b''.join(chr(i) for i in range(32, 127)) +
b'\n\r\t\f\b')
def istextfile(filename, blocksize=512):
""" Uses heuristics to guess whether the given file is text or binary,
by reading a single block of bytes from the file.
If more than 30% of the chars in the block are non-text, or there
are NUL ('\x00') bytes in the block, assume this is a binary file.
"""
with open(filename, 'rb') as fileobj:
block = fileobj.read(blocksize)
fileobj.seek(0)
if b'\x00' in block:
# Files with null bytes are binary
return False
elif not block:
# An empty file is considered a valid text file
return True
# Use translate's 'deletechars' argument to efficiently remove all
# occurrences of _text_characters from the block
nontext = block.translate(None, _text_characters)
return float(len(nontext)) / len(block) <= 0.30
def is_little_endian():
"""Checks whether the current architecture is little-endian or not"""
if pack('@h', 1) == pack('<h', 1):
return True
return False
def read_in_chunks(file_object, chunk_size=1024):
"""Lazy function (generator) to read a file piece by piece.
Default chunk size: 1k."""
while True:
data = file_object.read(int(chunk_size))
if data:
yield data
else:
return
def read_txt_in_chunks(file_object, n=1024, comments='#'):
"""Lazy function (generator) to read a text file in chunks.
Default chunk size: 1024 characters"""
numeric_pattern = r'[+-]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?'
data = []
for line in file_object.xreadlines():
line, _, _ = line.partition(comments) # Eliminate comments
data.extend(re.findall(numeric_pattern, line))
if len(data) >= n:
yield data[:n]
data = data[n:]
yield data
def getSize(f):
"""Gets the size of a file in bytes."""
f.seek(0, 2) # move the cursor to the end of the file
size = f.tell()
f.seek(0)
return size
def get_delimiter(fileobject, lines=16):
"""Infers the delimiter used in a text file containing a list of numbers.
The text file must contain on each line a list of numbers separated
by a delimiter character, e.g.:
# Example comment
12.5,10,12
30,5,3
3,5,0.5,2.3
In this case the function will return ',' as delimiter
Args:
fileobject: A text file like object.
lines: The maximum number of lines to be read from the beginning
of the file in order to detect the delimiter.
Returns:
A character corresponding to the delimiter detected.
An empty string if nothing was found.
"""
comment = r'\s*#.*'
integer = r'[+-]?\d+'
decimal = r'\d+(e[+-]\d+)?'
number = r'{integer}\.{decimal}'.format(integer=integer, decimal=decimal)
pattern = (r'{comment}|({number}((?P<sep>[\W]+){number})*({comment})?)'.
format(number=number, comment=comment))
delimiters = {}
for i in xrange(lines):
line = fileobject.readline()
if line == '':
break
else:
m = re.match(pattern, line)
if m:
delimiter = m.groupdict()['sep']
if delimiter:
if delimiter in delimiters:
delimiters[delimiter] += 1
else:
delimiters[delimiter] = 1
fileobject.seek(0)
if delimiters:
return max(delimiters, key=lambda x: delimiters[x])
else:
return ''
def get_sample_rate(filename, max_header_lines=64, comments='#'):
"""Search for a sample rate value in the header of a text file containing
a seismic signal.
Args:
filename: Name of a text file containing a seismic signal.
max_header_lines: Maximum number of lines to be read from the beginning
of the file in order to get the sample rate value.
comments: Character used to indicate the start of a comment
Returns:
out: Sample rate, in Hz.
None if no sample rate value is found in header.
"""
units = {'hz': 10.0 ** 0, 'khz': 10.0 ** 3,
'mhz': 10.0 ** 6, 'ghz': 10.0 ** 9}
pattern = r'sample\s+(rate|frequency).*?(?P<fs>\d+(\.\d*)?(e[+-]?\d+)?).*?(?P<unit>(ghz|mhz|khz|hz))'
with open(filename, 'r') as f:
for i in xrange(max_header_lines):
line = f.readline()
_, _, comment = line.partition(comments) # Select comments
if comment != '':
m = re.search(pattern, comment.lower())
if m:
fs = m.groupdict()['fs']
if fs:
return int(float(fs) * units[m.groupdict()['unit']])
return None
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy an entire directory tree.
The destination directory must not already exist.
"""
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d) | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/utils/futils.py | futils.py |
from matplotlib import mlab
from scipy import signal
import numpy as np
SPECGRAM_WINDOWS = ("boxcar", "hamming", "hann", "bartlett",
'blackman', "blackmanharris")
SPECGRAM_WINDOWS_NAMES = ("Rectangular", "Hamming", "Hann", "Bartlett",
"Blackman", "Blackman-Harris")
def plot_specgram(ax, data, fs, nfft=256, noverlap=128, window='hann',
cmap='jet', interpolation='bilinear', rasterized=True):
if window not in SPECGRAM_WINDOWS:
raise ValueError("Window not supported")
elif window == "boxcar":
mwindow = signal.boxcar(nfft)
elif window == "hamming":
mwindow = signal.hamming(nfft)
elif window == "hann":
mwindow = signal.hann(nfft)
elif window == "bartlett":
mwindow = signal.bartlett(nfft)
elif window == "blackman":
mwindow = signal.blackman(nfft)
elif window == "blackmanharris":
mwindow = signal.blackmanharris(nfft)
specgram, freqs, time = mlab.specgram(data, NFFT=nfft, Fs=fs,
window=mwindow,
noverlap=noverlap)
specgram = 10 * np.log10(specgram[1:, :])
specgram = np.flipud(specgram)
freqs = freqs[1:]
halfbin_time = (time[1] - time[0]) / 2.0
halfbin_freq = (freqs[1] - freqs[0]) / 2.0
extent = (time[0] - halfbin_time, time[-1] + halfbin_time,
freqs[0] - halfbin_freq, freqs[-1] + halfbin_freq)
ax.imshow(specgram, cmap=cmap, interpolation=interpolation,
extent=extent, rasterized=rasterized)
ax.axis('tight')
def reduce_data(x, y, width, xmin=0, xmax=None):
"""Given x-axis data and y-axis data returns a smaller representation
of both datasets with a desired length for faster plotting.
Given a width value, which usually corresponds with the desired pixel width
of the plot, splits represented x-data range into 'width' partitions and
returns the (x,y) minimum and maximum data pairs for each partition.
Args:
x: x-axis data. Numpy array type.
y: y-axis data. Numpy array type.
width: Desired plot width, usually related to plot's pixel width.
xmin: Position of the first (x,y) data pair to be represented
xmax: Position of the last (x,y) data pair to be represented
Returns:
x_reduced: Reduced x-axis dataset.
y_reduced: Reduced y-axis dataset.
"""
if len(x) != len(y):
raise ValueError("x and y must have the same length.")
if not isinstance(x, np.ndarray):
x = np.array(x)
if not isinstance(y, np.ndarray):
y = np.array(y)
# Init xmax and xmin values
length = len(x)
xmax = xmax if xmax is not None else length - 1
xmax = min(length - 1, xmax if xmax > 0 else 0)
xmin = max(0, xmin if xmin < length else length - 1)
if xmin > xmax:
raise ValueError("xmax must be greater or equal than xmin")
n_points = 2 * width
data_size = xmax - xmin
# If the length of the datasets is too small returns the datasets
if data_size <= n_points:
return x[xmin:xmax + 1], y[xmin:xmax + 1]
indexes = np.empty(n_points + 2, dtype=int)
# Initial and final (x,y) pairs of the reduced data corresponds
# with the initial and final (x,y) values of the represented data
indexes[0], indexes[-1] = xmin, xmax
i = 1
limits = np.ceil(np.linspace(xmin, xmax, width + 1)).astype(int)
for j in xrange(int(width)):
left = limits[j]
right = limits[j + 1]
indexes[i] = left + y[left:right + 1].argmax(axis=0)
i += 1
indexes[i] = left + y[left:right + 1].argmin(axis=0)
i += 1
indexes.sort()
return x[indexes], y[indexes]
def adjust_axes_height(ax, max_value=None, min_value=None, margin=0.1):
max_values = [max_value] if max_value else []
min_values = [min_value] if min_value else []
for line in ax.lines:
try:
xdata = list(line.get_xdata())
ydata = list(line.get_ydata())
except TypeError:
continue
if len(xdata) == 2 and len(ydata) == 2:
# Check for horizontal lines and discard
if xdata == [0, 1] and ydata[0] == ydata[1]:
continue
# Check for vertical lines and discard
if ydata == [0, 1] and xdata[0] == xdata[1]:
continue
else:
max_values.append(max(ydata))
min_values.append(min(ydata))
if max_values and min_values:
maximum = max(max_values)
minimum = min(min_values)
margin_height = (maximum - minimum) * margin
ax.set_ylim(minimum - margin_height, maximum + margin_height) | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/utils/plotting.py | plotting.py |
import numpy as np
from apasvo.utils import futils
format_binary = 'binary'
format_text = 'text'
datatype_int16 = 'int16'
datatype_int32 = 'int32'
datatype_int64 = 'int64'
datatype_float16 = 'float16'
datatype_float32 = 'float32'
datatype_float64 = 'float64'
byteorder_little_endian = 'little-endian'
byteorder_big_endian = 'big-endian'
byteorder_native = 'native'
class RawFile(object):
"""An abstract class representing a binary or plain text file."""
_datatypes = {datatype_float16: 'f2',
datatype_float32: 'f4',
datatype_float64: 'f8',
datatype_int16: 'i2',
datatype_int32: 'i4',
datatype_int64: 'i8', }
_byteorders = {byteorder_little_endian: '<',
byteorder_big_endian: '>',
byteorder_native: '=', }
def __init__(self):
super(RawFile, self).__init__()
def read(self):
raise NotImplementedError
def read_in_blocks(self, block_size):
raise NotImplementedError
def write(self, array):
raise NotImplementedError
class BinFile(RawFile):
"""A binary file.
Data type and byte-order info must be known in advance in order
to read the data correctly.
Attributes:
dtype: Type of the data stored in the file.
filename: Name of the file.
"""
def __init__(self, filename, dtype='float64', byteorder='native'):
"""Inits a BinFile object.
Args:
filename: Name of the file.
dtype: Data-type of the data stored in the file.
Possible values are: 'float16', 'float32' and 'float64'.
Default value is 'float64'.
byteorder: Byte-order of the data stored in the file.
Possible values are: 'little-endian', 'big-endian' and 'native'.
Default value is 'native'.
"""
super(BinFile, self).__init__()
self.dtype = np.dtype(self._byteorders[byteorder] + self._datatypes[dtype])
self.filename = filename
def read(self, **kwargs):
"""Constructs a numpy array from the data stored in the file.
Data-type and byte-order of the returned array are the object's same.
"""
return np.fromfile(self.filename, dtype=self.dtype)
def read_in_blocks(self, block_size=1024):
"""Lazy function (generator) that reads a binary file in chunks.
Default chunk size is 1k.
Data-type and byte-order of the returned data are the object's same.
"""
with open(self.filename, 'rb') as f:
chunk_size = block_size * self.dtype.itemsize
for data in futils.read_in_chunks(f, chunk_size):
yield np.frombuffer(data, dtype=self.dtype)
def write(self, array, **kwargs):
"""Stores an array into the binary file."""
if array.dtype != np.dtype(self.dtype):
return array.astype(self.dtype).tofile(self.filename)
return array.tofile(self.filename)
class TextFile(RawFile):
"""A plain text file containing numeric data.
Attributes:
dtype: Once data is read from file, this will be the data type
of the resulting array.
filename: Name of the file.
"""
def __init__(self, filename, dtype='float64', byteorder='native'):
"""Inits a TextFile object.
Args:
filename: Name of the file.
dtype: Data-type of the array data returned.
Possible values are: 'float16', 'float32' and 'float64'.
Default value is 'float64'.
byteorder: Byte-order of the array data returned.
Possible values are: 'little-endian', 'big-endian' and 'native'.
Default value is 'native'.
"""
super(TextFile, self).__init__()
self.dtype = np.dtype(self._byteorders[byteorder] + self._datatypes[dtype])
self.filename = filename
def read(self, **kwargs):
"""Constructs a numpy array from the data stored in the file.
Data-type and byte-order of the returned array are the object's same.
The following arguments are taken from the documentation
of the numpy function 'loadtxt'.
Args:
dtype: Data-type of the resulting array.
comments: String indicating the start of a comment.
Default: '#'.
delimiter: String used to separate values.
Default: ' '.
"""
return np.loadtxt(self.filename, dtype=self.dtype, **kwargs)
def read_in_blocks(self, block_size=1024):
"""Lazy function (generator) that reads a text file in chunks.
Default chunk size is 1k characters.
Data-type and byte-order of the returned data are the object's same.
"""
with open(self.filename, 'r') as f:
for data in futils.read_txt_in_chunks(f, block_size):
yield np.array(data, dtype=self.dtype)
def write(self, array, **kwargs):
"""Stores an array into the text file.
The following arguments are taken from the
documentation of the numpy function 'savetxt'.
Args:
fmt: A string format.
Default value is '%.18e'.
delimiter: Character separating columns.
Default: ' '.
newline: Character separating lines.
Default: '\n'.
header: String that will be written at the beginning
of the file. Default: ''.
footer: String that will be written at the end of the file.
Default: ''.
comments: String that will be prepended to header and footer
to mark them as comments. Default: '# '.
"""
return np.savetxt(self.filename, array, **kwargs)
def get_file_handler(filename, fmt='', dtype='float64', byteorder='native', **kwargs):
"""Gets a handler for a binary or text file.
Args:
filename: name of the file.
fmt: The format of the data file to read.
Possible values are 'binary', 'text' or ''.
If '' is selected, the function will detect whether the file
is a binary or a text file.
dtype: Data-type of the numeric data stored in the file.
Possible values are 'int16', 'int32', 'int64', 'float16', 'float32'
and 'float64'. Default value is 'float64'.
byteorder: Byte-order of the numeric data stored in the file.
Possible values are 'little-endian', 'big-endian' and 'native'.
Default value is 'native'.
Returns:
A BinFile or TextFile object, depending of 'fmt'.
"""
if isinstance(filename, file):
filename = filename.name
formats = [format_binary, format_text]
if fmt not in formats:
fmt = format_text if futils.istextfile(filename) else format_binary
if fmt == format_text:
return TextFile(filename, dtype=dtype, byteorder=byteorder)
else:
return BinFile(filename, dtype=dtype, byteorder=byteorder) | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/utils/formats/rawfile.py | rawfile.py |
import numpy as np
import struct
import datetime
#HEADER FIELDS
HEADER_FLOAT_FIELDS = (
'DELTA', 'DEPMIN', 'DEPMAX', 'SCALE', 'ODELTA',
'B', 'E', 'O', 'A', 'INTERNAL',
'T0', 'T1', 'T2', 'T3', 'T4',
'T5', 'T6', 'T7', 'T8', 'T9',
'F', 'RESP0', 'RESP1', 'RESP2', 'RESP3',
'RESP4', 'RESP5', 'RESP6', 'RESP7', 'RESP8',
'RESP9', 'STLA', 'STLO', 'STEL', 'STDP',
'EVLA', 'EVLO', 'EVEL', 'EVDP', 'MAG',
'USER0', 'USER1', 'USER2', 'USER3', 'USER4',
'USER5', 'USER6', 'USER7', 'USER8', 'USER9',
'DIST', 'AZ', 'BAZ', 'GCARC', 'INTERNAL',
'INTERNAL', 'DEPMEN', 'CMPAZ', 'CMPINC', 'XMINIMUM',
'XMAXIMUM', 'YMINIMUM', 'YMAXIMUM', 'UNUSED', 'UNUSED',
'UNUSED', 'UNUSED', 'UNUSED', 'UNUSED', 'UNUSED',
)
HEADER_INTEGER_FIELDS = (
'NZYEAR', 'NZJDAY', 'NZHOUR', 'NZMIN', 'NZSEC',
'NZMSEC', 'NVHDR', 'NORID', 'NEVID', 'NPTS',
'INTERNAL', 'NWFID', 'NXSIZE', 'NYSIZE', 'UNUSED',
'IFTYPE', 'IDEP', 'IZTYPE', 'UNUSED', 'IINST',
'ISTREG', 'IEVREG', 'IEVTYP', 'IQUAL', 'ISYNTH',
'IMAGTYP', 'IMAGSRC', 'UNUSED', 'UNUSED', 'UNUSED',
'UNUSED', 'UNUSED', 'UNUSED', 'UNUSED', 'UNUSED',
)
HEADER_LOGICAL_FIELDS = (
'LEVEN', 'LPSPOL', 'LOVROK', 'LCALDA', 'UNUSED',
)
HEADER_ALPHANUMERIC_FIELDS = (
'KSTNM', 'KEVNM0', 'KEVNM1',
'KHOLE', 'KO', 'KA',
'KT0', 'KT1', 'KT2',
'KT3', 'KT4', 'KT5',
'KT6', 'KT7', 'KT8',
'KT9', 'KF', 'KUSER0',
'KUSER1', 'KUSER2', 'KCMPNM',
'KNETWK', 'KDATRD', 'KINST',
)
class SACFile(object):
"""A SAC file.
Attributes:
filename: Name of the file
"""
def __init__(self):
"""Inits a SACFile object.
Args:
filename: Name of the file.
"""
super(SACFile, self).__init__()
self.byte_order = '>'
self.header = {'NPTS': -12345,
'NVHDR': 6,
'B': -12345.0,
'E': -12345.0,
'IFTYPE': 1,
'LEVEN': True,
'DELTA': -12345.0}
self.data = np.array([], dtype='float64')
self.time = np.array([], dtype='datetime64')
def read(self, fp, **kwargs):
try:
file_in = open(fp, 'rb')
except:
#Assume fp is a file-like object
file_in = fp
header = file_in.read(158 * 4) #Header length is 158 words
#Check header version & byte order
NVHDR = struct.unpack(">i", header[76 * 4:77 * 4])[0]
self.byte_order = "<" if NVHDR > 6 else ">"
#Read float fields
data = struct.unpack("%s70f" % self.byte_order, header[:70 * 4])
for field_name, field in zip(HEADER_FLOAT_FIELDS, data):
if field_name != 'UNUSED':
self.header[field_name] = field
#Read integer fields
data = struct.unpack("%s35i" % self.byte_order, header[70 * 4:105 * 4])
for field_name, field in zip(HEADER_INTEGER_FIELDS, data):
if field_name != 'UNUSED':
self.header[field_name] = field
#Read logical fields
data = struct.unpack("%s5i" % self.byte_order, header[105 * 4:110 * 4])
for field_name, field in zip(HEADER_LOGICAL_FIELDS, data):
if field_name != 'UNUSED':
self.header[field_name] = bool(field)
#Read alphanumeric fields
data = [str(header[n * 4:(n + 2) * 4]) for n in range(110, 158, 2)]
for field_name, field in zip(HEADER_ALPHANUMERIC_FIELDS, data):
if field_name != 'UNUSED':
self.header[field_name] = field.replace('\x00', '')
#Concatenate KEVNM (see IRIS format)
self.header['KEVNM'] = "%s%s" % (self.header['KEVNM0'], self.header['KEVNM1'])
del self.header['KEVNM0']
del self.header['KEVNM1']
#Read Data Section
data = file_in.read(self.header['NPTS'] * 4)
self.data = np.array(struct.unpack("%s%sf" % (self.byte_order, self.header['NPTS']), data),
dtype='float64')
#Create time vector
start_time = datetime.datetime.strptime("%s%s%s%s%s%s" % (self.header['NZYEAR'],
self.header['NZJDAY'],
self.header['NZHOUR'],
self.header['NZMIN'],
self.header['NZSEC'],
self.header['NZMSEC'] * 1000),
"%Y%j%H%M%S%f")
end_time = start_time + datetime.timedelta(seconds = (self.header['DELTA'] * self.header['NPTS']))
step = datetime.timedelta(seconds = self.header['DELTA'])
self.time = np.arange(start_time, end_time, step)
def write(self, fp, **kwargs):
try:
file_out = open(fp, 'wb')
except:
#Assume fp is a file-like object
file_out = fp
#Store header
header = []
# Store float fields
unused = struct.pack("%sf" % self.byte_order, -12345.0)
for field in HEADER_FLOAT_FIELDS:
if field == 'UNUSED':
header.append(unused)
else:
header.append(struct.pack("%sf" % self.byte_order, self.header[field]))
# Store integer fields
unused = struct.pack("%si" % self.byte_order, -12345)
for field in HEADER_INTEGER_FIELDS:
if field == 'UNUSED':
header.append(unused)
else:
header.append(struct.pack("%si" % self.byte_order, self.header[field]))
# Store logical fields
unused = struct.pack("%si" % self.byte_order, False)
for field in HEADER_LOGICAL_FIELDS:
if field == 'UNUSED':
header.append(unused)
else:
header.append(struct.pack("%si" % self.byte_order, self.header[field]))
# Store alphanumeric fields
for field in HEADER_ALPHANUMERIC_FIELDS:
if field == 'KEVNM0':
header.append(struct.pack("%s16s" % self.byte_order, self.header['KEVNM']))
elif field == 'KEVNM1':
pass
else:
header.append(struct.pack("%s8s" % self.byte_order, self.header[field]))
#Store data section
data = self.data.astype('%sf' % self.byte_order).tostring()
file_out.write("%s%s" % (''.join(header), data))
file_out.flush() | APASVO | /APASVO-0.0.6.tar.gz/APASVO-0.0.6/apasvo/utils/formats/sacfile.py | sacfile.py |
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from lmfit import Model
from copy import deepcopy
from apav.utils import validate
from apav.core.range import Range
from apav.utils.helpers import intervals_intersect
from apav.analysis import models
import apav.utils.constants
import numpy as n
from numpy import ndarray
from lmfit.minimizer import MinimizerResult
from lmfit.models import PowerLawModel
# Module-level lmfit solver options applied to every Background fit
# (consumed in Background.fit via self.model.fit(...)).
settings = {"method": "least_squares", "nan_policy": "raise", "xtol": 1e-7, "ftol": 1e-7}
class Background:
"""
Defines a background model through a specified range of a mass spectrum.
"""
def __init__(
self,
fit_interval: Union[Tuple[Number, Number], Sequence[Tuple[Number, Number]]],
include_interval: Union[Tuple[Number, Number], Sequence[Tuple[Number, Number]]] = (),
model: Type[Model] = models.PowerLawShiftModel(),
):
"""
A background is fit on the provided fit_interval(s). Intervals provided to `include_interval` define
ranges where mass spectrum ranges will use the background model for background subtraction. For example:
>>> bkg = Background((10, 12), (13, 20), models.PowerLawShiftModel())
Fits a shifted power law on the interval from 10 -> 12 Da. The background will be subtracted and applied to
any mass ranges that begin in the interval from 13 -> 20 Da.
>>> bkg = Background([(10, 12), (15, 16)], [(13, 20), (22, 24)], models.PowerLawShiftModel())
Will fit the same power law model on 2 intervals from 10 -> 12 and 15 -> 16 Da. This background will be
subtracted and applied to any mass ranges that begin in either of the intervals 13 ->20 Da or 22 -> 24 Da. This
allows simple control over which background applies to which mass ranges, without being too explicit.
:param fit_interval: single or multiple intervals on the x to fit the background model
:param include_interval: mass ranges to apply the background subtraction to
:param model: the background model to be used
"""
if not isinstance(model, Model):
raise TypeError(f"Background model must be an lmfit Model, not {type(model)}.")
self.model = model
self._include_intervals = []
self._fit_intervals = []
self._area = None
assert len(fit_interval) > 0, "Background must be initialized with valid fit range"
if hasattr(fit_interval[0], "__iter__"):
for i in fit_interval:
validate.positive_interval(i)
self.add_fit_interval(i)
else:
self.add_fit_interval(validate.positive_interval(fit_interval))
if len(include_interval) > 0:
if hasattr(include_interval[0], "__iter__"):
for intv in include_interval:
validate.positive_interval(intv)
self.add_include_interval(intv)
else:
self.add_include_interval(validate.positive_interval(include_interval))
self._fit_results = None
@property
def fit_intervals(self) -> List[Tuple[Number, Number]]:
"""
Get the fit intervals assigned to this background
"""
return self._fit_intervals
@property
def lower(self) -> Number:
"""
Get the lower value of the fit interval
"""
return min(i[0] for i in self.fit_intervals)
@property
def upper(self) -> Number:
    """
    The largest upper bound among all fit intervals.
    """
    return max(end for _, end in self.fit_intervals)
@property
def area(self) -> Number:
    """
    Get the area (sum of the best-fit curve) of the fitted background.

    :raises AssertionError: if the background has not been fit yet
    """
    # Explicit raise instead of a bare ``assert`` so the guard is not
    # stripped when Python runs with optimizations (-O). The exception
    # type is kept as AssertionError for backward compatibility.
    if self._area is None:
        raise AssertionError("Background that has not been fit has no area")
    return self._area
@property
def fit_results(self) -> MinimizerResult:
    """
    Get the lmfit result of the last :meth:`fit` call, or None if the
    background has not been fit yet.
    """
    return self._fit_results
def eval(self, x: ndarray) -> ndarray:
    """
    Evaluate the fitted background model at the positions ``x``.

    :param x: array of mass/charge values to evaluate the model at
    :return: the background estimate on the given range
    :raises AssertionError: if the background has not been fit yet
    """
    # Explicit raise instead of ``assert`` so the guard survives ``python -O``.
    # The exception type is kept as AssertionError for backward compatibility.
    if self.fit_results is None:
        raise AssertionError("Background must be fit before it can be evaluated.")
    return self.fit_results.eval(x=x)
def contains_mass(self, value) -> bool:
    """
    Test whether ``value`` lies in any include interval.

    Intervals are treated as half-open, i.e. [min, max).
    """
    return any(lo <= value < hi for lo, hi in self._include_intervals)
def contains_range(self, rng: Range) -> bool:
    """
    Test whether a :class:`Range` is covered by this background, i.e. its
    lower mass bound lies inside one of the include intervals.

    :param rng: Range instance
    """
    lower_bound = rng.lower
    return self.contains_mass(lower_bound)
@property
def include_intervals(self) -> List[Tuple[Number, Number]]:
    """
    Get the list of intervals that assign mass ranges (:class:`Range`) to this background.

    The returned list is the live internal list; use :meth:`add_include_interval` to extend it.
    """
    return self._include_intervals
def fit(self, spec_x: ndarray, spec_y: ndarray):
    """
    Fit the background model to the portion of the spectrum (spec_x, spec_y)
    covered by the fit intervals.

    :param spec_x: array of x axis
    :param spec_y: array of y axis
    :raises RuntimeError: if the underlying lmfit fit fails
    """
    # Collect the spectrum samples falling inside any fit interval
    x = n.array([])
    y = n.array([])
    for lo, hi in self.fit_intervals:
        mask = (spec_x >= lo) & (spec_x < hi)
        x = n.concatenate((x, spec_x[mask]))
        y = n.concatenate((y, spec_y[mask]))
    # Initial parameter estimate from the model itself
    params = self.model.guess(y, x=x)
    try:
        self._fit_results = self.model.fit(
            y,
            params=params,
            x=x,
            method=settings["method"],
            nan_policy=settings["nan_policy"],
            fit_kws={"xtol": settings["xtol"], "ftol": settings["ftol"]},
        )
    except Exception as exc:
        raise RuntimeError(
            f"Fit failed on background with fit interval(s) {self.fit_intervals}, with error:\n\t{exc}"
        )
    # Cache the integrated best-fit curve for the ``area`` property
    self._area = n.sum(self.fit_results.best_fit)
def add_fit_interval(self, minmax: Tuple[Number, Number]):
    """
    Register an additional fit interval on this background.

    :param minmax: the new fit interval as (lower, upper)
    :raises IntervalIntersectionError: if the interval overlaps an existing fit interval
    """
    validate.positive_interval(minmax)
    for existing in self.fit_intervals:
        if intervals_intersect(minmax, existing):
            raise validate.IntervalIntersectionError(f"Cannot have intersecting fit ranges ({minmax[0]}-{minmax[1]})")
    self._fit_intervals.append(minmax)
def add_include_interval(self, minmax: Tuple[float, float]):
    """
    Attach an interval of mass/charge values to this background. Any mass range
    whose lower bound lies within the interval will use this background for
    subtraction. Overlapping include intervals are permitted.

    :param minmax: the new include interval as (lower, upper)
    """
    validate.positive_interval(minmax)
    self._include_intervals.append(minmax)
def reset(self):
    """
    Clear the fit results and computed area so the background can be re-fit.
    """
    self._fit_results = None
    self._area = None
class BackgroundCollection:
    """
    Container handling operations on a collection of Backgrounds.

    This class is used to contain all backgrounds that operate on a spectrum/ROI.

    >>> bkgs = BackgroundCollection()
    >>> bkg1 = Background((10, 12), (13, 14))
    >>> bkgs.add(bkg1)

    or

    >>> bkgs = BackgroundCollection()
    >>> bkgs.add(Background((10, 12), (13, 14), models.ExponentialModel()))
    >>> bkgs.add(Background((48, 52), (60, 65), models.PowerLawShiftModel()))

    This will fit the first background model on the interval 10 -> 12 Da and apply it to mass
    ranges beginning from 13 -> 14 Da using an exponential decay model. The second background is
    fit from 48 -> 52 Da and is applied to mass ranges beginning between 60 -> 65 Da, using a
    Power Law model.
    """

    def __init__(self, backgrounds: "Sequence[Background]" = ()):
        """
        :param backgrounds: initial sequence of :class:`Background` instances to add
        """
        self._bkgs = []
        for bkg in backgrounds:
            self.add(bkg)

    def __iter__(self):
        # Hand out an independent iterator each time. The previous
        # implementation kept the iteration index on the instance, which
        # broke nested iteration over the same collection.
        return iter(self._bkgs)

    def __len__(self):
        return len(self._bkgs)

    def __str__(self):
        bkgs = self.sorted()
        retn = "Background collection\n"
        retn += f"Number of backgrounds: {len(self)}\n"
        for i, bkg in enumerate(bkgs):
            retn += f"Background {i+1}:\n"
            retn += f"\t{bkg.model.name}\n"
            retn += f"\tFit interval: {bkg.lower}-{bkg.upper} Da\n"
            for j, incl_rng in enumerate(bkg.include_intervals):
                retn += f"\tInclude mass range {j+1}: {incl_rng[0]}-{incl_rng[1]} Da\n"
            if bkg.fit_results is not None:
                retn += f"\tRed chi^2: {bkg.fit_results.redchi}\n"
        return retn

    def __getitem__(self, item: int) -> "Background":
        return self._bkgs[item]

    @property
    def backgrounds(self) -> "List[Background]":
        """
        Get the list of backgrounds (:class:`Background`) in the collection
        """
        return self._bkgs

    def sort(self):
        """
        Sort the BackgroundCollection in-place by each background's lower fit bound
        """
        self._bkgs = sorted(self._bkgs, key=lambda x: x.lower)

    def sorted(self) -> "BackgroundCollection":
        """
        Get a sorted copy of the BackgroundCollection
        """
        retn = deepcopy(self)
        retn.sort()
        return retn

    def add(self, newbkg: "Background") -> "Background":
        """
        Add a new :class:`Background` to the collection.

        :param newbkg: the new background
        :raises TypeError: if ``newbkg`` is not a :class:`Background`
        :raises RuntimeError: if its include ranges overlap an existing background's
        """
        if not isinstance(newbkg, Background):
            raise TypeError("Expected a Background object, not {}".format(type(newbkg)))
        for bkg in self._bkgs:
            if _background_includes_overlap(newbkg, bkg):
                raise RuntimeError("Cannot have overlapping include ranges between backgrounds.")
        self._bkgs.append(newbkg)
        return newbkg

    @classmethod
    def from_bkg(cls, fpath: str):
        """
        Load background information from file (see BackgroundCollection.export)
        """
        validate.file_exists(fpath)
        raise NotImplementedError()

    def export(self, fpath: str):
        """
        Save background information to file for reuse.

        NOTE(review): validating that the *destination* file already exists looks
        odd for an export; revisit when this method is implemented.
        """
        validate.file_exists(fpath)
        raise NotImplementedError()

    def find_background(self, rng: "Range"):
        """
        Find a background whose include intervals contain a Range's lower mass bound.
        Return None if no background matches.
        """
        for bkg in self._bkgs:
            if bkg.contains_mass(rng.lower):
                return bkg
        return None

    def reset(self):
        """
        Clear the calculated data for each background
        """
        for bkg in self._bkgs:
            bkg.reset()

    def fit(self, spec_x: "ndarray", spec_y: "ndarray"):
        """
        Fit each background against the provided mass spectrum.

        :param spec_x: array of x axis
        :param spec_y: array of y axis
        """
        self.reset()
        for bkg in self.backgrounds:
            bkg.fit(spec_x, spec_y)
def _background_includes_overlap(bkg1: Background, bkg2: Background) -> bool:
    """
    Determine whether any include interval of ``bkg1`` intersects any include
    interval of ``bkg2``.
    """
    # ``is True`` is kept deliberately to preserve the original strict check
    # on the return value of intervals_intersect.
    return any(
        intervals_intersect(first, second) is True
        for first in bkg1.include_intervals
        for second in bkg2.include_intervals
    )
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from numpy import ndarray
from apav.analysis.base import AnalysisBase
from apav.utils import validate
from apav import Roi, RangeCollection, Ion
from apav.core.histogram import histogram2d_binwidth
from apav.core.multipleevent import get_mass_indices
from apav.core.isotopic import Element
from scipy.ndimage import gaussian_filter
import numpy as n
import multiprocessing as mp
from apav.analysis.grid_transfer import transfer as _transfer
def ion_transfer(X: n.ndarray, Y: n.ndarray, Z: n.ndarray, pos: n.ndarray, stddev3: Number) -> ndarray:
    """
    Transfer an array of ion positions to a binned grid.

    When ``stddev3`` is (numerically) zero the positions are simply histogrammed
    into the grid bins; otherwise the compiled ``_transfer`` extension applies a
    gaussian transfer function with the given spread.

    :param X: 3D array of x-coordinates of grid (bin centers)
    :param Y: 3D array of y-coordinates of grid (bin centers)
    :param Z: 3D array of z-coordinates of grid (bin centers)
    :param pos: 2D array of positions, one ion per row
    :param stddev3: 3sigma standard deviation of gaussian distribution
    :return: 3D array of counts
    """
    if len(pos.shape) != 2:
        raise ValueError("Positions must be a 2D array")
    if pos.size == 0:
        raise ValueError("At least one ion position must be provided")
    if any(len(i.shape) != 3 for i in [X, Y, Z]):
        raise ValueError("All grid coordinate arrays must be three-dimensional")
    validate.positive_number(stddev3)
    if n.isclose(stddev3, 0):
        # No smoothing requested: derive the (assumed uniform) bin spacing from
        # adjacent centers and histogram the positions directly.
        binx = X[1, 0, 0] - X[0, 0, 0]
        biny = Y[0, 1, 0] - Y[0, 0, 0]
        binz = Z[0, 0, 1] - Z[0, 0, 0]
        # Bin edges are centers shifted by half a bin, plus one trailing edge
        x_edge = n.concatenate([X[:, 0, 0] - binx / 2, [X[-1, 0, 0] + binx / 2]])
        y_edge = n.concatenate([Y[0, :, 0] - biny / 2, [Y[0, -1, 0] + biny / 2]])
        z_edge = n.concatenate([Z[0, 0, :] - binz / 2, [Z[0, 0, -1] + binz / 2]])
        counts, _ = n.histogramdd(pos, bins=(x_edge, y_edge, z_edge))
        return counts
    else:
        # Delegate to the compiled gaussian transfer kernel
        return _transfer(
            X.astype(n.double), Y.astype(n.double), Z.astype(n.double), pos.astype(n.double), float(stddev3)
        )
def make_coordinate_grids(
    extents: Sequence[Tuple[Number, Number]], bin_width: Union[Sequence[Number], Number], edges=False
) -> Tuple[ndarray, ndarray, ndarray]:
    """
    Generate 3D x/y/z coordinate arrays for indexing into compositional grids.

    :param extents: The x/y/z extent to generate the grids for
    :param bin_width: The bin width of each bin, a single number or sequence of numbers for each dimension
    :param edges: Whether the coordinates represent the edges of the bins or centers
    :return: the X, Y, Z meshgrid arrays ("ij" indexing)
    """
    assert len(extents) == 3
    for i in extents:
        validate.interval(i)
    assert all(len(i) == 2 for i in extents)
    if hasattr(bin_width, "__iter__"):
        assert len(bin_width) == 3
    if isinstance(bin_width, (float, int)):
        # Expand a scalar bin width to one value per dimension
        bin_width = [
            bin_width,
        ] * 3
    bin_width = [float(i) for i in bin_width]
    validate.all_positive_nonzero(bin_width)
    ext_x, ext_y, ext_z = extents
    dx = n.abs(n.diff(ext_x)[0])
    dy = n.abs(n.diff(ext_y)[0])
    dz = n.abs(n.diff(ext_z)[0])
    # Number of bins needed to cover each extent
    nx = int(n.ceil(dx / bin_width[0]))
    ny = int(n.ceil(dy / bin_width[1]))
    nz = int(n.ceil(dz / bin_width[2]))
    # Bin center positions starting at the lower extent of each axis
    x = n.array([ext_x[0] + i * bin_width[0] for i in range(nx)])
    y = n.array([ext_y[0] + i * bin_width[1] for i in range(ny)])
    z = n.array([ext_z[0] + i * bin_width[2] for i in range(nz)])
    # NOTE(review): this appends one extra bin to ALL axes only when the last x
    # center is a whole number -- presumably intended to guarantee the grid
    # covers the full extent, but the condition looks suspicious; confirm.
    # ``x[-1] + [bin_width[0]]`` relies on numpy broadcasting a scalar + list
    # into a 1-element array.
    if x[-1] % 1 == 0:
        x = n.concatenate([x, x[-1] + [bin_width[0]]])
        y = n.concatenate([y, y[-1] + [bin_width[1]]])
        z = n.concatenate([z, z[-1] + [bin_width[2]]])
    if edges is True:
        # Convert centers to edges: shift by half a bin and append a final edge
        x -= bin_width[0] / 2
        y -= bin_width[1] / 2
        z -= bin_width[2] / 2
        x = n.concatenate([x, [x[-1] + bin_width[0]]])
        y = n.concatenate([y, [y[-1] + bin_width[1]]])
        z = n.concatenate([z, [z[-1] + bin_width[2]]])
    return n.meshgrid(x, y, z, indexing="ij")
class RangedGrid(AnalysisBase):
    """
    Compute the ionic and elemental composition spatially distributed among a structured grid
    """

    def __init__(
        self,
        roi: Roi,
        ranges: RangeCollection,
        bin_width: Number = 1,
        first_pass: bool = True,
        delocalization: Union[Number, Sequence[Number]] = n.array([3, 3, 1.5]),
        gauss_trunc: Number = 4,
    ):
        """
        :param roi: Parent Roi the RangedGrid is computed on
        :param ranges: RangeCollection defining the ranges
        :param bin_width: symmetric bin width size
        :param first_pass: Whether the first pass delocalization is computed using a gaussian transfer function.
        :param delocalization: The delocalization distances (as 3 standard deviations of a normal distribution)
        :param gauss_trunc: Number of standard deviations to truncate the gaussian kernel for second pass delocalization
        """
        super().__init__(roi)
        self._ranges = validate.is_type(ranges, RangeCollection)
        self._voxel = float(validate.positive_nonzero_number(bin_width))
        # Normalize the delocalization argument to a length-3 array (a scalar
        # or length-1 sequence applies the same value to all three axes). The
        # mutable numpy default above is safe because it is always copied here.
        if isinstance(delocalization, Real):
            self._delocalization = n.array([delocalization])
        else:
            self._delocalization = n.array(delocalization)
        if len(self._delocalization.shape) == 1 and self._delocalization.shape[0] == 1:
            self._delocalization = n.ones(3) * self._delocalization[0]
        if not all(i > 0 for i in self._delocalization):
            raise ValueError("Delocalization distances must be positive and non-zero")
        if self._delocalization.shape[0] != 3:
            raise ValueError(f"Unexpected delocalization shape, expected 3 got {self._delocalization.shape[0]}")
        self._gauss_trunc = validate.positive_nonzero_number(gauss_trunc)
        # Result caches, populated by _calculate()
        self._X = ndarray([])
        self._Y = ndarray([])
        self._Z = ndarray([])
        self._ion_counts = {}
        self._elem_counts_array = ndarray([])
        self._elem_frac = {}
        self._elem_counts = {}
        self._elem_cum_counts = None
        self._first_pass = first_pass
        self._calculate()

    @property
    def ranges(self) -> RangeCollection:
        """
        The ranges used for ranging the mass spectrum
        """
        return self._ranges

    @property
    def extents(self) -> Tuple[Tuple[float, float], Tuple[float, float], Tuple[float, float]]:
        """
        Get the spatial extents (by center positions) of the grids
        """
        return (
            (self._X.min(), self._X.max()),
            (self._Y.min(), self._Y.max()),
            (self._Z.min(), self._Z.max()),
        )

    @property
    def first_pass(self) -> bool:
        """
        Whether to compute first pass delocalization
        """
        return self._first_pass

    @property
    def centers(self) -> Tuple[ndarray, ndarray, ndarray]:
        """
        The center positions of the structured grids
        For MxNxP voxels this returns 3 arrays of dimensions: Mx1x1, 1xNx1, 1x1xP
        """
        return self._X, self._Y, self._Z

    @property
    def bin_width(self) -> float:
        """
        Bin width of the voxels
        """
        return self._voxel

    @property
    def delocalization(self) -> ndarray:
        """
        Amount of smoothing used during the delocalization process
        """
        return self._delocalization

    @property
    def gauss_trunc(self) -> Number:
        """
        Where to truncate the gaussian kernel for second pass delocalization
        """
        return self._gauss_trunc

    @property
    def all_ionic_counts(self) -> Dict[Ion, ndarray]:
        """
        Get all ionic count grids in a dict
        """
        return self._ion_counts

    @property
    def all_elemental_frac(self) -> Dict[Element, ndarray]:
        """
        Get all elemental fraction grids as a dict
        """
        return self._elem_frac

    @property
    def all_elemental_frac_str(self) -> Dict[str, ndarray]:
        """
        Get all elemental fraction grids as a dictionary (using elemental symbols)
        """
        return {i.symbol: j for i, j in self._elem_frac.items()}

    @property
    def elemental_counts_total(self) -> Number:
        """
        Get the total (sum) of all elemental counts
        """
        return self._elem_cum_counts

    @property
    def elemental_counts_grid(self) -> ndarray:
        """
        Get an array of the cumulative elemental counts in each bin
        """
        return self._elem_counts_array

    def ionic_counts(self, ion: Ion) -> ndarray:
        """
        Get a single ionic counts grid

        :param ion: The ion of the grid to return
        """
        if ion not in self.all_ionic_counts.keys():
            raise ValueError("Ion {} does not exist in the RangedGrid".format(ion.hill_formula))
        return self.all_ionic_counts[ion]

    def elemental_frac(self, element: Union[str, Element]) -> ndarray:
        """
        Get a single elemental fraction grid

        :param element: the element of the grid to return (Element or str)
        """
        if isinstance(element, str):
            el = None
            for i, j in self.all_elemental_frac.items():
                if i.symbol == element:
                    el = i
                    break
            # NOTE(review): if no element matches the symbol, ``el`` stays None
            # and this raises an unhelpful KeyError(None) -- confirm intended
            return self.all_elemental_frac[el]
        elif isinstance(element, Element):
            return self.all_elemental_frac[element]
        else:
            raise TypeError("Expected elemental symbol string or Element type, got {} instead".format(type(element)))

    def _calculate(self):
        """
        Compute the ranged grids
        """
        dims = self.roi.dimensions
        n_voxels = n.ceil(dims / self.bin_width).ravel().astype(int)
        dx, dy, dz = self.roi.xyz_extents
        range_elems = self.ranges.elements()
        # One zeroed counts grid per ranged ion
        self._ion_counts = {i.ion: n.zeros(n_voxels) for i in self.ranges.ranges}
        # NOTE(review): ``r`` is computed but never used
        r = self.bin_width / 2
        X, Y, Z = make_coordinate_grids(self.roi.xyz_extents, self.bin_width)
        self._X = X
        self._Y = Y
        self._Z = Z
        # Split the requested delocalization between the first (transfer) and
        # second (gaussian filter) passes so the total spread matches
        if not self.first_pass:
            pass1_3sigma = 0
            stddev = self.delocalization / 3
        else:
            pass1_3sigma = self.bin_width / 2
            stddev = n.sqrt((self.delocalization / 3) ** 2 - n.tile(pass1_3sigma / 3, 3) ** 2)
        stddev_vox = stddev / self.bin_width
        init_counts = []
        final_counts = []

        def ranged_xyz(rng):
            # Positions of all hits whose mass/charge falls inside ``rng``
            low, up = rng.interval
            idx = n.argwhere((self.roi.mass >= low) & (self.roi.mass < up)).ravel()
            init_counts.append(idx.shape[0])
            return self.roi.xyz[idx].astype(float)

        N = len(self.ranges)
        # NOTE(review): ``nproc`` is computed but never used (leftover from a
        # multiprocessing implementation?)
        nproc = min(N, mp.cpu_count())
        if self.first_pass:
            result = [ion_transfer(X, Y, Z, ranged_xyz(i), pass1_3sigma) for i in self.ranges]
        else:
            result = []
            for i, rng in enumerate(self.ranges):
                coords = ranged_xyz(rng)
                counts, _ = n.histogramdd(coords, bins=n_voxels)
                result.append(counts)
        for i, data in zip(self.ranges, result):
            final_counts.append(n.sum(data))
            nan = n.count_nonzero(n.isnan(data))
            if nan > 0:
                raise ArithmeticError(
                    "NaNs encountered during first pass delocalization, try picking a different bin width"
                )
            # Second pass: gaussian smoothing, sigma expressed in voxel units
            self._ion_counts[i.ion] += gaussian_filter(
                data,
                sigma=stddev_vox,
                # mode="constant",
                truncate=self.gauss_trunc,
            )
        self._elem_frac = {i: 0 for i in range_elems}
        self._elem_counts = {i: 0 for i in range_elems}
        elem_counts = self._elem_counts
        # Decompose each ionic grid into per-element counts (weighted by the
        # element's multiplicity in the ion)
        for ion, counts in self._ion_counts.items():
            for elem, mult in ion.comp_dict.items():
                elem_counts[elem] += mult * counts
        self._elem_counts_array = sum(array for array in elem_counts.values())
        norm = sum(i for i in elem_counts.values())
        self._elem_cum_counts = norm
        for key in elem_counts.keys():
            ary = elem_counts[key]
            # NOTE(review): with ``where=ary > 0`` and no ``out=``, numpy leaves
            # entries where ary <= 0 *uninitialized* rather than 0 -- confirm
            # this is intended
            self._elem_frac[key] = n.divide(ary, norm, where=ary > 0)
class DensityHistogram(AnalysisBase):
    """
    Compute density histograms on an Roi
    """

    def __init__(self, roi: Roi, bin_width=0.3, axis="z", multiplicity="all"):
        """
        :param roi: region of interest
        :param bin_width: width of the bins (NOTE(review): original doc said
            Daltons, but the histogram is over spatial coordinates, so the unit
            is presumably that of the Roi positions -- confirm)
        :param axis: which axis the histogram should be computed on ("x", "y", or "z")
        :param multiplicity: the multiplicity order to compute histogram with
        """
        super().__init__(roi)
        self.bin_width = validate.positive_nonzero_number(bin_width)
        self._multiplicity = validate.multiplicity_any(multiplicity)
        # Multiplicity-resolved histograms require per-hit multiplicity data
        if multiplicity != "all":
            roi.require_multihit_info()
        self._histogram = None
        self._histogram_extents = None
        self._axis = validate.choice(axis, ("x", "y", "z"))
        # NOTE(review): _bin_vol is never populated -- bin_vol always returns None
        self._bin_vol = None
        self._calculate_histogram()

    @property
    def multiplicity(self) -> Union[str, int]:
        """
        The multiplicity order used for the histogram
        """
        return self._multiplicity

    @property
    def bin_vol(self) -> Number:
        """
        Bin volume (NOTE(review): currently never computed; always None)
        """
        return self._bin_vol

    @property
    def axis(self) -> str:
        """
        The axis the histogram is projected along ("x", "y", or "z")
        """
        return self._axis

    @property
    def histogram(self) -> ndarray:
        """
        The computed 2D density histogram
        """
        return self._histogram

    @property
    def histogram_extents(self) -> ndarray:
        """
        The (axis1, axis2) extents the histogram was computed over
        """
        return self._histogram_extents

    def _calculate_histogram(self):
        # Map axis name -> column in the Roi position array; the two axes that
        # are NOT the projection axis form the histogram plane.
        orient_map = {"x": 0, "y": 1, "z": 2}
        ax1, ax2 = (self.roi.xyz[:, val] for key, val in orient_map.items() if key != self.axis)
        ext_ax1, ext_ax2 = (self.roi.xyz_extents[val] for key, val in orient_map.items() if key != self.axis)
        ext = (ext_ax1, ext_ax2)
        if self.multiplicity == "all":
            self._histogram = histogram2d_binwidth(ax1, ax2, ext, self.bin_width)
        else:
            # Restrict the histogram to hits of the requested multiplicity
            idx = get_mass_indices(self.roi.misc["ipp"], self.multiplicity)
            self._histogram = histogram2d_binwidth(ax1[idx], ax2[idx], ext, self.bin_width)
        self._histogram_extents = ext
from collections import defaultdict, OrderedDict
from copy import deepcopy
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from numpy import ndarray
import numpy as n
from tabulate import tabulate
from lmfit.models import PowerLawModel
from apav.core.multipleevent import MultipleEventExtractor, get_mass_indices
from apav.core.histogram import histogram2d_binwidth
from apav import RangeCollection, Range
from apav.core.roi import DummyRoiHistogram, Roi
from apav.visualization import plotting
from apav.analysis.base import AnalysisBase
from apav.utils import validate
from apav.analysis.background import BackgroundCollection, Background
from apav.utils.logging import log
class CorrelationHistogram(AnalysisBase):
    """
    Statistical analysis of the correlation evaporation by ion pair histograms
    """

    def __init__(
        self,
        roi: Roi,
        extents: Tuple[Tuple, Tuple] = ((0, 200), (0, 200)),
        bin_width: Number = 0.1,
        multiplicity: Union[int, str] = 2,
        symmetric: bool = False,
        flip: bool = False,
    ):
        """
        Correlation histograms are computed by forming all ion pairs in given multiple-events. The correlation histogram
        may be computed using any multiplicity "order", for example creating histograms from the 2nd and 5th order
        multiple events:

        >>> roi = Roi.from_epos("path_to_epos_file.epos")
        >>> corr_2 = CorrelationHistogram(roi, multiplicity=2)
        >>> corr_5 = CorrelationHistogram(roi, multiplicity=5)

        The ions from higher order multiplicities are separated into ion pair combinations. For example a 5th order
        multiple event composed of 5 ions, ABCDE is separated in the 10 ion pairs: AB AC AD AE BC BD BE CD CE DE. See
        the MultipleEventExtractor class for more detail.

        The histogram may also be formed from the ion pairs of all multiple events combined together. This is
        achieved by passing the value "multiples" to the multiplicity keyword, indicating all ion pairs should be used:

        >>> roi = Roi.from_epos("path_to_epos_file.epos")
        >>> corr_2 = CorrelationHistogram(roi, multiplicity="multiples")

        :param roi: region of interest
        :param extents: x and y extents for the mass_histogram to be calculated
        :param bin_width: bin width in daltons
        :param multiplicity: the multiplicity to compute the histogram with
        :param symmetric: make the mass_histogram symmetric across the diagonal
        :param flip: flip the histogram along its diagonal (i.e. swap the ion1/ion2 axes)
        """
        super().__init__(roi)
        self.roi.require_multihit_info()
        # Every property setter below triggers _process(); suppress the
        # recomputation until all parameters are assigned, then compute once.
        self._update_suppress = True
        # Parameters
        self.extents = extents
        self.multiplicity = validate.multiplicity_non_singles(multiplicity)
        self.bin_width = bin_width
        self.symmetric = symmetric
        self.flip = flip
        self._histogram = None
        self._pairs = None
        self._pair_idx = None
        self._mevent_extractor = None
        self._update_suppress = False
        self._process()

    @property
    def histogram(self) -> ndarray:
        """
        Get the raw histogram
        """
        return self._histogram

    @property
    def symmetric(self) -> bool:
        """
        Whether or not the histogram is symmetrized across the diagonal
        """
        return self._symmetric

    @symmetric.setter
    def symmetric(self, new: bool):
        """
        Set the histogram symmetry (triggers recomputation)
        """
        self._symmetric = validate.boolean(new)
        self._process()

    @property
    def bin_width(self) -> float:
        """
        Get the bin width in Da
        """
        return self._bin_width

    @bin_width.setter
    def bin_width(self, new: float):
        """
        Set the bin width (triggers recomputation)
        """
        self._bin_width = validate.positive_nonzero_number(new)
        self._process()

    @property
    def extents(self) -> Tuple[Tuple[float, float], Tuple[float, float]]:
        """
        Get the histogram boundaries as ((ion1_min, ion1_max), (ion2_min, ion2_max))
        """
        return self._extents

    @extents.setter
    def extents(self, new: Tuple[Tuple, Tuple]):
        """
        Set the boundaries of the histogram (triggers recomputation)
        """
        self._extents = validate.positive_interval_2d(new)
        self._process()

    @property
    def multiplicity(self) -> Union[int, str]:
        """
        Get the ion pair multiplicity
        """
        return self._multiplicity

    @multiplicity.setter
    def multiplicity(self, new: Union[str, int]):
        """
        Set the multiplicity of the histogram (triggers recomputation)
        """
        self._multiplicity = validate.multiplicity_non_singles(new)
        self._process()

    @property
    def flip(self) -> bool:
        """
        Whether the histogram was flipped (transposed)
        """
        return self._flip

    @flip.setter
    def flip(self, new: bool):
        """
        Set flip, whether the histogram has ion 1 on the y and ion 2 on the x
        (triggers recomputation)
        """
        self._flip = validate.boolean(new)
        self._process()

    @property
    def event_extractor(self) -> MultipleEventExtractor:
        """
        Get the MultipleEventExtractor used to filter the ion pairs
        """
        return self._mevent_extractor

    @property
    def pairs(self) -> ndarray:
        """
        Get all pairs in correlation histogram
        """
        return self._pairs

    def _process(self):
        """
        Calculate the correlation mass_histogram
        """
        # Dont calculate until the constructor is finished
        if self._update_suppress is True:
            return
        self._mevent_extractor = MultipleEventExtractor(self.roi, self.multiplicity, self.extents)
        if len(self._mevent_extractor.pairs) == 0:
            # No pairs within the extents: empty histogram
            self._histogram = n.array([])
            return
        self._pairs = self.event_extractor.pairs
        mults = self._pairs
        if self.symmetric is True:
            # Mirror each pair (a, b) -> (b, a) to symmetrize across the diagonal
            mults = n.vstack((mults, mults[:, ::-1]))
        rng_h, rng_v = self.extents
        self._histogram = histogram2d_binwidth(mults[:, 0], mults[:, 1], (rng_h, rng_v), self.bin_width)
        if self.flip is True:
            self._histogram = self._histogram.T

    def plot(self):
        """
        Interactively view the histogram. This function is for exploring the dataset interactively, while maintaining
        performance. Publication quality plots should be created in other software intended for plotting. See the
        `export` function for saving the histogram in a format suitable for conventional plotting.
        """
        widget = plotting.CorrelationHistogramPlot(self)
        return widget

    def export(self, path: str):
        """
        Export the histogram as a text image, which should be processable in most other applications.

        :param path: Filepath for the image
        """
        n.savetxt(path, self.histogram, delimiter=",")
class RangedMassSpectrum(AnalysisBase):
    """
    Uncorrected ranged mass spectrum analysis
    """

    def __init__(
        self,
        roi: Roi,
        ranges: RangeCollection,
        bin_width: Number = 0.01,
        decompose: bool = False,
        percent: bool = False,
        upper: Number = 200,
        multiplicity: Union[int, str] = "all",
    ):
        """
        This class is used for performing mass spectrum quantification on uncorrected mass histograms. No background
        subtractions are performed, see :class:`NoiseCorrectedMassSpectrum` and :class:`LocalBkgCorrectedMassSpectrum` for
        different levels of background correction.

        This computation does not use any histogram to perform its computation (the composition), however, the histogram
        is computed for the sake of plotting the ranged mass spectrum in :meth:`RangedMassSpectrum.plot` and can be accessed from
        :attr:`RangedMassSpectrum.histogram`

        The mass ranges are provided as a :class:`RangeCollection` instance to the `ranges` argument.

        :param roi: Region of interest
        :param ranges: RangeCollection defining mass ranges
        :param bin_width: bin width of the plotting histogram in Da
        :param decompose: Decompose polyatomic ions into their elemental components or not
        :param percent: Return composition as percentage instead of fraction
        :param upper: The upper value for the mass spectrum
        :param multiplicity: Option to quantify specific multiple-hit events, either "all"
            for the all hits, int=1 for singles, int > 1 for specific multi-hits, or "multiples" for all multiples
        """
        super().__init__(roi)
        if not isinstance(ranges, RangeCollection):
            raise TypeError(f"Expected type RangeCollection not {type(ranges)}")
        self._ranges = ranges
        self._percent = validate.boolean(percent)
        self._decompose = decompose
        self._bin_width = validate.positive_nonzero_number(bin_width)
        self._multiplicity = validate.multiplicity_any(multiplicity)
        self._upper = validate.positive_nonzero_number(upper)
        # Multiplicity-resolved quantification requires per-hit multiplicity data
        if multiplicity != "all":
            self.roi.require_multihit_info()

        # The mass_histogram is not strictly necessary in the quantification scheme for this class, but we use it
        # for plotting the ranges and for derived classes
        self._histogram = self.roi.mass_histogram(self.bin_width, multiplicity=multiplicity, norm=False, upper=upper)
        self._results_dict = None
        self._preprocess()
        self._process()

    @property
    def upper(self) -> Number:
        """
        Get the upper limit of the calculated histogram
        """
        return self._upper

    @property
    def histogram(self) -> ndarray:
        """
        Get the binned computed histogram
        """
        return self._histogram

    @property
    def ranges(self) -> RangeCollection:
        """
        Get the RangeCollection defining all mass ranges
        """
        return self._ranges

    @property
    def percent(self) -> bool:
        """
        Whether the quantification is in percentage or not (fraction)
        """
        return self._percent

    @property
    def decompose(self) -> bool:
        """
        Whether or not molecular species are broken down into elemental forms
        """
        return self._decompose

    @property
    def multiplicity(self) -> Union[str, int]:
        """
        Get the ion multiplicity used for quantification
        """
        return self._multiplicity

    @property
    def bin_width(self) -> Number:
        """
        Get the bin width in Da
        """
        return self._bin_width

    @property
    def quant_dict(self) -> Dict[str, Tuple[float, float]]:
        """
        Get a dictionary of the quantification results

        The keys are a string of the range composition and the values are a tuple of composition % or
        fraction (depending on the percent kwarg) and the total counts in that range
        """
        return deepcopy(self._results_dict)

    def print(self):
        """
        Convenience to print the quantification results as a table
        """
        quant = self.quant_dict
        data = [(i[0], i[1][0], i[1][1]) for i in quant.items()]
        print(tabulate(data, headers=("Ion", "Composition", "Counts")))

    def counts_in_range(self, rng: Range) -> int:
        """
        Calculate the number of counts within a specified mass/charge range. This method should be overridden
        in subclasses since the calculated counts in a range is implementation-specific (i.e. background subtraction).

        :param rng: Range instance for the computation
        """
        if self.multiplicity != "all":
            self.roi.require_multihit_info()
        if not isinstance(rng, Range):
            raise TypeError("Expected a Range object")

        if self.multiplicity == "all":
            idx = n.argwhere((self.roi.mass >= rng.lower) & (self.roi.mass < rng.upper))
        elif isinstance(self.multiplicity, int) or self.multiplicity == "multiples":
            # Restrict to hits of the requested multiplicity before ranging
            idxs = get_mass_indices(self.roi.misc["ipp"], multiplicity=self.multiplicity)
            idx = n.argwhere((self.roi.mass[idxs] >= rng.lower) & (self.roi.mass[idxs] < rng.upper))
        else:
            raise ValueError("Bad input")
        return idx.size

    def _preprocess(self):
        """
        Any pre-processing that should be done prior to quantification. This method should be overridden
        in subclasses (if) any data needs to be available prior to calls to self.counts_in_range
        """

    def _process(self):
        """
        Perform the quantification. This method should NOT be overridden and works for all
        variation in mass spectrum quantification scheme. Instead, self.counts_in_range() should be re-implemented
        for any subclassed quantification method, which is called here to do the computation
        """
        # Normalization factor if percentage flag is enabled
        norm = 100 if self.percent else 1

        ion_counts = defaultdict(lambda: 0)
        atomic_counts = defaultdict(lambda: 0)
        for rng in self.ranges:
            ranged_counts = self.counts_in_range(rng)
            ion_counts[rng.formula] += ranged_counts
            # Each element contributes its multiplicity within the ion
            for element, number in rng.ion.comp_dict.items():
                atomic_counts[element.symbol] += number * ranged_counts

        # Ionic quantification
        ion_total = sum(ion_counts.values())
        ionic_quant = {}
        for ion, count in ion_counts.items():
            ionic_quant[ion] = norm * count / ion_total

        # Atomic quantification
        atomic_total = sum(atomic_counts.values())
        atomic_quant = {}
        for element, count in atomic_counts.items():
            atomic_quant[element] = norm * count / atomic_total

        quant = OrderedDict()
        if self.decompose is False:
            for i in ionic_quant.keys():
                quant[i] = (ionic_quant[i], ion_counts[i])
        elif self.decompose is True:
            for i in atomic_quant.keys():
                quant[i] = (atomic_quant[i], atomic_counts[i])

        self._results_dict = dict(quant)

    def plot(self):
        """
        Get a plot to visualize the mass spectrum
        """
        plt = plotting.MassSpectrumPlotRanged(self)
        return plt
class NoiseCorrectedMassSpectrum(RangedMassSpectrum):
    """
    Ranged mass spectrum analysis with correction for random noise background
    """

    def __init__(
        self,
        roi: Roi,
        ranges: RangeCollection,
        noise_background: Optional[Background] = None,
        **kwargs,
    ):
        """
        This class performs mass spectrum quantification on mass histograms after removing background from random
        noise. I.e. this noise is corrected by fitting a background model to the initial portion of the mass spectrum,
        usually before 1 Dalton. The default model for the fit can be modified using the ``noise_background`` keyword
        argument.

        Additional keyword arguments are passed to :class:`RangedMassSpectrum`.

        The quantification can be done on manually provided mass spectra instead of an Roi by using the alternate
        constructor :meth:`NoiseCorrectedMassSpectrum.from_array`.

        :param roi: Region of interest
        :param ranges: RangeCollection defining mass ranges
        :param noise_background: Background defining the random noise background; when None, a power-law
            background fit on the interval 0.1 -> 0.75 Da is used
        :param **kwargs: Keyword arguments passed to RangedMassSpectrum constructor
        """
        if noise_background is None:
            # Build a fresh default per instance: a shared default Background
            # object (the old mutable default argument) would be mutated by
            # fit() and leak fit state across instances.
            noise_background = Background((0.1, 0.75), model=PowerLawModel())
        self._noise_bkg = noise_background
        self.__corrected_hist = None
        self._noise_fit_data = None
        super().__init__(roi, ranges, **kwargs)

    @classmethod
    def from_array(
        cls, x: ndarray, y: ndarray, ranges: RangeCollection, decompose: bool = False, percent: bool = False
    ):
        """
        Create the :class:`NoiseCorrectedMassSpectrum` from a numpy array (of a mass spectrum) instead of a :class:`Roi`
        """
        # Infer the bin width from the (assumed uniform) x spacing
        binwidth = x[1] - x[0]
        retn = cls(DummyRoiHistogram(x, y), ranges, decompose=decompose, percent=percent, bin_width=binwidth)
        return retn

    @property
    def noise_corrected_histogram(self) -> ndarray:
        """
        Get the noise corrected mass histogram as a (x, y) tuple of numpy arrays
        """
        return self.__corrected_hist

    @property
    def noise_background(self) -> Background:
        """
        Get the :class:`Background` instance defining the noise background
        """
        return self._noise_bkg

    @property
    def noise_fit_data(self) -> ndarray:
        """
        The fit to the random noise background as a (x, fit_y) tuple
        """
        return self._noise_fit_data

    @property
    def noise_counts(self) -> float:
        """
        Get the area of the noise fit
        """
        return self.noise_background.area

    def counts_in_range(self, rng: Range) -> int:
        """
        Calculate the corrected counts in a range on the mass spectrum. Overridden from
        :meth:`RangedMassSpectrum.counts_in_range`.

        :param rng: Range instance for the computation
        """
        if not isinstance(rng, Range):
            raise TypeError(f"Expected a Range object not type {type(rng)}")
        x, y = self.noise_corrected_histogram
        idx = n.argwhere((x >= rng.lower) & (x < rng.upper))
        counts = int(n.sum(y[idx]))
        return counts

    def _preprocess(self):
        # Construct the corrected mass_histogram prior to quantification:
        # fit the noise model to the histogram and subtract the fitted curve
        x, y = self.histogram
        self.noise_background.fit(x, y)
        fit_y = self.noise_background.eval(x)
        self._noise_fit_data = x, fit_y
        self.__corrected_hist = x, y - fit_y

    def plot(self):
        """
        Get an interactive plot of the noise corrected mass spectrum.
        """
        plt = plotting.MassSpectrumPlotNoiseCorrected(self)
        return plt
class LocalBkgCorrectedMassSpectrum(NoiseCorrectedMassSpectrum):
    """
    Ranged mass spectrum analysis with correction for random noise and local background subtraction
    """
    def __init__(
        self,
        roi: Roi,
        ranges: RangeCollection,
        local_background: BackgroundCollection,
        show_warnings: bool = False,
        **kwargs,
    ):
        """
        This class performs mass spectrum quantification correcting for random noise background, as well as local
        background subtraction for isotopic peaks.
        The random noise background subtraction is performed by :class:`NoiseCorrectedMassSpectrum` and can be adjusted
        by the keyword arguments passed to it.
        The local background subtractions are defined by the `local_background` parameter which is a
        :class:`BackgroundCollection` instance. See the :class:`BackgroundCollection` for detail. Generally, backgrounds are defined
        on a fit interval(s), and each background defines an interval(s) which determines the mass ranges it will
        apply to. For example:
        >>> roi = Roi.from_pos("path_to_pos_file.pos")
        >>> ranges = RangeCollection.from_rng("path_to_range_file.rng")
        >>> bkgs = BackgroundCollection()
        >>> bkgs.add(Background((10, 12), [(14, 15), (20, 22)]))
        >>> bkgs.add(Background([(32, 40), (42, 45)], (47, 50)))
        >>> quant = LocalBkgCorrectedMassSpectrum(roi, ranges, bkgs)
        >>> quant.print()
        Does a local background subtracted quantification using 2 backgrounds fits and prints the result.
        :param roi: Region of interest
        :param ranges: RangeCollection defining mass ranges
        :param local_background: BackgroundCollection defining background model
        :param show_warnings: Show warnings (such as when a range has not been assigned a background)
        :param **kwargs: Keyword arguments passed to :class:`NoiseCorrectedMassSpectrum` constructor
        (a ``noise_background`` keyword, if given, is forwarded through **kwargs)
        """
        self._local_bkg = local_background
        self._show_warnings = show_warnings
        # Placeholders populated by _preprocess() (run during the superclass
        # constructor). Both names are mangled to this class, so they do not
        # collide with the superclass's private __corrected_hist.
        self.__local_bkg_hist = None
        self.__corrected_hist = None
        super().__init__(roi, ranges, **kwargs)
    @classmethod
    def from_array(
        cls,
        x: ndarray,
        y: ndarray,
        ranges: RangeCollection,
        background: BackgroundCollection,
        decompose: bool = False,
        percent: bool = False,
    ):
        """
        Mass spectrum analysis from an array instead of a :class:`Roi`.
        :param x: bin centers of the mass spectrum
        :param y: counts of the mass spectrum
        :param ranges: RangeCollection defining mass ranges
        :param background: BackgroundCollection defining the local backgrounds
        :param decompose: decompose polyatomic ions into their constituent atoms
        :param percent: report fractional quantities as percentages
        """
        # Call constructor with a dummy roi
        binwidth = x[1] - x[0]
        retn = cls(
            DummyRoiHistogram(x, y),
            ranges,
            background,
            decompose=decompose,
            percent=percent,
            bin_width=binwidth,
            cutoff=x[-1],
        )
        return retn
    @property
    def show_warnings(self) -> bool:
        """
        Whether or not to show warnings when there are ranges that are not background subtracted
        """
        return self._show_warnings
    @property
    def background_collection(self) -> BackgroundCollection:
        """
        Get the :class:`BackgroundCollection` applied to the computation
        """
        return self._local_bkg
    @property
    def local_bkg_fit(self) -> ndarray:
        """
        The evaluated local-background fits as an (x, bkg_y) pair of arrays
        (bkg_y is zero wherever no background applies)
        """
        return self.__local_bkg_hist
    @property
    def local_bkg_corrected_histogram(self) -> ndarray:
        """
        The noise- and local-background-corrected histogram as an (x, signal_y) pair
        """
        return self.__corrected_hist
    def counts_in_range(self, rng: Range) -> int:
        """
        Calculate the number of counts within a specified mass/charge range.
        Overwritten from :meth:`NoiseCorrectedMassSpectrum.counts_in_range`
        :param rng: Range instance for the computation
        :raises TypeError: if ``rng`` is not a Range
        """
        if not isinstance(rng, Range):
            raise TypeError(f"Expected a Range object not type {type(rng)}")
        x, y = self.local_bkg_corrected_histogram
        # Half-open interval [lower, upper) on the fully corrected spectrum
        idx = n.argwhere((x >= rng.lower) & (x < rng.upper))
        counts = int(n.sum(y[idx]))
        return counts
    def _preprocess(self):
        # Run the random-noise correction first, then subtract the local
        # backgrounds from the noise-corrected spectrum range-by-range
        super()._preprocess()
        x, y = self.noise_corrected_histogram
        self.background_collection.fit(x, y)
        sig_y = n.zeros_like(x)
        bkg_y = n.zeros_like(x)
        for rng in self.ranges:
            bkg = self.background_collection.find_background(rng)
            xmin = rng.lower
            xmax = rng.upper
            idx = n.argwhere((x >= xmin) & (x < xmax)).ravel()
            if bkg is not None:
                # Subtract this range's matched background from its interval
                rng_bkg = bkg.eval(x[idx])
                rng_signal = y[idx] - rng_bkg
                sig_y[idx] = rng_signal
                bkg_y[idx] = rng_bkg
            else:
                # No background matched: pass the range through uncorrected
                sig_y[idx] = y[idx]
                if self.show_warnings:
                    # NOTE(review): Logger.warn is deprecated in stdlib logging in
                    # favor of Logger.warning — confirm the type of `log` before changing
                    log.warn(
                        f"No background could be matched to {rng.ion.hill_formula} from {rng.lower}-{rng.upper} Da."
                        f" This range will not be background corrected."
                    )
        self.__local_bkg_hist = (x, bkg_y)
        self.__corrected_hist = (x, sig_y)
    def plot(self):
        """
        Get an interactive plot showing the background fits
        """
        plt = plotting.MassSpectrumPlotLocalBkgCorrected(self)
        return plt
import numpy as n
from numpy.polynomial import Polynomial
from lmfit.models import Model, update_param_vals
from lmfit.models import ExponentialGaussianModel as _EXPGauss
from numpy import ndarray
class ExponentialGaussianModel(_EXPGauss):
    """
    Exponentially modified gaussian peak model.

    Thin wrapper around lmfit's ExponentialGaussianModel that tightens the
    initial parameter guesses (non-negative amplitude, peak center restricted
    to lie at or below the start of the fitted interval) for APAV use.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def guess(self, *args, **kwargs):
        """
        Estimate initial parameter values, then constrain center and amplitude.
        """
        params = super().guess(*args, **kwargs)
        x = kwargs["x"]
        # The onset of the peak should sit at or before the fitted window
        params["center"].set(min=0, max=x.min())
        params["amplitude"].set(min=0)
        return params
class PowerLawShiftModel(Model):
    """
    Power law model with a horizontal shift:

        y = amplitude * (x - center) ** exponent

    Parameter guesses are tuned for fitting decaying backgrounds (e.g. the
    random-noise tail at the low-mass end of APAV mass spectra).
    """

    def __init__(self, *args, **kwargs):
        def power_law(x, amplitude, center, exponent):
            # Fractional powers of negative numbers are undefined; clamp the
            # shifted abscissa to 1 left of `center` so the model stays finite
            shifted = x - center
            shifted[shifted < 0] = 1
            return amplitude * shifted**exponent

        super().__init__(power_law, *args, **kwargs)

    def guess(self, data, x: ndarray = None, **kwargs):
        """
        Estimate initial model parameter values from data via a degree-1 fit
        in log-log space; fall back to crude defaults if that is not possible.
        """
        try:
            cen = x.min() - x.min() * 0.1
            positive = n.argwhere(data > 0).ravel()
            logx = n.log((x[positive] - cen) + 1.0e-14)
            logy = n.log(data[positive] + 1.0e-14)
            # NOTE(review): Polynomial.fit returns coefficients in its own scaled
            # window domain; the intercept/slope are used as-is here — confirm
            # this approximate seed is intended
            amp, expon = Polynomial.fit(logx, logy, 1)
        except TypeError:
            cen = 0
            expon, amp = 1, n.log(abs(max(data) + 1.0e-9))

        pars = self.make_params(amplitude=n.exp(amp), exponent=expon, center=cen)
        update_param_vals(pars, self.prefix, **kwargs)
        pars["amplitude"].set(min=1)
        pars["exponent"].set(min=-1e3, max=-1)
        pars["center"].set(min=0, max=x.min())
        return pars
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from numpy import ndarray
import fast_histogram as fh
import numpy as n
from apav.utils import validate
def centers2edges(data: ndarray, bin_width: Real) -> ndarray:
    """
    Convert histogram bin centers to bin edges; the output has one more entry
    than the input.

    :param data: 1-dimensional array of bin centers
    :param bin_width: width of each bin
    :raises AttributeError: if ``data`` is not one dimensional
    """
    if len(data.shape) != 1:
        raise AttributeError("Array must be one dimensional to convert centers to edges")
    validate.positive_nonzero_number(bin_width)

    half = bin_width / 2.0
    # Left edge of every bin, plus the right edge of the final bin
    return n.append(data - half, data[-1] + half)
def histogram2d(
    x: ndarray, y: ndarray, extents: Tuple[Tuple[Number, Number], Tuple[Number, Number]], bins: int
) -> ndarray:
    """
    Two-dimensional histogram specified by an explicit bin count.

    :param x: first coordinate array
    :param y: second coordinate array
    :param extents: ((xmin, xmax), (ymin, ymax)) range of the histogram
    :param bins: number of bins
    :return: array of counts
    """
    # fast_histogram is used for speed over numpy.histogram2d
    return fh.histogram2d(x, y, bins, extents)
def histogram1d(data: ndarray, bin_width: Number, rng: Tuple[Number, Number]) -> Tuple[ndarray, ndarray]:
    """
    One-dimensional histogram returning (counts, bin centers).

    :param data: values to histogram (1-dimensional)
    :param bin_width: width of each bin
    :param rng: (lower, upper) boundary of the histogram
    """
    assert len(data.shape) == 1
    lower, upper = rng
    # Rounding keeps the edge/center grids numerically stable for small bin widths
    edges = n.round(n.arange(lower, upper + bin_width, bin_width), 6)
    centers = n.round(n.arange(lower + bin_width / 2, upper + bin_width, bin_width), 6)
    counts = fh.histogram1d(data, edges.size - 1, (lower, edges[-1]))
    # Trim centers in case arange produced one extra entry
    return counts, centers[: counts.size]
def histogram2d_binwidth(
    x: ndarray, y: ndarray, extents: Tuple[Tuple[Number, Number], Tuple[Number, Number]], bin_width: Number = 0.1
) -> ndarray:
    """
    Two-dimensional histogram specified by bin width rather than bin count.

    :param x: first coordinate array
    :param y: second coordinate array
    :param extents: ((xmin, xmax), (ymin, ymax)) range of the histogram
    :param bin_width: width of the bins in Daltons
    """
    (xlo, xhi), (ylo, yhi) = extents
    bins = (int((xhi - xlo) / bin_width), int((yhi - ylo) / bin_width))
    return histogram2d(x, y, extents, bins)
from __future__ import annotations
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from numpy import ndarray
import numpy as n
from apav.utils import validate
from apav.utils.validate import NoMultiEventError
if TYPE_CHECKING:
from apav.core.roi import Roi
class MultipleEventExtractor:
    """
    Extract multiple-event (multi-hit) data from an Roi as ion pairs.
    """

    def __init__(self, roi, multiplicity: Union[int, str], extents: Tuple[Tuple, Tuple] = ((0, 200), (0, 200))):
        """
        Decomposes the multiple events of an Roi into ion pairs. An nth order
        multiple event ABCDEF... contributes every unique 2-combination of its
        ions (see :func:`pairs_per_multiplicity` for the pair count), so the
        output is always formatted as pairs regardless of the source
        multiplicity.

        Any multiplicity > 1 is supported, as is the string "multiples" which
        pools the pairs of every multiple event in the Roi:

        >>> roi = Roi.from_epos("path_to_epos_file.epos")
        >>> mevents = MultipleEventExtractor(roi, multiplicity=2)           # pairs from 2nd order events
        >>> mevents = MultipleEventExtractor(roi, multiplicity=11)          # pairs from 11th order events
        >>> mevents = MultipleEventExtractor(roi, multiplicity="multiples") # pairs from all multiple events

        The mass/charge pairs are available from :attr:`pairs`, while
        :attr:`pair_indices` maps each pair back into the Roi arrays, so any
        per-ion quantity attached to the Roi (detector position, TOF, ...) can
        be paired the same way. For example, to relate the mass/charge
        difference of each pair to its detector-space separation:

        >>> roi = Roi.from_epos("path_to_epos_file.epos")
        >>> mevents = MultipleEventExtractor(roi, multiplicity="multiples")
        >>> mass = roi.mass[mevents.pair_indices]
        >>> det_x = roi.misc["det_x"][mevents.pair_indices]
        >>> det_y = roi.misc["det_y"][mevents.pair_indices]
        >>> diff_det = np.linalg.norm([np.diff(det_x), np.diff(det_y)], axis=0)
        >>> diff_mass = np.diff(mass)
        >>> plt.hist2d(diff_det, diff_mass, bins=100)
        >>> plt.plot()

        :param roi: region of interest
        :param multiplicity: multiplicity order (int > 1 or "multiples")
        :param extents: two dimensional range to extract events from (think correlation histograms)
        """
        self._roi = roi
        self._multiplicity = validate.multiplicity_non_singles(multiplicity)
        # Placeholders; _process() fills them in below
        self._pair_indices = ndarray([])
        self._pairs = ndarray([])
        self._extents = validate.positive_interval_2d(extents)
        self._process()

    @property
    def roi(self) -> Roi:
        """
        The :class:`Roi` the pairs were extracted from
        """
        return self._roi

    @property
    def multiplicity(self) -> Union[int, str]:
        """
        The multiplicity used for extraction: int > 1 or "multiples"
        """
        return self._multiplicity

    @property
    def pairs(self) -> ndarray:
        """
        Mx2 array of mass/charge values, one row per extracted ion pair
        """
        return self._pairs

    @property
    def n_pairs(self) -> int:
        """
        The number of extracted ion pairs
        """
        return self.pairs.shape[0]

    @property
    def pair_indices(self) -> ndarray:
        """
        Mx2 array of indices into the Roi arrays, parallel to :attr:`pairs`;
        use it to pair up any other positional data in the :class:`Roi`
        """
        return self._pair_indices

    @property
    def extents(self) -> Tuple[Tuple[Number, Number], Tuple[Number, Number]]:
        """
        The 2-dimensional boundary used to filter the extracted pairs
        """
        return self._extents

    def _process(self):
        # Delegate pair generation, then store indices as integers so they can
        # index the Roi arrays directly
        values, indices = _multievents2pairs(self.roi, self.multiplicity, self.extents)
        self._pairs = values
        self._pair_indices = n.array(indices, dtype=int)
def pairs_per_multiplicity(multiplicity: int):
    """
    The number of unique ion pairs produced by a single multiple event of the
    given order — the 2-combinations C(multiplicity, 2). For example a 6th
    order event ABCDEF yields the 15 pairs AB AC AD AE AF BC BD BE BF CD CE
    CF DE DF EF:

    >>> pairs_per_multiplicity(6)
    15

    :param multiplicity: integer multiplicity order (>= 2)
    """
    validate.multiplicity_singular_two_or_greater(multiplicity)
    # ((m-1)^2 + (m-1)) / 2  ==  m*(m-1)/2  ==  C(m, 2)
    return multiplicity * (multiplicity - 1) // 2
def _multievents2pairs(
    roi: "Roi", multiplicity: Union[int, str], extents: Tuple[Tuple, Tuple]
) -> Tuple[ndarray, ndarray]:
    """
    Generate ion pairs from an roi with any multiplicity, or all multiples.

    :param roi: Roi object
    :param multiplicity: Any multiplicity value >= 2 or "multiples" for all
    :param extents: 2-dimensional boundary to extract pairs from
    :return: (pairs, pair_indices) — Mx2 mass/charge values and the matching
        Mx2 indices into the Roi arrays
    """
    multiplicity = validate.multiplicity_non_singles(multiplicity)
    roi.require_multihit_info()

    if isinstance(multiplicity, int):
        data, idx_ary = _aggregate_multiples_with_idx(roi, multiplicity)
        # For multiplicity 2 the result from aggregate_multiples is already formatted in pairs
        if multiplicity == 2:
            pairs = data
            pairs_idx = idx_ary
        else:
            # Upper-triangle indices enumerate every unique 2-combination within
            # an event; offset them into each event's row of the flattened data
            tri = n.array(n.triu_indices(multiplicity, 1)).T
            idx = tri.ravel() + multiplicity * n.arange(data.shape[0])[None].T
            pairs = data.ravel()[idx.reshape((-1, 2))]
            pairs_idx = idx_ary.ravel()[idx.reshape((-1, 2))]

        # Filter out pairs outside the supplied extents
        filt = n.where(
            (pairs[:, 0] >= extents[0][0])
            & (pairs[:, 0] <= extents[0][1])
            & (pairs[:, 1] >= extents[1][0])
            & (pairs[:, 1] <= extents[1][1])
        )[0]
        return pairs[filt], pairs_idx[filt]

    elif multiplicity == "multiples":
        # Pool the pairs of every multiple-event order present in the Roi and
        # concatenate them. (The previous implementation also pre-allocated and
        # filled `pairs`/`pairs_idx` arrays sized without the extents filter,
        # but never returned them — that dead double-work is removed here.)
        mults = roi.multiplicity_counts()[0]
        dat = []
        idxs = []
        for mult in mults:
            if mult < 2:
                continue
            # int() guards the isinstance(multiplicity, int) check above against
            # numpy integer types coming out of multiplicity_counts()
            new_pairs, new_idx = _multievents2pairs(roi, int(mult), extents)
            dat.append(new_pairs)
            idxs.append(new_idx)

        if not dat:
            return n.array([]), n.array([])
        return n.concatenate(dat), n.concatenate(idxs)
def _aggregate_multiples_with_idx(roi, multiplicity: int = 2) -> Tuple[ndarray, ndarray]:
    """
    Collect the mass/charge values (and their Roi indices) of all multi-hits
    of exactly the given multiplicity.

    :param roi: Roi
    :param multiplicity: the event multiplicity, int > 0
    :return: two MxN float arrays (M = number of events, N = multiplicity):
        mass/charge values and the corresponding indices into the Roi
    :raises NoMultiEventError: if the Roi carries no multiple-event information
    """
    if not roi.has_multiplicity_info():
        raise NoMultiEventError()
    validate.multiplicity_singular_one_or_greater(multiplicity)

    # Each event of order N occupies N consecutive hits starting where ipp == N
    start = n.where(roi.misc["ipp"] == multiplicity)[0]
    indices = (start[:, None] + n.arange(multiplicity)[None, :]).astype(float)
    masses = roi.mass[indices.astype(int)].astype(n.float64)
    return masses, indices
def get_mass_indices(ipp: ndarray, multiplicity: Union[int, str]) -> ndarray:
    """
    Get the array indices corresponding to multi-events of a specific
    multiplicity.

    :param ipp: array of ions per pulse
    :param multiplicity: event multiplicity (int >= 1 or "multiples")
    :return: array of indices into ipp
    """
    validate.multiplicity_any_singular_or_all_multiples(multiplicity)

    if isinstance(multiplicity, (int, n.int64, n.int32, n.int16, n.int8)):
        first = n.where(ipp == multiplicity)[0]
        if multiplicity == 1:
            return first
        # Each event occupies `multiplicity` consecutive hits starting at `first`
        return (first[:, None] + n.arange(multiplicity)[None, :]).ravel().astype(int)
    elif multiplicity == "multiples":
        # All multiple hits
        return n.where(ipp != 1)[0]
    else:
        raise ValueError("Bad input")
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from numpy import ndarray
import periodictable as pt
from periodictable import elements as el
from periodictable.core import Element
import tabulate as tab
import numpy as n
from itertools import product
from operator import attrgetter
import math
from collections import defaultdict
import re
from copy import deepcopy
from apav.utils import validate
# Symbols of every element recognized by the formula parser; any other
# capitalized token in a formula becomes an UnknownElement placeholder.
_syms = (
    "Ac Ag Al Am Ar As At Au B Ba Be Bh Bi Bk Br C Ca Cd Ce Cf Cl Cm Cn Co Cr Cs Cu "
    "Db Ds Dy Er Es Eu F Fe Fl Fm Fr Ga Gd Ge H He Hf Hg Ho Hs I In Ir K Kr La Li Lr "
    "Lu Lv Mc Md Mg Mn Mo Mt N Na Nb Nd Ne Nh Ni No Np O Og Os P Pa Pb Pd Pm Po Pr "
    "Pt Pu Ra Rb Re Rf Rg Rh Rn Ru S Sb Sc Se Sg Si Sm Sn Sr Ta Tb Tc Te Th Ti Tl "
    "Tm Ts U V W Xe Y Yb Zn Zr"
).split()

# Tokenizer for chemical formulas: an element symbol (capital + optional
# lowercase letter) followed by an optional count
_comp_re = re.compile(r"([A-Z][a-z]?)([0-9]*)")
class UnknownElement:
    """
    Placeholder for an element symbol that is not recognized.

    Mimics the small part of the periodictable Element interface used by the
    rest of the module (symbol/name/mass/number, equality and hashing), but
    carries no isotope data and reports zero mass and atomic number.
    """

    def __init__(self, symbol: str):
        self._symbol = symbol

    def __repr__(self):
        return self.symbol

    def __eq__(self, other):
        if not isinstance(other, (UnknownElement, Element)):
            return NotImplemented
        return other.symbol == self.symbol

    def __hash__(self):
        # Hash purely on the symbol so equal placeholders collapse in dicts/sets
        return hash(self._symbol)

    @property
    def name(self):
        """The element symbol (placeholders have no distinct name)"""
        return self._symbol

    @property
    def symbol(self):
        """The element symbol string"""
        return self._symbol

    @property
    def mass(self):
        """Placeholder mass (always 0)"""
        return 0

    @property
    def number(self):
        """Placeholder atomic number (always 0)"""
        return 0

    @property
    def isotopes(self):
        raise ValueError(f"Placeholder element {self.symbol} does not have isotopes")

    def add_isotope(self, number):
        raise ValueError(f"Placeholder element {self.symbol} does not have isotopes")
def str2composition(formula: str) -> Dict[Element, int]:
    """
    Convert a chemical formula string to a dict of (element: count) pairs.

    :param formula: the chemical string, e.g. "Fe2O3"
    :raises TypeError: if ``formula`` is not a string
    :raises ValueError: if the formula is empty, cannot be fully tokenized,
        or contains an element with an explicit count of 0
    """
    if not isinstance(formula, str):
        raise TypeError(f"Chemical formula must be string not {type(formula)}")
    if not formula:
        raise ValueError("Formula cannot be null")

    matches = re.findall(_comp_re, formula)
    if len(matches) == 0:
        raise ValueError("Formula cannot be null")

    # If the concatenated matches are not the same length as the original
    # formula then some characters were not tokenized -> invalid formula
    all_matches = "".join(elem + count for elem, count in matches)
    if len(all_matches) != len(formula):
        raise ValueError(f"Unable to interpret chemical formula string: {formula}")

    comp = defaultdict(int)
    for elem, count in matches:
        if elem in _syms:
            element = el.symbol(elem)
        else:
            element = UnknownElement(elem)
        if count:
            num = int(count)
            # Compare the parsed value, not the string, so "00"/"000" etc.
            # are rejected as well instead of silently adding 0 atoms
            if num == 0:
                raise ValueError(f"Element {elem} cannot have 0 atoms")
            comp[element] += num
        else:
            comp[element] += 1
    return dict(comp)
class Ion:
    """
    An elemental or molecular ion: a chemical composition plus a charge state.
    """

    def __init__(self, formula: str, charge: int = 0):
        self._charge = int(charge)
        self._comp_dict = str2composition(formula)
        self._formula = formula

    def __eq__(self, other):
        if not isinstance(other, Ion):
            return NotImplemented
        return self.comp_dict == other.comp_dict and self.charge == other.charge

    def __str__(self):
        return f"{self.formula} {self.charge}+"

    def __hash__(self):
        # Hash on the canonical (hill) formula so equivalent formulas collide
        return hash((self.hill_formula, self.charge))

    def items(self):
        """
        Iterate the (element, count) pairs of the composition
        """
        return self.comp_dict.items()

    @property
    def formula(self) -> str:
        """
        The formula string the ion was constructed with
        """
        return self._formula

    @property
    def charge(self) -> int:
        """
        The ion's charge state
        """
        return self._charge

    @property
    def comp_str_dict(self) -> Dict[str, int]:
        """
        The composition as a dictionary of (element symbol: count) pairs
        """
        return {elem.symbol: num for elem, num in self.comp_dict.items()}

    @property
    def comp_dict(self) -> Dict[Element, int]:
        """
        A copy of the composition as a dictionary of (element: count) pairs
        """
        return deepcopy(self._comp_dict)

    @property
    def elements(self) -> List[Element]:
        """
        All unique elements in the ion
        """
        return list(self.comp_dict)

    @property
    def number(self) -> Number:
        """
        The cumulative atomic number over all atoms in the ion
        """
        return sum(elem.number * count for elem, count in self._comp_dict.items())

    @property
    def mass(self) -> Number:
        """
        The cumulative atomic mass over all atoms in the ion
        """
        return sum(elem.mass * count for elem, count in self._comp_dict.items())

    @property
    def num_atoms(self) -> int:
        """
        The total number of atoms in the ion
        """
        return sum(self.comp_dict.values())

    @property
    def hill_formula(self) -> str:
        """
        The formula in Hill notation: C first, H second, then the remaining
        elements alphabetically by symbol
        """
        comp = self.comp_dict

        def fmt(symbol: str, count: int) -> str:
            return symbol + str(count) if count > 1 else symbol

        parts = []
        for special in (el.C, el.H):
            if special in comp.keys():
                parts.append(fmt(special.symbol, comp.pop(special)))
        for symbol, count in sorted((e.symbol, c) for e, c in comp.items()):
            parts.append(fmt(symbol, count))
        return "".join(parts)

    def all_real_elements(self) -> bool:
        """
        True if no element in the composition is an UnknownElement placeholder
        """
        return not any(isinstance(e, UnknownElement) for e in self.elements)
class Isotope:
    """
    A single isotopic species: an ion plus its isotope number, mass and
    absolute abundance.
    """

    def __init__(self, ion: Ion, number: int, mass: Number, abundance: Number):
        """
        All values are supplied manually; see :class:`IsotopeSeries` for
        computed isotope distributions (which builds instances of this class).
        It can also be used to define custom/modified isotopes to feed back
        into :class:`IsotopeSeries`:

        >>> carbon_12 = Isotope(Ion("C", 1), 12, 6, 98.93)

        :param ion: Ion composition
        :param number: atomic number
        :param mass: atomic mass
        :param abundance: absolute isotopic abundance, in (0, 1]
        :raises IonTypeError: if ``ion`` is not an :class:`Ion`
        """
        if not isinstance(ion, Ion):
            raise validate.IonTypeError(ion)
        self._ion = ion
        self._number = validate.positive_nonzero_int(number)
        self._mass = validate.positive_nonzero_number(mass)
        self._abundance = validate.number_in_interval(abundance, 0, 1, lower_open=True, upper_open=False)

    def __repr__(self):
        return f"Isotope: {self.ion.hill_formula} +{self.charge} {self.number} @ {n.round(self.mass, 3)} Da {n.round(self.abundance*100, 2)} %"

    def __eq__(self, other):
        return (
            self.ion == other.ion
            and self.number == other.number
            and self.mass == other.mass
            and self.abundance == other.abundance
        )

    @property
    def ion(self) -> Ion:
        """
        The :class:`Ion` (composition and charge)
        """
        return self._ion

    @property
    def number(self) -> int:
        """
        The atomic number of the isotope
        """
        return self._number

    @property
    def mass(self) -> Number:
        """
        The atomic mass of the isotope
        """
        return self._mass

    @property
    def abundance(self) -> Number:
        """
        The absolute abundance of the isotope
        """
        return self._abundance

    @property
    def charge(self) -> int:
        """
        The cumulative charge of the underlying ion
        """
        return self._ion.charge
class IsotopeSeries:
    """
    Compute isotopic distributions
    """
    def __init__(self, *args, isotopes: Optional[List[Isotope]] = None, threshold: Number = 0.01):
        """
        This class computes isotopic distributions of arbitrary elemental or molecular compositions. The only physical
        requirement is that the charge is not 0.
        This computation can be constructed by providing either the Ion instance directly, or by providing a string of
        the composition and an integer of the charge. i.e.:
        >>> ion1 = IsotopeSeries(Ion("GdCuO2", 3))
        >>> ion2 = IsotopeSeries("GdCuO2", 3)
        >>> ion1 == ion2
        These are equivalent. Complex compositions can sometimes produce very large number of isotopologues with very
        small abundances that are quite below the detectability of most atom probe experiments. As a result the
        computation is thresholded to only display isotopologues above this threshold. As a result, the sum of the
        absolute abundances from
        >>> IsotopeSeries.isotopes
        is not guaranteed to be unity. If this is important, the threshold can be set to 0 to get all isotopologues, or
        consider working with relative abundances instead.
        This computation works for both elemental and molecular ions, i.e.
        >>> IsotopeSeries("CuO2", 2)
        >>> IsotopeSeries("Cu", 3)
        Are both valid signatures.
        The calculation used is derived from the following work:
        Margrave, J. L., & Polansky, R. B. (1962). Relative abundance calculations for isotopic molecular species.
        Journal of Chemical Education, 39(7), 335–337. https://doi.org/10.1021/ed039p335
        :param *args: Either Ion type, or composition (str) and charge (int)
        :param isotopes: "None" to calculate, otherwise must be provided explicitly
        :param threshold: absolute abundance below which isotopologues are hidden (0 shows all)
        """
        # Decipher the overloaded constructor arguments into a single Ion
        if isinstance(args[0], Ion) and len(args) == 1:
            ion = args[0]
        elif isinstance(args[0], str) and len(args) == 1:
            # Bare composition string defaults to charge +1
            ion = Ion(args[0], 1)
        elif len(args) == 2:
            if isinstance(args[0], str) and isinstance(args[1], int):
                ion = Ion(args[0], args[1])
            else:
                raise ValueError("Expected string as first argument and charge int as second")
        else:
            raise ValueError("Could not decipher arguments")
        if not isinstance(ion, Ion):
            raise validate.IonTypeError(ion)
        self._ion = ion
        if self.ion.charge == 0:
            raise ValueError("Can only calculate isotopes of non-neutral ions (charge != 0)")
        # Set the isotopes: _all_isotopes holds the full (unthresholded) set,
        # _isotopes the thresholded view exposed publicly
        self._all_isotopes = []
        self._isotopes = []
        if isotopes is not None:
            self._init_isotopes_as_manual(isotopes)
        else:
            if ion.num_atoms == 1:
                self._init_isotopes_as_element()
            else:
                self._init_isotopes_as_molecular()
        # Per-instance iteration cursor for the __iter__/__next__ protocol
        self.__index = 0
        self._threshold = None
        # Assigning through the property applies the threshold filter
        self.threshold = threshold
    def __repr__(self):
        thresh_str = f", threshold: {self.threshold*100}%" if self.threshold else ", all isotopes"
        retn = f"IsotopeSeries: {self.ion.hill_formula} +{self.ion.charge}{thresh_str}\n"
        max_val = max(i.abundance for i in self.isotopes)
        data = [
            [i + 1, iso.ion.hill_formula, iso.number, iso.mass, iso.abundance * 100, iso.abundance / max_val * 100]
            for i, iso in enumerate(self.isotopes)
        ]
        table = tab.tabulate(data, ("", "Ion", "Isotope", "Mass", "Abs. abundance %", "Rel. abundance %"))
        retn += table
        return retn
    def __len__(self) -> int:
        return len(self._isotopes)
    def __iter__(self):
        self.__index = 0
        return self
    def __next__(self) -> Isotope:
        if len(self._isotopes) == 0:
            raise StopIteration
        elif self.__index == len(self._isotopes):
            # Reset so the series can be iterated again
            self.__index = 0
            raise StopIteration
        else:
            self.__index += 1
            return self._isotopes[self.__index - 1]
    def __getitem__(self, index: int) -> Isotope:
        return self._isotopes[index]
    @property
    def charge(self) -> int:
        """
        Get the cumulative charge of the ion
        """
        return self._ion.charge
    @property
    def ion(self) -> Ion:
        """
        Get the :class:`Ion` (Composition and charge)
        """
        return self._ion
    @property
    def abundances(self) -> ndarray:
        """
        Get an array of the abundances of each isotope
        """
        return n.array([i.abundance for i in self])
    @property
    def masses(self) -> ndarray:
        """
        Get an array of the mass/charge ratios of each isotope
        """
        return n.array([i.mass for i in self])
    @property
    def isotopes(self) -> List[Isotope]:
        """
        Get an array of all isotopes
        """
        return self._isotopes
    @property
    def threshold(self) -> Number:
        """
        Get the threshold used for computing the isotopes
        """
        return self._threshold
    @threshold.setter
    def threshold(self, new: Number):
        """
        Set the threshold. This represents the absolute abundance limit for the computed isotopes
        :param new: the new absolute abundance limit/threshold
        """
        self._threshold = validate.number_in_interval(new, 0, 1, lower_open=False, upper_open=False)
        if self.threshold == 0:
            self._isotopes = self._all_isotopes
        else:
            self._isotopes = [iso for iso in self._all_isotopes if iso.abundance >= self.threshold]
    def _init_isotopes_as_manual(self, isotopes: List[Isotope]):
        # Validate and adopt an explicitly supplied isotope list (also the
        # final step of the computed paths below); stored sorted by mass
        if not all(iso.ion == self.ion for iso in isotopes):
            raise ValueError("All isotopes must be of the same ion")
        if len(set(iso.mass for iso in isotopes)) != len(isotopes):
            raise ValueError("Cannot have duplicate isotopes")
        if not all(iso.charge == self.charge for iso in isotopes):
            raise ValueError("All isotopes must have the same charge")
        self._all_isotopes = sorted(isotopes, key=lambda iso: iso.mass)
    def _init_isotopes_as_element(self):
        """
        Initialize the isotopes from an elemental ion (1 element 1 atom)
        """
        pt_elem = pt.elements.symbol(self.ion.elements[0].symbol)
        # Mass is divided by charge: these are mass/charge ratios as observed
        # in the spectrum; periodictable abundances are percentages
        isos = [
            Isotope(self.ion, pt_elem[i].isotope, pt_elem[i].mass / self.ion.charge, pt_elem[i].abundance / 100)
            for i in pt_elem.isotopes
            if pt_elem[i].abundance > 0
        ]
        self._init_isotopes_as_manual(isos)
        # NOTE(review): `assert self._is_unity` asserts the bound method object,
        # which is always truthy — the unity check never actually runs. Calling
        # it (self._is_unity()) may be the intent; confirm tolerances first.
        assert self._is_unity
    def _init_isotopes_as_molecular(self):
        """
        Initialize the isotopes from a molecular ion (multiple elements/atoms)
        Calculation is derived from the following work:
        Margrave, J. L., & Polansky, R. B. (1962). Relative abundance calculations for isotopic molecular species.
        Journal of Chemical Education, 39(7), 335–337. https://doi.org/10.1021/ed039p335
        """
        def elem2ion(elem: Element, charge: int):
            return Ion(elem.symbol, charge)
        # Get the isotopes for each element (we ignore the scaling of charge until later)
        elem_isos_series = dict((i.symbol, IsotopeSeries(elem2ion(i, 1), threshold=0)) for i in self.ion.elements)
        inputs = []
        for elem_str, elem_num in self.ion.comp_str_dict.items():
            for i in range(int(elem_num)):
                inputs.append(elem_isos_series[elem_str].isotopes)
        # Create all possible combinations of isotopes
        combinations = list(product(*inputs))
        # Sort each isotopic combination by element first then isotope
        for i, item in enumerate(combinations):
            combinations[i] = sorted(item, key=attrgetter("ion.hill_formula", "number"))
        # Extract each unique isotopic combination
        unique_isos = []
        for i in combinations:
            if i not in unique_isos:
                unique_isos.append(i)
        # Calculate the isotopic abundances of the unique isotope combinations
        rslts = []
        for u_iso in unique_isos:
            # Group this combination's isotopes by element, with multiplicities
            iso_dict: Dict[str, List[Tuple[Isotope, int]]] = defaultdict(lambda: [])
            for iso in u_iso:
                if iso in (i[0] for i in iso_dict[iso.ion.hill_formula]):
                    continue
                else:
                    count = u_iso.count(iso)
                    iso_dict[iso.ion.hill_formula].append((iso, count))
            # Multinomial weight per element: N! / (n1! n2! ...) * prod(abundance^n)
            elem_parts = []
            for elem, isos in iso_dict.items():
                elem_amount = math.factorial(self.ion.comp_str_dict[elem])
                isotope_amounts = n.prod([math.factorial(i[1]) for i in isos])
                isotope_abundances = n.prod([i[0].abundance ** (i[1]) for i in isos])
                elem_parts.append(elem_amount * isotope_abundances / isotope_amounts)
            rslts.append(n.prod(elem_parts))
        # Make the Isotope objects
        new_isos = []
        for isos, abundance in zip(unique_isos, rslts):
            number = sum(i.number for i in isos)
            mass = sum(i.mass for i in isos) / n.abs(self.ion.charge)
            newiso = Isotope(self.ion, number, mass, abundance)
            new_isos.append(newiso)
        self._init_isotopes_as_manual(new_isos)
        # NOTE(review): same no-op assert as in _init_isotopes_as_element — the
        # method object is asserted, not its return value.
        assert self._is_unity
    def _is_unity(self):
        """
        For internal checking when all isotopic abundances equal unity
        """
        return n.isclose(sum(i.abundance for i in self._all_isotopes), 1, 1e-10)
from sys import getsizeof
from functools import lru_cache
import os
import struct
from enum import Enum
from warnings import warn
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from numpy import ndarray
import numpy as n
import numpy.linalg as la
from apav.utils import validate
from apav.utils.helpers import unique_vals, minmax, array2native_byteorder
from apav.core.multipleevent import get_mass_indices
from apav.utils.validate import NoMultiEventError, NoDetectorInfoError, NoTOFError
from apav.core.histogram import histogram1d
from apav.visualization import plotting
from apav.utils.logging import log
# numpy dtype strings for the binary atom probe file formats:
# big-endian float32/int32 (pos/epos files) and little-endian float32/int32 (ato files)
_bf = ">f4"
_bi = ">i4"
_lf = "<f4"
_li = "<i4"
class Roi:
    """
    High level container for operating on atom probe data sets.
    """
    # Record layout of an .ato file (little-endian); units noted where known
    _ato_dtype = n.dtype(
        [
            ("xyz", (_lf, 3)),
            ("mass", _lf),
            ("cluster_id", _li),
            ("pulse_number", _lf),
            ("dc_voltage", _lf), # kV
            ("tof", _lf),
            ("det_x", _lf), # cm
            ("det_y", _lf), # cm
            ("pulse_voltage", _lf), # kV
            ("vvolt", _lf),
            ("fourier_r", _lf),
            ("fourier_i", _lf),
        ]
    )
    # Record layout of an .epos file (big-endian)
    _epos_dtype = [
        ("xyz", (_bf, 3)),
        ("mass", _bf),
        ("tof", _bf),
        ("dc_voltage", _bf),
        ("pulse_voltage", _bf),
        ("det_x", _bf),
        ("det_y", _bf),
        ("psl", _bi),
        ("ipp", _bi),
    ]
def __init__(self, xyz: ndarray, mass: ndarray, misc: dict = None):
"""
This data structure is the entry point for loading or constructing atom probe data set for use with other
components of APAV. Fictitious atom probe data set can be created by providing the required XYZ and mass/charge
arrays, or by the alternate constructors for loading from common file types. For example:
Manual data set::
>>> xyz = np.array([[1.2, 0.3, 12.6], [-76.2, 45.6, 0.7]])
>>> mass = np.array([12.4, 6.1, 14.9])
>>> fake_data = Roi(xyz, mass)
Load from file::
>>> pos_data = Roi.from_pos("path_to_pos_file.pos") # load a pos file
>>> epos_data = Roi.from_epos("path_to_epos_file.epos") # load a epos file
>>> ato_data = Roi.from_ato("path_to_ato_file.ato") # load an ato file
:param xyz: xyz atom coordinates
:param mass: Mass to charge ratios
:param misc: Dict of other data, (i.e. ions per pulse or detector x pos)
"""
super().__init__()
if not isinstance(xyz, ndarray) or not isinstance(mass, ndarray):
raise TypeError("Mass and xyz coordinates must be numpy arrays")
if len(xyz.shape) != 2 or xyz.shape[1] != 3:
raise ValueError(f"XYZ array is not correct shape {xyz.shape} should be (Mx3)")
if len(mass.shape) != 1:
print("Mass shape", len(mass.shape))
raise ValueError("Mass array must be 1 dimensional")
if xyz.shape[0] != mass.shape[0]:
raise ValueError("XYZ and mass must have the same number of entries (per atom)")
self._filepath = ""
self._xyz = xyz
self._mass = mass
self._misc = misc or {}
# The mask is a 1d array of indices into the raw data (xyz/mass/etc). If the mask is None then
# we use the whole data set, otherwise accessing any of this data will first slice using this mask.
# The mask gets set when creating sub-rois from existing (i.e. a cylinder roi).
self._mask = None
# Delay computing these values until they called for the first time, greatly increases initialization speed
# with large data sets. numpy.unique can be slower than reading and initializing the data in certain cases
self._multiplicities = None
self._multiplicity_counts = None
self._xyz_extents = None
self._dimensions = None
self._from_pos_or_epos = False
    @property
    def filepath(self) -> str:
        """
        Get the file path, if the :class:`Roi` was loaded from a file.
        Empty string when the Roi was constructed in memory.
        """
        return self._filepath
@property
def multiplicities(self) -> ndarray:
"""
Get an array of the sorted multiplicities.
"""
if not self.has_multiplicity_info():
raise NoMultiEventError()
elif self.has_multiplicity_info() and self._multiplicities is None:
self._multiplicities = unique_vals(self.misc["ipp"])
self._multiplicities.sort()
self._multiplicities = self._multiplicities[1:]
return self._multiplicities
@property
def xyz(self) -> n.ndarray:
"""
Get the Mx3 array of the x/y/z positions
"""
if self._mask is None:
return self._xyz
else:
return self._xyz[self._mask]
@property
def mass(self) -> n.ndarray:
"""
Get the Mx1 array of the mass/charge ratios of each position
"""
if self._mask is None:
return self._mass
else:
return self._mass[self._mask]
@property
def misc(self) -> dict:
"""
Get the dictionary of miscellaneous data for each position. This is usually populated
automatically when the :class:`Roi` is create from :meth:`Roi.from_epos` or :meth:`Roi.from_ato`.
"""
if self._mask is None:
return self._misc
else:
return {key: value[self._mask] for key, value in self._misc.items()}
    @property
    def counts(self) -> int:
        """
        Get the total number of detected ions (rows in the xyz array,
        respecting any active mask).
        """
        return self.xyz.shape[0]
    @property
    def dimensions(self) -> ndarray:
        """
        Get the x/y/z dimensions of the dataset (max - min along each axis).
        """
        # return tuple(i[1] - i[0] for i in self.xyz_extents)
        return n.diff(self.xyz_extents).ravel()
    @property
    def mass_extents(self) -> Tuple[float, float]:
        """
        Get the (min, max) detected mass/charge ratio.
        """
        return self.mass.min(), self.mass.max()
    @property
    def xyz_extents(self) -> Tuple[Tuple[float, float], ...]:
        """
        Get the min/max spatial values of the x/y/z coordinates in nm.
        Computed lazily on first access and cached.
        """
        if self._xyz_extents is None:
            # The 3 major 64-bit operating systems are little endian, so we must byte swap before using the numba
            # accelerated minmax function if the roi was originally from a pos or epos file (these files are big endian)
            # if self._from_pos_or_epos is True:
            #     self._xyz_extents = tuple(minmax(self.xyz[:, i].byteswap().view()) for i in range(self.xyz.shape[1]))
            # else:
            self._xyz_extents = tuple(minmax(self.xyz[:, i]) for i in range(self.xyz.shape[1]))
        return self._xyz_extents
    @property
    def xyz_center(self) -> ndarray:
        """
        Get the center of all positions as the mean of all x/y/z values.
        """
        return n.mean(self.xyz, axis=0)
@property
def detector_extents(self) -> Tuple[Tuple[float, float], Tuple[float, float]]:
"""
Get the min/max spatial values in x/y detector coordinates
"""
if "det_x" not in self.misc.keys():
raise NoDetectorInfoError()
dx = (self.misc["det_x"].min(), self.misc["det_x"].max())
dy = (self.misc["det_y"].min(), self.misc["det_y"].max())
return dx, dy
def has_detailed_info(self) -> bool:
"""
Get if the Roi has any supplementary information available (other than x/y/z/mass-charge).
"""
return bool(len(self.misc))
    def has_multiplicity_info(self) -> bool:
        """
        Get if the Roi has multiple detector event information
        ("ipp" = ions per pulse).
        """
        return "ipp" in self.misc.keys()
    def has_tof_info(self) -> bool:
        """
        Get if the Roi has time of flight information.
        """
        return "tof" in self.misc.keys()
@classmethod
def from_apt(cls, filepath: str, verbose: bool = False):
"""
Read the contents of an apt file into a Roi container
:param filepath: Path to apt file
:param verbose: Print the structure of the apt file as it is read (for debug purposes)
"""
validate.file_exists(filepath)
log.info("Reading apt file: {}".format(filepath))
class RelType(Enum):
REL_UNKNOWN = 0
ONE_TO_ONE = 1
INDEXED = (2,)
UNRELATED = 3
ONE_TO_MANY = 4
class RecordType(Enum):
RT_UNKNOWN = 0
FIXED_SIZE = 1
VARIABLE_SIZE = 2
VARIABLE_INDEXED = 3
class RecordDataType(Enum):
DT_UNKNOWN = 0
INT = 1
UINT = 2
FLOAT = 3
CHARSTRING = 4
OTHER = 5
class Dtype(Enum):
int32 = 4
int64 = 8
char = 1
wchar_t = 2
filetime = 8
def record_dtype2numpy_dtype(rec_dtype: RecordDataType, size: int):
"""
Map a section's record data type to its equivalent numpy dtype
"""
if rec_dtype in (RecordDataType.UINT, RecordDataType.CHARSTRING):
raise ValueError("Cannot map to UINT or CHARSTRING")
int_map = {8: n.int8, 16: n.int16, 32: n.int32, 64: n.int64}
float_map = {32: n.float32, 64: n.float64}
if rec_dtype == RecordDataType.INT:
return int_map[size]
elif rec_dtype == RecordDataType.FLOAT:
return float_map[size]
else:
raise ValueError(f"Unexpected record data type {rec_dtype}")
# Maps the apt format data type to str format needed for struct.unpack
dtype2fmt = {Dtype.int32: "i", Dtype.int64: "q", Dtype.char: "c", Dtype.filetime: "Q", Dtype.wchar_t: "c"}
# Maps the apt format data type to python data type
dtype2constr = {
Dtype.int32: int,
Dtype.int64: int,
Dtype.char: lambda x: x.decode("utf-8"),
Dtype.wchar_t: lambda x: x.decode("utf-16"),
Dtype.filetime: int,
}
with open(filepath, "rb") as dat:
def read_chunk(dtype: Dtype, count: int = 1, start: Union[None, int] = None) -> Union[Tuple[Any], Any]:
if isinstance(start, int):
dat.seek(start)
fmt = dtype2fmt[dtype] * count
constructor = dtype2constr[dtype]
dtype_size = dtype.value
if dtype in (Dtype.wchar_t, Dtype.char):
return constructor(dat.read(dtype_size * count)).replace("\x00", "")
else:
retn = struct.unpack("<" + fmt, dat.read(dtype_size * count))
if len(retn) == 1:
return constructor(retn[0])
else:
return tuple(constructor(i) for i in retn)
cSignature = read_chunk(Dtype.char, 4)
# Read the APT file header --------------------------------------------------------------------------------
iHeaderSize = read_chunk(Dtype.int32)
iHeaderVersion = read_chunk(Dtype.int32)
wcFileName = read_chunk(Dtype.wchar_t, 256)
ftCreationTime = read_chunk(Dtype.filetime)
llIonCount = read_chunk(Dtype.int64)
if verbose:
print(f"\nReading header of {filepath}")
print(f"\tcSignature: " + cSignature)
print(f"\tiHeaderSize: {iHeaderSize}")
print(f"\tiHeaderVersion: {iHeaderVersion}")
print(f"\twcFileName: {wcFileName}")
print(f"\tftCreationTime: {ftCreationTime}")
print(f"\t11IonCount: {llIonCount}")
# Read the APT sections ----------------------------------------------------------------------------
section_start = iHeaderSize
section_data = {}
while True:
sec_sig = read_chunk(Dtype.char, 4, section_start)
if sec_sig == "":
# EOF reached
break
# Flag used to not include a section in the Roi when a configuration
# situation is not implemented or handled
skip_sec = False
sec_header_size = read_chunk(Dtype.int32)
sec_header_ver = read_chunk(Dtype.int32)
sec_type = read_chunk(Dtype.wchar_t, 32)
sec_ver = read_chunk(Dtype.int32)
sec_rel_type = RelType(read_chunk(Dtype.int32))
is_one_to_one = sec_rel_type == RelType.ONE_TO_ONE
if not is_one_to_one:
warn(f'APAV does not handle REL_TYPE != ONE_TO_ONE, section "{sec_type}" will be ignored')
skip_sec = True
sec_rec_type = RecordType(read_chunk(Dtype.int32))
is_fixed_size = sec_rec_type == RecordType.FIXED_SIZE
if not is_fixed_size:
warn(f'APAV does not handle RECORD_TYPE != FIXED_SIZE, section "{sec_type}" will be ignored')
skip_sec = True
sec_rec_dtype = RecordDataType(read_chunk(Dtype.int32))
if sec_rec_dtype in (RecordDataType.DT_UNKNOWN, RecordDataType.OTHER, RecordDataType.CHARSTRING):
warn(f'APAV does not handle RECORD_TYPE == {sec_rec_dtype}, section "{sec_type}" will be ignored')
skip_sec = True
sec_dtype_size = read_chunk(Dtype.int32)
sec_rec_size = read_chunk(Dtype.int32)
sec_data_unit = read_chunk(Dtype.wchar_t, 16)
sec_rec_count = read_chunk(Dtype.int64)
sec_byte_count = read_chunk(Dtype.int64)
if verbose:
print("\nReading new section")
print(f"\tSection header sig: {sec_sig}")
print(f"\tSection header size: {sec_header_size}")
print(f"\tSection header version: {sec_header_ver}")
print(f"\tSection type: {sec_type}")
print(f"\tSection version: {sec_ver}")
print(f"\tSection relative type: {sec_rel_type}")
print(f"\tSection record type: {sec_rec_type}")
print(f"\tSection record data type: {sec_rec_dtype}")
print(f"\tSection data type size (bits): {sec_dtype_size}")
print(f"\tSection record size: {sec_rec_size}")
print(f"\tSection data type unit: {sec_data_unit}")
print(f"\tSection record count: {sec_rec_count}")
print(f"\tSection byte count: {sec_byte_count}")
if not skip_sec:
columns = int(sec_rec_size / (sec_dtype_size / 8))
records = int(sec_rec_count)
count = records * columns
in_data = n.fromfile(
filepath,
record_dtype2numpy_dtype(sec_rec_dtype, sec_dtype_size),
count,
offset=section_start + sec_header_size,
)
if columns > 1:
section_data[sec_type] = in_data.reshape(records, columns)
else:
section_data[sec_type] = in_data
section_start = section_start + sec_byte_count + sec_header_size
has_mass_data = "Mass" in section_data.keys()
has_pos_data = "Position" in section_data.keys()
# Map some APT section names to those that APAV expects, otherwise the provided name is retained
name_map = {
"Multiplicity": "ipp",
"Time of Flight": "tof",
"XDet_mm": "det_x",
"YDet_mm": "det_y",
"Voltage": "dc_voltage",
"Pulse Voltage": "pulse_voltage",
}
# Require mass and position data, clean up some sections, and account for possible duplicate sections (i.e.
# XDet_mm + YDet_mm combined with Detector Coordinates
if not has_mass_data:
raise AttributeError("APT file must have include a mass section")
elif not has_pos_data:
raise AttributeError("APT file must have include a position section")
mass = section_data.pop("Mass")
pos = section_data.pop("Position")
# There are 2 difference ways that detector space coordinates can be included in an apt file, as a single
# section containing both x/y or the x and y in separate sections. Only when the separate x/y sections are not
# present we will load the combined x/y data (which we separate into different x and y arrays).
if "Detector Coordinates" in section_data.keys():
temp = section_data.pop("Detector Coordinates")
if "XDet_mm" not in section_data.keys():
section_data["det_x"] = temp[:, 0]
if "YDet_mm" not in section_data.keys():
section_data["det_y"] = temp[:, 1]
roi = cls(pos, mass)
roi._filepath = filepath
for key, data in section_data.items():
name = key if key not in name_map.keys() else name_map[key]
roi.misc[name] = data
return roi
@classmethod
def from_pos(cls, filepath: str):
"""
Read the contents of a pos file into a Roi container
:param filepath: Path to pos file
"""
validate.file_exists(filepath)
log.info("Reading pos file: {}".format(filepath))
dtype = n.dtype(">f4")
data = n.fromfile(filepath, dtype=dtype, sep="")
data.shape = (int(data.size / 4), 4)
# Data in epos files are big endian which most operating systems are not. Convert to native byte-order
# to prevent errors in compiled c extensions
data = array2native_byteorder(data)
retn = cls(data[:, :3], data[:, 3])
retn._filepath = filepath
retn._from_pos_or_epos = True
return retn
    @classmethod
    def from_epos(cls, filepath: str):
        """
        Read the contents of an extended pos file into an Roi container. Suitable for multiple-hit analysis.

        :param filepath: Path to epos file
        """
        validate.file_exists(filepath)
        log.info("Reading epos file: {}".format(filepath))
        data = n.fromfile(filepath, dtype=n.dtype(Roi._epos_dtype))
        # Data in epos files are big endian which most operating systems are not. Convert to little endian
        # to prevent errors in compiled c extensions
        data = array2native_byteorder(data)
        # ipp (ions per pulse) is downcast to uint8 to save memory
        retn = cls(
            data["xyz"],
            data["mass"],
            {
                "tof": data["tof"],
                "dc_voltage": data["dc_voltage"],
                "pulse_voltage": data["pulse_voltage"],
                "det_x": data["det_x"],
                "det_y": data["det_y"],
                "psl": data["psl"],
                "ipp": data["ipp"].astype(n.uint8),
            },
        )
        retn._filepath = filepath
        retn._from_pos_or_epos = True
        return retn
    @classmethod
    def from_ato(cls, filepath: str):
        """
        Read the contents of an extended ato file into an Roi container. Suitable for multiple-hit analysis.

        :param filepath: Path to ato file
        """
        validate.file_exists(filepath)
        log.info("Reading ato file: {}".format(filepath))
        with open(filepath, "rb") as ato:
            # Skip the 8-byte file header before the records begin
            ato.seek(8, os.SEEK_SET)
            # NOTE(review): f and i below are unused — candidates for removal
            f = "<f4"
            i = "<i4"
            data = n.fromfile(ato, dtype=Roi._ato_dtype)
        data = array2native_byteorder(data)
        # Process some data to make units consistent and other cleaning
        pulsen = data["pulse_number"].copy().astype(n.int64)
        # The stored pulse counter wraps around; detect each wrap as a negative jump
        diff = n.diff(data["pulse_number"])
        switch_idx = n.argwhere(diff < 0).ravel()
        begin = n.concatenate((n.array([0]), switch_idx + 1))
        end = n.concatenate((switch_idx, pulsen.shape))
        data["dc_voltage"] *= 1000  # to volts
        data["pulse_voltage"] *= 1000  # to volts
        data["tof"] *= 1e-3  # to nanoseconds
        # Unwrap the pulse counter: each segment after a wrap gets an offset of
        # j * 2**24 — presumably the counter is 24-bit; TODO confirm
        for j, startstop in enumerate(zip(begin, end)):
            start, stop = startstop
            pulsen[start : stop + 1] += j * 2**24
        data["pulse_number"] = pulsen
        retn = cls(
            data["xyz"],
            data["mass"],
            {
                "tof": data["tof"],
                "dc_voltage": data["dc_voltage"],
                "pulse_voltage": data["pulse_voltage"],
                "pulse_number": data["pulse_number"],
                "det_x": data["det_x"],
                "det_y": data["det_y"],
            },
        )
        retn._filepath = filepath
        return retn
def to_pos(self, filepath: str):
"""
Write the roi to a standard pos file
:param filepath: path of the pos file
"""
validate.dir_is_writable(filepath)
log.info("Writing pos file to: {}".format(filepath))
dtype = n.dtype(">f4")
out = n.hstack([self.xyz, self.mass[None].T]).astype(dtype)
out.tofile(filepath, sep="")
def to_epos(self, filepath: str):
"""
Write the roi to a standard extended pos file
:param filepath: path of the epos file
"""
validate.dir_is_writable(filepath)
epos_misc_entries = ["tof", "dc_voltage", "pulse_voltage", "det_x", "det_y", "psl", "ipp"]
if not all(i in self.misc.keys() for i in epos_misc_entries):
raise AttributeError("Roi does not contain the required misc keys to write to epos")
log.info("Writing epos file to: {}".format(filepath))
out = n.zeros(int(self.counts), dtype=Roi._epos_dtype)
out["xyz"] = self.xyz
out["mass"] = self.mass
out["tof"] = self.misc["tof"]
out["dc_voltage"] = self.misc["dc_voltage"]
out["pulse_voltage"] = self.misc["pulse_voltage"]
out["det_x"] = self.misc["det_x"]
out["det_y"] = self.misc["det_y"]
out["psl"] = self.misc["psl"]
out["ipp"] = self.misc["ipp"]
out.tofile(filepath, sep="")
def memory_size(self) -> float:
"""
Get the approximate memory footprint in Mb
"""
val = getsizeof(self)
val += self.xyz.nbytes
val += self.mass.nbytes
for i in self.misc.values():
val += i.nbytes
return round(val / 1024**2, 3)
    def multiplicity_counts(self) -> Tuple[ndarray, ndarray]:
        """
        Get the statistical count of each degree of multiple-detector events.

        :return: (multiplicities, counts) arrays
        :raises NoMultiEventError: if the Roi carries no multiple-event info
        """
        if not self.has_multiplicity_info():
            raise validate.NoMultiEventError()
        if self._multiplicity_counts is None:
            # NOTE(review): the empty-multiplicities result is returned without being
            # cached, so it is recomputed on every call — confirm if intentional
            if self.multiplicities.size == 0:
                return n.array([]), n.array([])
            counts = n.zeros(self.multiplicities.size)
            for i, j in enumerate(self.multiplicities):
                counts[i] = get_mass_indices(self.misc["ipp"], j).size
            self._multiplicity_counts = self.multiplicities, counts
        return self._multiplicity_counts
def multiplicity_percentage(self) -> Tuple[ndarray, ndarray]:
"""
Get the statistical percentage of each degree of multiple-detector events
"""
mult, counts = self.multiplicity_fraction()
return mult, counts * 100
def multiplicity_fraction(self) -> Tuple[ndarray, ndarray]:
"""
Get the statistical fraction of each degree of multiple-detector events
"""
mult, counts = self.multiplicity_counts()
return mult, counts / counts.sum()
@lru_cache(50)
def tof_histogram(
self,
bin_width: Number = 1,
multiplicity: Union[str, int] = "all",
norm: Union[bool, Tuple[Number, Number]] = False,
cutoff: float = 2000,
) -> (ndarray, ndarray):
"""
Get the time-of-flight histogram of the given Roi. This function is cached to increase speed under repeated
calls.
:param bin_width: Bin width in Da
:param multiplicity: The portion of multiplicity to generate the histogram from. "All" for all hits, int >= 1
for a specific multiplicity, "multiples" for all multiple hits.
:param norm: Normalize the histogram to unity. True/False to generate normalization constant from the whole
spectrum, or Tuple[float, float] to generate normalization constant from a range on the spectrum.
:param cutoff: Maximum time of flight value to generate the histogram
"""
self.require_tof_info()
validate.multiplicity_any(multiplicity)
validate.positive_nonzero_number(bin_width)
validate.positive_nonzero_number(cutoff)
if multiplicity != "all":
self.require_multihit_info()
extents = (0, cutoff)
# If multi-hit information is available
if multiplicity == "all":
counts, edges = histogram1d(self.misc["tof"], bin_width, extents)
else:
idxs = get_mass_indices(self.misc["ipp"], multiplicity)
counts, edges = histogram1d(self.misc["tof"][idxs], bin_width, extents)
# Normalization
norm_val = 1
if norm is True:
norm_val = counts.max()
elif isinstance(norm, (tuple, list)):
# Range base normalization
if len(norm) != 2 or (norm[1] <= norm[0]) or any(i < 0 for i in norm):
raise ValueError("Invalid normalization range")
norm_idx = n.argwhere((edges >= norm[0]) & (edges <= norm[1]))
norm_val = counts[norm_idx].upper()
counts /= norm_val
return edges, counts
@lru_cache(50)
def mass_histogram(
self,
bin_width: Number = 0.05,
lower: Number = 0,
upper: Number = 200,
multiplicity: Union[str, int] = "all",
norm: Union[bool, Tuple[Number, Number]] = False,
) -> (ndarray, ndarray):
"""
Get the mass/charge ratio histogram of the given Roi. This function is cached to increase speed under repeated
calls.
:param bin_width: Bin width in daltons
:param lower: Minimum mass/charge ratio
:param upper: Minimum mass/charge ratio
:param multiplicity: The portion of multiplicity to generate the histogram from. "All" for all hits, int >= 1
for a specific multiplicity, "multiples" for all multiple hits.
:param norm: Normalize the histogram to unity. True/False to generate normalization constant from the whole
spectrum, or Tuple[float, float] to generate normalization constant from a range on the spectrum.
"""
validate.multiplicity_any(multiplicity)
validate.positive_nonzero_number(bin_width)
validate.positive_interval((lower, upper))
if multiplicity != "all":
self.require_multihit_info()
extents = (lower, upper)
# If multi-hit information is available
if multiplicity == "all":
counts, centers = histogram1d(self.mass, bin_width, extents)
else:
idxs = get_mass_indices(self.misc["ipp"], multiplicity)
counts, centers = histogram1d(self.mass[idxs], bin_width, extents)
# Normalization
norm_val = 1
if norm is True:
norm_val = counts.max()
elif isinstance(norm, (tuple, list)):
# Range base normalization
if len(norm) != 2 or (norm[1] <= norm[0]) or any(i < 0 for i in norm):
raise ValueError("Invalid normalization range")
norm_idx = n.argwhere((centers >= norm[0]) & (centers <= norm[1]))
norm_val = counts[norm_idx].upper()
counts /= norm_val
return centers, counts
    def plot_mass_spectrum(self):
        """
        Get an interactive plot of the mass spectrum of the Roi.

        :return: a :class:`plotting.MassSpectrumPlot` for this Roi
        """
        return plotting.MassSpectrumPlot(self)
    def require_multihit_info(self):
        """
        Use when a function/argument requires multiple hit information.

        :raises NoMultiEventError: if "ipp" data is not present
        """
        if not self.has_multiplicity_info():
            raise NoMultiEventError()
    def require_detector_info(self):
        """
        Use when a function/argument requires detector information.

        :raises NoDetectorInfoError: if "det_x"/"det_y" data is not present
        """
        if any(i not in self.misc.keys() for i in ("det_x", "det_y")):
            raise NoDetectorInfoError()
    def require_tof_info(self):
        """
        Use when a function/argument requires time of flight information.

        :raises NoTOFError: if "tof" data is not present
        """
        if "tof" not in self.misc:
            raise NoTOFError()
class RoiSubsetType(Roi):
    """
    For generating Roi instances from subsets of existing Roi. Also set certain restriction that otherwise
    would not make sense (such as file loading methods).
    """
    # NOTE(review): these overrides raise the generic Exception type; NotImplementedError
    # would be more precise — confirm no callers depend on the exact type before changing
    @classmethod
    def from_ato(cls, filepath: str):
        """
        Cannot load data into subset types
        """
        raise Exception("Loading files from roi subset types is not allowed")
    @classmethod
    def from_pos(cls, filepath: str):
        """
        Cannot load data into subset types
        """
        raise Exception("Loading files from roi subset types is not allowed")
    @classmethod
    def from_epos(cls, filepath: str):
        """
        Cannot load data into subset types
        """
        raise Exception("Loading files from roi subset types is not allowed")
class RoiRectPrism(RoiSubsetType):
    """
    Creates a new roi from an existing roi, containing ions within a rectangular prism
    """

    def __init__(self, parent: Roi, center: Tuple[Number, Number, Number], widths: Tuple[Number, Number, Number]):
        """
        :param parent: Parent Roi to generate the subset from
        :param center: Geometric center to place the rectangular prism
        :param widths: x, y, z lengths of the rectangular prism
        """
        self._parent = parent
        self._center = center
        self._widths = validate.all_positive_nonzero(widths)
        super().__init__(parent.xyz, parent.mass, misc=parent.misc)
        self._from_pos_or_epos = self._parent._from_pos_or_epos
        pos = self.xyz
        # Half-widths are the distances from the center to each face of the prism
        half_widths = [w / 2 for w in widths]
        bounds = [(c - h, c + h) for c, h in zip(center, half_widths)]
        # Keep only positions strictly inside all three axis bounds
        inside = n.ones(pos.shape[0], dtype=bool)
        for axis, (lo, hi) in enumerate(bounds):
            inside &= (lo < pos[:, axis]) & (pos[:, axis] < hi)
        idx = n.argwhere(inside).ravel()
        log.debug("Created {} with:\n\tcenter = {}\n\twidths = {}".format(self.__class__, center, widths))
        self._mask = idx
class RoiSphere(RoiSubsetType):
    """
    Creates a new roi from an existing roi, containing ions within a sphere
    """

    def __init__(self, parent: Roi, center: Tuple[Number, Number, Number], radius: Number):
        """
        :param parent: Parent Roi to generate the subset from
        :param center: Geometric center of the sphere
        :param radius: Radius of the sphere
        """
        self._parent = parent
        self._center = center
        self._radius = validate.positive_nonzero_number(radius)
        super().__init__(parent.xyz, parent.mass, misc=parent.misc)
        self._from_pos_or_epos = self._parent._from_pos_or_epos
        pos = self.xyz
        # Coarse pass: keep only ions inside the sphere's bounding box
        inside = n.ones(pos.shape[0], dtype=bool)
        for axis, c in enumerate(center):
            inside &= (c - radius < pos[:, axis]) & (pos[:, axis] < c + radius)
        idx = n.argwhere(inside).ravel()
        # Fine pass: keep only ions within the sphere radius
        dist = la.norm((pos[idx] - center), axis=1)
        idx = idx[n.argwhere(dist < radius).ravel()]
        log.debug(f"Created {self.__class__} with:\n\tcenter = {center}\n\tradius = {radius}")
        self._mask = idx
class RoiCylinder(RoiSubsetType):
    """
    Creates a new roi from an existing roi, containing ions within a cylinder
    """
    def __init__(
        self, parent: Roi, center: Tuple[Number, Number, Number], radius: Number, height: Number, axis: str = "z"
    ):
        """
        :param parent: Parent Roi to generate the subset from
        :param center: Geometric center to place the cylinder
        :param radius: Radius of the cylinder
        :param height: Height of the cylinder
        :param axis: Axis to orient the cylinder. Either "x", "y", or "z"
        """
        self._parent = parent
        self._center = center
        self._radius = validate.positive_nonzero_number(radius)
        self._height = validate.positive_nonzero_number(height)
        self._axis = validate.choice(axis, ("x", "y", "z"))
        super().__init__(parent.xyz, parent.mass, misc=parent.misc)
        self._from_pos_or_epos = self._parent._from_pos_or_epos
        xc, yc, zc = center
        axis_map = {"x": 0, "y": 1, "z": 2}
        # index of the axial direction
        axial_idx = axis_map[axis]
        # Indices of the non-axial directions (the radial plane of the cylinder)
        non_axial_idx = [i for i in range(3) if i != axial_idx]
        # Map the axis to the value corresponding to the difference from the center to that axes' outer boundary
        # (height/2 along the cylinder axis, radius in the other two directions)
        diff_map = {ax: radius if ax != axis else height / 2 for ax in ("x", "y", "z")}
        # Axis boundaries
        xext, yext, zext = (
            (xc - diff_map["x"], xc + diff_map["x"]),
            (yc - diff_map["y"], yc + diff_map["y"]),
            (zc - diff_map["z"], zc + diff_map["z"]),
        )
        xyz = self.xyz
        # First filter out everything outside of the bounding box of the cylinder
        idx = n.argwhere((xext[0] < xyz[:, 0]) & (xyz[:, 0] < xext[1])).ravel()
        idx = idx[n.argwhere((yext[0] < xyz[idx][:, 1]) & (xyz[idx][:, 1] < yext[1])).ravel()]
        idx = idx[n.argwhere((zext[0] < xyz[idx][:, 2]) & (xyz[idx][:, 2] < zext[1])).ravel()]
        # Then filter out everything not within the cylinder radius
        # (distance measured only in the radial plane)
        r = la.norm((xyz[idx] - center)[:, non_axial_idx], axis=1)
        idx = idx[n.argwhere(r < radius).ravel()]
        log.debug(
            f"Created {self.__class__} with:\n\tcenter = {center}\n\tradius = {radius}\n\theight = {height}\n\taxis = {axis}"
        )
        self._mask = idx
class DummyRoiHistogram(Roi):
    """
    A dummy roi with an explicitly specified, constant mass spectrum histogram. This is used when an
    analysis needs to bypass loading the pos/epos data, such as when doing a MassSpectrum analysis on a mass
    spectrum from a csv file. One may choose to do this to avoid the cost of repeatedly loading very large
    datasets.
    """

    def __init__(self, x: ndarray, y: ndarray):
        """
        :param x: the x values of the mass histogram
        :param y: the y values of the mass histogram
        """
        # A single dummy ion satisfies the base Roi constructor's shape checks
        super().__init__(n.array([[0, 0, 0]]), n.array([0]))
        self.__histogram = (x, y)

    def mass_histogram(self, *args, **kwargs):
        """
        Override :meth:`Roi.mass_histogram()` to always return the fixed histogram
        supplied at construction; all arguments are ignored.
        """
        return self.__histogram
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
from apav.core.isotopic import Element
from collections import OrderedDict
from configparser import ConfigParser
import copy
from tabulate import tabulate
import numpy as n
from apav.utils import helpers, validate
import apav as ap
from apav.utils.logging import log
class Range:
"""
A single mass spectrum range
"""
__next_id = 0
def __init__(
self,
ion: Union["ap.Ion", str],
minmax: Tuple[Number, Number],
vol: Number = 1,
color: Tuple[Number, Number, Number] = (0, 0, 0),
):
"""
Define a singular mass spectrum range composed of a composition, interval, volume, and color. i.e.
Created as:
>>> cu = Range("Cu", (62, 66), color=(0.5, 1, 0.25))
:param ion: the range composition
:param minmax: (min, max) tuple of the mass spectrum range
:param vol: the "volume" of the atom used during reconstruction
:param color: the color as RGB fractions
"""
super().__init__()
if any(i < 0 for i in (minmax[0], minmax[1])):
raise ValueError("Range limits cannot be negative")
elif minmax[0] >= minmax[1]:
raise ValueError("Range lower bound cannot be larger than range upper bound")
if isinstance(ion, str):
ion = ap.Ion(ion)
elif not isinstance(ion, (ap.Ion, str)):
raise TypeError(f"Range ion must be type Ion or string, not {type(ion)}")
self._ion = ion
self._lower = validate.positive_number(minmax[0])
self._upper = validate.positive_nonzero_number(minmax[1])
self._color = validate.color_as_rgb(color)
self._vol = validate.positive_number(vol)
self._id = Range.__next_id
Range.__next_id += 1
def __contains__(self, mass: float) -> bool:
"""
Be able test if range contains a mass ratio
"""
return self.contains_mass(mass)
def __repr__(self):
retn = f"Range: {self.hill_formula},"
col = [round(i, 2) for i in self.color]
retn += f" Min: {self.lower}, Max: {self.upper}, Vol: {self.vol}, Color: {col}"
return retn
def __eq__(self, other: "Range"):
if not isinstance(other, Range):
return NotImplemented
if other.ion == self.ion and n.isclose(other.lower, self.lower) and n.isclose(other.upper, self.upper):
return True
else:
return NotImplemented
@property
def id(self) -> int:
return self._id
@property
def lower(self) -> Number:
"""
Get the lower (closed) boundary of the range
"""
return self._lower
@lower.setter
def lower(self, new: Number):
"""
Set the lower (closed) boundary of the range
:param new:
:return:
"""
validate.positive_number(new)
if new >= self._upper:
raise ValueError(f"Lower bound for {self.ion} ({new}) cannot be >= upper bound ({self.upper})")
self._lower = new
@property
def upper(self) -> Number:
"""
Get the upper (open) boundary of the range
"""
return self._upper
@upper.setter
def upper(self, new: Number):
"""
Set the upper (open) boundary of the range
"""
validate.positive_number(new)
if new <= self._lower:
raise ValueError(f"Upper bound for {self.ion} ({new}) cannot be <= lower bound ({self.lower})")
self._upper = new
@property
def color(self) -> Tuple[Number, Number, Number]:
"""
Get the color of the range as (R, G, B) tuple. Values range from 0-1
"""
return self._color
@color.setter
def color(self, new: Tuple[Number, Number, Number]):
"""
Set the color of the range. Color must be a Tuple(reg, green, blue) where RGB values are between 0-1
"""
self._color = validate.color_as_rgb(new)
    @property
    def interval(self) -> Tuple[Number, Number]:
        """
        Get the (min, max) interval defining the mass spectrum range
        """
        return self.lower, self.upper
    @property
    def vol(self) -> Number:
        """
        Get the volume of the range
        """
        # The setter guarantees this is a positive, non-zero number
        return self._vol
    @vol.setter
    def vol(self, new: Number):
        """
        Set the volume of the range

        :param new: the new volume; must be a positive non-zero number
        :raises ValueError: if the value is not > 0
        """
        self._vol = validate.positive_nonzero_number(new)
    def num_elems(self) -> int:
        """
        Get the number of unique elements of the range composition

        :return: count of distinct elements in the range's ion
        """
        return len(self.ion.elements)
    @property
    def ion(self) -> "ap.Ion":
        """
        Get the ion that defines this range's composition
        """
        return self._ion
@ion.setter
def ion(self, new: Union["ap.Ion", str]):
"""
Set the composition of the range
:param new: the new composition
"""
if not isinstance(new, (str, ap.Ion)):
raise TypeError(f"Expected type Ion or string not {type(new)}")
if isinstance(new, str):
self._ion = ap.Ion(new)
else:
self._ion = new
    @property
    def hill_formula(self) -> str:
        """
        Get the range composition as a Hill-notation string
        """
        return self.ion.hill_formula
    @property
    def formula(self) -> str:
        """
        Get the range composition as a Hill-notation string with whitespace removed
        """
        return self.ion.hill_formula.replace(" ", "")
def intersects(self, rng: "Range"):
"""
Determine if the range intersects a given :class:`Range`
"""
if self.lower <= rng.lower < self.upper:
return True
elif self.lower < rng.upper < self.upper:
return True
else:
return False
    def contains_mass(self, mass: Number) -> bool:
        """
        Test if the given mass/charge ratio is contained within the range's
        half-open bounds [lower, upper)

        :param mass: mass/charge ratio
        :return: True if the mass lies within the range
        """
        validate.positive_number(mass)
        return self.lower <= mass < self.upper
class RangeCollection:
    """
    Maintain and operate on a collection of non-overlapping mass spectrum ranges
    """

    def __init__(self, ranges: Sequence[Range] = ()):
        """
        Maintain and operate on a collection of ranges that describe the peaks in a mass spectrum. This is the principle
        class used for mass spectrum range definitions. A collection may be created by manually supplying the Range
        objects through the constructor, or 1 by 1 through :meth:`RangeCollection.add`. A :class:`RangeCollection` may also
        be created using the alternate constructors :meth:`RangeCollection.from_rng` and
        :meth:`RangeCollection.from_rrng` to import the ranges from the two common range file types.

        A :class:`RangeCollection` can be created as:

        >>> rng_lst = [Range("Cu", (62.5, 63.5)), Range("Cu", (63.5, 66))]
        >>> rngs = RangeCollection(rng_lst)

        Or 1 by 1 as:

        >>> rngs = RangeCollection()
        >>> rngs.add(Range("Cu", (62.5, 63.5)))
        >>> rngs.add(Range("Cu", (63.5, 66)))

        :param ranges: sequence of Range objects
        :raises TypeError: if any item is not a :class:`Range`
        """
        if not all(isinstance(i, Range) for i in ranges):
            raise TypeError("Cannot create RangeCollection from non-Range objects")
        self._ranges = list(ranges)
        self.__index = 0  # cursor for the iterator protocol (__next__)
        self._filepath = ""

    def __iter__(self):
        # Restart iteration from the first range every time iteration begins
        self.__index = 0
        return self

    def __next__(self) -> Range:
        # Reset the cursor on exhaustion so the collection can be iterated again
        if self.__index >= len(self._ranges):
            self.__index = 0
            raise StopIteration
        self.__index += 1
        return self._ranges[self.__index - 1]

    def __len__(self) -> int:
        return len(self._ranges)

    def __repr__(self):
        retn = "RangeCollection\n"
        retn += f"Number of ranges: {len(self)}\n"
        ranges = self.sorted_ranges()
        if len(self) > 0:
            lo, hi = ranges[0].lower, ranges[-1].upper
        else:
            lo = ""
            hi = ""
        retn += f"Mass range: {lo} - {hi}\n"
        retn += f"Number of unique elements: {len(self.elements())}\n"
        retn += f"Elements: {', '.join(elem.symbol for elem in self.elements())}\n\n"
        data = [(i.hill_formula, i.lower, i.upper, i.vol, [round(j, 2) for j in i.color]) for i in self.sorted_ranges()]
        head = ("Composition", "Min (Da)", "Max (Da)", "Volume", "Color (RGB 0-1)")
        table = tabulate(data, headers=head)
        retn += table
        return retn

    @property
    def filepath(self) -> str:
        """
        Get the file path the :class:`RangeCollection` was created from, if it was imported from a file
        """
        return self._filepath

    @property
    def ranges(self) -> List[Range]:
        """
        Get a copy of the ranges in the RangeCollection. This returns a copy to prevent accidental modification
        of the underlying ranges possibly resulting in overlapping ranges.

        Instead, remove the old range with RangeCollection.remove_by_mass() and add the new one, or use
        RangeCollection.replace()
        """
        return copy.deepcopy(self._ranges)

    @classmethod
    def from_rrng(cls, fpath: str):
        r"""
        Build RangeCollection from \*.rrng files

        :param fpath: filepath
        """
        retn = cls()
        retn._filepath = validate.file_exists(fpath)
        log.info("Reading RRNG file: {}".format(fpath))
        conf = ConfigParser()
        conf.read(fpath)

        nions = int(conf["Ions"]["Number"])
        nranges = int(conf["Ranges"]["number"])
        elems = [conf["Ions"]["ion" + str(i)] for i in range(1, nions + 1)]

        for i in range(1, nranges + 1):
            line = conf["Ranges"]["Range" + str(i)].split()
            # IVAS saves unknown elements with a name field and not composition, skip these
            if any("Name" in entry for entry in line):
                continue
            rmin = float(line.pop(0))
            rmax = float(line.pop(0))
            # The rest can be converted to a dictionary easily
            vars = OrderedDict([item.split(":") for item in line])
            vol = float(vars.pop("Vol"))
            col = helpers.hex2rgbF(vars.pop("Color"))
            # Now the rest should be ions
            assert all(key in elems for key in vars.keys())
            comp_str = "".join(elem + str(count) for elem, count in vars.items())
            retn.add(Range(comp_str, (rmin, rmax), vol, col))
        return retn

    @classmethod
    def from_rng(cls, filepath: str):
        """
        Build RangeCollection from a .rng file

        :param filepath: filepath
        """
        raise NotImplementedError()

    def clear(self):
        """
        Remove all Ranges from the RangeCollection
        """
        self._ranges = []

    def add(self, new: Range):
        """
        Add a new :class:`Range` to the :class:`RangeCollection`

        :param new: the new :class:`Range`
        :raises TypeError: if the value is not a :class:`Range`
        :raises ValueError: if the new range overlaps an existing one
        :return: the added range
        """
        if not isinstance(new, Range):
            raise TypeError(f"Can only add Range types to RangeCollection not {type(new)}")
        for existing in self.ranges:
            if existing.intersects(new):
                raise ValueError("Mass ranges cannot coincide")
        self._ranges.append(new)
        return new

    def remove_by_mass(self, mass: float):
        """
        Remove any range whose interval contains the given mass/charge ratio

        :param mass: mass/charge ratio
        """
        validate.positive_number(mass)
        # Iterate over a shallow copy: removing items from the list being iterated
        # would silently skip the element following each removal
        for rng in list(self._ranges):
            if rng.lower <= mass < rng.upper:
                self._ranges.remove(rng)

    def replace(self, old_rng: Range, new_rng: Range):
        """
        Replace an existing Range with a new one. Throws an error if the range is not found.

        :param old_rng: Range to be replaced
        :param new_rng: New range
        :raises IndexError: if ``old_rng`` is not in the collection
        """
        for i, rng in enumerate(self._ranges):
            if rng == old_rng:
                self._ranges[i] = new_rng
                return
        raise IndexError(f"RangeCollection does not contain {old_rng}")

    def ions(self) -> Tuple["ap.Ion", ...]:
        """
        Get a tuple of the unique ions in the collection
        """
        return tuple(set(i.ion for i in self.ranges))

    def elements(self) -> Tuple[Element]:
        """
        Get a tuple of the unique elements in the collection
        """
        allelems = []
        for rng in self:
            allelems += list(rng.ion.elements)
        return tuple(set(allelems))

    def sorted_ranges(self) -> list:
        """
        Get the list of range objects sorted in ascending mass range
        """
        return sorted(self._ranges, key=lambda x: x.lower)

    def check_overlap(self) -> Union[Tuple, Tuple[float, float]]:
        """
        Check if any ranges in the RangeCollection overlap. This returns the first overlap found, not all
        overlaps. This is provided if Ranges are being directly accessed and modified

        :return: the first overlapping pair of ranges, or an empty tuple
        """
        # Pairwise comparison over unordered pairs only (j > i)
        for i, r1 in enumerate(self.ranges):
            for j, r2 in enumerate(self.ranges):
                if j <= i:
                    continue
                if r1.intersects(r2):
                    return r1, r2
        return ()

    def find_by_mass(self, mass: float) -> Range:
        """
        Get the range that contains the given m/q

        :param mass: mass/charge ratio
        :raises ValueError: if no range contains the mass
        """
        # Ranges cannot overlap, so the first match is the only match
        for rng in self.ranges:
            if mass in rng:
                return rng
        raise ValueError(f"No range containing {mass} exists")
from typing import Sequence, Tuple, List, Dict, Any, Union, Type, Optional, TYPE_CHECKING
from numbers import Real, Number
import os
from os.path import abspath, dirname, join
import numpy as n
import apav.utils.helpers as helpers
int_types = (int, n.int64, n.int32, n.int16, n.int8)
class NoMultiEventError(Exception):
    """
    Raised when an operation requires multiple-hit information (i.e. originates from an epos file) but
    that information is not available
    """

    def __init__(self):
        super().__init__("Roi has no multiple-event information")
class NoDetectorInfoError(Exception):
    """
    Raised when an operation requires detector specific information (i.e. originates from an epos file) but
    that information is not available
    """

    def __init__(self):
        super().__init__("Roi has no detector coordinate information")
class NoTOFError(Exception):
    """
    Raised when an operation requires time-of-flight information (i.e. originates from an epos file) but
    that information is not available
    """

    def __init__(self):
        super().__init__("Roi has no time-of-flight information")
class AbstractMethodError(Exception):
    """
    Raised when an abstract method is called directly instead of an override
    """

    def __init__(self):
        super().__init__("Call to abstract method is not allowed")
class IntervalIntersectionError(Exception):
    """
    Raised when two intervals intersect, but shouldn't

    :param msg: optional custom message; a generic one is used when omitted
    """

    def __init__(self, msg: str = None):
        if msg is None:
            msg = f"Intersection between intervals is not allowed"
        super().__init__(msg)
class IonTypeError(Exception):
    """
    Raised when an :class:`Ion` was expected but not provided

    :param other: the offending object (only its type is reported)
    """

    def __init__(self, other):
        super().__init__(f"Expected an Ion type not {type(other)}")
def boolean(val) -> bool:
    """
    Validate a boolean value, only bool allowed not 0 or 1

    :param val: the boolean value to validate
    :raises TypeError: if the value is not exactly True or False
    """
    # Bug fix: ``val not in (True, False)`` compared with ==, which let 0, 1 and
    # 1.0 pass (1 == True in Python). The documented contract is strict bools only.
    if not isinstance(val, bool):
        raise TypeError(f"{val} is not a boolean value")
    return val
def dir_is_writable(filepath: str) -> str:
    """
    Validate whether a directory is writable

    :param filepath: path to test writability (can be dir path or file path)
    :return: the unchanged input path if its directory is writable
    :raises Exception: re-raises whatever error the failed write produced (e.g. PermissionError)
    """
    # Probe by actually writing a throw-away file next to the target path; this is
    # more reliable than os.access() across filesystems and permission models
    temp_name = "__test_path_is_writable_123"
    temp_path = join(abspath(dirname(filepath)), temp_name)
    try:
        with open(temp_path, "w") as file:
            file.write("This path is writable")
    except Exception as e:
        # Surface the original OS error to the caller
        raise e
    finally:
        # Always remove the probe file, even if the write failed part-way
        if os.path.exists(temp_path):
            os.remove(temp_path)
    return filepath
def is_type(val, kind):
    """
    Validate that a value is an instance of the given type

    :param val: the value to check
    :param kind: expected type (or tuple of types)
    :return: the validated value
    :raises TypeError: if the value is not an instance of ``kind``
    """
    if not isinstance(val, kind):
        raise TypeError("Expected type {} not {}".format(kind, type(val)))
    return val
def file_exists(fpath: str) -> str:
    """
    Validate that a path exists and refers to a regular file

    :param fpath: the file path to validate
    :return: the validated path
    :raises FileNotFoundError: if nothing exists at the path
    :raises IOError: if the path exists but is not a file (e.g. a directory)
    """
    # Existence must be checked first so missing paths raise FileNotFoundError,
    # not the more generic IOError below
    if not os.path.exists(fpath):
        raise FileNotFoundError(f"The path {fpath} does not exist")
    if not os.path.isfile(fpath):
        raise IOError(f"The path {fpath} is not a file")
    return fpath
def color_as_rgb(val) -> Tuple[Number, Number, Number]:
    """
    Validate that an input is a normalized color RGB value, convert if possible

    Accepts either a length-3 sequence of components in [0, 1], or a 6-character
    hex string (an optional leading "#" is now tolerated).

    :param val: the color to validate
    :raises ValueError: for malformed sequences or hex strings
    :raises TypeError: for unsupported input types
    """
    if isinstance(val, (tuple, list)):
        # 0 <= i <= 1 already excludes negatives; the original extra any(i < 0)
        # clause was redundant
        if len(val) != 3 or not all(0 <= i <= 1 for i in val):
            raise ValueError("Invalid color")
        return val
    elif isinstance(val, str):
        text = val.lstrip("#")  # accept HTML-style "#RRGGBB" as well as bare "RRGGBB"
        if len(text) != 6:
            raise ValueError("Hex string colors must be 6 characters long")
        return helpers.hex2rgbF(text)
    else:
        raise TypeError("Invalid color type")
def interval(val: tuple):
    """
    Validate a numeric (min, max) interval and return it as a tuple

    :param val: the interval to validate
    :raises TypeError: if the value is not a tuple or list
    :raises ValueError: if the interval is malformed or not strictly increasing
    """
    if not isinstance(val, (tuple, list)):
        raise TypeError("Invalid interval type")
    if len(val) != 2:
        raise ValueError("Invalid interval input")
    lo, hi = val
    if hi <= lo:
        raise ValueError(f"Invalid interval ({lo} - {hi}), expected a sequential interval")
    return tuple(val)
def positive_interval(val: tuple) -> tuple:
    """
    Validate that an input is a positive, strictly increasing (min, max) interval

    :param val: the interval to validate
    :raises TypeError: if the value is not a tuple or list
    :raises ValueError: if the interval is malformed, inverted, or has negative bounds
    """
    if not isinstance(val, (tuple, list)):
        raise TypeError("Invalid interval type")
    if len(val) != 2:
        raise ValueError("Invalid interval input")
    if val[1] <= val[0] or any(i < 0 for i in val):
        raise ValueError(f"Invalid interval extents ({val[0]} - {val[1]}), expected positive interval")
    return tuple(val)
def positive_interval_2d(val: (tuple, tuple)) -> (tuple, tuple):
    """
    Validate that an input is a pair of positive intervals ((x1, x2), (y1, y2))

    :param val: the interval pair to validate
    :raises TypeError: if the value is not a tuple or list
    :raises ValueError: if the pair is malformed or either interval is invalid
    """
    if not isinstance(val, (tuple, list)):
        raise TypeError("Invalid range type")
    # Bug fix: the length must be checked before unpacking, otherwise wrong-length
    # input raised an unrelated unpacking ValueError instead of the message below
    if len(val) != 2:
        raise ValueError("Invalid interval input")
    for pair in val:
        # Each interval must be strictly increasing with non-negative bounds
        if pair[1] <= pair[0] or any(i < 0 for i in pair):
            raise ValueError("Invalid interval extents")
    return tuple(val)
def positive_number(val):
    """
    Validate that an input is a positive (>= 0) real number

    :param val: the number to validate
    :raises TypeError: if the value is not a real number
    :raises ValueError: if the value is negative (or NaN)
    """
    # Generalized from (int, float) to numbers.Real so numpy scalar values
    # (which register with the Real ABC) validate as well
    if not isinstance(val, Real):
        raise TypeError("Invalid type for number")
    # "not val >= 0" (instead of "val < 0") also rejects NaN
    elif not val >= 0:
        raise ValueError(f"Expected value >= 0, got {val}")
    return val
def positive_nonzero_number(val):
    """
    Validate that an input is a positive non-zero real number

    :param val: the number to validate
    :raises TypeError: if the value is not a real number
    :raises ValueError: if the value is not > 0 (or NaN)
    """
    # Generalized from (int, float) to numbers.Real so numpy scalar values validate too
    if not isinstance(val, Real):
        raise TypeError("Invalid type for number")
    # "not val > 0" (instead of "val <= 0") also rejects NaN
    elif not val > 0:
        raise ValueError(f"Expected value > 0, got {val}")
    return val
def positive_nonzero_int(val):
    """
    Validate that an input is a positive non-zero integral number

    :param val: the number to validate
    :return: the validated value, unchanged (an integral float like 2.0 is accepted)
    :raises TypeError: if the value is not integral
    :raises ValueError: if the value is not > 0
    """
    # val % 1 != 0 also raises TypeError for non-numeric inputs, and is True for NaN
    if val % 1 != 0:
        raise TypeError("Integral value required")
    elif not val > 0:
        raise ValueError(f"Expected value > 0, got {val}")
    return val
def number_in_interval(val, lower, upper, lower_open=True, upper_open=True):
    """
    Validate that a number is contained within an interval.

    :param val: the number to validate
    :param lower: the lower bound of the interval
    :param upper: the upper bound of the interval
    :param lower_open: whether or not the lower bound is open (endpoint excluded)
    :param upper_open: whether or not the upper bound is open (endpoint excluded)
    :raises ValueError: if the value lies outside the (possibly open) interval
    """
    # Out-of-bounds if strictly beyond a bound, or exactly on an open endpoint
    below = val < lower or (lower_open is True and val == lower)
    above = val > upper or (upper_open is True and val == upper)
    if below or above:
        left = "(" if lower_open else "["
        right = ")" if upper_open else "]"
        raise ValueError(f"The value {val} is not in the interval {left}{lower}, {upper}{right} ")
    return val
def multiplicity_any(val):
    """
    Validate a multiplicity selector: any integral value >= 1, or 'all'/'multiples'

    :param val: the multiplicity to validate
    :raises ValueError: for out-of-range ints or unrecognized strings
    :raises TypeError: for non-int, non-string inputs
    """
    msg = "Expected a multiplicity of int >= 1 or `all` or 'multiples'"
    if isinstance(val, str):
        if val not in ("all", "multiples"):
            raise ValueError(msg)
        return val
    if isinstance(val, int_types):
        if val < 1:
            raise ValueError(msg)
        return val
    raise TypeError(msg)
def multiplicity_any_singular_or_all_multiples(val):
    """
    Validate a multiplicity selector: any integral value >= 1, or 'multiples'

    :param val: the multiplicity to validate
    :raises ValueError: for out-of-range ints or strings other than 'multiples'
    :raises TypeError: for non-int, non-string inputs
    """
    msg = "Expected a multiplicity of int >= 1 or 'multiples'"
    if isinstance(val, str):
        if val != "multiples":
            raise ValueError(msg)
        return val
    if isinstance(val, int_types):
        if val < 1:
            raise ValueError(msg)
        return val
    raise TypeError(msg)
def multiplicity_singular_two_or_greater(val) -> int:
    """
    Validate a single multiplicity value: any integral value >= 2

    :param val: the multiplicity to validate
    :return: the value converted to a builtin int
    :raises TypeError: for non-integral inputs
    :raises ValueError: for values < 2
    """
    msg = "Expected a multiplicity value of int >= 2"
    if not isinstance(val, int_types):
        raise TypeError(msg)
    if val < 2:
        raise ValueError(msg)
    return int(val)
def multiplicity_singular_one_or_greater(val) -> int:
    """
    Validate a single multiplicity value: any integral value >= 1

    :param val: the multiplicity to validate
    :return: the value converted to a builtin int
    :raises TypeError: for non-integral inputs
    :raises ValueError: for values < 1
    """
    msg = "Expected a multiplicity value of int >= 1"
    if not isinstance(val, int_types):
        raise TypeError(msg)
    if val < 1:
        raise ValueError(msg)
    return int(val)
def multiplicity_non_singles(val) -> Union[int, str]:
    """
    Validate that a given multiplicity value is any int > 1 or "multiples"

    :param val: the multiplicity to validate
    :return: the value as a builtin int, or the string "multiples"
    :raises ValueError: for anything else (note: non-int/non-str inputs also raise ValueError)
    """
    msg = f'Expected a multiplicity value of int >= 2 or "multiples", got {val} of type {type(val)} instead'
    if isinstance(val, int_types):
        if val < 2:
            raise ValueError(msg)
        return int(val)
    if isinstance(val, str):
        if val != "multiples":
            raise ValueError(msg)
        return val
    raise ValueError(msg)
def choice(val: Any, possible_vals: Sequence[Any]):
    """
    Validate that a value is one of a set of possible values

    :param val: the option to validate
    :param possible_vals: the list of possible options
    :return: the validated value
    :raises ValueError: if the value is not among the options
    """
    if val in possible_vals:
        return val
    raise ValueError(f"'{val}' is not one of {possible_vals}")
def all_positive_nonzero(seq: Sequence[Number]):
    """
    Validate that all numbers of a sequence are positive and non-zero

    :param seq: the sequence to validate
    :return: the validated sequence (an empty sequence is valid)
    :raises ValueError: if any element is <= 0
    """
    for item in seq:
        if item <= 0:
            raise ValueError("Expected a sequence of all positive non-zero values")
    return seq
import sys
from typing import TYPE_CHECKING, Tuple, Any
from numbers import Number
import os
from pathlib import Path
import numpy as n
from numpy import ndarray
from apav.pyxutils import _minmax
from apav.qt import QAction, QIcon
if TYPE_CHECKING:
from apav.analysis.base import AnalysisBase
# Directory containing this module, used to resolve package-relative resources
_thispath = Path(os.path.abspath(os.path.dirname(__file__)))

# Frequently used package-relative directories
paths = {
    "toplevel": _thispath / ".." / "..",
    "testdata": _thispath / ".." / "tests",
    "icons": _thispath / ".." / "icons",
}

# Spelled-out symbol names mapped to their unicode characters (used for plot labels)
unicode_map = {
    "deg": "\u00B0",
    "degree": "\u00B0",
    "degrees": "\u00B0",
    "angstrom": "\u212B",
    "angstroms": "\u212B",
    "PHI": "\u03D5",
    "phi": "\u03C6",
    "alpha": "\u03B1",
    "BETA": "\u03D0",
    "beta": "\u03B2",
    "gamma": "\u03B3",
    "theta": "\u03B8",
    "mu": "\u03BC",
    "empty": "\u21B5",
}

# Length-unit names mapped to their abbreviations (fallback table for unit_string)
_unit_suffix = {"nm": "nm", "nanometer": "nm", "nanometers": "nm", "pm": "pm", "picometer": "pm", "picometers": "pm"}
def data_path(filename: str) -> Path:
    """
    Get file path for a data file in the test data directory

    :param filename: filename
    :return: path to file
    :raises FileNotFoundError: if no such file exists in the test data directory
    :raises AssertionError: if the path exists but is not a regular file
    """
    fpath = paths["testdata"] / filename
    # Bug fix: the error messages were corrupted f-strings with no placeholder;
    # restore the intended interpolation of the offending path/filename
    if not fpath.exists():
        raise FileNotFoundError(f"{fpath} does not exist")
    # Raise explicitly instead of ``assert`` so the check survives ``python -O``
    if not fpath.is_file():
        raise AssertionError(f"Filename {filename} is not a file")
    return fpath
def get_icon(name: str) -> QIcon:
    """
    Load a QIcon from the package's icon directory.

    :param name: icon file name, e.g. "copy.svg"
    :raises FileNotFoundError: if no icon with that name exists
    """
    path = paths["icons"] / name
    if not path.exists():
        raise FileNotFoundError(f"Icon {name} was not found")
    assert path.is_file(), f"Icon {name} is not a file"
    return QIcon(str(path))
def make_action(text: str, slot, icon: str = None, tooltip: str = None, checked=None):
    """
    Build a QAction connected to *slot*, with optional icon, tooltip, and checkable state.

    :param text: display text of the action
    :param slot: callable connected to the action's ``triggered`` signal
    :param icon: optional icon file name passed to :func:`get_icon`
    :param tooltip: optional tooltip text
    :param checked: if a real bool, the action is made checkable with this initial state
    """
    retn = QAction()
    retn.setText(text)
    if icon is not None:
        retn.setIcon(get_icon(icon))
    if tooltip is not None:
        retn.setToolTip(tooltip)
    # Only an actual bool (not just any truthy value) enables checkability
    if isinstance(checked, bool):
        retn.setCheckable(True)
        retn.setChecked(checked)
    retn.triggered.connect(slot)
    return retn
def intervals_intersect(minmax1: Tuple[Number, Number], minmax2: Tuple[Number, Number]) -> bool:
    """
    Determine if two 1-dimensional half-open intervals [first, last) overlap

    :param minmax1: first (min, max) interval
    :param minmax2: second (min, max) interval
    """
    start1, end1 = minmax1
    start2, end2 = minmax2
    # Overlap exists when either interval's start lies inside the other
    return start2 <= start1 < end2 or start1 <= start2 < end1
def native_dtype_byteorder() -> str:
    """
    Get the native byte-order as '<' (little-endian) or '>' (big-endian) for dtype operations
    """
    return "<" if sys.byteorder == "little" else ">"
def array2native_byteorder(array: ndarray) -> ndarray:
    """
    Get an array in the native byte-order; arrays already native are returned unchanged.

    :param array: input array
    :return: the original array if already native, otherwise a byte-swapped copy
        whose dtype has the machine's native byte order
    """
    sys_byteorder = "<" if sys.byteorder == "little" else ">"
    # Bug fix: numpy reports native dtypes with byteorder '=' (and '|' for
    # single-byte types); comparing those against '<'/'>' made the old code
    # byteswap native arrays, corrupting their values.
    if array.dtype.byteorder in ("=", "|", sys_byteorder):
        return array
    # byteswap the data and reinterpret with a native-order dtype so the logical
    # values are preserved. dtype.newbyteorder + view replaces
    # ndarray.newbyteorder, which was removed in NumPy 2.0.
    return array.byteswap().view(array.dtype.newbyteorder(sys_byteorder))
def minmax(ary: ndarray) -> Tuple[Any, Any]:
    """
    Fast function for finding the min and max values of an array

    Delegates to the compiled ``_minmax`` helper from ``apav.pyxutils``, which
    locates both extrema in a single pass over the flattened data.

    :param ary: numeric input array
    :return: (min, max) tuple
    """
    assert n.issubdtype(ary.dtype, n.number), "Minmax can only operate on numeric arrays"
    return tuple(_minmax(ary.ravel()))
def unique_vals(array: ndarray) -> ndarray:
    """
    Sorted unique values of an array; faster than numpy.unique for (u)int8 data

    :param array: input array
    """
    if array.dtype not in (n.int8, n.uint8):
        return n.unique(array)
    # For 8-bit integers a bincount over the (at most 256) possible values is
    # cheaper than a full sort-based unique
    counts = n.bincount(array.ravel())
    return n.argwhere(counts != 0).ravel()
def unit_string(unit: str, prefix_space: bool = False) -> str:
    """
    Make a unit string (e.g. the angstrom or degree symbol) for plot labels

    :param unit: unit name; looked up in the unicode symbol map first, then the
        plain-suffix table
    :param prefix_space: add a space before the unit
    :return: unit string
    """
    prefix = " " if prefix_space is True else ""
    try:
        suffix = unicode_map[unit]
    except KeyError:
        suffix = _unit_suffix[unit]
    return prefix + suffix
# Hex digits (both cases) and a lookup table mapping every 2-character hex pair to its int value
_NUMERALS = "0123456789abcdefABCDEF"
_HEXDEC = {v: int(v, 16) for v in (x + y for x in _NUMERALS for y in _NUMERALS)}
# Format-spec characters for lowercase/uppercase hex output
LOWERCASE, UPPERCASE = "x", "X"
def hex2rgbF(text: str) -> tuple:
    """
    Convert a hex/HTML color code ("RRGGBB" or "#RRGGBB") to fractional RGB

    :param text: 6-character hex color, optionally prefixed with '#'
    :return: (r, g, b) tuple with each component in [0, 1]
    :raises ValueError: if the string is not valid hexadecimal
    """
    text = text.replace("#", "")
    # Parse the byte pairs directly instead of going through the module-level
    # _HEXDEC table; malformed input now raises ValueError rather than KeyError
    rgb = (int(text[0:2], 16), int(text[2:4], 16), int(text[4:6], 16))
    return tuple(i / 255.0 for i in rgb)
class modifying:
    """
    Context manager for making changes to Analysis objects without unnecessary calculations. This may be
    useful when dealing with large data and multiple changes need to be made to the analysis, and you do not want to
    reuse the original analysis object. The analysis is automatically recalculated once the context manager exits.
    This is located here to avoid circular imports in the analysis-plotting namespaces.

    The below example code loads a large Roi, calculates a correlation histogram, then modifies 3 of the correlation
    histograms parameters. The correlation histogram is only computed 2 times, at instantiation and when the context
    manager exits.

    >>> from apav.analysis import CorrelationHistogram
    >>> from apav import Roi
    >>> large_roi = Roi.from_epos("path_to_large_roi.epos")
    >>> hist = CorrelationHistogram(large_roi, extents=((50, 100), (25, 75)), bin_width=0.01)
    >>>
    >>> with modifying(hist) as anl:
    >>>     anl.bin_width = 0.2
    >>>     anl.symmetric=True
    >>>     anl.multiplicity="multiples"
    """

    def __init__(self, analysis: "AnalysisBase"):
        # The analysis whose recomputation is deferred for the duration of the context
        self.analysis = analysis

    def __enter__(self):
        # Suppress recomputation while attributes are changed in bulk
        self.analysis._update_suppress = True
        return self.analysis

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Re-enable updates and recompute once with all changes applied
        self.analysis._update_suppress = False
        self.analysis._process()
from typing import TYPE_CHECKING
import numpy as n
from apav.qt import QDoubleSpinBox, pyqtSignal, QComboBox
if TYPE_CHECKING:
from apav import Roi
class QDecimalSpinBox(QDoubleSpinBox):
    """
    Double spin box that can distinguish "editing finished" from
    "editing finished with an actually different value".
    """

    # This signal only emits when editingFinished() produces a change in value
    editingFinishedAndChanged = pyqtSignal()

    def __init__(self, parent):
        super().__init__(parent)
        # Baseline value used to detect whether an edit changed anything
        self.last_val = self.value()
        self.editingFinished.connect(self.onEditingFinished)

    def setValue(self, new):
        # Programmatic changes update the baseline so they never trigger
        # editingFinishedAndChanged themselves
        self.last_val = new
        super().setValue(new)

    def onEditingFinished(self):
        # Float comparison: treat nearly-identical values as unchanged
        if n.isclose(self.last_val, self.value()):
            return
        else:
            self.last_val = self.value()
            self.editingFinishedAndChanged.emit()
class QMultiplicityComboBox(QComboBox):
    """
    Base combo box for selecting an ion multiplicity; subclasses populate the items.
    """

    def __init__(self, parent):
        super().__init__(parent)

    def formattedValue(self):
        """
        Get the multiplicity value in a format that APAV uses

        All = 'all'
        All multiples = 'multiples'
        Any integer is just converted to int
        Anything else raises an error
        """
        value = self.currentText()
        if value == "All":
            return "all"
        elif value == "All multiples":
            return "multiples"
        else:
            try:
                return int(value)
            except ValueError:
                raise ValueError("Invalid multiplicity value encountered in list")
class QMultiplesMultiplicityComboBox(QMultiplicityComboBox):
    """
    Multiplicity selector offering "All multiples" plus each multiple-hit
    multiplicity (> 1) present in the Roi. Requires multi-hit information.
    """

    def __init__(self, parent, roi: "Roi"):
        super().__init__(parent)
        # Raises if the Roi has no multiple-hit information
        roi.require_multihit_info()
        self.addItem("All multiples")
        for i in roi.multiplicities:
            # Single hits (multiplicity 1) are intentionally excluded
            if i > 1:
                self.addItem(str(i))
class QAnyMultiplicityComboBox(QMultiplicityComboBox):
    """
    Multiplicity selector offering "All", and — when the Roi has multiplicity
    information — "All multiples" plus each individual multiplicity.
    """

    def __init__(self, parent, roi: "Roi"):
        super().__init__(parent)
        self.addItem("All")
        # Only Rois with multiplicity information get the per-multiplicity entries
        if roi.has_multiplicity_info():
            self.addItem("All multiples")
            for i in roi.multiplicities:
                self.addItem(str(i))
from apav.qt import (
Qt,
pyqtSignal,
QWidget,
QTreeView,
QBrush,
QPen,
QVBoxLayout,
QAbstractItemView,
QStandardItemModel,
QStandardItem,
QColor,
QRect,
QModelIndex,
QStyledItemDelegate,
)
from apav.core.range import RangeCollection, Range
class QMassRangeWidget(QWidget):
    """
    A widget for navigating the ranges in RangeCollection

    Displays a tree grouped by ion, one child row per range showing its color
    swatch and (min, max) bounds. Double-clicking a range row emits
    ``sigRangeDoubleClicked`` with that :class:`Range`.
    """

    # Emitted with the Range whose row was double-clicked
    sigRangeDoubleClicked = pyqtSignal(Range)

    def __init__(self, parent, ranges: RangeCollection):
        """
        :param parent: parent widget
        :param ranges: the RangeCollection to display
        :raises TypeError: if ``ranges`` is not a RangeCollection
        """
        super().__init__(parent)
        if not isinstance(ranges, RangeCollection):
            raise TypeError(f"Expected a RangeCollection not {type(ranges)}")
        self.ranges = ranges

        self._layout = QVBoxLayout()
        self._layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self._layout)
        self._view = QTreeView()
        self._view.setEditTriggers(QAbstractItemView.NoEditTriggers)
        self._layout.addWidget(self._view)
        self._model = QStandardItemModel()
        self._view.setModel(self._model)
        # Column 0 draws the range color swatch via a custom delegate
        _color_delg = RangeColorDelegate()
        self._view.setItemDelegateForColumn(0, _color_delg)
        self._view.setSelectionMode(QTreeView.NoSelection)
        self._model.setHorizontalHeaderLabels(["Ion", "Min", "Max"])

        # One top-level row per unique ion, with a child row per matching range
        for ion in self.ranges.ions():
            ion_item = QStandardItem(ion.hill_formula)
            ion_item.setEditable(False)
            for rngg in self.ranges:
                if ion.hill_formula == rngg.hill_formula:
                    # Each item stores the Range itself under UserRole for retrieval on click
                    rng_min = QStandardItem(str(rngg.lower))
                    rng_min.setData(rngg, Qt.UserRole)
                    rng_min.setEditable(False)
                    rng_max = QStandardItem(str(rngg.upper))
                    rng_max.setData(rngg, Qt.UserRole)
                    rng_max.setEditable(False)
                    rng_col = QStandardItem()
                    rng_col.setEditable(False)
                    rng_col.setData(rngg, Qt.UserRole)
                    ion_item.appendRow([rng_col, rng_min, rng_max])
            self._model.invisibleRootItem().insertRow(0, [ion_item, None, None])

        self._view.expandAll()
        self._view.setColumnWidth(0, 85)
        self._view.setColumnWidth(1, 75)
        self._view.setColumnWidth(2, 75)
        self._view.doubleClicked.connect(self._onDoubleClicked)

    def _onDoubleClicked(self, index: QModelIndex):
        # Only emit for valid indices whose item actually carries a Range payload
        rng_item = index.data(Qt.UserRole)
        if not index.isValid() or not isinstance(rng_item, Range):
            return
        self.sigRangeDoubleClicked.emit(rng_item)
class RangeColorDelegate(QStyledItemDelegate):
    """
    Delegate for displaying the color of a range

    Paints a square swatch filled with the Range's RGB color; items without a
    Range payload fall back to the default delegate painting.
    """

    def paint(self, painter, option, index):
        if not index.isValid():
            super().paint(painter, option, index)
            return
        rng = index.data(Qt.UserRole)
        if not isinstance(rng, Range):
            super().paint(painter, option, index)
            return
        # Setup painter
        pen = QPen()
        pen.setWidth(0)
        painter.setPen(pen)
        color = rng.color
        # Range colors are fractional RGB (0-1), matching QColor.fromRgbF
        brush = QBrush(QColor.fromRgbF(*color))
        painter.setBrush(brush)

        # Draw a square swatch (side == row height) centered in the cell
        height = option.rect.height()
        center = option.rect.center()
        rect = QRect(height, height, height, height)
        rect.moveCenter(center)
        painter.drawRect(rect)
        super().paint(painter, option, index)
from apav.qt import QMainWindow, QWidget, QAction, QSize, QFileDialog, QPixmap, QApplication
from apav.utils.helpers import make_action
from apav.utils import validate
class BaseVisualization(QMainWindow):
    """
    Common base window for all visualization classes: provides the toolbar with
    save/copy/export actions and a central widget for subclasses to populate.
    """

    def __init__(self, ref_data):
        """
        Common model for all visualization classes

        :param ref_data: Miscellaneous data to be referred to by the visualization class, typically a roi or analysis instance
        """
        super().__init__()
        self.ref_data = ref_data

        _widget = QWidget(self)
        self.setCentralWidget(_widget)

        # Subclasses append QActions/QWidgets (or None for a separator) to this
        # list in setupToolBarActions(); makeToolBar() then materializes them
        self.toolbar_actions = []
        self.toolbar = self.addToolBar("Tools")
        self.toolbar.setMovable(False)
        self.toolbar.setIconSize(QSize(16, 16))
        self.setupToolBarActions()

    def makeToolBar(self):
        # Populate the toolbar from the accumulated action list
        for i in self.toolbar_actions:
            if isinstance(i, QAction):
                self.toolbar.addAction(i)
            elif isinstance(i, QWidget):
                self.toolbar.addWidget(i)
            elif i is None:
                self.toolbar.addSeparator()

    def setupToolBarActions(self):
        # Default actions shared by every visualization; subclasses extend this
        tb = self.toolbar_actions
        tb.append(make_action("Save as image", self.exportImage, icon="saveas.svg"))
        tb.append(make_action("Copy as image", self.copyImage, icon="copy.svg"))
        tb.append(make_action("Save as raw data", self.exportRawData, icon="export.svg"))
        self.toolbar.addSeparator()

    def exportImage(self):
        # Render the central widget into a pixmap and save it as a PNG
        path, filter = QFileDialog.getSaveFileName(self, "Save as png", filter="*.png")
        if not path:
            return
        widg = self.centralWidget()
        pix = QPixmap(widg.size())
        widg.render(pix)
        pix.save(path, "png", 100)

    def copyImage(self):
        # Render the central widget into a pixmap and place it on the clipboard
        widg = self.centralWidget()
        pix = QPixmap(widg.size())
        widg.render(pix)
        QApplication.clipboard().setPixmap(pix)

    def exportRawData(self):
        # Subclasses must implement raw-data export for their specific data
        raise validate.AbstractMethodError()
class PyQtGraphVisualization(BaseVisualization):
    """
    Base class for visualizations rendered with pyqtgraph.
    """

    def __init__(self, ref_data=None):
        """
        :param ref_data: data referred to by the visualization, typically a roi or analysis instance
        """
        # Bug fix: BaseVisualization.__init__ requires a ref_data argument, so the
        # previous no-argument super().__init__() call always raised TypeError
        super().__init__(ref_data)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from apav.analysis.massspectrum import LocalBkgCorrectedMassSpectrum, RangedMassSpectrum, NoiseCorrectedMassSpectrum
import os
import numpy as n
import pyqtgraph as pg
from apav.qt import (
QWidget,
QVBoxLayout,
QHBoxLayout,
QLabel,
QDoubleSpinBox,
Qt,
QCheckBox,
QFileDialog,
QDockWidget,
QSizePolicy,
QLineEdit,
QBrush,
QColor,
QFormLayout,
QGroupBox,
)
from apav.visualization.base import BaseVisualization
from apav.qtwidgets.massrangewidget import QMassRangeWidget
from apav.core import histogram
from apav.utils.helpers import modifying
import apav.qtwidgets.controls as controls
# Global pyqtgraph style for all plots: black foreground on white background, antialiased lines
pg.setConfigOption("foreground", "k")
pg.setConfigOption("background", "w")
pg.setConfigOption("antialias", True)
class Plotter1D(pg.PlotWidget):
    """
    Base 1D plotter settings

    Configures a pyqtgraph plot with labeled left/bottom axes and value-less
    right/top axes to frame the plot area.
    """

    def __init__(self, parent: QWidget, xlabel: str, ylabel: str, xunits: str = "", yunits: str = ""):
        """
        :param parent: parent widget
        :param xlabel: bottom-axis label
        :param ylabel: left-axis label
        :param xunits: optional bottom-axis units
        :param yunits: optional left-axis units
        """
        super().__init__(parent)
        self.getPlotItem().showAxis("right")
        self.getPlotItem().showAxis("top")
        self.setLabel("left", ylabel, units=yunits)
        self.setLabel("bottom", xlabel, units=xunits)
        # Right/top axes are shown as a frame only, without tick values
        right = self.getPlotItem().getAxis("right")
        top = self.getPlotItem().getAxis("top")
        self.getPlotItem().showAxis("top")
        right.setStyle(showValues=False)
        top.setStyle(showValues=False)
class MassSpectrumPlot(BaseVisualization):
    """
    Interactive mass spectrum (mass histogram) window for a Roi.

    Toolbar controls adjust bin width, mass bounds, multiplicity, and
    normalization; the histogram is recomputed on every change.
    """

    def __init__(self, roi):
        """
        :param roi: the Roi whose mass histogram is displayed (becomes ``ref_data``)
        """
        super().__init__(roi)
        self.resize(900, 600)
        self.setWindowTitle("Mass histogram - {}".format(os.path.basename(self.ref_data.filepath)))
        self.widget = QWidget(self)
        self.toplayout = QVBoxLayout()
        self.widget.setLayout(self.toplayout)
        self.setCentralWidget(self.widget)
        self.status = self.statusBar()

        self.plot_layout = QHBoxLayout()
        self.plot_layout.setContentsMargins(0, 0, 0, 0)
        self.plot_layout.setSpacing(0)
        self.toplayout.addLayout(self.plot_layout)
        self.plotter = Plotter1D(self, "Mass/charge ratio", "Counts", xunits="Da")
        self.plot_layout.addWidget(self.plotter)
        # Rate-limited mouse tracking for the status-bar readout
        self.prox = pg.SignalProxy(
            self.plotter.getPlotItem().scene().sigMouseMoved, rateLimit=30, slot=self.slotOnMouseMoved
        )

        # (x centers, counts) of the last computed histogram
        self.data = None
        self.makeToolBar()
        self._recalculateHistogram()

    def setupToolBarActions(self):
        super().setupToolBarActions()
        tb = self.toolbar_actions

        # Bin value
        tb.append(QLabel("Bin width: "))
        self.bin_width = QDoubleSpinBox(self)
        self.bin_width.setDecimals(3)
        self.bin_width.setMinimum(0.001)
        self.bin_width.setMaximum(100)
        self.bin_width.setSingleStep(0.01)
        self.bin_width.setValue(0.05)
        self.bin_width.setSuffix(" Da")
        self.bin_width.editingFinished.connect(self._recalculateHistogram)
        tb.append(self.bin_width)
        tb.append(None)

        # lower value
        tb.append(QLabel("Lower: "))
        self.lower = QDoubleSpinBox(self)
        self.lower.setMinimum(0)
        self.lower.setMaximum(10000)
        self.lower.setValue(0)
        self.lower.setSuffix(" Da")
        # self.lower.editingFinished.connect(self._recalculateHistogram)
        self.lower.editingFinished.connect(self._recalculateHistogram)
        tb.append(self.lower)
        tb.append(None)

        # upper value
        tb.append(QLabel("Upper: "))
        self.upper = QDoubleSpinBox(self)
        self.upper.setMinimum(0)
        self.upper.setMaximum(10000)
        self.upper.setValue(200)
        self.upper.setSuffix(" Da")
        self.upper.editingFinished.connect(self._recalculateHistogram)
        tb.append(self.upper)
        tb.append(None)

        # Multiplicity
        tb.append(QLabel("Multiplicity: "))
        self.multiplicity = controls.QAnyMultiplicityComboBox(self, self.ref_data)
        self.multiplicity.currentIndexChanged.connect(self._recalculateHistogram)
        tb.append(self.multiplicity)
        tb.append(None)

        # Normalize
        self.norm = QCheckBox(self)
        self.norm.setText("Normalize:")
        self.norm.setLayoutDirection(Qt.RightToLeft)
        self.norm.setChecked(False)
        self.norm.stateChanged.connect(self._recalculateHistogram)
        tb.append(self.norm)

    def _recalculateHistogram(self):
        # Recompute the Roi's mass histogram from the current control values and redraw
        line = pg.mkPen(color=(0, 0, 0), width=1)
        bin_width = self.bin_width.value()
        mult = self.multiplicity.formattedValue()
        low = self.lower.value()
        up = self.upper.value()
        # An inverted/empty interval cannot be histogrammed; show an empty plot
        if low >= up:
            self.plotter.getPlotItem().clear()
            return
        self.data = self.ref_data.mass_histogram(
            bin_width=bin_width,
            multiplicity=mult,
            norm=self.norm.isChecked(),
            lower=self.lower.value(),
            upper=self.upper.value(),
        )
        # x = self.data[0] - bin_width/2
        # x = n.hstack((x, x[-1] + bin_width))
        self.plotter.getPlotItem().plot(self.data[0], self.data[1], stepMode="left", clear=True, pen=line)

    def slotOnMouseMoved(self, event):
        # Report the bin under the cursor (x position and its count) in the status bar
        pos = self.plotter.getPlotItem().getViewBox().mapSceneToView(event[0])
        x, y = pos.x(), pos.y()
        idx = int((x - self.lower.value()) / self.bin_width.value())
        try:
            y = self.data[1][idx]
        except IndexError:
            # Cursor is beyond the histogrammed region
            y = 0
        finally:
            if idx < 0:
                y = 0
        self.statusBar().showMessage("x = {}, y = {:d}".format(round(x, 4), int(y)))

    def exportRawData(self):
        # Save the current histogram as two-column (mass, counts) CSV
        path, filter = QFileDialog.getSaveFileName(self, "Export to raw data", "~/", filter="*.csv")
        if not path:
            return
        dat = self.data[0][None].T
        dat = n.hstack((dat, self.data[1][None].T))
        n.savetxt(path, dat, delimiter=",")
class MassSpectrumPlotRanged(BaseVisualization):
def __init__(self, ranged_mass_spec: "RangedMassSpectrum"):
self.rmass = ranged_mass_spec
super().__init__(ranged_mass_spec.roi)
self.resize(1200, 600)
self.setWindowTitle("Ranged mass histogram - {}".format(os.path.basename(self.ref_data.filepath)))
self.widget = QWidget(self)
self.toplayout = QVBoxLayout()
self.widget.setLayout(self.toplayout)
self.setCentralWidget(self.widget)
self.status = self.statusBar()
self.plot_layout = QHBoxLayout()
self.plot_layout.setContentsMargins(0, 0, 0, 0)
self.plot_layout.setSpacing(0)
self.toplayout.addLayout(self.plot_layout)
self.plotter = Plotter1D(self, "Mass/charge ratio", "Counts", xunits="Da")
self.plot_layout.addWidget(self.plotter)
self.prox = pg.SignalProxy(
self.plotter.getPlotItem().scene().sigMouseMoved, rateLimit=30, slot=self.slotOnMouseMoved
)
self.makeToolBar()
self.data = self.rmass.histogram
# Draw colored ranges
for rng in self.rmass.ranges:
idx = n.argwhere((self.data[0] >= rng.lower) & (self.data[0] <= rng.upper))[:, 0]
rngx = histogram.centers2edges(self.data[0][idx], self.rmass.bin_width)
fill = QBrush(QColor.fromRgbF(*rng.color))
line = pg.mkPen(None)
self.plotter.getPlotItem().plot(
rngx, self.data[1][idx], stepMode="center", brush=fill, fillLevel=0, pen=line
)
# Draw line spectrum
self.centers = histogram.centers2edges(self.data[0], self.rmass.bin_width)
line = pg.mkPen(color=(0, 0, 0), width=1)
self.mass_spectrum_item = self.plotter.getPlotItem().plot(
self.centers, self.data[1], stepMode="center", pen=line
)
# Range viewer
range_dock = QDockWidget("Mass ranges", self)
range_dock.setContentsMargins(0, 0, 0, 0)
range_dock.setFeatures(QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetMovable)
range_widget = QMassRangeWidget(self, self.rmass.ranges)
range_widget.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
range_dock.setWidget(range_widget)
self.addDockWidget(Qt.RightDockWidgetArea, range_dock)
range_widget.sigRangeDoubleClicked.connect(self.onRangeSelected)
def onRangeSelected(self, rng):
xmin = rng.lower
xmax = rng.upper
xstride = xmax - xmin
idx = n.argwhere((self.data[0] >= xmin) & (self.data[0] <= xmax))
ymax = self.data[1][idx].upper()
ymin = self.data[1][idx].lower()
ystride = ymax - ymin
# Pad the x, y extents
xmin -= 0.1 * xstride
xmax += 0.1 * xstride
ymin -= 0.1 * ystride
ymax += 0.1 * ystride
# Don't let ymin be zero in case the plot is int log scale
if ymin < 1:
ymin = 1
self.plotter.setXRange(xmin, xmax)
self.plotter.setYRange(ymin, ymax)
def setupToolBarActions(self):
super().setupToolBarActions()
tb = self.toolbar_actions
# Cutoff value
tb.append(QLabel("Bin width: "))
self.bin_width = QLineEdit(self)
self.bin_width.setText(str(self.rmass.bin_width) + " Da")
self.bin_width.setReadOnly(True)
self.bin_width.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
tb.append(self.bin_width)
tb.append(None)
# upper value
tb.append(QLabel("Upper: "))
self.upper = QLineEdit(self)
self.upper.setText(str(self.rmass.upper) + " Da")
self.upper.setReadOnly(True)
self.upper.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
tb.append(self.upper)
tb.append(None)
# Multiplicity
tb.append(QLabel("Multiplicity: "))
self.multiplicity = QLineEdit(self)
mult = self.rmass.multiplicity
if mult == "all":
text = "All"
elif mult == "multiples":
text = "All multiples"
elif isinstance(mult, int):
text = str(mult)
else:
raise ValueError("Unknown multiplicity value")
self.multiplicity.setText(text)
self.multiplicity.setReadOnly(True)
self.multiplicity.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
tb.append(self.multiplicity)
tb.append(None)
def slotOnMouseMoved(self, event):
pos = self.plotter.getPlotItem().getViewBox().mapSceneToView(event[0])
x, y = pos.x(), pos.y()
idx = int(x / self.rmass.bin_width)
try:
counts = self.data[1][idx]
except IndexError:
counts = 0
finally:
if idx < 0:
counts = 0
comp = ""
if x > 0:
for rng in self.rmass.ranges:
if x in rng:
if 0 <= y <= counts:
comp = rng.hill_formula
txt = "x = {}, y = {}".format(round(x, 2), round(counts, 2))
if comp != "":
txt += f" - {comp}"
self.statusBar().showMessage(txt)
def exportRawData(self):
path, filter = QFileDialog.getSaveFileName(self, "Export to raw data", "~/", filter="*.csv")
if not path:
return
dat = self.data[0][None].T
dat = n.hstack((dat, self.data[1][None].T))
n.savetxt(path, dat, delimiter=",")
class MassSpectrumPlotNoiseCorrected(MassSpectrumPlotRanged):
def __init__(self, noise_corr_mass):
super().__init__(noise_corr_mass)
self.nmass = noise_corr_mass
self.setWindowTitle("Noise corrected mass histogram - {}".format(os.path.basename(self.ref_data.filepath)))
line = pg.mkPen(color=(255, 0, 255), width=2)
bkg_min = self.nmass.noise_background.lower
x = self.nmass.noise_fit_data[0]
idx = n.argmin(n.abs(x - bkg_min))
self.noise_fit_item = self.plotter.getPlotItem().plot(
self.nmass.noise_fit_data[0][idx:], self.nmass.noise_fit_data[1][idx:], pen=line
)
self.legend = pg.LegendItem(offset=(-20, 10))
self.legend.setParentItem(self.plotter.getPlotItem())
self.legend.addItem(self.mass_spectrum_item, "Uncorrected mass spectrum")
self.legend.addItem(self.noise_fit_item, f"Noise background")
class MassSpectrumPlotLocalBkgCorrected(MassSpectrumPlotNoiseCorrected):
def __init__(self, local_bkg_mass: "LocalBkgCorrectedMassSpectrum"):
super().__init__(local_bkg_mass)
self.lmass = local_bkg_mass
self.setWindowTitle(
"Local background corrected mass histogram - {}".format(os.path.basename(self.ref_data.filepath))
)
fit_pen = pg.mkPen("g", width=3)
inc_pen = pg.mkPen("r", width=2, style=Qt.DotLine)
corr_pen = pg.mkPen("r", width=3)
bkg_pen = pg.mkPen(0.0, width=4)
def correct_edges(ary: n.ndarray):
"""
The fits are inherently evaluated on with end points at the center of the bins
we need the edges in order to visualize on the histogram
:param ary: array histogram centers
"""
dx = (ary[1] - ary[0]) / 2
return n.concatenate([[ary[0] - dx], ary, [ary[-1] + dx]])
# We only want to add items to legend one time, keep track if it has been added
legend_fit_added = False
legend_bkg_added = False
for bkg in self.lmass.background_collection:
# Plot line for fit interval
lower_fit_idx = round(min(i[0] for i in bkg.fit_intervals) / self.lmass.bin_width)
upper_fit_idx = round(max(i[1] for i in bkg.fit_intervals) / self.lmass.bin_width)
x_fit = correct_edges(self.lmass.histogram[0][lower_fit_idx:upper_fit_idx])
sig_y_fit = bkg.eval(x_fit)
noise_y_fit = self.lmass.noise_background.eval(x_fit)
y_fit = sig_y_fit + noise_y_fit
self.plotter.getPlotItem().plot(x_fit, y_fit, pen=bkg_pen)
item = self.plotter.getPlotItem().plot(x_fit, y_fit, pen=fit_pen)
if legend_fit_added is False:
self.legend.addItem(item, f"Local background fit")
legend_fit_added = True
bkg_max_range_val = None
for rng in self.lmass.ranges:
if not bkg.contains_range(rng):
continue
lower_inc_idx = round(rng.lower / self.lmass.bin_width)
upper_inc_idx = round(rng.upper / self.lmass.bin_width)
x_inc = correct_edges(self.lmass.histogram[0][lower_inc_idx:upper_inc_idx])
noise_y_inc = self.lmass.noise_background.eval(x_inc)
sig_y_inc = bkg.eval(x_inc)
y_inc = sig_y_inc + noise_y_inc
self.plotter.getPlotItem().plot(x_inc, y_inc, pen=bkg_pen)
item = self.plotter.getPlotItem().plot(x_inc, y_inc, pen=corr_pen)
if legend_bkg_added is False:
self.legend.addItem(item, f"Local background correction")
legend_bkg_added = True
bkg_max_range_val = int(rng.upper / self.lmass.bin_width)
# Plot line from fit interval to max included Range
if bkg_max_range_val is not None:
x_corr = correct_edges(self.lmass.histogram[0][upper_fit_idx:bkg_max_range_val])
sig_y_corr = bkg.eval(x_corr)
noise_y_corr = self.lmass.noise_background.eval(x_corr)
y_corr = sig_y_corr + noise_y_corr
self.plotter.getPlotItem().plot(x_corr, y_corr, pen=inc_pen)
class CorrelationHistogramPlot(BaseVisualization):
def __init__(self, corr_hist):
super().__init__(corr_hist)
self.resize(700, 500)
self.setWindowTitle("Correlation Histogram - {}".format(os.path.basename(corr_hist.roi.filepath)))
self.setMouseTracking(True)
self.status = self.statusBar()
self.widget = QWidget(self)
self.toplayout = QVBoxLayout()
self.widget.setLayout(self.toplayout)
self.setCentralWidget(self.widget)
self.dock = QDockWidget("Options", self)
self.dock.setMinimumWidth(250)
self.dock.setFeatures(QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetFloatable)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dock)
self.makeDock()
# Setup data
self.img = None
self.colorbar = None
self.plotter = None
self.data = None
self.proxy = None
self.plot_layout = QHBoxLayout()
self.plot_layout.setContentsMargins(0, 0, 0, 0)
self.plot_layout.setSpacing(0)
self.toplayout.addLayout(self.plot_layout)
self._recalculateHistogram(self.ref_data)
self.makeToolBar()
def slotOnMouseMoved(self, event):
pos = self.plotter.getPlotItem().getViewBox().mapSceneToView(event[0])
bin = self.ref_data.bin_width
xrng, yrng = self.ref_data.extents
ysize = self.data.shape[1]
x, y = pos.x(), pos.y()
idx = int((x - xrng[0]) / bin)
idy = int((y - yrng[0]) / bin)
try:
z = self.data[idx, idy]
except:
z = 0
self.statusBar().showMessage("x = {}, y = {}, z = {}".format(round(x, 2), round(y, 2), int(z)))
def makeDock(self):
dockwidget = QWidget(self)
layout = QVBoxLayout(self.dock)
dockwidget.setLayout(layout)
self.dock.setWidget(dockwidget)
extents = self.ref_data.extents
bin_group = QGroupBox(self)
bin_group.setTitle("Bins")
bin_layout = QFormLayout(self)
bin_group.setLayout(bin_layout)
# Bin value
self.binwidth = controls.QDecimalSpinBox(self)
self.binwidth.setMinimum(0.001)
self.binwidth.setMaximum(10)
self.binwidth.setSingleStep(0.01)
self.binwidth.setDecimals(3)
self.binwidth.setValue(self.ref_data.bin_width)
self.binwidth.setSuffix(" Da")
self.binwidth.editingFinishedAndChanged.connect(self._recalculateHistogram)
bin_layout.addRow(QLabel("Width: "), self.binwidth)
layout.addWidget(bin_group)
ext_group = QGroupBox(self)
ext_group.setTitle("Histogram boundaries")
ext_layout = QFormLayout(self)
ext_group.setLayout(ext_layout)
# Ion 1 lower value
self.lower1 = controls.QDecimalSpinBox(self)
self.lower1.setMinimum(0)
self.lower1.setMaximum(1000)
self.lower1.setValue(extents[0][0])
self.lower1.setSuffix(" Da")
self.lower1.editingFinishedAndChanged.connect(self._recalculateHistogram)
ext_layout.addRow(QLabel("Ion1 lower:"), self.lower1)
# Ion 1 upper value
self.upper1 = controls.QDecimalSpinBox(self)
self.upper1.setMinimum(1)
self.upper1.setMaximum(1000)
self.upper1.setValue(extents[0][1])
self.upper1.setSuffix(" Da")
self.upper1.editingFinishedAndChanged.connect(self._recalculateHistogram)
ext_layout.addRow(QLabel("Ion1 upper:"), self.upper1)
# Ion 2 lower value
self.lower2 = controls.QDecimalSpinBox(self)
self.lower2.setMinimum(0)
self.lower2.setMaximum(1000)
self.lower2.setValue(extents[1][0])
self.lower2.setSuffix(" Da")
self.lower2.editingFinishedAndChanged.connect(self._recalculateHistogram)
ext_layout.addRow(QLabel("Ion2 lower:"), self.lower2)
# Ion 2 upper value
self.upper2 = controls.QDecimalSpinBox(self)
self.upper2.setMinimum(1)
self.upper2.setMaximum(1000)
self.upper2.setValue(extents[1][1])
self.upper2.setSuffix(" Da")
self.upper2.editingFinishedAndChanged.connect(self._recalculateHistogram)
ext_layout.addRow(QLabel("Ion2 upper:"), self.upper2)
layout.addWidget(ext_group)
# Multiplicity
mult_group = QGroupBox(self)
mult_group.setTitle("Multiple events")
mult_layout = QFormLayout(self)
mult_group.setLayout(mult_layout)
self.multiplicity = controls.QMultiplesMultiplicityComboBox(self, self.ref_data.roi)
idx = self.multiplicity.findText(str(self.ref_data.multiplicity))
self.multiplicity.setCurrentIndex(idx)
self.multiplicity.currentIndexChanged.connect(self._recalculateHistogram)
mult_layout.addRow("Multiplicity:", self.multiplicity)
layout.addWidget(mult_group)
view_group = QGroupBox(self)
view_group.setTitle("Appearance")
view_layout = QFormLayout(self)
view_group.setLayout(view_layout)
# log
self.log_edit = QCheckBox(self)
self.log_edit.setLayoutDirection(Qt.RightToLeft)
self.log_edit.setChecked(False)
self.log_edit.stateChanged.connect(self._recalculateHistogram)
view_layout.addRow("Log:", self.log_edit)
# Symmetric
self.symmetric = QCheckBox(self)
self.symmetric.setLayoutDirection(Qt.RightToLeft)
self.symmetric.setChecked(self.ref_data.symmetric)
self.symmetric.stateChanged.connect(self._recalculateHistogram)
view_layout.addRow("Symmetric:", self.symmetric)
# flip
self.flip = QCheckBox(self)
self.flip.setLayoutDirection(Qt.RightToLeft)
self.flip.setChecked(self.ref_data.symmetric)
self.flip.stateChanged.connect(self._recalculateHistogram)
view_layout.addRow("Flipped:", self.flip)
layout.addWidget(view_group)
layout.addStretch()
def exportRawData(self):
xrng, yrng = self.ref_data.extents
head = f"ion1({xrng[0]}-{xrng[1]}) ion2({yrng[0]}-{yrng[1]}) bin({self.ref_data.bin_width})"
path, filter = QFileDialog.getSaveFileName(self, "Export to raw data", f"~/{head}.csv", filter="*.csv")
if not path:
return
self.ref_data.export(path)
def _recalculateHistogram(self, corr_hist=None):
"""
Recalculate the histogram. This should not modify the original CorrelationHistogram as it is copied in
the constructor
:param corr_hist: an existing CorrelationHistogram, used for initial plot
"""
if self.lower1.value() >= self.upper1.value():
return
elif self.lower2.value() >= self.upper2.value():
return
if corr_hist is not None:
with modifying(self.ref_data) as data_mod:
multiplicity = self.multiplicity.formattedValue()
data_mod.multiplicity = multiplicity
data_mod.extents = (
(self.lower1.value(), self.upper1.value()),
(self.lower2.value(), self.upper2.value()),
)
data_mod.flip = self.flip.isChecked()
data_mod.symmetric = self.symmetric.isChecked()
data_mod.bin_width = self.binwidth.value()
if self.colorbar:
self.plot_layout.removeWidget(self.colorbar)
if self.plotter:
self.plot_layout.removeWidget(self.plotter)
self.plotter = pg.PlotWidget(self)
self.plotter.getPlotItem().showAxis("right")
self.plotter.getPlotItem().showAxis("top")
self.data = self.ref_data.histogram.copy()
xrng, yrng = self.ref_data.extents
non_zero = n.where(self.data > 0)
binw = self.ref_data.bin_width
plot_data = self.data
if self.log_edit.isChecked():
data_log = self.data.copy()
data_log[non_zero] = n.log(self.data[non_zero])
plot_data = data_log
if self.ref_data.flip is False:
xlabel = "Ion 1 mass/charge"
ylabel = "Ion 2 mass/charge"
else:
xlabel = "Ion 2 mass/charge"
ylabel = "Ion 1 mass/charge"
self.plotter.setLabel("left", ylabel, units="Da")
self.plotter.setLabel("bottom", xlabel, units="Da")
right = self.plotter.getPlotItem().getAxis("right")
top = self.plotter.getPlotItem().getAxis("top")
self.plotter.getPlotItem().showAxis("top")
right.setStyle(showValues=False)
top.setStyle(showValues=False)
self.plotter.plotItem.getViewBox().setAspectLocked(True)
self.img = pg.ImageItem()
self.plotter.addItem(self.img)
self.img.setImage(plot_data)
tr = self.img.transform()
tr.translate(xrng[0], yrng[0])
tr.scale(binw, binw)
self.img.setTransform(tr)
if self.colorbar is None:
self.colorbar = pg.HistogramLUTWidget(self, self.img)
else:
self.colorbar.setImageItem(self.img)
self.plot_layout.insertWidget(0, self.plotter)
self.plot_layout.addWidget(self.colorbar)
self.prox = pg.SignalProxy(
self.plotter.getPlotItem().scene().sigMouseMoved, rateLimit=30, slot=self.slotOnMouseMoved
)
# class DetectorDeadZonePlot(VTKVisualization):
# def __init__(self, dead_zone):
# super().__init__()
# self.dead_zone = dead_zone
#
# # roi = self.dead_zone.roi
# # idx = self.dead_zone.idx
# # detx = roi.misc["det_x"][idx]
# # dety = roi.misc["det_y"][idx]
# # dx = detx[1::2] - detx[::2]
# # dy = dety[1::2] - dety[::2]
# # dt = dead_zone.tof_diff*0.1
# dx = dead_zone.dx
# dy = dead_zone.dy
# dt = dead_zone.tof_diff
# poly = point_cloud(n.array([dx, dy, dt]).T)
# points = point_dataset(poly, 4)
# self.viewport.register_actors(points)
# # points.SetScale(1, 1, 0.25)
#
# cube = vtkCubeAxesActor()
# cube.SetXAxisRange(dx.lower(), dx.upper())
# cube.SetYAxisRange(dy.lower(), dy.upper())
# cube.SetZAxisRange(dt.lower(), dt.upper())
# cube.SetBounds(dx.lower(), dx.upper(), dy.lower(), dy.upper(), dt.lower(), dt.upper())
#
# cube.GetXAxesLinesProperty().SetColor(0, 0, 0)
# cube.GetXAxesGridlinesProperty().SetColor(0, 0, 0)
# cube.GetYAxesLinesProperty().SetColor(0, 0, 0)
# cube.GetZAxesLinesProperty().SetColor(0, 0, 0)
# cube.GetTitleTextProperty(0).SetColor(0,0,0)
# cube.GetLabelTextProperty(0).SetColor(0,0,0)
# cube.GetTitleTextProperty(1).SetColor(0,0,0)
# cube.GetLabelTextProperty(1).SetColor(0,0,0)
# cube.GetTitleTextProperty(2).SetColor(0,0,0)
# cube.GetLabelTextProperty(2).SetColor(0,0,0)
#
# self.viewport.register_actors(cube)
#
#
# # view = vtkContextView()
#
#
# # chart = vtkChartXYZ()
# # view.GetScene().AddItem(chart)
# # plot = vtkPlotPoints3D()
# # table = vtkTable()
# # xary, yary, tofary = vtkFloatArray(), vtkFloatArray(), vtkFloatArray()
# # xary.SetName("Delta x (mm)")
# # yary.SetName("Delta y (mm)")
# # tofary.SetName("Delta TOF (ns)")
# # table.AddColumn(xary)
# # table.AddColumn(yary)
# # table.AddColumn(tofary)
# # table.SetNumberOfRows(dx.size)
# # for i, item in enumerate(zip(dx, dy, dead_zone.tof_diff)):
# # x, y, z = item
# # table.SetValue(i, 0, vtkVariant(float(x)))
# # table.SetValue(i, 1, vtkVariant(float(y)))
# # table.SetValue(i, 2, vtkVariant(float(z)))
# #
# # plot.SetInputData(table)
# # chart.AddPlot(plot)
# # self.viewport._ren
# # view.SetRenderWindow(self.viewport._renwin)
# # b = vtkMapper()
# # b.SetInputConnection(chart)
# # a = vtkOpenGLContextActor()
# # self.viewport.register_actors(chart) | APAV | /APAV-1.4.0-cp311-cp311-win_amd64.whl/apav/visualization/plotting.py | plotting.py |
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
| APC-Power-Usage | /APC_Power_Usage-0.1.5-py3-none-any.whl/APC_Power_Usage-0.1.5.dist-info/LICENSE.md | LICENSE.md |
import threading
import time
import subprocess
import re
from datetime import datetime
from .db import Session, APCReading
class APC(object):
    """Snapshot of UPS status read from the local ``apcaccess`` utility.

    Call :meth:`reload` to (re-)populate the fields from a fresh run of
    ``apcaccess``; until then every property returns ``None``.
    """

    def __init__(self):
        # Populated by reload(); initialised to None so that early access
        # yields a predictable value instead of raising AttributeError.
        self._ups_name = None
        self._model = None
        self._date = None
        self._load_percent = None
        self._nominal_power = None

    @property
    def ups_name(self):
        """UPS name as configured in apcupsd (UPSNAME field)."""
        return self._ups_name

    @property
    def model(self):
        """UPS model string (MODEL field)."""
        return self._model

    @property
    def date(self):
        """Timezone-aware timestamp of the reading (DATE field)."""
        return self._date

    @property
    def load_percent(self):
        """Current load as a percentage of nominal power (LOADPCT field)."""
        return self._load_percent

    @property
    def nominal_power(self):
        """Nominal UPS output power in watts (NOMPOWER field)."""
        return self._nominal_power

    @property
    def load(self):
        """Current load in watts, derived from nominal power and load %."""
        return self.nominal_power * self.load_percent / 100

    def reload(self):
        """Run ``apcaccess`` and refresh all fields from its output."""
        apc_subprocess = self._apc_subprocess()
        self._parse(apc_subprocess.stdout)

    def _apc_subprocess(self):
        # Separate method so tests can substitute the subprocess call.
        return subprocess.run(["apcaccess"], capture_output=True)

    def _parse(self, apc_subprocess_stdout):
        """Parse raw ``apcaccess`` stdout (bytes) into the private fields.

        Lines that do not look like ``KEY : value`` are ignored instead of
        crashing (previously ``match.group`` was called on ``None``).
        """
        fields = {
            "UPSNAME": lambda obj, x: setattr(obj, "_ups_name", x),
            "MODEL": lambda obj, x: setattr(obj, "_model", x),
            "DATE": lambda obj, x: setattr(
                obj, "_date", datetime.strptime(x, "%Y-%m-%d %H:%M:%S %z")
            ),
            "LOADPCT": lambda obj, x: setattr(
                obj, "_load_percent", float(x.split(" ")[0])
            ),
            "NOMPOWER": lambda obj, x: setattr(
                obj, "_nominal_power", int(x.split(" ")[0])
            ),
        }
        for row in apc_subprocess_stdout.decode("utf-8").strip().split("\n"):
            # Raw string avoids invalid escape-sequence warnings for \s.
            match = re.search(r"^([A-Z]+\s*[A-Z]+)\s*:\s(.*)$", row.strip())
            if match and match.group(1) in fields:
                fields[match.group(1)](self, match.group(2))

    def __repr__(self) -> str:
        return "APC(ups_name={}, model={}, date={}, load_percent={}, nominal_power={}, load={})".format(
            self.ups_name,
            self.model,
            self.date,
            self.load_percent,
            self.nominal_power,
            self.load,
        )
class Collector(threading.Thread):
    """Daemon thread that samples the UPS load once per second and
    aggregates the samples into one database row per hour.

    Each :class:`APCReading` row keeps a running sum of the sampled load
    (``load``) and the number of samples taken (``no_logs``); the mean
    load for an hour is therefore ``load / no_logs``.
    """

    def __init__(self):
        super(Collector, self).__init__(target=self._collect, daemon=True)
        # Reused APC snapshot; refreshed on every loop iteration.
        self.apc = APC()

    def _collect(self):
        """Sampling loop; never returns (the thread runs as a daemon)."""
        while True:
            self.apc.reload()
            # Truncate the reading's timestamp to the top of the hour so
            # every sample within the same hour maps to the same row.
            # NOTE(review): this builds a naive datetime although APC.date
            # is timezone-aware -- confirm the DB column expects naive
            # timestamps.
            date = datetime(
                self.apc.date.year,
                self.apc.date.month,
                self.apc.date.day,
                hour=self.apc.date.hour,
                minute=0,
                second=0,
                microsecond=0,
            )
            with Session.begin() as session:
                # Upsert the hourly row: insert on the first sample of the
                # hour, otherwise accumulate load and bump the counter.
                existing_reading = (
                    session.query(APCReading).filter_by(date=date).one_or_none()
                )
                if existing_reading is None:
                    apc_reading = APCReading(
                        date=date,
                        no_logs=1,
                        load=self.apc.load,
                    )
                    session.add(apc_reading)
                else:
                    existing_reading.load = existing_reading.load + self.apc.load
                    existing_reading.no_logs = existing_reading.no_logs + 1
                    # NOTE(review): add() is redundant for an object already
                    # tracked by the session, but harmless.
                    session.add(existing_reading)
            time.sleep(1)
import requests
from urllib.parse import urljoin
from .base import ATF_LOGGER, ApcKind, AuthError, NullAuth
class UpsParserStateMachine:
    """Line-oriented scanner for the "UPS Status" table of the frmnc UI.

    Feed lines one at a time via ``self.state(line)``; extracted
    name/value pairs accumulate in :attr:`upsst`.
    """

    def __init__(self) -> None:
        self.upsst = {}
        self.key = ''
        # Current handler; starts out searching for the table heading.
        self.state = self.wait_for_upss

    def wait_for_upss(self, line: str) -> None:
        """Skip everything until the "UPS Status" heading appears."""
        if "UPS Status" not in line:
            return
        self.state = self.handle_kov_start

    def handle_kov_start(self, line: str) -> None:
        """Dispatch on the opening div of a name or a value cell."""
        if line == '<div class="dataName">':
            self.key = ''
            self.state = self.handle_key
        elif line == '<div class="dataValue">' and self.key:
            self.state = self.handle_value

    def handle_key(self, line: str) -> None:
        """Capture the data name; a bare </div> resets the pending key."""
        if "</span>" in line:
            self.key = line.partition('<')[0]
        elif line != '</div>':
            return
        else:
            self.key = ''
        self.state = self.handle_kov_start

    def handle_value(self, line: str) -> None:
        """Accumulate value text until the closing </div>."""
        if line == '</div>':
            self.key = ''
            self.state = self.handle_kov_start
            return
        if '<span ' in line:
            return
        tmp = line.split('<', 2)[0].replace(' ', '').lstrip()
        if self.key in self.upsst:
            # Subsequent text chunks are joined with a single space.
            self.upsst[self.key] += ' ' + tmp
        else:
            self.upsst[self.key] = tmp
class Frmnc(ApcKind):
    """Fetcher for APC UPSes exposing the "frmnc" style web interface."""

    @staticmethod
    def parse(rlns):
        """Run the line-based state machine over an iterable of lines and
        return the collected name/value dict."""
        statemach = UpsParserStateMachine()
        for line in rlns:
            (statemach.state)(line)
        return statemach.upsst

    def fetch(self, user: str, password: str):
        """Log in to the web UI, scrape the UPS status page and log out.

        Raises :class:`AuthError` when the login form rejects the
        credentials.
        """
        base_url = "http://" + self._host
        s = requests.Session()
        s.auth = NullAuth()
        # Fetch the front page and dig the login form's action URL out of
        # the 'frmLogin' form markup.
        r = self.urlway(0, base_url, s.get, stream=True)
        forml = next(filter(lambda value: "name=\"frmLogin\"" in value, r.iter_lines(decode_unicode=True)))
        forml = next(filter(lambda value: "action=" in value, forml.split())).split('=', 2)[1].split('"', 3)[1]
        r = self.urlway(1, urljoin(base_url, forml), s.post, stream=True, data = {
            'login_username': user,
            'login_password': password,
        })
        # A 403, or being redirected back to the login form, means the
        # credentials were rejected.
        if (r.status_code == 403) or (r.url == urljoin(base_url, forml)):
            del r, s
            raise AuthError()
        del forml
        try:
            upsst = self.parse(r.iter_lines(decode_unicode=True))
            ATF_LOGGER.debug(F'{self._host}: [result] {repr(upsst)}')
        finally:
            # Always log out so we do not exhaust the device's sessions.
            self.urlway(2, urljoin(r.url, "logout.htm"), s.get)
            del r, s
        return upsst

    # Returns the internal temperature with the unit suffix stripped.
    @staticmethod
    def extract(upsst) -> str: return upsst['Internal Temperature'].replace('°C', '')
from html.parser import HTMLParser
import requests
from urllib.parse import urljoin
from .base import ATF_LOGGER, ApcKind, AuthError, NullAuth
class UpsStatEntity:
    """One UPS statistic assembled from description/value/units spans."""

    def __init__(self) -> None:
        # All three parts start empty; the parser appends text to them.
        self.description = self.value = self.units = ''
class UpsParserStateMachine(HTMLParser):
    """HTML parser collecting UPS statistics from the gden/NT07 web UI.

    The page marks each datum with ``<span name="TYPE?ID">`` where TYPE is
    ``description``, ``value`` or ``units``.  Text of spans sharing the
    same ID is merged into a single :class:`UpsStatEntity` in
    :attr:`stats`.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.stats = dict()
        # Currently selected entity and which of its fields text goes to.
        # __selty was previously only assigned inside the tag handlers;
        # give it a defined initial value so handle_data can never hit an
        # uninitialised attribute.
        self.__sel = None
        self.__selty = ''

    def handle_starttag(self, tag: str, attrs) -> None:
        """Select an entity when a ``<span name="TYPE?ID">`` opens."""
        namattrs = [i[1] for i in attrs if i[0] == 'name']
        if (tag != 'span') or (len(namattrs) != 1):
            return
        parts = namattrs[0].split('?')
        if len(parts) != 2:
            return
        ty, eid = parts
        if eid not in self.stats:
            self.stats[eid] = UpsStatEntity()
        self.__sel = self.stats[eid]
        self.__selty = ty

    def handle_endtag(self, tag: str) -> None:
        # Any closing tag deselects the current entity.
        self.__sel = None
        self.__selty = ''

    def handle_data(self, data: str) -> None:
        """Append text to the selected entity's field, if one is selected."""
        if not self.__sel:
            return
        ty = self.__selty
        if ty == 'description':
            # Descriptions end with ':' on the page; drop it.
            self.__sel.description += data.replace(':', '')
        elif ty == 'value':
            self.__sel.value += data.strip()
        elif ty == 'units':
            self.__sel.units += data
class GdenNt07(ApcKind):
    """Fetcher for APC devices running the gden/NT07 style web firmware."""

    @staticmethod
    def parse(chunks):
        """Feed text chunks through the HTML parser and return its stats."""
        statemach = UpsParserStateMachine()
        for chunk in chunks:
            statemach.feed(chunk)
        statemach.close()
        return statemach.stats

    def fetch(self, user: str, password: str):
        """Log in, scrape the battery page (batts.htm) and log out.

        Returns a dict mapping the human-readable description to a
        ``(value, units)`` tuple.  Raises :class:`AuthError` on login
        failure.
        """
        base_url = F'http://{self._host}'
        upsst = None
        with requests.Session() as s:
            s.auth = NullAuth()
            # Locate the hashed login form's action URL on the front page.
            r = self.urlway(0, base_url, s.get, stream=True)
            forml = next(filter(lambda value: "name=\"HashForm1\"" in value, r.iter_lines(decode_unicode=True)))
            forml = next(filter(lambda value: "action=" in value, forml.split())).split('=', 2)[1].split('"', 3)[1]
            r = self.urlway(1, urljoin(base_url, forml), s.post, data = {
                'login_username': user,
                'login_password': password,
                'prefLanguage': '00000000',
                'submit': 'Log+On',
            })
            # A 403, or a redirect back to the form, means login failed.
            if (r.status_code == 403) or (r.url == urljoin(base_url, forml)):
                del r, s
                raise AuthError()
            del forml
            try:
                r = self.urlway(2, urljoin(r.url, 'batts.htm'), s.get, stream=True)
                upsst = self.parse(r.iter_lines(decode_unicode=True))
            finally:
                # Always log out so we do not exhaust device sessions.
                self.urlway(3, urljoin(r.url, 'logout.htm'), s.get)
        # Re-key from internal span IDs to human-readable descriptions.
        upsst2 = {}
        for i in upsst.values():
            upsst2[i.description] = (i.value, i.units)
        upsst = upsst2
        ATF_LOGGER.debug(F'{self._host}: [result] {repr(upsst)}')
        return upsst

    @staticmethod
    def extract(upsst) -> str:
        """Return the battery temperature value (units are discarded)."""
        value, units = upsst['Battery Temperature']
        return value
import logging
import requests
import sys
# Shared logger used by every fetcher in this package.
ATF_LOGGER = logging.getLogger('APC_Temp_fetch')
class ApcKind:
    """Common base for the APC web-interface fetchers.

    Stores the target host and default keyword arguments for requests,
    and provides :meth:`urlway`, a logged wrapper around one HTTP call.
    Subclasses implement :meth:`fetch` and :meth:`extract`.
    """

    def __init__(self, host: str, rqargs, **kwargs):
        # forwards all unused arguments, to make this class usable as a mixin
        super().__init__(**kwargs)  # type: ignore[call-arg]
        self._host = host
        self._rqargs = rqargs

    def urlway(self, num: int, in_url: str, handler, **kwargs):
        """Perform one HTTP request via *handler*, with debug logging."""
        ATF_LOGGER.debug(F'{self._host}: [{num}] {in_url}')
        try:
            resp = handler(in_url, **kwargs, **self._rqargs)
            ATF_LOGGER.debug(F'{self._host}: [{num}] -> {resp.url}')
            # Streamed responses default to no encoding; force UTF-8 so
            # iter_lines(decode_unicode=True) works.
            if bool(kwargs.get('stream', False)) and resp.encoding is None:
                resp.encoding = 'utf-8'
            return resp
        except Exception as e:
            # it does not make sense to try to use r.url here, it may be unavailable
            ATF_LOGGER.error(F'{self._host}: [{num}] while fetching {in_url}: {repr(e)}')
            # do not use ATF_LOGGER.exception because we re-raise
            # the exception and don't want to clutter the output
            raise

    def fetch(self, user: str, password: str):
        """Retrieve the device's status data; implemented by subclasses."""
        raise NotImplementedError

    @staticmethod
    def extract(upsst) -> str:
        """extract the temperature from the return value of the `fetch` method"""
        raise NotImplementedError
class AuthError(Exception):
    """Raised when logging in to the APC web interface fails."""

    # Default message previously misspelled as "authentification".
    def __init__(self, message='authentication failed'):
        super().__init__(message)
# source: https://github.com/psf/requests/issues/2773#issuecomment-174312831
class NullAuth(requests.auth.AuthBase):
    """A no-op auth object that stops requests from consulting ``.netrc``.

    Some sites reject regular HTTP authentication even though we keep the
    credentials in the ``.netrc`` file and submit them as form elements.
    Without this class, requests would fall back to the ``.netrc`` entry,
    which on such sites yields a 401.

    Use with::

        requests.get(url, auth=NullAuth())
    """

    def __call__(self, r):
        # Leave the prepared request untouched.
        return r
from html.parser import HTMLParser
import requests
from collections.abc import Iterable
from typing import Dict, Tuple
from .base import ATF_LOGGER, ApcKind
from html.parser import HTMLParser
import requests
from collections.abc import Iterable
from typing import Dict, List
class UpsStatEntity:
    """A single row of the CS121 status table (label plus raw value)."""

    def __init__(self, ident) -> None:
        self.ident = ident
        # Text accumulates as the parser walks the row's cells.
        self.description = self.value = ''
# e.g. """ <tr onMouseOver="TTshow('upsstat_11')" onMouseOut="TThide()"><td class="maina">Battery Temperature</td><td class="mainb" width="100%" colspan=3>30.0 </td></tr>"""
class UpsParserStateMachine(HTMLParser):
    """HTML parser for the CS121 main.shtml status table.

    Rows look like ``<tr onMouseOver="TTshow('upsstat_N')" ...>`` with the
    label in a ``td.maina`` cell and the value in a ``td.mainb`` cell.
    Results land in :attr:`stats` as ``{description: (ident, value)}``.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.stats = dict()
        # Entity currently being assembled, and the class of the <td>
        # whose text is being collected ('maina' or 'mainb').
        self.__sel = None
        self.__selty = None

    def handle_starttag(self, tag: str, attrs) -> None:
        """Start a new entity on a TTshow() row; track td cell class."""
        attrs = {i[0]: i[1] for i in attrs}
        if tag == 'tr':
            if ('onmouseover' in attrs) and (attrs.get('onmouseout') == 'TThide()'):
                self.__sel = UpsStatEntity(attrs['onmouseover'].replace("TTshow('upsstat_", '').replace("')", ''))
                self.__selty = None
        elif self.__sel and tag == 'td':
            self.__selty = attrs.get('class')

    @staticmethod
    def mangle_value(x: str) -> str:
        # Normalise nbsp to space and strip the _dw("...") JS wrapper.
        # NOTE: removeprefix/removesuffix require Python 3.9+.
        return x.strip().replace('\xa0', ' ').removeprefix('_dw("').removesuffix('")')

    def handle_endtag(self, tag: str) -> None:
        """Commit the entity when its row closes; reset cell tracking."""
        if not self.__sel:
            return
        if tag == 'td':
            self.__selty = None
        elif tag == 'tr' and self.__sel.description:
            self.stats[self.mangle_value(self.__sel.description)] = (self.__sel.ident, self.mangle_value(self.__sel.value))
            self.__selty = None
            self.__sel = None

    def handle_entityref(self, name: str) -> None:
        # NOTE(review): `name` is an entity name such as 'nbsp', so this
        # comparison against a literal space can never match -- presumably
        # 'nbsp' was intended; confirm against the device's HTML. With
        # convert_charrefs (the default) this hook is rarely invoked.
        if name == ' ':
            self.handle_data(' ')

    def handle_data(self, data: str) -> None:
        """Route cell text into description or value of the current row."""
        if (not self.__sel) or (not self.__selty):
            return
        if self.__selty == 'maina':
            self.__sel.description += data
        elif self.__selty == 'mainb':
            self.__sel.value += data
class Cs121(ApcKind):
    """Fetcher for CS121-style adapters; no authentication is required."""

    @staticmethod
    def parse(rlns: Iterable[str]) -> Dict[str, Tuple[int, str]]:
        """Feed HTML lines through the parser and return its stats dict."""
        statemach = UpsParserStateMachine()
        for line in rlns:
            statemach.feed(line)
        statemach.close()
        return statemach.stats

    def fetch(self, user: str, password: str):
        """Scrape main.shtml; *user* and *password* are unused here."""
        # we ignore user and password
        rlns = self.urlway(0, F'http://{self._host}/main.shtml', requests.get, stream = True).iter_lines(decode_unicode=True)
        upsst = self.parse(rlns)
        ATF_LOGGER.debug(F'{self._host}: [result] {repr(upsst)}')
        return upsst

    @staticmethod
    def extract(upsst: Dict[str, Tuple[int, str]]) -> str:
        """Return the first matching temperature value.

        NOTE(review): implicitly returns None (despite the ``-> str``
        annotation) when neither key is present; run_one treats a falsy
        result as "nothing to print".
        """
        for i in ['UPS Temperature', 'Battery Temperature']:
            j = upsst.get(i)
            if j: return j[1]
import argparse
import copy
import logging
import sys
import time
from typing import Any, Dict, Optional
from . import KINDS
from .base import ATF_LOGGER
class UnknownFetcher(Exception):
    """Raised when the requested fetcher kind is not registered in KINDS."""
    pass
def run_one(kind: str, host: str, user: str, password: str, timeout: Optional[float], rqadfl: Dict[str, Any]) -> None:
    """Fetch the temperature of one *host* and print ``host<TAB>value``.

    Raises :class:`UnknownFetcher` for an unregistered *kind*; logs an
    error (and prints nothing) when the temperature key is missing.
    """
    try:
        fetcher_cls = KINDS[kind]
    except KeyError:
        raise UnknownFetcher('unknown fetcher: ' + kind)
    # Copy the shared defaults so a per-host timeout does not leak out.
    request_args = copy.deepcopy(rqadfl)
    if timeout:
        request_args['timeout'] = timeout
    fetched = fetcher_cls(host, request_args).fetch(user, password)
    try:
        temperature = fetcher_cls.extract(fetched)
    except KeyError:
        # suppress verbose traceback
        ATF_LOGGER.error(F"{host}: unable to extract temperature data")
        return
    if temperature:
        print(f"{host}\t{temperature}")
def common_args(parser: argparse.ArgumentParser) -> None:
    """Register the CLI options shared by every entry point."""
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="increase output verbosity",
    )
    parser.add_argument(
        "--proxy",
        help="set a proxy for all requests",
    )
def setup_logging(verbose: bool) -> logging.StreamHandler:
    """Attach a stderr handler to the package logger and return it.

    Both the handler and the logger are set to DEBUG when *verbose*,
    otherwise INFO.
    """
    level = logging.DEBUG if verbose else logging.INFO
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    ATF_LOGGER.addHandler(handler)
    ATF_LOGGER.setLevel(level)
    return handler
def parse_rqadfl(args: Any) -> Dict[str, Any]:
    """Build the default requests keyword arguments from parsed CLI args.

    Returns a dict with a 'proxies' entry when --proxy was given,
    otherwise an empty dict.
    """
    if not args.proxy:
        return {}
    return {"proxies": {"http": args.proxy, "https": args.proxy}}
def main_one() -> None:
    """CLI entry point: fetch and print the temperature of a single APC."""
    parser = argparse.ArgumentParser()
    common_args(parser)
    parser.add_argument("kind", help=f"APC interface kind (one of: {' '.join(KINDS.keys())})")
    parser.add_argument("host", help="connect to the host (APC) via HTTP")
    parser.add_argument("user", help="with the given user")
    parser.add_argument("password", help="with the given pass")
    parser.add_argument("--timeout", help="set a timeout (in seconds) for each request execution (per request)", type=float)
    args = parser.parse_args()
    setup_logging(args.verbose)
    rqadfl = parse_rqadfl(args)
    try:
        run_one(args.kind, args.host, args.user, args.password, args.timeout, rqadfl)
    except Exception:
        # Log the full traceback with the host as context, but exit cleanly.
        ATF_LOGGER.exception(args.host)
def main_list() -> None:
    """CLI entry point: fetch temperatures for every APC listed in a file.

    Each non-comment line of the list file is
    'kind host user password [timeout]'. Failures for one host are logged
    and do not stop processing of the remaining hosts.
    """
    parser = argparse.ArgumentParser()
    common_args(parser)
    parser.add_argument("apclist", help="file containing list of 'kind host user password [timeout]'")
    args = parser.parse_args()
    setup_logging(args.verbose)
    rqadfl = parse_rqadfl(args)
    with open(args.apclist, 'r') as apclist:
        for line in apclist:
            parts = line.split()
            # BUG FIX: also skip comments written without a space ('#foo'),
            # which the previous exact comparison (parts[0] == '#') missed.
            if not parts or parts[0].startswith('#'):
                continue
            if len(parts) < 4:
                ATF_LOGGER.error(f'got invalid apclist line: {line}')
                continue
            kind, host, user, password = parts[:4]
            try:
                timeout = float(parts[4]) if len(parts) > 4 else None
                run_one(kind, host, user, password, timeout, rqadfl)
            except Exception:
                ATF_LOGGER.exception(host)
# User Guide for APEC (v1.2.2)
(Accessibility Pattern based Epigenomic Clustering)
<img src="images/workflow.jpg" width="800">
APEC can perform fine cell type clustering on single cell chromatin accessibility data from scATAC-seq, snATAC-seq, sciATAC-seq or any other relevant experiment. It can also be used to evaluate gene expression from relevant accesson, search for differential motifs/genes for each cell cluster, find super enhancers, and construct pseudo-time trajectory (by calling Monocle). **If users have already obtained the fragment-count-per-peak matrix from other mapping pipelines (such as CellRanger), please run APEC from [Section One "Run APEC from fragment count matrix"](#section-one-run-aepc-from-fragment-count-matrix). If users have only the raw fastq files, please jump to [Section Two "Get fragment count matrix from raw data"](#section-two-get-fragment-count-matrix-from-raw-data).**
## Section One. Run APEC from fragment count matrix
### 1. Requirements and installation
#### 1.1 Requirements
APEC requires Linux system (CentOS 7.3+ or Ubuntu 16.04+), as well as Python3 (3.6.8, not 3.7.x) and R (3.5.1+) environments. If users want to build pseudotime trajectory with APEC, please install Monocle (2.10.0, http://cole-trapnell-lab.github.io/monocle-release/docs/) in R (see [1.2 Install and import APEC](#12-install-and-import-apec)). Also, the following softwares are required for APEC if users want to run motif analysis (as chromVAR):
Bedtools: http://bedtools.readthedocs.io/en/latest/content/installation.html
Meme 4.11.2: http://meme-suite.org/doc/download.html?man_type=web (Meme 5.x.x is not compatible with APEC)
The files in **reference** folder are required for APEC. But we didn't upload reference files to GitHub since they are too big. Users can download all reference files from one of the following websites:
-|website
-|-
1|http://galaxy.ustc.edu.cn:30803/APEC/reference/ <br> Due to server upgrades, this site will be temporarily unavailable from 2020-03-30 to 2020-04-01.
2|https://apec.oss-us-east-1.aliyuncs.com/APEC/reference/reference.zip
3|https://drive.google.com/drive/folders/1psK7za4KNJfqpigUaLMY3bWlqBHp1J5k?usp=sharing
The **reference** folder should contains the following files:
hg19_RefSeq_genes.gtf, hg19_chr.fa, hg19_chr.fa.fai,
mm10_RefSeq_genes.gtf, mm10_chr.fa, mm10_chr.fa.fai,
JASPAR2018_CORE_vertebrates_non-redundant_pfms_meme.txt, tier1_markov1.norc.txt
#### 1.2 Install and import APEC
Users can install APEC by:
pip install APEC==1.2.2
We strongly recommend that users build a python 3.6.8 environment for APEC with **miniconda** or **anaconda**, since APEC is dependent on specific versions of python packages (see README.md in examples folder), and Monocle (2.10.0) can be installed in conda environment too. The commands to create a conda environment (named as apec_env) and install APEC and Monocle are:
conda create -n apec_env python=3.6.8
conda activate apec_env
pip install APEC==1.2.2
conda install -n apec_env -c bioconda bioconductor-monocle=2.10.0
In Ipython, Jupyter-notebook or a python script, users can import packages of APEC by:
from APEC import clustering,plot,generate
Users can inquire the manual for each function of APEC by using "help()" in Ipython or Jupyter, for example:
help(clustering.cluster_byAccesson)
### 2. Input data
If users have employed APEC to generate fragment count matrix from raw data (see [Section Two](#section-two-get-fragment-count-matrix-from-raw-data)), they can run APEC clustering and subsequent analysis on the $project folder directly.
If using APEC for 10X scATAC-seq data, users can run the following script to prepare the project:
from APEC import convert
convert.convert_10X('$10X_data_folder/', '$project/')
The '$10X_data_folder' should contain 'barcodes.tsv', 'matrix.mtx' and 'peaks.bed' files, which are the results of Cellranger.
If using the matrix generated from other experiments, users need to prepare a project folder (termed '$project'), which contains **matrix** and **peak** folders. Please place "filtered_cells.csv" and "filtered_reads.mtx" in **matrix** folder, "top_filtered_peaks.bed" in **peak** folder. Here is the instruction for three input files:
filtered_cells.csv: Two-column (separated by tabs) list of cell information ('name' and 'notes'):
The 'name' column stores cell names (or barcodes).
The 'notes' column can be cell-type or any other cell information.
top_filtered_peaks.bed: Three-column list of peaks, which is a standard bed format file.
It is same to the "peaks.bed" file in the CellRanger output of a 10X dataset.
filtered_reads.mtx: Fragment count matrix in mtx format, where a row is a peak and a column is a cell.
It is same to the "matrix.mtx" file in the CellRanger output of a 10X dataset.
The order of cells should be the same with "filtered_cells.csv".
    The order of peaks should be the same with "top_filtered_peaks.bed".
### 3. Functions of APEC (step by step)
#### Users can directly run example projects and scripts in "examples" folder to generate figures listed in our APEC paper (https://www.biorxiv.org/content/10.1101/646331v4).
#### 3.1 Clustering by APEC
Use the following codes to cluster cells by APEC algorithm:
clustering.build_accesson('$project', ngroup=600)
clustering.cluster_byAccesson('$project', nc=0, norm='probability')
plot.plot_tsne('$project', rs=0)
plot.correlation('$project', cell_label='notes', clip=[0,1])
input parameters:
ngroup: Number of accessons, default=600.
nc: Number of cell clusters, set it to 0 if users want to predict it with APEC, default=0.
norm: Normalization method, can be 'zscore', 'probability', or 'filter'; default='probability'.
If there are many noises in the fragment count matrix, please consider to set norm='filter'.
rs: The random_seed parameter for tSNE, default=0.
cell_label: Color labels for cells, can be 'notes' or 'cluster', default='notes'.
If cell_label='cluster', it will use clustering result of clustering.cluster_byAccesson().
clip: Range [min, max] for the correlation heatmap, default=[-1,1]
output files:
$project/matrix/Accesson_peaks.csv
$project/matrix/Accesson_reads.csv
$project/result/cluster_by_APEC.csv
$project/result/TSNE_by_APEC.csv
$project/figure/TSNE_by_APEC_with_notes_label.pdf
$project/figure/TSNE_by_APEC_with_cluster_label.pdf
$project/figure/cell_cell_correlation_by_APEC_with_notes_label.png
<img src="images/TSNE_by_APEC_with_notes_label.jpg" width="400">
_Figure A. TSNE_by_APEC_with_notes_label.pdf, where cells are labeled with cell types (the 'notes' column of filtered_cells.csv)_
<img src="images/TSNE_by_APEC_with_cluster_label.jpg" width="400">
_Figure B. TSNE_by_APEC_with_cluster_label.pdf, where cells are labeled with cell clusters (the 'cluster' column of cluster_by_APEC.csv)_
#### 3.2 Clustering by chromVAR (optional, required for motif analysis)
Use the following codes to cluster cells by chromVAR algorithm:
generate.motif_matrix('$project', genome_fa='$reference/hg19_chr.fa',
background='$reference/tier1_markov1.norc.txt',
                          meme='$reference/JASPAR2018_CORE_vertebrates_non-redundant_pfms_meme.txt',
np=4)
clustering.cluster_byMotif('$project', np=4, nc=0, ns=50)
input parameters:
genome_fa: Path to hg19_chr.fa or mm10_chr.fa in $reference folder.
background: Path to tier1_markov1.norc.txt in $reference folder.
    meme: Path to JASPAR2018_CORE_vertebrates_non-redundant_pfms_meme.txt in $reference folder.
np: Number of CPU cores used for parallel calculation, default=4.
nc: Number of cell clusters, set it to 0 if users want to predict it using Louvain algorithm, default=0.
ns: Number of permuted sampling, default=50.
output files:
$project/result/deviation_chromVAR.csv
$project/result/cluster_by_chromVAR.csv
#### 3.3 Evaluate ARI, NMI and AMI for clustering result
If users have the real cell type in the 'notes' column of '$project/matrix/filtered_cells.csv', please use the following code to calculate ARI, NMI and AMI to estimate the accuracy of the clustering algorithm.
clustering.cluster_comparison('$project/matrix/filtered_cells.csv',
'$project/result/cluster_by_APEC.csv',
exclude='UNK')
The output ARI, NMI and AMI values will be printed on the screen directly. Please make sure that the column 'notes' of filtered_cells.csv denotes the cell type of each cell. For some datasets, such as the hematopoietic cells, the user should exclude all "UNK" cells (unknown type) before the calculation of ARI.
#### 3.4 Generate pseudotime trajectory
By default, APEC adapts monocle to generate pseudotime trajectory from accesson matrix:
generate.monocle_trajectory('$project', npc=5)
plot.plot_trajectory('$project', cell_label='notes', angles=[30,30])
input parameters:
npc: Number of principal components used to build trajectory, default=5.
cell_label: Labels for cells, can be 'notes' or 'cluster', default='notes'.
angles: Rotation angles for 3D trajectory, e.g. [100,20], default=[30,30].
output files:
$project/result/monocle_trajectory.csv
$project/result/monocle_reduced_dimension.csv
$project/figure/pseudotime_trajectory_with_notes_label.pdf
<img src="images/pseudotime_trajectory_with_notes_label.jpg" width="400">
_Figure C. pseudotime_trajectory_with_notes_label.pdf_
#### 3.5 Generate gene expression
generate.gene_score('$project', genome_gtf='hg19_RefSeq_genes.gtf', distal=20000)
output file:
$project/matrix/genes_scored_by_TSS_peaks.csv
$project/peak/genes_TSS_peaks.csv
#### 3.6 Generate differential feature for a cell cluster
Get differential accessons:
generate.get_nearby_genes('$project') # optional. Users should run step 3.5 before this.
generate.differential_feature('$project', feature='accesson', target='0', vs='all')
Get differential motifs/genes:
generate.differential_feature('$project', feature='motif', target='0', vs='all')
generate.differential_feature('$project', feature='gene', target='0', vs='all')
input parameters:
feature: Type of feature, can be 'accesson' or 'motif' or 'gene', default='accesson'.
If feature='accesson', run step 3.1 first;
if feature='motif', run step 3.2 first;
if feature='gene', run step 3.5 first.
cell_label: Cell Labels used for differential analysis, can be 'notes' or 'cluster', default='cluster'.
target: The target cluster that users search for differential features, default='1'.
If cell_label='cluster', target is one element in the 'cluster' column of cluster_by_APEC.csv file;
if cell_label='notes', target is one element in the 'notes' column of filtered_cells.csv file.
vs: Versus which clusters, can be '2,3,4' or 'all', default='all' (means all the rest clusters).
pvalue: P-value for student-t test, default=0.01.
log2_fold: Cutoff for log2(fold_change), default=1.
output file:
$project/result/differential_accesson_of_cluster_X_vs_XXX.csv
$project/result/differential_motif_of_cluster_X_vs_XXX.csv
$project/result/differential_gene_of_cluster_X_vs_XXX.csv
#### 3.7 Plot motif/gene on tSNE/trajectory diagram
plot.plot_feature('$project', space='tsne', feature='gene', name='FOXO1')
plot.plot_feature('$project', space='trajectory', feature='motif', name='GATA1')
input parameters:
space: In which space we draw the feature, can be 'tsne' or 'trajectory', default='tsne'.
If space='tsne', run plot.plot_tsne() first;
if space='trajectory', run step 3.4 first.
feature: Type of the feature, can be 'accesson' or 'motif' or 'gene', default='accesson'.
If feature='accesson', run step 3.1 first;
if feature='motif', run step 3.2 first;
if feature='gene', run step 3.5 first.
name: Name of the feature.
If feature='accesson', name=accesson number, i.e. '1';
if feature='motif', name=motif symbol, i.e. 'GATA1';
if feature='gene', name=gene symbol, i.e. 'CD36'.
clip: Clip range for the input matrix, can be [min, max] or 'none', default='none'.
angles: Rotation angles for 3D trajectory, e.g. [100,20], default=[30,30].
output files:
$project/figure/motif_XXX_on_trajectory_by_APEC.pdf
$project/figure/gene_XXX_on_tsne_by_APEC.pdf
<img src="images/motif_GATA1_on_trajectory_by_APEC.jpg" width="400">
_Figure D. motif_GATA1_on_trajectory_by_APEC.pdf_
#### 3.8 Generate potential super enhancer
generate.super_enhancer('$project', super_range=1000000, p_cutoff=0.01)
input parameter:
super_range: Genome range to search for super enhancer, default=1000000.
p_cutoff: Cutoff of P-value, default=0.01.
output file:
$project/result/potential_super_enhancer.csv
## Section Two. Get fragment count matrix from raw data
### (This part is only available on GitHub:https://github.com/QuKunLab/APEC)
### 1. Requirements and installation
All of the following software needs to be placed in the global environment of the Linux system to ensure that they can be called in any path/folder. Picard is also required, but we have placed it into $APEC/reference folder, and users don't need to install it. We recommend that users adopt the latest version of these software, except Meme (version 4.11.2).
Bowtie2: https://sourceforge.net/projects/bowtie-bio/files/bowtie2/2.2.9/
Samtools: https://github.com/samtools/samtools
Bedtools: http://bedtools.readthedocs.io/en/latest/content/installation.html
Macs2: https://github.com/taoliu/MACS.git
Meme 4.11.2: http://meme-suite.org/doc/download.html?man_type=web
pysam for python: set up by "pip install pysam"
Levenshtein for python: set up by "pip install python-Levenshtein"
#### 1.2 Installation
Users can simply install this part by copying the **code_v1.1.0** folder and **reference** folder into a same path. Users **must** run ***APEC_prepare_steps.sh*** directly in code_v1.1.0/, since each program will invoke the reference files automatically. The **reference** folder is required, but we didn't upload reference files to GitHub since they are too big. Users can download all reference files from one of the following sites:
-|website
-|-
1|http://galaxy.ustc.edu.cn:30803/APEC/reference/ <br> Due to server upgrades, this site will be temporarily unavailable from 2020-03-30 to 2020-04-01.
2|https://apec.oss-us-east-1.aliyuncs.com/APEC/reference_full.zip.001 <br> https://apec.oss-us-east-1.aliyuncs.com/APEC/reference_full.zip.002 <br> https://apec.oss-us-east-1.aliyuncs.com/APEC/reference_full.zip.003
The **reference** folder should contains the following files:
hg19_refseq_genes_TSS.txt, hg19_RefSeq_genes.gtf, hg19_blacklist.JDB.bed,
hg19_chr.fa, hg19_chr.fa.fai, hg19.chrom.sizes,
hg19.1.bt2, hg19.2.bt2, hg19.3.bt2, hg19.4.bt2, hg19.rev.1.bt2, hg19.rev.2.bt2,
mm10_refseq_genes_TSS.txt, mm10_RefSeq_genes.gtf, mm10_blacklist.BIN.bed,
mm10_chr.fa, mm10_chr.fa.fai, mm10.chrom.sizes,
mm10.1.bt2, mm10.2.bt2, mm10.3.bt2, mm10.4.bt2, mm10.rev.1.bt2, mm10.rev.2.bt2,
JASPAR2018_CORE_vertebrates_non-redundant_pfms_meme.txt, tier1_markov1.norc.txt, picard.jar
### 2. Fragment count matrix
#### 2.1 Arrangement of raw data
The **raw_data** folder should contain all raw sequencing fastq files. All these pair-end fastq files should be named as:
type1-001_1.fastq, type1-001_2.fastq, type1-002_1.fastq, type1-002_2.fastq, ……;
type2-001_1.fastq, type2-001_2.fastq, type2-002_1.fastq, type2-002_2.fastq, ……;
……
where "\_1" and "\_2" indicate forward and backward reads for pair-end sequencing. {type1, type2, ...} can be cell-types or batches of samples, such as {GM, K562, ...}, or {batch1, batch2, ...}, or any other words without underline "\_" or dash "-".
Users need to build a **project** folder to store the result. The **work**, **matrix**, **peak** and **figure** folders will be automatically built by subsequent steps, and placed in **project** folder.
#### 2.2 Easy-run of matrix preparation
Users can use the script ***APEC_prepare_steps.sh*** to finish the process from raw data to fragment count matrix. This script includes steps of "trimming", "mapping", "peak calling", "aligning read counts matrix", and "quality control". Running this step on our example project (i.e. project01 with 672 cells) will take 10~20 hours on an 8-core 32 GB computer, since the sequence mapping step is the slowest step.
Example:
bash APEC_prepare_steps.sh -r $raw_data -s $project -g hg19 -n 4 -l 3 -p 0.2 -f 2000
Input parameters:
-r: The raw_data folder
-s: The project folder.
-g: "hg19" or "mm10".
-n: Number of CPU cores.
-l: Threshold for the –log(Q-value) of peaks, used to filter peaks.
-p: Threshold of the percentage of fragments in peaks, used to filter cells.
-f: Threshold of the fragment number of each cell, used to filter cells.
Output files:
The script ***APEC_prepare_steps.sh*** will generate **work**, **peak**, **matrix**, and **figure** folders with many output files. Here, we only introduce files that are useful to users. For our example projects, all of these results can be reproduced on a general computer system.
(1) In **work** folder:
For each cell, the mapping step can generate a subfolder (with cell name) in the **work** folder. There are several useful files in each subfolder:
cell_name.hist.pdf: A histogram of fragment length distribution of each cell.
cell_name.RefSeqTSS.pdf: Insert enrichment around TSS regions of each cell.
(2) In **peak** folder:
mergeAll.hist.pdf: A histogram of fragment length distribution of all cells.
mergeAll.RefSeqTSS.pdf: Insert enrichment around TSS regions of all cells.
top_filtered_peaks.bed: Filtered top peaks, ranked by Q-value.
(3) In **matrix** folder:
reads.csv: Fragment count matrix.
cell_info.merged.csv: Data quality report of each cell.
filtered_cells.csv: Filtered cells information in csv format.
filtered_reads.mtx: Filtered fragment count matrix in mtx format.
(4) In **figure** folder:
cell_quality.pdf: A scatter plot of the fragment number and the percentage of fragments in peaks.
### 3. Generate bigwig files for UCSC track
generate_UCSCtrack.py -s $project --cfile cluster.csv --gsize chrom.sizes
Options:
-s The project folder.
--cfile cluster.csv file, e.g. cluster_by_APEC.csv in
$project/result/ folder
--gsize chrom.size files, default=../reference/hg19.chrom.sizes
This script outputs bigwig files to $project/result/track/ folder.
| APEC | /APEC-1.2.2.tar.gz/APEC-1.2.2/README.md | README.md |
import pymavlink
import pymavlink.dialects.v20.all as dialect
import threading
import time
from pymavlink.mavutil import default_native
def connection(device, baud=115200, source_system=255, source_component=0,
               planner_format=None, write=False, append=False,
               robust_parsing=True, notimestamps=False, input=True,
               dialect=None, autoreconnect=False, zero_time_base=False,
               retries=3, use_native=default_native,
               force_connected=False, progress_callback=None,
               udp_timeout=0, **opts):
    """Open a MAVLink connection to *device* and wrap it in a vehicle object.

    All arguments are forwarded unchanged to
    ``pymavlink.mavutil.mavlink_connection``; see its documentation for the
    meaning of each parameter. Blocks until the first HEARTBEAT is received.

    NOTE(review): with the default ``dialect=None`` the ``vehicle``
    constructor's use of its ``dialect`` parameter looks problematic -- see
    the note in ``vehicle.__init__``.
    """
    return vehicle(device, baud, source_system, source_component,
                   planner_format, write, append,
                   robust_parsing, notimestamps, input,
                   dialect, autoreconnect, zero_time_base,
                   retries, use_native,
                   force_connected, progress_callback,
                   udp_timeout, **opts)
class vehicle:
    """Convenience wrapper around a pymavlink MAVLink connection.

    Provides helpers for arming, flight-mode changes, takeoff/landing,
    guided-mode waypoints, geofence and mission upload, and parameter
    access. All methods talk to the autopilot referenced by ``self.mav``.
    """
    def __init__(self, device, baud, source_system, source_component,
                 planner_format, write, append,
                 robust_parsing, notimestamps, input,
                 dialect, autoreconnect, zero_time_base,
                 retries, use_native,
                 force_connected, progress_callback,
                 udp_timeout, **opts):
        # All arguments are forwarded verbatim to mavutil.mavlink_connection().
        self.mav = pymavlink.mavutil.mavlink_connection(device, baud, source_system, source_component,
                                                        planner_format, write, append,
                                                        robust_parsing, notimestamps, input,
                                                        dialect, autoreconnect, zero_time_base,
                                                        retries, use_native,
                                                        force_connected, progress_callback,
                                                        udp_timeout, **opts)
        # Block until the autopilot sends its first HEARTBEAT.
        self.mav.wait_heartbeat()
        # Cached result of the EKF pre-arm check (see is_armable()).
        self.armable = False
        # Request GLOBAL_POSITION_INT (message id 33) every 0.5 s
        # (param2 is the interval in microseconds).
        # NOTE(review): 'dialect' here is the __init__ *parameter* (None by
        # default via connection()), which shadows the module-level 'dialect'
        # import -- with the defaults this line would raise AttributeError;
        # confirm the intended name.
        self.command_long_send(CMD=dialect.MAV_CMD_SET_MESSAGE_INTERVAL, param1=33, param2=((10 ** 6) / 2))
    @property
    def target_system(self):
        # System id of the connected autopilot.
        return self.mav.target_system
    @property
    def target_component(self):
        # Component id of the connected autopilot.
        return self.mav.target_component
    def command_long_send(self, CMD, confirm=0, param1=0, param2=0, param3=0, param4=0, param5=0, param6=0, param7=0):
        """Send a COMMAND_LONG message with the given command id and params."""
        message = dialect.MAVLink_command_long_message(target_system=self.target_system,
                                                       target_component=self.target_component, command=CMD,
                                                       confirmation=confirm, param1=param1, param2=param2,
                                                       param3=param3, param4=param4, param5=param5, param6=param6,
                                                       param7=param7)
        self.mav.mav.send(message)
    def is_armable(self, timeout=60, interval=0.1):
        """Poll EKF_STATUS_REPORT until the vehicle looks armable.

        Returns True as soon as a healthy EKF status is seen (the result is
        cached), False if *timeout* seconds elapse first.
        """
        if self.armable is False:
            t0 = time.time()
            while time.time() - t0 <= timeout:
                # 193 == EKF_STATUS_REPORT message id.
                self.command_long_send(dialect.MAV_CMD_REQUEST_MESSAGE, param1=193)
                msg = self.mav.recv_match(type='EKF_STATUS_REPORT', blocking=True)
                # NOTE(review): accepts flags in [512, 1024) -- presumably
                # requiring exactly the 512 bit and nothing higher; this
                # rejects states where additional healthy bits are set.
                # Confirm against the EKF_STATUS_FLAGS bitmask.
                if (msg.flags >= 512) and (msg.flags < 1024):
                    self.armable = True
                    return True
                time.sleep(interval)
            return False
        else:
            return self.armable
    def setmode(self, mode):
        """Switch flight mode by name; returns False for an unknown mode."""
        flight_modes = self.mav.mode_mapping()
        if mode not in flight_modes.keys():
            return False
        self.command_long_send(CMD=dialect.MAV_CMD_DO_SET_MODE, param1=dialect.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,
                               param2=flight_modes[mode])
        return True
    def arm(self):
        """Arm the vehicle and translate the COMMAND_ACK result to a string."""
        self.command_long_send(dialect.MAV_CMD_COMPONENT_ARM_DISARM, param1=1)
        time.sleep(0.1)
        # 400 == MAV_CMD_COMPONENT_ARM_DISARM: wait for the matching ACK only.
        msg = self.mav.recv_match(type='COMMAND_ACK', condition='COMMAND_ACK.command==400', blocking=True)
        msg = msg.to_dict()
        # MAV_RESULT enumeration mapped to human-readable strings.
        if msg["result"] == 0:
            return "Armed"
        elif msg["result"] == 1:
            return "Temporarily Rejected"
        elif msg["result"] == 2:
            return "Denied"
        elif msg["result"] == 3:
            return "Unsupported"
        elif msg["result"] == 4:
            return "Failed"
        elif msg["result"] == 5:
            return "In Progress"
        elif msg["result"] == 6:
            return "Cancelled"
        else:
            return "Unknown"
    def takeoff(self, alt, blocking=False, callback=None):
        """Command a takeoff to altitude *alt*.

        With blocking=False and a callback, a background thread waits for
        arrival and then invokes the callback. With blocking=True and no
        callback, the call waits until the target position is reached.

        NOTE(review): when blocking=True *and* a callback is given (or
        blocking=False without a callback) neither branch runs, so the call
        returns immediately -- confirm this is intended.
        """
        self.command_long_send(dialect.MAV_CMD_NAV_TAKEOFF, param7=alt)
        # Capture the current horizontal position; compare() waits until the
        # vehicle reaches (lat, lon, alt). Raw lat/lon are in degrees * 1e7.
        message = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        message = message.to_dict()
        lat = float(message["lat"]) * 10 ** (-7)
        lon = float(message["lon"]) * 10 ** (-7)
        if not blocking and callback is not None:
            thread = threading.Thread(target=self.compare, args=([lat, lon, alt], callback,))
            thread.start()
        elif blocking and callback is None:
            thread = threading.Thread(target=self.compare, args=([lat, lon, alt], callback,))
            thread.start()
            thread.join()
    def land(self):
        """Command the vehicle to land at its current position."""
        self.command_long_send(CMD=dialect.MAV_CMD_NAV_LAND)
    def upload_GEOFence(self, fence_list):
        """Upload a polygon geofence as a list of (lat, lng) vertices.

        Protocol: read the current FENCE_ACTION, disable fencing while the
        points are written, set FENCE_TOTAL, upload and verify each vertex,
        then restore the original FENCE_ACTION. Each parameter write is
        retried until the autopilot echoes the expected value.
        """
        FENCE_TOTAL = "FENCE_TOTAL".encode(encoding="utf-8")
        FENCE_ACTION = "FENCE_ACTION".encode(encoding="utf-8")
        # Read the currently configured fence action so it can be restored.
        message = dialect.MAVLink_param_request_read_message(target_system=self.target_system,
                                                             target_component=self.target_component,
                                                             param_id=FENCE_ACTION, param_index=-1)
        self.mav.mav.send(message)
        while True:
            message = self.mav.recv_match(type="PARAM_VALUE", blocking=True)
            message = message.to_dict()
            if message["param_id"] == "FENCE_ACTION":
                fence_action_original = int(message["param_value"])
                break
        # Disable the fence action while uploading.
        while True:
            message = dialect.MAVLink_param_set_message(target_system=self.target_system,
                                                        target_component=self.target_component,
                                                        param_id=FENCE_ACTION, param_value=dialect.FENCE_ACTION_NONE,
                                                        param_type=dialect.MAV_PARAM_TYPE_REAL32)
            self.mav.mav.send(message)
            message = self.mav.recv_match(type="PARAM_VALUE", blocking=True)
            message = message.to_dict()
            if message["param_id"] == "FENCE_ACTION":
                if int(message["param_value"]) == dialect.FENCE_ACTION_NONE:
                    break
        # Reset the vertex count to zero before writing the new polygon.
        while True:
            message = dialect.MAVLink_param_set_message(target_system=self.target_system,
                                                        target_component=self.target_component,
                                                        param_id=FENCE_TOTAL, param_value=0,
                                                        param_type=dialect.MAV_PARAM_TYPE_REAL32)
            self.mav.mav.send(message)
            message = self.mav.recv_match(type="PARAM_VALUE", blocking=True)
            message = message.to_dict()
            if message["param_id"] == "FENCE_TOTAL":
                if int(message["param_value"]) == 0:
                    break
        # Announce the number of vertices that will be uploaded.
        while True:
            message = dialect.MAVLink_param_set_message(target_system=self.target_system,
                                                        target_component=self.target_component,
                                                        param_id=FENCE_TOTAL, param_value=len(fence_list),
                                                        param_type=dialect.MAV_PARAM_TYPE_REAL32)
            self.mav.mav.send(message)
            message = self.mav.recv_match(type="PARAM_VALUE", blocking=True)
            message = message.to_dict()
            if message["param_id"] == "FENCE_TOTAL":
                if int(message["param_value"]) == len(fence_list):
                    break
        # Write each vertex and read it back; only advance once the echoed
        # point is non-zero (i.e. the write was accepted).
        idx = 0
        while idx < len(fence_list):
            message = dialect.MAVLink_fence_point_message(target_system=self.target_system,
                                                          target_component=self.target_component,
                                                          idx=idx, count=len(fence_list), lat=fence_list[idx][0],
                                                          lng=fence_list[idx][1])
            self.mav.mav.send(message)
            message = dialect.MAVLink_fence_fetch_point_message(target_system=self.target_system,
                                                                target_component=self.target_component, idx=idx)
            self.mav.mav.send(message)
            message = self.mav.recv_match(type="FENCE_POINT", blocking=True)
            message = message.to_dict()
            latitude = message["lat"]
            longitude = message["lng"]
            if latitude != 0.0 and longitude != 0.0:
                idx += 1
        # Restore the fence action that was configured before the upload.
        while True:
            message = dialect.MAVLink_param_set_message(target_system=self.target_system,
                                                        target_component=self.target_component,
                                                        param_id=FENCE_ACTION,
                                                        param_value=fence_action_original,
                                                        param_type=dialect.MAV_PARAM_TYPE_REAL32)
            self.mav.mav.send(message)
            message = self.mav.recv_match(type="PARAM_VALUE", blocking=True)
            message = message.to_dict()
            if message["param_id"] == "FENCE_ACTION":
                if int(message["param_value"]) == fence_action_original:
                    break
    def upload_Mission(self, mission_list):
        """Upload a mission from a list of (lat, lon, alt) waypoints.

        The uploaded mission is the home item (seq 0), a takeoff to the
        first waypoint's altitude (seq 1), the given waypoints, and a final
        LAND at the last waypoint. Items are sent on demand in response to
        MISSION_REQUEST messages until a MISSION_ACK accepts the mission.
        """
        self.clear_Mission()
        # Announce the total item count: waypoints + home + takeoff.
        message = dialect.MAVLink_mission_count_message(target_system=self.target_system,
                                                        target_component=self.target_component,
                                                        count=len(mission_list) + 2,
                                                        mission_type=dialect.MAV_MISSION_TYPE_MISSION)
        self.mav.mav.send(message)
        while True:
            message = self.mav.recv_match(blocking=True)
            message = message.to_dict()
            if message["mavpackettype"] == "MISSION_REQUEST":
                if message["mission_type"] == dialect.MAV_MISSION_TYPE_MISSION:
                    seq = message["seq"]
                    if seq == 0:  # Home Location
                        message = dialect.MAVLink_mission_item_message(target_system=self.target_system,
                                                                       target_component=self.target_component,
                                                                       seq=seq, frame=dialect.MAV_FRAME_GLOBAL,
                                                                       command=dialect.MAV_CMD_NAV_WAYPOINT, current=0,
                                                                       autocontinue=0, param1=0, param2=0, param3=0,
                                                                       param4=0, x=0, y=0, z=0,
                                                                       mission_type=dialect.MAV_MISSION_TYPE_MISSION)
                    elif seq == 1:  # Takeoff
                        message = dialect.MAVLink_mission_item_message(target_system=self.target_system,
                                                                       target_component=self.target_component,
                                                                       seq=seq, frame=dialect.MAV_FRAME_GLOBAL,
                                                                       command=dialect.MAV_CMD_NAV_TAKEOFF, current=0,
                                                                       autocontinue=0, param1=0, param2=0, param3=0,
                                                                       param4=0, x=0, y=0, z=mission_list[0][2],
                                                                       mission_type=dialect.MAV_MISSION_TYPE_MISSION)
                    elif seq == len(mission_list) + 1:
                        # Final item: land at the last waypoint's coordinates.
                        message = dialect.MAVLink_mission_item_message(target_system=self.target_system,
                                                                       target_component=self.target_component,
                                                                       seq=seq, frame=dialect.MAV_FRAME_GLOBAL,
                                                                       command=dialect.MAV_CMD_NAV_LAND, current=0,
                                                                       autocontinue=0, param1=0, param2=0, param3=0,
                                                                       param4=0, x=mission_list[seq - 2][0],
                                                                       y=mission_list[seq - 2][1],
                                                                       z=mission_list[seq - 2][2],
                                                                       mission_type=dialect.MAV_MISSION_TYPE_MISSION)
                    else:
                        # Regular waypoint; mission_list index is offset by the
                        # two synthetic items (home, takeoff).
                        message = dialect.MAVLink_mission_item_message(target_system=self.target_system,
                                                                       target_component=self.target_component,
                                                                       seq=seq, frame=dialect.MAV_FRAME_GLOBAL,
                                                                       command=dialect.MAV_CMD_NAV_WAYPOINT, current=0,
                                                                       autocontinue=0, param1=0, param2=0, param3=0,
                                                                       param4=0, x=mission_list[seq - 2][0],
                                                                       y=mission_list[seq - 2][1],
                                                                       z=mission_list[seq - 2][2],
                                                                       mission_type=dialect.MAV_MISSION_TYPE_MISSION)
                    self.mav.mav.send(message)
            elif message["mavpackettype"] == "MISSION_ACK":
                if message["mission_type"] == dialect.MAV_MISSION_TYPE_MISSION and \
                        message["type"] == dialect.MAV_MISSION_ACCEPTED:
                    break
    def clear_Mission(self):
        """Clear all mission items stored on the autopilot."""
        message = dialect.MAVLink_mission_clear_all_message(target_system=self.target_system,
                                                            target_component=self.target_component,
                                                            mission_type=dialect.MAV_MISSION_TYPE_MISSION)
        self.mav.mav.send(message)
    def clear_GEOFence(self):
        """Clear all geofence items stored on the autopilot."""
        message = dialect.MAVLink_mission_clear_all_message(target_system=self.target_system,
                                                            target_component=self.target_component,
                                                            mission_type=dialect.MAV_MISSION_TYPE_FENCE)
        self.mav.mav.send(message)
    def enable_GEOFence(self, en_dis):
        """Enable ('ENABLE') or disable (anything else) the geofence."""
        if en_dis == "ENABLE":
            self.command_long_send(CMD=dialect.MAV_CMD_DO_FENCE_ENABLE, param1=1)
        else:
            self.command_long_send(CMD=dialect.MAV_CMD_DO_FENCE_ENABLE, param1=0)
    def goto(self, lat, lon, alt, vx=0, vy=0, vz=0, afx=0, afy=0, afz=0, yaw=0, yaw_rate=0, mode=int(0b110111111000),
             coo_frame='GLOBAL', blocking=False, callback=None):
        """Fly to (lat, lon, alt) in guided mode.

        The default type_mask (0b110111111000) tells the autopilot to use
        only the position fields and ignore velocity/acceleration/yaw.
        coo_frame selects the altitude reference: 'GLOBAL' (AMSL),
        'RELATIVE' (above home) or 'TERRAIN'; anything else falls back to
        'GLOBAL'. Blocking/callback semantics mirror takeoff() (including
        the same NOTE(review) about the unhandled argument combinations).
        """
        if coo_frame == 'GLOBAL':
            FRAME = dialect.MAV_FRAME_GLOBAL_INT
        elif coo_frame == 'RELATIVE':
            FRAME = dialect.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT
        elif coo_frame == 'TERRAIN':
            FRAME = dialect.MAV_FRAME_GLOBAL_TERRAIN_ALT_INT
        else:
            FRAME = dialect.MAV_FRAME_GLOBAL_INT
        # lat/lon are sent as degrees * 1e7 per the MAVLink *_INT convention.
        message = dialect.MAVLink_set_position_target_global_int_message(time_boot_ms=10,
                                                                         target_system=self.target_system,
                                                                         target_component=self.target_component,
                                                                         coordinate_frame=FRAME, type_mask=mode,
                                                                         lat_int=lat * 10 ** 7, lon_int=lon * 10 ** 7,
                                                                         alt=alt,
                                                                         vx=vx, vy=vy, vz=vz, afx=afx, afy=afy, afz=afz,
                                                                         yaw=yaw, yaw_rate=yaw_rate)
        self.mav.mav.send(message)
        if not blocking and callback is not None:
            thread = threading.Thread(target=self.compare, args=([lat, lon, alt], callback,))
            thread.start()
        elif blocking and callback is None:
            thread = threading.Thread(target=self.compare, args=([lat, lon, alt], callback,))
            thread.start()
            thread.join()
    def getparam(self, ID):
        """Read and return the value of autopilot parameter *ID*."""
        message = dialect.MAVLink_param_request_read_message(target_system=self.target_system,
                                                             target_component=self.target_component,
                                                             param_id=ID.encode("utf-8"), param_index=-1)
        self.mav.mav.send(message)
        while True:
            message = self.mav.recv_match(type="PARAM_VALUE", blocking=True)
            message = message.to_dict()
            if message["param_id"] == ID:
                return message["param_value"]
    def setparam(self, ID, value):
        """Set autopilot parameter *ID* to *value* (sent as REAL32)."""
        message = dialect.MAVLink_param_set_message(target_system=self.target_system,
                                                    target_component=self.target_component, param_id=ID.encode("utf-8"),
                                                    param_value=value, param_type=dialect.MAV_PARAM_TYPE_REAL32)
        self.mav.mav.send(message)
    def compare(self, GPS, callback):
        """Block until GLOBAL_POSITION_INT reports the target position *GPS*.

        GPS is [lat_deg, lon_deg, alt_m]; raw lat/lon arrive as deg * 1e7
        and alt as millimetres.

        NOTE(review): the differences below are signed, not absolute, so a
        position far *below* the target in any axis also satisfies the
        test -- consider abs(); confirm intended tolerance behaviour.
        """
        while True:
            message = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
            message = message.to_dict()
            if (((float(message["lat"]) * 10 ** (-7) - GPS[0]) <= 10 ** (-6)) and
                    ((float(message["lon"]) * 10 ** (-7) - GPS[1]) <= 10 ** (-6)) and
                    ((float(message["alt"]) * 10 ** (-3) - GPS[2]) <= 0.05)):
                break
        if callback is not None:
            callback()
## API Overview
The API contains the following files:
- **Setup.py** - creates the ADC (MCP3008) channel object
- **Sensor.py** - handles the sensor package's activities, qualities, and functionality
The following files illustrate the functioning of the API:
- application_console.py - prints the sensor output to the screen
- application_file.py - prints the sensor output to a text file (data.txt)
The API's functionality is interfaced with the Flask Web Application using **Samba file sharing**.
The API requires the following libraries to run:
- busio
- digitalio
- board
- adafruit_mcp3xxx.mcp3008
- adafruit_mcp3xxx.analog_in
These libraries can be installed on the Raspberry Pi using the following command:
- pip3 install adafruit-circuitpython-mcp3xxx
| API-StevenThomi | /API-StevenThomi-0.0.1.tar.gz/API-StevenThomi-0.0.1/README.md | README.md |
===========
api_callers
===========
.. image:: https://img.shields.io/pypi/v/API-callers.svg
:target: https://pypi.python.org/pypi/API-callers
.. image:: https://travis-ci.com/williamcaesar/API_callers.svg?branch=master
:target: https://travis-ci.com/williamcaesar/API_callers
.. image:: https://pyup.io/repos/github/williamcaesar/API_callers/shield.svg
:target: https://pyup.io/repos/github/williamcaesar/API_callers/
:alt: Updates
.. image:: https://api.codeclimate.com/v1/badges/0d12be035a4fc86a5fb4/maintainability
:target: https://codeclimate.com/github/williamcaesar/API_callers/maintainability
:alt: Maintainability
A set of tools to work with JWT APIs
* Free software: GNU General Public License v3
Features
--------
* Sync Caller
* Async Caller
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| API-callers | /API_callers-0.2.1.tar.gz/API_callers-0.2.1/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/williamcaesar/api_callers/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
api_callers could always use more documentation, whether as part of the
official api_callers docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/williamcaesar/api_callers/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `api_callers` for local development.
1. Fork the `api_callers` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/api_callers.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv api_callers
$ cd api_callers/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 api_callers tests
$ python setup.py test or py.test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.7, 3.4, 3.5 and 3.6, and for PyPy. Check
https://travis-ci.org/williamcaesar/api_callers/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ py.test tests.test_api_callers
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bumpversion patch # possible: major / minor / patch
$ git push
$ git push --tags
Travis will then deploy to PyPI if tests pass.
| API-callers | /API_callers-0.2.1.tar.gz/API_callers-0.2.1/CONTRIBUTING.rst | CONTRIBUTING.rst |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install api_callers, run this command in your terminal:
.. code-block:: console
$ pip install api_callers
This is the preferred method to install api_callers, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for api_callers can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/williamcaesar/api_callers
Or download the `tarball`_:
.. code-block:: console
$ curl -OL https://github.com/williamcaesar/api_callers/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/williamcaesar/api_callers
.. _tarball: https://github.com/williamcaesar/api_callers/tarball/master
| API-callers | /API_callers-0.2.1.tar.gz/API_callers-0.2.1/docs/installation.rst | installation.rst |
import re
from . import APILibError
from .interface import interface as to_interface
__all__ = ['Router', 'Context']
class Router:
'''通过此对象集中管理(注册、调用)interface'''
def __init__(self, context_cls=None):
'''
:arg context_cls: 此 router 绑定的 context 类型。不同类型的 context 提供不同的功能。
:type context_cls: `Context` 或它的子类
'''
self.context_cls = context_cls or Context
self.interfaces = {
# path: interface
}
def register(self, path, parameters=None, bound=False):
'''通过这个 decorator 注册 interface。
可以传入一个普通函数,此 decorator 会自动将其转换为 interface;也可以传入一个已经生成好的 interface。
:arg string path: interface 对应的 route path
:arg parameters: 只在传入的是普通函数(也就是不是 interface)时有效, 指定其参数定义,如果不需要参数,则为 None。
:arg parameters: 只在传入的是普通函数(也就是不是 interface)时有效,指明当前传入的是 function 还是 bound method。
:type parameters: list of ``api_libs.parameters.Parameter`` or ``None``
'''
if type(path) != str:
raise RouteRegisterFailed('route path ({}) 必须是字符串'.format(path))
path = path.lower()
if re.search('/', path):
raise RouteRegisterFailed('route path 中不允许出现 "/" 字符(got: {})'.format(path))
elif path in self.interfaces:
raise RouteRegisterFailed('route path ({}) 已存在,不允许重复添加'.format(path))
def wrapper(interface_or_fn):
if hasattr(interface_or_fn, '__api_libs_interface'):
interface = interface_or_fn
else:
interface = to_interface(parameters, bound)(interface_or_fn)
self.interfaces[path] = interface
return interface
return wrapper
def call(self, path, context_data=None, arguments={}):
'''调用 interface
:arg string path: 要调用的 interface 的 router path
:arg any context_data: 可以是初始化 context 对象所需的数据,也可以直接传入 context 实例。不同类型的 context 需要不同类型的数据
:arg dict arguments: 传给 interface 的参数值'''
context_instance = context_data if isinstance(context_data, self.context_cls) else self.context_cls(self, context_data)
return self._call_with_context(path, context_instance, arguments)
def _call_with_context(self, path, context_instance, arguments={}):
if type(path) != str:
raise RouteCallFailed('route path ({}) 必须是字符串'.format(path))
path = path.lower()
if not isinstance(context_instance, self.context_cls):
raise RouteCallFailed('context 类型错误(expect: {}, got: {})'.format(self.context_cls, context_instance))
if path not in self.interfaces:
raise RouteCallFailed('route "{}" 不存在'.format(path))
return self.interfaces[path](arguments=arguments, context=context_instance)
def change_context(self, context_cls):
'''为当前 router 指定一个新的 context 类型'''
self.context_cls = context_cls
class Context:
'''存放 interface 被调用时的上下文信息,以及提供一些辅助方法'''
def __init__(self, router, context_data=None):
self.router = router
self.data = context_data
def call(self, route_path, arguments={}):
'''调用同一个 router 下的另一个 interface。
新调用的 interface 会接收到和当前一样的 context 对象。'''
return self.router._call_with_context(route_path, self, arguments)
class RouteRegisterFailed(APILibError):
pass
class RouteCallFailed(APILibError):
pass | API-libs | /API_libs-0.1.16-py3-none-any.whl/api_libs/route.py | route.py |
from tornado.web import RequestHandler
import tornado.concurrent
import json
import asyncio
from .. import APILibError
from ..route import Router, Context
__all__ = ['TornadoAdapter']
class TornadoContext(Context):
'''
Attributes:
* req_handler: 与当前 HTTP Request 对应的 `tornado.web.RequestHandler` 实例
'''
def __init__(self, router, req_handler):
self.req_handler = req_handler
super().__init__(router)
def dump_json(result, req_handler):
req_handler.set_header('Content-Type', 'application/json')
return json.dumps(result)
class TornadoAdapter:
'''将 router 与 Tornado app 进行适配。
通过此对象把 HTTP Request 转换成 interface 调用;再把调用结果输出给客户端
Attributes:
* router: 此 adapter 绑定的 router。通过把 router 绑定到 adapter,使其中的 interface 能通过 HTTP Request 进行调用。
* RequestHandler: 经过特殊调整,能够把 HTTP Request 转换成 interface 调用的 RequestHandler。
应把它加入 tornado application 的 handler 列表里,分配一个 url pattern
注意:必须保证 url pattern 中有且只有一个 regex group,代表 route path
例如这样: (r'/api/(.+)', RequestHandler)
此 RequestHandler 只响应 GET 和 POST 请求
adapter 的使用方法见 README.md 中的示例代码
'''
def __init__(self, router=None, output_formatter=dump_json):
'''
:arg router: 指定要把 adapter 绑定到哪个 router。
若未指定此此参数,adapter 会自己创建一个。
注意,adapter 要求与它绑定的 router 的 Context 类型能够接收一个 tornado RequestHandler 实例作为 context data
:arg output_formatter: RequestHandler 会调用此函数对 interface 的返回值进行格式化后,再把得到的内容输出给客户端。
默认是转换成 JSON,你可以自己指定一个函数,来转换成其他格式。
此函数会接收到两个参数: call result 和 RequestHandler 对象。第二个参数用来输出自定义的 HTTP Header
'''
self.output_formatter = output_formatter
self.router = router or Router(TornadoContext)
class AdaptedRequestHandler(RequestHandler):
'''
为了支持异步行为,handler 以 async 函数的方式运行。
不过 interface 并不要求非得是 async 函数,即使是普通函数,handler 也能正常处理。
'''
async def get(handler_self, route_path):
await self.handle_request(handler_self, route_path)
async def post(handler_self, route_path):
await self.handle_request(handler_self, route_path)
self.RequestHandler = AdaptedRequestHandler
def bind_router(self, router):
'''将 adapter 绑定到另一个 router 上
注意,与新的 router 绑定后,原来的 router 中注册的 interfaces,并不会转移到新的 router 里。
如果有需要,请手动进行转移(new_router.interfaces = {path:interface for path, interface in old_router.interfaces.items()})
'''
self.router = router
async def handle_request(self, req_handler, route_path):
'''进行 HTTP Request 与 interface Call 与 JSON Response 之间的转换
HTTP 请求的格式约定
要调用一个 interface,需要三样东西:
1. route path
2. context data
3. arguments
通过 HTTP 请求调用 interface 时,
- route path 通过 URL 指定
- context data 会被设置为当前的 tornado RequestHandler,不需要手动指定
- arguments 通过 query string 或 POST body 指定,详见 `extract_arguments()` 方法
'''
arguments = self.extract_arguments(req_handler)
result = await self.call_interface(req_handler, route_path, arguments)
self.finish_request(req_handler, result)
async def call_interface(self, req_handler, route_path, arguments):
'''这里把对 interface 的调用单独拆分出一个方法,是为了让使用者能方便地对此行为进行扩展
例如在执行调用前进行一些准备操作'''
ret_val = self.router.call(route_path, req_handler, arguments)
if asyncio.iscoroutine(ret_val) or isinstance(ret_val, tornado.concurrent.Future):
ret_val = await ret_val
return ret_val
def finish_request(self, req_handler, result):
output = self.output_formatter(result, req_handler)
req_handler.write(output)
def extract_arguments(self, req_handler):
'''从 HTTP Request 中提取出 arguments
arguments 必须以 JSON 的形式提供。
可以用来提供 values 的渠道有三种,分别对应不同的情况:
1. POST body
把 arguments json 作为 POST body
并将 HTTP Header 中的 Content-Type 设为 application/json
大部分情况下,使用这种模式
2. POST field
在 POST 请求中,创建一个名为 arguments 的 field,把 arguments json 作为它的的值
适用于在同一个请求中,既要提交 argumens 又要上传文件的情况
3. query string
在 URL query string 中指定一个名为 arguments 的项,把 argument json 作为它的值。
适用于没法提交 POST 请求,又必须指定 arguments 的情况,例如向用户的手机或邮箱发送的验证链接。
用这种方式,arguments 的内容不能太长。
如果同时从多种渠道提供了 arguments ,那么只有其中一种会被使用。
之所以强制使用 JSON 的格式,不支持传统的 query string 和 POST form-data,
是因为传统的 form 处理起来问题太多,而且只支持字符串类型;JSON 的数据结构则简单、清晰,类型丰富,可以减少很多麻烦。
'''
raw_arguments = req_handler.get_argument('arguments', default='')
# 这里不能直接用 == 'application/json' 进行判断,因为有些客户端(例如 React Native)会在原 Content-Type 后面加上额外的 ;charset=utf-8 之类的文字。
if raw_arguments == '' and req_handler.request.headers.get('Content-Type', '').startswith('application/json'):
try:
raw_arguments = req_handler.request.body.strip().decode()
except UnicodeDecodeError:
# request body 中包含了无法识别的字符(例如二进制数据)
raise RequestHandleFailed('arguments 中包含非法字符')
if len(raw_arguments):
try:
arguments = json.loads(raw_arguments)
if type(arguments) is not dict:
raise ValueError()
except ValueError:
# Python 3.5 里,json 抛出的异常变成了 JSONDecodeError,不过它貌似是 ValueError 的子类,所以依然可以这样捕获
raise RequestHandleFailed('arguments 格式不合法: ' + raw_arguments)
else:
arguments = {}
return arguments
class RequestHandleFailed(APILibError):
pass | API-libs | /API_libs-0.1.16-py3-none-any.whl/api_libs/adapters/tornado_adapter.py | tornado_adapter.py |
from .Parameter import Parameter, VerifyFailed
import re
import html
__all__ = ['Str']
class Str(Parameter):
rule_order = ['type', 'trim', 'regex', 'not_regex', 'escape']
def spec_defaults(self):
return dict(
super().spec_defaults(),
trim=True,
escape=True
)
def rule_type(self, value):
if type(value) is not str:
raise VerifyFailed('参数 {} 必须是字符串(got {} {})'.format(
self.name, type(value), value))
return value
def rule_trim(self, value):
'''把参数值首尾的空格去掉'''
return value.strip() if self.specs['trim'] else value
def rule_choices(self, value):
if 'choices' in self.specs and value not in self.specs['choices']:
raise VerifyFailed('rule_choices: 参数 {} 只能为以下值 {} (got: {})'.format(
self.name, self.specs['choices'], value))
return value
def rule_regex(self, value):
if 'regex' in self.specs and not re.search(self.specs['regex'], value):
raise VerifyFailed('rule_regex: 参数 {} 不符合格式(got: {})'.format(
self.name, value))
return value
def rule_not_regex(self, value):
if 'not_regex' in self.specs and re.search(self.specs['not_regex'], value):
raise VerifyFailed('rule_not_regex: 参数 {} 不符合格式(got: {})'.format(
self.name, value))
return value
def rule_escape(self, value):
'''转义字符串中的 HTML 字符'''
if self.specs['escape']:
value = html.escape(value)
return value
def rule_min_len(self, value):
'''通过 min_len=n 指定字符串的最小长度'''
if 'min_len' in self.specs and len(value) < self.specs['min_len']:
raise VerifyFailed('参数 {} 的长度不能小于 {} (got: {})'.format(
self.name, self.specs['min_len'], value))
return value
def rule_max_len(self, value):
'''通过 min_len=n 指定字符串的最大长度'''
if 'max_len' in self.specs and len(value) > self.specs['max_len']:
raise VerifyFailed('参数 {} 的长度不能大于 {} (got: {})'.format(
self.name, self.specs['max_len'], value))
return value | API-libs | /API_libs-0.1.16-py3-none-any.whl/api_libs/parameters/str_param.py | str_param.py |
import inspect
from .. import APILibError
class _NoValueCls:
def __repr__(self):
return 'Parameter.NoValue'
NoValue = _NoValueCls()
Remove = NoValue # NoValue 的别名,执行 copy() 时,若要移除一个 rule_spec,用这个名字比较容易理解
class VerifyFailed(APILibError):
pass
class Parameter:
'''
定义一个 interface 参数。
Attributes:
* name: 参数名。用于某些复合参数(如 List)时不用指定,其他情况下必须指定。
若指定了参数名,在 verify() 时,会尝试根据参数名从传进来的数据 dict 中找到需要的参数值;
若未指定参数名,在 verify() 时,会直接把传进来的数据整个作为参数值,也因此 required 和 default rule 都没有意义了。
* specs: 对这个参数的要求(specification)。例如:required=True, nullable=False, max=10
specs 需要以 kwargs 的形式提供,key 部分是规则名,value 部分是具体的要求。
Parameter 与它的各个子类中,都定义了一系列 rule 函数,系统会按照一定顺序调用它们,来完成对参数值的检查和格式化。
rule 函数在接收到参数值后,可以有如下几种行为:
1. 返回任意值,代表参数通过验证,并把参数值设为这个返回值
2. 抛出 VerifyFailed 代表验证失败,抛出的时候可以附带一个失败说明
通过 specs,可以设定这些 rule 的检查规则。
rule 分两种:
一种是在 Parameter 基类中已定义好的核心 rule,称之为 sysrule;
另一种是在每个 Parameter 的子类中定义的 rule,称之为普通 rule。
sysrule 的方法名以 ``sysrule_`` 开头;普通 rule 则以 ``rule_`` 开头。
sysrule 会比普通 rule 先执行。
sysrule 之间的执行顺序通过 sysrule_order 指定,这个顺序是设计好的,一般不需要修改。
普通 rule 之间的执行顺序通过 rule_order 指定。没出现在这个列表中的 rule 会在列表中的 rule 都调用完后,以随机的顺序被调用。
如果当前参数没有被赋值,那么会把 NoValue 传给 rule (没赋值和赋值为 None 完全是两回事,千万不要搞混)。
Parameter 默认只让 sysrule 处理 NoValue 和 None 值,如果 sysrule 都执行完毕后,参数值仍然是 NoValue 或 None,那么整个检查行为到此结束,
把 NoValue / None 作为最终的结果,后面的普通 rule 不再被执行。
这样设计使得普通 rule 里就不用包含处理 NoValue 和 None 的代码了,节省了精力。因为普通的 rule 不太可能会为 NoValue 和 None 准备什么处理逻辑,即使碰到了也顶多是跳过执行而已。
'''
def __init__(self, name=NoValue, **specs):
self.name = name
self.specs = dict(self.spec_defaults(), **specs)
self._normal_rules = self._sorted_normal_rules()
def _sorted_normal_rules(self):
if len(self.rule_order) != len(set(self.rule_order)):
raise Exception('rule_order 不允许出现重复的内容({})'.format(self.rule_order))
methods = inspect.getmembers(self, predicate=inspect.ismethod)
normal_rules = [name[5:] for name, _ in methods if name[:5] == 'rule_']
return self.rule_order + list(
set(normal_rules).difference(set(self.rule_order)))
def __call__(self, *args, **kwargs):
'''
param.copy(...) 的快捷方式
例如:
p.copy(required=False) => p(required=False)
p.copy('new_name', a=b) => p('new_name', a=b)
'''
return self.copy(*args, **kwargs)
def copy(self, name=None, **specs_to_inplace):
'''以当前 Parameter 为基础,复制出一个新的 Parameter
:arg string name: 新 parameter 的名称。若不指定,则沿用原来的名称。
对于一个有名称的 parameter,如果想在 copy 后让它变得没有名称,需要把此参数的值设成 NoValue。
:arg specs_to_inplace: 修改、新增、移除 rule_spec 的值。通过把 value 设置成 NoValue 可以移除指定的 rule_spec。
p1 = Parameter('p1', required=True, default=1)
p2 = p1.copy('p2', default=2, nullable=True, required=NoValue)
# 相当于: Parameter('p2', default=2, nullable=True)
p3 = Parameter() # 无名称的 parameter
p4 = p3.copy('p4') # copy 的同时设置名称
p5 = p4.copy(NoValue) # copy 的同时把名称去掉
'''
if name is None:
name = self.name
specs = self.specs.copy()
for key, value in specs_to_inplace.items():
if value is NoValue:
specs.pop(key)
else:
specs[key] = value
return type(self)(name, **specs)
def verify(self, arguments):
value = arguments.get(self.name, NoValue) if self.name is not NoValue else arguments
for rule_name in self.sysrule_order:
value = getattr(self, 'sysrule_' + rule_name)(value)
if value is not NoValue and value is not None:
for rule_name in self._normal_rules:
value = getattr(self, 'rule_' + rule_name)(value)
return value
# 各 sysrule 的执行顺序
sysrule_order = ['default', 'required', 'nullable']
# 各普通 rule 的执行顺序
rule_order = []
def spec_defaults(self):
'''返回各 specs 的默认值(如果有的话)
子类重写此方法时,不要忘了继承父类里的值。方法:
return dict(super().spec_defaults(), spec_in_child=value))'''
return dict(
required=True,
nullable=False
)
def sysrule_default(self, value):
'''如果某个参数没有被赋值,则给予其一个默认值'''
if value is NoValue and 'default' in self.specs:
value = self.specs['default']
return value
def sysrule_required(self, value):
'''若为 true,则参数必须被赋值(但是不关心它是什么值,即使是 None 也无所谓)'''
if self.specs['required'] and value is NoValue:
raise VerifyFailed('缺少必要参数:{}'.format(self.name))
return value
def sysrule_nullable(self, value):
'''是否允许参数值为 None。
没被赋值的参数它的值自然不是 None,所以可以通过这个 rule 的检查'''
if not self.specs['nullable'] and value is None:
raise VerifyFailed('参数 {} 不允许为 None'.format(self.name))
return value | API-libs | /API_libs-0.1.16-py3-none-any.whl/api_libs/parameters/Parameter.py | Parameter.py |
from .Parameter import Parameter, VerifyFailed
import math
import decimal as dec
__all__ = ['Int', 'Float', 'Decimal']
class Number(Parameter):
'''各数值类型 Parameter 的基类,不建议直接使用'''
def spec_defaults(self):
return dict(
super().spec_defaults(),
nozero=False
)
def rule_min(self, value):
'''通过 min=n 指定最小值'''
if 'min' in self.specs and value < self.specs['min']:
raise VerifyFailed('参数 {} 的值不能小于 {} (got: {})'.format(
self.name, self.specs['min'], value))
return value
def rule_max(self, value):
'''通过 max=n 指定最大值'''
if 'max' in self.specs and value > self.specs['max']:
raise VerifyFailed('参数 {} 的值不能大于 {} (got: {})'.format(
self.name, self.specs['max'], value))
return value
def rule_nozero(self, value):
'''通过 nozero=true/false 指定是否允许等于 0'''
if self.specs['nozero'] and value == 0:
raise VerifyFailed('参数 {} 不能等于 0'.format(self.name))
return value
class Int(Number):
rule_order = ['type']
def rule_type(self, value):
if type(value) is not int or math.isnan(value) or math.isinf(value):
raise VerifyFailed('参数 {} 必须是合法的 int (got: {} {})'.format(
self.name, type(value), value))
return value
class Float(Number):
rule_order = ['type']
def rule_type(self, value):
if type(value) not in [int, float] or math.isnan(value) or math.isinf(value):
raise VerifyFailed('参数 {} 必须是合法的 int 或 float (got: {} {})'.format(
self.name, type(value), value))
# 如果传入的数值是 int,此操作会将其强制转换成 float
return float(value)
class Decimal(Number):
rule_order = ['type']
def rule_type(self, value):
if type(value) is str:
try:
dec_value = dec.Decimal(value)
except dec.InvalidOperation:
raise VerifyFailed('参数 {} 的值({})不符合格式'.format(self.name, value))
elif type(value) is int:
dec_value = dec.Decimal(value)
elif type(value) is float:
# float 在转换成 decimal 前,必须先转换成字符串。不然会有精度损失。例如: Decimal(0.18) 会得到 0.179999...
dec_value = dec.Decimal(str(value))
elif type(value) is dec.Decimal:
dec_value = value
else:
raise VerifyFailed('参数 {} 的原始值必须是 str、int、float、Decimal, got {} {}'.format(
self.name, type(value), value))
if math.isnan(dec_value) or math.isinf(dec_value):
raise VerifyFailed('参数 {} 的值({})不符合格式'.format(self.name, value))
return dec_value | API-libs | /API_libs-0.1.16-py3-none-any.whl/api_libs/parameters/number_param.py | number_param.py |
# API42
I have made a 42 API Python wrapper that lets you make API calls with ease.
> It's still early and doesn't cover all the routes and methods yet — it's a work in progress. Any feedback or ideas are appreciated!
- Support for python 3.6 and above
- Multi-threading for faster requests [BETA]
- Easy to use
- Reliability and stability (requests are automatically resent if they fail due to rate limiting or token expiration)
- No need to manage a token yourself — it is generated automatically and kept track of for you
## Installation
``pip install api42``
## Examples
In this example, we find people searching for a minishell group in Khouribga (campus 16):
<img width="1105" alt="image" src="https://user-images.githubusercontent.com/43254081/220220830-d9ba5048-5a34-4de1-be6d-1495687b72d9.png">
## Methods
```python
# ------------------- GET TOKEN ------------------- #
getToken()
# ------------------- Accreditations ------------------- #
getAllAccreditations(*args)
getAccreditationByID(accreditationId)
# ------------------- Achievements ------------------- #
getAllAchievements(*args)
getAchievementByID(achievementId)
getCursusAchievements(cursusId, *args)
getCampusAchievements(campusId, *args)
getTitlesAchievements(titleId, *args)
# ------------------- Achievements users ------------------- #
getAllAchievementsUsers(*args)
getAchievementsUsersByID(achievementUserId)
# ------------------- Amendments ------------------- #
getAllAmendments(*args)
getAmendmentByID(amendmentId)
getUserAmendments(userId, *args)
getIntershipsAmendments(intershipId, *args)
# ------------------- Announcements ------------------- #
getAllAnnouncements(*args)
getAnnouncementByID(announcementId)
# ------------------- Anti Grav Units ------------------- #
getAllAntiGravUnits(*args)
getAntiGravUnitByID(antiGravUnitId)
# ------------------- Anti Grav Units Users ------------------- #
getAllAntiGravUnitsUsers(*args)
getAntiGravUnitsUsersByID(antiGravUnitsUserId)
getUserAntiGravUnits(userId, *args)
getCampusAntiGravUnits(campusId, *args)
# ------------------- Apps ------------------- #
getAllApps(*args)
getAppByID(appId)
getUserApps(userId, *args)
# ------------------- Attachments ------------------- #
getAllAttachments(*args)
getAttachmentByID(attachmentId)
getProjectSessionsAttachments(projectSessionId, *args)
getProjectAttachments(projectId, *args)
# ------------------- Balances ------------------- #
getAllBalances(*args)
getBalanceByID(balanceId)
getPoolsBalances(poolId, *args)
# ------------------- Bloc deadlines ------------------- #
getAllBlocDeadlines(*args)
getBlocDeadlineByID(blocDeadlineId)
getBlocsBlocDeadlines(blocId, *args)
# ------------------- Blocs ------------------- #
getAllBlocs(*args)
getBlocByID(blocId)
# ------------------- Broadcasts ------------------- #
getCampusBroadcasts(campusId, *args)
# ------------------- campus ------------------- #
getAllCampuses(*args)
getCampusByID(campusId)
getCampusStats(campusId, *args)
# ------------------- Certificates ------------------- #
getAllCertificates(*args)
getCertificateByID(certificateId)
# ------------------- Certificates Users ------------------- #
getAllCertificatesUsers(*args)
getCertificateUserByID(certificateUserId)
getUserCertificates(userId, *args)
getCertificateCertificatesUsers(certificateId, *args)
# ------------------- Closes ------------------- #
getAllCloses(*args)
getCloseByID(closeId)
getUserCloses(userId, *args)
# ------------------- Coalitions ------------------- #
getAllCoalitions(*args)
getCoalitionByID(coalitionId)
getUserCoalitions(userId, *args)
getBlocsCoalitions(blocId, *args)
# ------------------- Coalitions Users ------------------- #
getAllCoalitionsUsers(*args)
getCoalitionUserByID(coalitionUserId)
getUserCoalitionsUsers(userId, *args)
getCoalitionCoalitionsUsers(coalitionId, *args)
# ------------------- Commands ------------------- #
getProductsCommands(productId, *args)
# ------------------- Community Services ------------------- #
getAllCommunityServices(*args)
getCommunityServiceByID(communityServiceId)
getCloseCommunityServices(closeId, *args)
# ------------------- Companies ------------------- #
getAllCompanies(*args)
getCompanyByID(companyId)
getCompanySubscribedUsers(companyId, *args)
getCompanyInternshipsUsers(companyId, *args)
# ------------------- Correction point historics ------------------- #
GetUserCorrectionPointHistorics(userId, *args)
# ------------------- Cursus ------------------- #
getAllCursus(*args)
getCursusByID(cursusId)
# ------------------- Cursus Users ------------------- #
getAllCursusUsers(*args)
getCursusUserByID(cursusUserId)
getCursusCursusUsers(cursusId, *args)
getUserCursusUsers(userId, *args)
# ------------------- Dashes ------------------- #
getAllDashes(*args)
getDashByID(dashId)
# ------------------- Dashes Users ------------------- #
getAllDashesUsers(*args)
getDashesUserByID(dashUserId)
getDashesDashesUsers(dashId, *args)
# ------------------- Endpoints ------------------- #
getAllEndpoints(*args)
getEndpointByID(endpointId)
# ------------------- Evaluations ------------------- #
getAllEvaluations(*args)
getEvaluationByID(evaluationId)
# ------------------- Events ------------------- #
getAllEvents(*args)
getEventByID(eventId)
getCursusEvents(cursusId, *args)
getCampusEvents(campusId, *args)
getUsersEvents(userId, *args)
# ------------------- Events Users ------------------- #
getAllEventsUsers(*args)
getEventsUserByID(eventsUserId)
getUsersEventsUsers(userId, *args)
getEventsEventsUsers(eventId, *args)
# ------------------- Exams ------------------- #
getAllExams(*args)
getExamByID(examId)
getCursusExams(cursusId, *args)
getCampusExams(campusId, *args)
getCampusCursusExams(campusId, cursusId, *args)
getUserExams(userId, *args)
getProjectExams(projectId, *args)
# ------------------- Exams Users ------------------- #
getExamExamsUsers(examId, *args)
# ------------------- Experiences ------------------- #
getAllExperiences(*args)
getCampusExperiences(campusId, *args)
getProjectUserExperiences(projectUserId, *args)
getUserExperiences(userId, *args)
getSkillExperiences(skillId, *args)
getPartnershipUserExperiences(partnershipUserId, *args)
getExperienceByID(experienceId)
# ------------------- Expertises ------------------- #
getAllExpertises(*args)
getExpertiseByID(expertiseId)
# ------------------- Expertises Users ------------------- #
getExpertiseExpertisesUsers(expertiseId, *args)
getUserExpertisesUsers(userId, *args)
getAllExpertisesUsers(*args)
getExpertiseUserByID(expertiseUserId)
# ------------------- Feedbacks ------------------- #
getEventFeedbacks(eventId, *args)
getAllFeedbacks(*args)
getScaleTeamFeedbacks(scaleTeamId, *args)
getEventFeedback(eventId, feedbackId)
getFeedbackByID(feedbackId)
getScaleTeamFeedback(scaleTeamId, feedbackId)
# ------------------- Flags ------------------- #
getAllFlags(*args)
# ------------------- Flashes ------------------- #
getAllFlashes(*args)
getFlashByID(flashId)
# ------------------- Flash Users ------------------- #
getFlashFlashUsers(flashId, *args)
getAllFlashUsers(*args)
getFlashFlashUserByID(flashId, flashUserId)
getFlashUserByID(flashUserId)
# ------------------- Gitlab Users ------------------- #
getUserGitlabUsers(userId, *args)
# ------------------- Groups ------------------- #
getAllGroups(*args)
getUserGroups(userId, *args)
getGroupByID(groupId)
# ------------------- Groups Users ------------------- #
getGroupGroupsUsers(groupId, *args)
getUserGroupsUsers(userId, *args)
getAllGroupsUsers(*args)
getGroupUserByID(groupId, groupsUserId)
getUserGroupByID(userId, groupsUserId)
# ------------------- internships ------------------- #
getAllInternships(*args)
getInternshipByID(internshipId)
# ------------------- Journals ------------------- #
getAllJournals(*args)
# ------------------- Languages ------------------- #
getAllLanguages(*args)
getLanguageByID(languageId)
# ------------------- Languages Users ------------------- #
getUserLanguagesUsers(userId, *args)
getAllLanguagesUsers(*args)
getUserLanguageByID(userId, languageUserId)
getLanguageUserByID(languageUserId)
# ------------------- Levels ------------------- #
getAllLevels(*args)
getCursusLevels(cursusId, *args)
# ------------------- Locations ------------------- #
getAllLocations(*args)
getUserLocations(userId, *args)
getCampusLocations(campusId, *args)
getLocationByID(locationId)
# ------------------- Notes ------------------- #
getAllNotes(*args)
getUserNotes(userId, *args)
getCampusNotes(campusId, *args)
getNoteByID(noteId)
# ------------------- notions ------------------- #
getAllNotions(*args)
getCursusNotions(cursusId, *args)
getTagNotions(tagId, *args)
getNotionByID(notionId)
# ------------------- Offers ------------------- #
getAllOffers(*args)
getOfferByID(offerId)
# ------------------- Offers Users ------------------- #
getAllOffersUsers(*args)
getUserOffersUsers(userId, *args)
getOfferOffersUsers(offerId, *args)
getOfferUserByID(offerUserId)
# ------------------- Params Project sessions rules ------------------- #
getAllParamsProjectSessionsRules(*args)
getParamsProjectSessionsRuleByID(paramsProjectSessionsRuleId)
getProjectSessionsRuleParamsProjectSessionsRules(projectSessionsRuleId, *args)
# ------------------- Partnerships ------------------- #
getAllPartnerships(*args)
getPartnershipByID(partnershipId)
# ------------------- Partnerships Users ------------------- #
getAllPartnershipsUsers(*args)
getPartnershipPartnershipsUsers(partnershipId, *args)
getPartnershipUserByID(partnershipUserId)
# ------------------- Patronages ------------------- #
# ------------------- Patronages reports ------------------- #
# ------------------- Pools ------------------- #
# ------------------- Products ------------------- #
# ------------------- Project Data ------------------- #
# ------------------- Project Sessions ------------------- #
# ------------------- Project Sessions rules ------------------- #
# ------------------- Project Sessions skills ------------------- #
# ------------------- Project ------------------- #
getAllProjects(*args)
getProjectByID(projectId)
getCursusProjects(cursusId, *args)
getProjectProjects(projectId, *args)
getMeProjects(*args)
# ------------------- Project users ------------------- #
getAllProjectsUsers(*args)
getProjectProjectsUsers(projectId, *args)
getUserProjectsUsers(userId, *args)
getProjectUserByID(projectUserId)
# ------------------- Quests ------------------- #
# ------------------- Quests users ------------------- #
# ------------------- Roles ------------------- #
# ------------------- Roles entities ------------------- #
# ------------------- Rules ------------------- #
# ------------------- Scale Teams ------------------- #
getAllScaleTeams(*args)
getScaleTeamByID(scaleTeamId)
getProjectSessionScaleTeams(projectSessionId, *args)
getProjectScaleTeams(projectId, *args)
getUserScaleTeamsAsCorrector(userId, *args)
getUserScaleTeamsAsCorrected(userId, *args)
getMeScaleTeamsAsCorrector(*args)
getMeScaleTeamsAsCorrected(*args)
getMeScaleTeams(*args)
getUserScaleTeams(userId, *args)
# ------------------- Scales ------------------- #
# ------------------- Scores ------------------- #
# ------------------- Skills ------------------- #
# ------------------- Slots ------------------- #
# ------------------- Squads ------------------- #
# ------------------- Subnotions ------------------- #
# ------------------- Tags ------------------- #
# ------------------- Tags users ------------------- #
# ------------------- teams ------------------- #
# ------------------- teams uploads ------------------- #
# ------------------- teams users ------------------- #
# ------------------- titles ------------------- #
# ------------------- titles users ------------------- #
# ------------------- transactions ------------------- #
# ------------------- translations ------------------- #
# ------------------- user candidatures ------------------- #
getAllUserCandidatures(*args)
getUserUserCandidature(userId)
getUserCandidatureByID(userCandidatureId)
# ------------------- users ------------------- #
getUserLocationsStats(userId)
getCoalitionUsers(coalitionId, *args)
getDashUsers(dashId, *args)
getEventUsers(eventId, *args)
getAccreditationUsers(accreditationId, *args)
getTeamUsers(teamId, *args)
getProjectUsers(projectId, *args)
getAllUsers(*args)
getCursusUsers(cursusId, *args)
getCampusUsers(campusId, *args)
getAchievementUsers(achievementId, *args)
getTitleUsers(titleId, *args)
getQuestUsers(questId, *args)
getGroupUsers(groupId, *args)
getUserByID(userId)
getMe()
# ------------------- waitlists ------------------- #
# ------------------- custom -------------------- #
getCustomPaginatedData(url, *args)
getCustomNonPaginatedData(url)
# -------------------------------------------------- #
``` | API42 | /API42-0.2.tar.gz/API42-0.2/README.md | README.md |
# APIAlchemy
API toolkit for Python, modeled after SQLAlchemy
## Installation
```
pip install APIAlchemy
```
## Overview
APIAlchemy provides developers an abstraction layer for consuming web services from the following vendors:
- [AppDynamics](https://docs.appdynamics.com/display/PRO43/Metric+and+Snapshot+API)
- [ExtraHop](https://docs.extrahop.com/7.9/rest-extract-metrics)
- [GitHub](https://developer.github.com/v3)
- [New Relic](https://docs.newrelic.com/docs/apis/nerdgraph/get-started/introduction-new-relic-nerdgraph)
- [Prometheus](https://prometheus.io/docs/prometheus/latest/querying/api)
- [Prometheus-to-Graphite bridge](https://github.com/prometheus/client_python#graphite)
- [Prometheus Pushgateway](https://github.com/prometheus/pushgateway)
- [SolarWinds](https://github.com/solarwinds/OrionSDK/wiki/REST)
- [Splunk](https://docs.splunk.com/Documentation/Splunk/8.0.2/RESTUM/RESTusing)
- [Wavefront](https://github.com/wavefrontHQ/wavefront-sdk-python)
## License
Distributed under the [MIT license](https://opensource.org/licenses/MIT). | APIAlchemy | /APIAlchemy-1.3.1.tar.gz/APIAlchemy-1.3.1/README.md | README.md |
import os
import re
import urllib3
from time import sleep
from urllib import parse
from splunklib.client import connect
from splunklib import results
from .. import BaseService
class Service(BaseService):
application = None
def __init__(self, scheme, conn_str):
super(Service, self).__init__(scheme, conn_str)
pattern = re.compile(
r"""
(?P<username>[^:]+)
:(?P<password>[^@]+)
@(?:
\[(?P<ipv6host>[^/]+)\] |
(?P<ipv4host>[^/:]+)
)
(?::(?P<port>[\d]+))?
(?:/(?P<application>.+))?
""",
re.X
)
m = pattern.match(self._conn_str)
if m is not None:
components = m.groupdict()
components['username'] = parse.unquote(components['username'])
components['password'] = parse.unquote(components['password'])
if self._scheme is not None:
components['scheme'] = self._scheme
ipv4host = components.pop('ipv4host')
ipv6host = components.pop('ipv6host')
components['host'] = ipv4host or ipv6host
port = components.pop('port')
if port is not None:
components['port'] = port
application = components.pop('application')
if application is not None:
self.application = parse.unquote(application)
self._conn_params = components
@property
def client(self):
verify = os.getenv('APIALCHEMY_SPLUNK_SSL_VERIFY', 'true').lower() == 'true'
if not verify:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
return connect(verify=verify, **self._conn_params)
@staticmethod
def search(client, query, **kwargs):
if 'exec_mode' not in kwargs.keys():
kwargs['exec_mode'] = 'blocking'
job = client.search(query, **kwargs)
if kwargs['exec_mode'] == 'normal':
while True:
while not job.is_ready():
pass
if job['isDone'] == '1':
break
sleep(2)
result_count = int(job['resultCount'])
offset = 0
count = int(client.confs['limits']['restapi']['maxresultrows'])
reader = []
while offset < result_count:
kwargs_paginate = {
'count': count,
'offset': offset
}
reader += results.ResultsReader(job.results(**kwargs_paginate))
offset += count
job.cancel()
return reader | APIAlchemy | /APIAlchemy-1.3.1.tar.gz/APIAlchemy-1.3.1/src/apialchemy/vendors/splunk/__init__.py | __init__.py |
import base64
import os
import re
import urllib3
from urllib import parse
from prometheus_api_client import PrometheusConnect, Metric, MetricsList
from .. import BaseService
class Service(BaseService):
def __init__(self, scheme, conn_str):
super(Service, self).__init__(scheme, conn_str)
pattern = re.compile(
r"""
(?:
(?P<username>[^:]+)
:(?P<password>[^@]+)@
)?
(?:
\[(?P<ipv6host>[^/]+)\] |
(?P<ipv4host>[^/:]+)
)
(?::(?P<port>[\d]+))?
""",
re.X
)
m = pattern.match(self._conn_str)
if m is not None:
components = m.groupdict()
username = components.pop('username')
password = components.pop('password')
if username is not None and password is not None:
encoding = os.getenv('APIALCHEMY_PROMETHEUS_AUTH_ENCODING', 'utf-8')
auth_str = '%s:%s' % (parse.unquote(username), parse.unquote(password))
auth_bytes = auth_str.encode(encoding)
base64_bytes = base64.b64encode(auth_bytes)
components['headers'] = {
'Authorization': base64_bytes.decode(encoding)
}
ipv4host = components.pop('ipv4host')
ipv6host = components.pop('ipv6host')
host = ipv4host or ipv6host
if self._scheme is not None:
scheme = self._scheme
else:
scheme = 'http'
components['url'] = scheme + '://' + host
port = components.pop('port')
if port is not None:
components['url'] += ':' + port
self._conn_params = components
@property
def client(self):
verify = os.getenv('APIALCHEMY_PROMETHEUS_SSL_VERIFY', 'true').lower() == 'true'
if not verify:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
return PrometheusConnect(disable_ssl=not verify, **self._conn_params)
@staticmethod
def get_metric_obj(metric_data):
return Metric(metric_data)
@staticmethod
def get_metric_obj_list(metric_data):
return MetricsList(metric_data) | APIAlchemy | /APIAlchemy-1.3.1.tar.gz/APIAlchemy-1.3.1/src/apialchemy/vendors/prometheus/__init__.py | __init__.py |
import json
import sys
from requests.sessions import Session
from requests import Request, codes
from .. import BaseClient
class Client(BaseClient):
_session = None
def __init__(self, **kwargs):
super(Client, self).__init__(**kwargs)
self.apikey = kwargs.get('apikey')
@staticmethod
def _get_full_path(path=None):
path = '/api/v1' + (path or '')
return path
def _get_session(self):
if not self._session:
self._session = Session()
self._session.verify = self.verify
return self._session
def _request(self, **request_args):
s = self._get_session()
req = Request(**request_args)
prepped = s.prepare_request(req)
# Merge environment settings into session
settings = s.merge_environment_settings(prepped.url, {}, None, None, None)
return s.send(prepped, **settings)
def get_metrics(self, **params):
url = self.base_url + self._get_full_path('/metrics')
for k in list(params.keys()):
if params[k] is None:
del params[k]
kwargs = {
'method': 'POST',
'url': url,
'headers': {
'Content-type': 'application/json',
'Accept': 'text/plain',
'Authorization': 'ExtraHop apikey=' + self.apikey
},
'data': json.dumps(params)
}
r = self._request(**kwargs)
if r.status_code != codes.ok:
print(url, file=sys.stderr)
r.raise_for_status()
return r.json()
def get_metrics_by_xid(self, xid):
url = self.base_url + self._get_full_path('/metrics/next/' + str(xid))
kwargs = {
'method': 'GET',
'url': url,
'headers': {
'Content-type': 'application/json',
'Accept': 'text/plain',
'Authorization': 'ExtraHop apikey=' + self.apikey
}
}
r = self._request(**kwargs)
if r.status_code != codes.ok:
print(url, file=sys.stderr)
r.raise_for_status()
return r.json() | APIAlchemy | /APIAlchemy-1.3.1.tar.gz/APIAlchemy-1.3.1/src/apialchemy/vendors/extrahop/client.py | client.py |
import base64
from prometheus_client import delete_from_gateway, push_to_gateway, pushadd_to_gateway
from urllib3 import PoolManager
from .. import BaseClient
class Client(BaseClient):
def __init__(self, **kwargs):
super(Client, self).__init__(**kwargs)
self.username = kwargs.get('username')
self.password = kwargs.get('password')
def _pushgateway_handler(self, url, method, timeout, headers, data):
username = self.username
password = self.password
verify = self.verify
def handle():
if username is not None and password is not None:
auth_value = '{0}:{1}'.format(username, password).encode('utf-8')
auth_token = base64.b64encode(auth_value)
auth_header = b'Basic ' + auth_token
headers.append(['Authorization', auth_header])
cert_reqs = 'CERT_REQUIRED' if verify else 'CERT_NONE'
http = PoolManager(cert_reqs=cert_reqs)
resp = http.request(method, url, headers=dict(headers), body=data, timeout=timeout)
if resp.status >= 400:
raise IOError("error talking to pushgateway: {0} {1}".format(resp.status, resp.reason))
return handle
def delete(self, **kwargs):
if 'handler' in kwargs.keys():
handler = kwargs.pop('handler')
else:
handler = None
if not callable(handler):
handler = self._pushgateway_handler
delete_from_gateway(self.base_url, handler=handler, **kwargs)
def push(self, **kwargs):
if 'handler' in kwargs.keys():
handler = kwargs.pop('handler')
else:
handler = None
if not callable(handler):
handler = self._pushgateway_handler
strict = kwargs.pop('strict') if 'strict' in kwargs.keys() else False
if strict:
pushadd_to_gateway(self.base_url, handler=handler, **kwargs)
else:
push_to_gateway(self.base_url, handler=handler, **kwargs) | APIAlchemy | /APIAlchemy-1.3.1.tar.gz/APIAlchemy-1.3.1/src/apialchemy/vendors/pushgateway/client.py | client.py |
import os
import re
import urllib3
from urllib import parse
from appd.request import AppDynamicsClient
from .. import BaseService
class Service(BaseService):
application = None
def __init__(self, scheme, conn_str):
super(Service, self).__init__(scheme, conn_str)
pattern = re.compile(
r"""
(?P<username>[^@:]+)
(?:@(?P<account>[^:]+))?
:(?P<password>.+)
@(?:
\[(?P<ipv6host>[^/]+)\] |
(?P<ipv4host>[^/:]+)
)
(?::(?P<port>[\d]+))?
(?:/(?P<application>.+))?
""",
re.X
)
m = pattern.match(self._conn_str)
if m is not None:
components = m.groupdict()
components['username'] = parse.unquote(components['username'])
account = components.pop('account')
if account is not None:
components['account'] = parse.unquote(account)
components['password'] = parse.unquote(components['password'])
ipv4host = components.pop('ipv4host')
ipv6host = components.pop('ipv6host')
host = ipv4host or ipv6host
if self._scheme is not None:
scheme = self._scheme
else:
scheme = 'https'
components['base_url'] = scheme + '://' + host
port = components.pop('port')
if port is not None:
components['base_url'] += ':' + port
application = components.pop('application')
if application is not None:
self.application = parse.unquote(application)
self._conn_params = components
@property
def client(self):
verify = os.getenv('APIALCHEMY_APPD_SSL_VERIFY', 'true').lower() == 'true'
if not verify:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
return AppDynamicsClient(verify=verify, **self._conn_params)
@staticmethod
def get_application_component_id(client, application_id, name):
result = client.get_tiers(application_id)
for t in result:
if t.name == name:
return t.id
return None
@staticmethod
def get_application_id(client, name):
for a in client.get_applications():
if a.name == name:
return a.id
return None
@staticmethod
def get_business_transaction_id(client, application_id, name):
result = client.get_bt_list(application_id)
for bt in result:
if bt.name == name:
return bt.id
return None | APIAlchemy | /APIAlchemy-1.3.1.tar.gz/APIAlchemy-1.3.1/src/apialchemy/vendors/appdynamics/__init__.py | __init__.py |
import json
import logging
LOGGER = logging.getLogger(__name__)

class LiveNewsResource :
    """Chainable wrapper around a raw live-news API JSON response.

    Parses the envelope once (``msgID``, ``srvTm``, ``data``) and offers
    helpers that reshape the ``data`` section in place. Every helper
    rewrites ``self._response`` (a JSON string) and returns ``self`` so
    calls can be chained. ``group_type`` selects how ``_getNewsFormatted``
    unwraps the payload ("NG" = flat list, "G" = grouped by category).
    """

    def __init__(self, response : str, group_type : str = None, category_dp_name : str = None) -> None:
        """Parse *response* (a JSON string) and remember the optional hints.

        :param response: raw JSON string returned by the news API; must
            contain the keys ``msgID``, ``srvTm`` and ``data``.
        :param group_type: "NG" or "G"; consumed later by _getNewsFormatted.
        :param category_dp_name: display name(s) of the requested
            category/categories; used only for the "no news" message built
            in _filterCategories.
        """
        LOGGER.debug("LiveNewsResource object is being created.")
        self._response = response
        response_dict = json.loads(self._response)
        self.__messageID = response_dict['msgID']
        self.__serverTime = response_dict['srvTm']
        self.__data = response_dict['data']
        # Fix: assign unconditionally. The previous truthiness-guarded
        # assignments left these attributes undefined when the caller omitted
        # them, so any later access raised AttributeError instead of failing
        # with a meaningful value.
        self.group_type = group_type
        self.__category_dp_name = category_dp_name

    def _getCategoriesFormatted(self) -> 'LiveNewsResource' :
        '''Collect a list of all received categories into data['categories'].'''
        LOGGER.debug("inside getNewsCategoriesFormatted method")
        categories = []
        # Category display names ("dpNm") may appear inside any list-valued
        # entry of the data section; scan them all.
        for key, value in self.__data.items():
            if type(value) is list:
                for item in value:
                    if 'dpNm' in item.keys():
                        categories.append(item['dpNm'])
        formatted_response_dict = { 'data' : {"categories" : categories},
                                    'msgID' : self.__messageID,
                                    'srvTm' : self.__serverTime
                                    }
        self._response = json.dumps(formatted_response_dict)
        return self

    def _getNewsFormatted(self) -> 'LiveNewsResource' :
        """Flatten the raw news payload into a uniform ``data`` layout.

        "NG": unwraps ``listResponse`` into ``data['content']``.
        "G": unwraps ``groupResponse`` keeping one ``newsItems`` list per
        group. Pagination fields (first/last/number/size/totalElements/
        totalPages) are promoted to the top of ``data``; the transient
        ``guid``/``timeText`` keys are dropped from every news item.

        NOTE: ``group_type`` must be "NG" or "G"; any other value leaves
        ``response_type`` unbound and raises NameError below (pre-existing
        behavior, preserved).
        """
        LOGGER.debug("inside _getNewsFormatted method")
        if self.group_type == "NG" :
            response_type = "listResponse"
            news_list = self.__data[response_type]['content']
            newsItems = []
            for news_block in news_list :
                news_block.pop("guid", "KEY_NOT_FOUND")
                news_block.pop("timeText", "KEY_NOT_FOUND")
                newsItems.append(news_block)
            self.__data['content'] = newsItems
        elif self.group_type == "G" :
            response_type = "groupResponse"
            content = []
            for content_block in self.__data[response_type]['content'] :
                newsItems = []
                for news_block in content_block["newsItems"] :
                    news_block.pop("guid", "KEY_NOT_FOUND")
                    news_block.pop("timeText", "KEY_NOT_FOUND")
                    newsItems.append(news_block)
                content_block["newsItems"] = newsItems
                content.append(content_block)
            self.__data["content"] = content
        # Promote pagination metadata, then drop the now-redundant wrappers.
        self.__data["first"] = self.__data[response_type]["first"]
        self.__data["last"] = self.__data[response_type]["last"]
        self.__data["number"] = self.__data[response_type]["number"]
        self.__data["size"] = self.__data[response_type]["size"]
        self.__data["totalElements"] = self.__data[response_type]["totalElements"]
        self.__data["totalPages"] = self.__data[response_type]["totalPages"]
        self.__data.pop("type", "KEY_NOT_FOUND")
        self.__data.pop(response_type, "KEY_NOT_FOUND")
        formatted_resp_dict = {
                                'data' : self.__data,
                                'msgID' : self.__messageID,
                                'srvTm' : self.__serverTime
                            }
        self._response = json.dumps(formatted_resp_dict)
        return self

    def _filterCategories(self, filter_categories : list, exclude : bool = True) -> 'LiveNewsResource' :
        '''
        If `exclude` is False, only the `categories` are exctracted from response.\n
        If `exclude` is True, categories other than the `categories` are extracted from response.
        '''
        LOGGER.debug("inside __filterCategories method")
        response_dict = json.loads(self._response)
        if exclude:
            # Set difference: keep every category not listed in filter_categories.
            # Result order is unspecified (set semantics) -- pre-existing behavior.
            filteredCategoriesList = list(set(response_dict['data']['categories']).difference(set(filter_categories)))
            response_dict['data']['categories'] = filteredCategoriesList
        else :
            # Keep only the news blocks belonging to the requested categories.
            news_list = []
            for news_block in response_dict['data']['content']:
                if news_block["category"] in filter_categories:
                    news_list.append(news_block)
            response_dict['data']['content'] = news_list
            if not news_list:
                # NOTE(review): assumes __category_dp_name was supplied at
                # construction; a None value would make the join below fail.
                response_dict['data']['msg'] = f"There are no news available for '{self.__category_dp_name if type(self.__category_dp_name) is str else ', '.join(self.__category_dp_name)}' on page {response_dict['data']['number']}. Please try in other pages."
        self._response = json.dumps(response_dict)
        return self
import json
import logging
from typing import Any, Callable, List
from APIConnect.validator import Validator
from constants.streaming_constants import StreamingConstants
from feed.feed import Feed
LOGGER = logging.getLogger(__name__)
class ReducedQuotesFeed():
    """Manage subscribe/unsubscribe for the reduced-quote ("quote") stream."""

    @Validator.ValidateInputDataTypes
    def __init__(self, feedObj : Feed) -> None:
        # Shared Feed object that owns the streaming socket.
        self.__feed_obj = feedObj

    @Validator.isRequired(["symbols", "callBack"])
    def subscribeReducedQuotesFeed(self, symbols: List[str], callBack: Callable[[str], Any]) -> None:
        """Subscribe *symbols* to the reduced-quote stream.

        :param symbols: streaming symbols to subscribe.
        :param callBack: invoked with every raw JSON packet received.
        """
        reducedQuote = self.__create_reduced_quote_request(symbols)
        LOGGER.debug("Subscribing Reduced quote feed with request: %s", reducedQuote)
        self.__feed_obj._subscribe(reducedQuote, callBack, StreamingConstants.REDUCED_QUOTE_SREAM_REQ_CODE)

    def unsubscribeReducedQuotesFeed(self) -> None:
        '''
        This method will unsubscribe from the streamer. After successful invokation, this will stop the streamer packets of the symbols subscribed.
        '''
        # Fix: dropped @Validator.isRequired(["symbols"]) -- this method takes
        # no "symbols" parameter (decorator was copy-pasted from subscribe).
        unsub_reduced_quote = self.__create_reduced_quote_request(subscribe = False)
        LOGGER.debug("Unsubscribing reduced quote feed with request: %s", unsub_reduced_quote)
        self.__feed_obj._unsubscribe(unsub_reduced_quote, StreamingConstants.REDUCED_QUOTE_SREAM_REQ_CODE)

    def __create_reduced_quote_request(self, symbols = (), subscribe: bool = True) -> str:
        """Build the newline-terminated JSON request for the "quote" stream.

        :param symbols: iterable of symbols (empty for an unsubscribe request).
        :param subscribe: True builds a "subscribe" request, False "unsubscribe".
        """
        # Immutable default tuple replaces the old mutable [] default argument.
        symset = [{"symbol": syms} for syms in symbols]
        request_type = "subscribe" if subscribe else "unsubscribe"
        req = {
            "request":
                {
                    "streaming_type": "quote",
                    "data":
                        {
                            "accType": "EQ",
                            "symbols": symset
                        },
                    "formFactor": "P",
                    "appID": self.__feed_obj._appID,
                    "response_format": "json",
                    "request_type": request_type
                },
            "echo": {}
        }
        return json.dumps(req) + "\n"
import json
import logging
from typing import Any, Callable, List
from APIConnect.validator import Validator
from constants.streaming_constants import StreamingConstants
from feed.feed import Feed
LOGGER = logging.getLogger(__name__)
class QuotesFeed():
    """Manage subscribe/unsubscribe for the full-quote ("quote3") stream."""

    @Validator.ValidateInputDataTypes
    def __init__(self, feedObj : Feed) -> None:
        # Shared Feed object that owns the streaming socket.
        self.__feed_obj = feedObj

    @Validator.isRequired(["symbols", "callBack"])
    def subscribeQuotesFeed(self, symbols: List[str], callBack: Callable[[str], Any]) -> None:
        """Subscribe *symbols* to the quote stream.

        :param symbols: streaming symbols to subscribe.
        :param callBack: invoked with every raw JSON packet received.
        """
        quote = self.__create_quote_request(symbols)
        LOGGER.debug("Subscribing quote feed with request: %s", quote)
        self.__feed_obj._subscribe(quote, callBack, StreamingConstants.QUOTE_SREAM_REQ_CODE)

    def unsubscribeQuotesFeed(self) -> None:
        '''
        This method will unsubscribe from the streamer. After successful invokation, this will stop the streamer packets of the symbols subscribed.
        '''
        # Fix: dropped @Validator.isRequired(["symbols"]) -- this method takes
        # no "symbols" parameter (decorator was copy-pasted from subscribe).
        unsub_quote = self.__create_quote_request(subscribe = False)
        LOGGER.debug("Unsubscribing quote feed with request: %s", unsub_quote)
        self.__feed_obj._unsubscribe(unsub_quote, StreamingConstants.QUOTE_SREAM_REQ_CODE)

    def __create_quote_request(self, symbols = (), subscribe: bool = True) -> str:
        """Build the newline-terminated JSON request for the "quote3" stream.

        :param symbols: iterable of symbols (empty for an unsubscribe request).
        :param subscribe: True builds a "subscribe" request, False "unsubscribe".
        """
        # Immutable default tuple replaces the old mutable [] default argument.
        symset = [{"symbol": syms} for syms in symbols]
        request_type = "subscribe" if subscribe else "unsubscribe"
        req = {
            "request":
                {
                    "streaming_type": "quote3",
                    "data":
                        {
                            "accType": "EQ",
                            "symbols": symset
                        },
                    "formFactor": "P",
                    "appID": self.__feed_obj._appID,
                    "response_format": "json",
                    "request_type": request_type
                },
            "echo": {}
        }
        return json.dumps(req) + "\n"
import json
import logging
from typing import Any, Callable, List
from APIConnect.validator import Validator
from APIConnect.api_constants import ApiConstants
from constants.streaming_constants import StreamingConstants
from feed.feed import Feed
LOGGER = logging.getLogger(__name__)
class DepthFeed():
    """Manage subscribe/unsubscribe for the market-depth ("quote2") stream."""

    @Validator.ValidateInputDataTypes
    def __init__(self, feedObj : Feed, constantsObj) -> None:
        # Shared Feed object that owns the streaming socket.
        self.__feed_obj = feedObj
        # Session constants; used to check the account type for commodity symbols.
        self.__constants : ApiConstants = constantsObj

    @Validator.isRequired(["symbols", "callBack"])
    def subscribeDepthFeed(self, symbols: List[str], callBack: Callable[[str], Any]) -> None:
        """Subscribe *symbols* to the depth stream.

        Commodity symbols (MCX/NCDEX) are rejected for equity-only accounts;
        in that case no subscription request is sent at all.

        :param symbols: streaming symbols to subscribe.
        :param callBack: invoked with every raw JSON packet received.
        """
        depth = self.__create_depth_request(symbols)
        LOGGER.debug("Subscribing depth feed with request: %s", depth)
        if "account_type_exception" in depth:
            # Equity-only account asked for a commodity symbol; the error was
            # already logged by __create_depth_request.
            return
        self.__feed_obj._subscribe(depth, callBack, StreamingConstants.DEPTH_STREAM_REQ_CODE)

    def unsubscribeDepthFeed(self) -> None:
        '''
        This method will unsubscribe from the streamer. After successful invokation, this will stop the streamer packets of the symbols subscribed.
        '''
        # Fix: dropped @Validator.isRequired(["symbols"]) -- this method takes
        # no "symbols" parameter (decorator was copy-pasted from subscribe).
        unsub_depth = self.__create_depth_request(subscribe = False)
        LOGGER.debug("Unsubscribing depth feed with request: %s", unsub_depth)
        self.__feed_obj._unsubscribe(unsub_depth, StreamingConstants.DEPTH_STREAM_REQ_CODE)

    def __create_depth_request(self, symbols = (), subscribe: bool = True) -> str:
        """Build the newline-terminated JSON request for the "quote2" stream,
        or an ``account_type_exception`` payload when an equity-only session
        requests a commodity (MCX/NCDEX) symbol.

        :param symbols: iterable of symbols (empty for an unsubscribe request).
        :param subscribe: True builds a "subscribe" request, False "unsubscribe".
        """
        # Immutable default tuple replaces the old mutable [] default argument.
        symset = []
        for syms in symbols:
            # Commodity segments need a commodity-enabled account; "EQ"
            # (equity-only) sessions cannot stream MCX/NCDEX depth.
            if ("_MCX" in syms.upper() or "_NCDEX" in syms.upper()) and self.__constants.Data['data']['lgnData']['accTyp'] == 'EQ':
                err = {"account_type_exception": "Symbol subscription error"}
                LOGGER.info(json.dumps(err))
                return json.dumps(err) + "\n"
            symset.append({"symbol": syms})
        request_type = "subscribe" if subscribe else "unsubscribe"
        req = {
            "request":
                {
                    "streaming_type": "quote2",
                    "data":
                        {
                            "accType": "EQ",
                            "symbols": symset
                        },
                    "formFactor": "P",
                    "appID": self.__feed_obj._appID,
                    "response_format": "json",
                    "request_type": request_type
                },
            "echo": {}
        }
        return json.dumps(req) + "\n"
from cgitb import Hook
import json
import logging
import socket
from threading import Thread
from time import sleep
from typing import Any, Callable
from constants.streaming_constants import StreamingConstants
LOGGER = logging.getLogger(__name__)
class Feed:
    """Owns the TCP streaming connection and multiplexes feed subscriptions.

    Active subscriptions are cached in ``__requestsList`` keyed by their
    StreamingConstants request code so they can be replayed after a
    reconnect. Connection setup runs on a background thread; a reader
    thread dispatches every incoming JSON line to the callback registered
    for its ``streaming_type``.
    """

    # Maps the server's "streaming_type" field to the request code a
    # subscription (and its callback) was registered under.
    _STREAM_TYPE_TO_REQ_CODE = {
        "quote3": StreamingConstants.QUOTE_SREAM_REQ_CODE,
        "quote": StreamingConstants.REDUCED_QUOTE_SREAM_REQ_CODE,
        "orderFiler": StreamingConstants.ORDER_STREAM_REQ_CODE,
        "news": StreamingConstants.LIVENEWS_STREAM_REQ_CODE,
        "quote2": StreamingConstants.DEPTH_STREAM_REQ_CODE,
        "miniquote": StreamingConstants.MINI_QUOTE_STREAM_REQ_CODE,
    }

    def __init__(self, confObj):
        """Initialise connection parameters and start connecting in background.

        :param confObj: optional config mapping; ``GLOBAL.AppIdKey``,
            ``STREAM.HOST`` and ``STREAM.PORT`` override the bundled defaults.
        """
        self.__conf = confObj
        # NOTE(review): bundled default endpoint and app-id JWT; the token
        # carries an expiry claim, so production use should override it via
        # confObj['GLOBAL']['AppIdKey'].
        AppIdKey = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhcHAiOjAsImZmIjoiVyIsImJkIjoid2ViLXBjIiwibmJmIjoxNjkwNTQxNjQ3LCJzcmMiOiJlbXRtdyIsImF2IjoiMi4wLjMiLCJhcHBpZCI6IjFmZWZjY2Y2YmQzYzllNjFkMWNlYTFlMDY2ZWJlMDg1IiwiaXNzIjoiZW10IiwiZXhwIjoxNjkwNTY5MDAwLCJpYXQiOjE2OTA1NDE5NDd9.Cl5oqCYl4Yx4fiC_oWSsDzIe-8vgkPqWqYAP5XuLtps"
        Host="ncst.nuvamawealth.com"
        Port=9443
        self._appID = AppIdKey
        self.__host = Host
        self.__port = Port
        if self.__conf:
            if self.__conf['GLOBAL'].get('AppIdKey'):
                self._appID = self.__conf['GLOBAL'].get('AppIdKey')
            if self.__conf['STREAM'].get('HOST'):
                self.__host = self.__conf['STREAM'].get('HOST')
            if self.__conf['STREAM'].get('PORT'):
                self.__port = int(self.__conf['STREAM'].get('PORT'))
        self._sock = None        # raw TCP socket
        self._socket_fs = None   # text-mode file wrapper over the socket
        # req code -> {'request': str, 'callback': callable}
        self.__requestsList = {}
        # Connect asynchronously so construction never blocks on the network.
        t = Thread(target=self.__do_connection)
        t.start()

    def _subscribe(self, request : str, callback : Callable[[str], Any], requestCode : StreamingConstants):
        """Remember *request*/*callback* under *requestCode* and send it."""
        self.__requestsList[requestCode] = {'request' : request, 'callback' : callback}
        self.__sub(requestCode)

    def _unsubscribe(self, request : str, requestCode : StreamingConstants):
        """Send the unsubscribe *request* and forget the stored subscription."""
        if self.__is_connection_alive():
            self.__send_stream_request(request)
        else :
            self.__do_connection()
            self.__send_stream_request(request)
        self.__requestsList.pop(requestCode, "Key not found")

    def __sub(self, action):
        """Send stored subscription(s).

        ``'all'`` replays every stored request (used after reconnect);
        a StreamingConstants value sends only that subscription.
        """
        if self.__is_connection_alive():
            if action == 'all':
                for req_code in self.__requestsList.keys():
                    self.__start_streaming(self.__requestsList[req_code]['request'])
                    # Brief gap so the server processes each request separately.
                    sleep(0.1)
            elif type(action) is StreamingConstants:
                self.__start_streaming(self.__requestsList[action]['request'])
        else:
            self.__do_connection()
            self.__sub(action)

    def __start_streaming(self, sendRequest : str):
        """Send *sendRequest* and spawn a reader thread for the responses."""
        self.__send_stream_request(sendRequest)
        t_read = Thread(target = self.__read_stream_data)
        t_read.start()

    def __send_stream_request(self, request : str):
        """Write one newline-terminated request to the socket and flush."""
        self._socket_fs.writelines(request)
        self._socket_fs.flush()

    def __read_stream_data(self):
        """Reader loop: dispatch each JSON line to its subscriber's callback.

        Exits the loop (and then reconnects + resubscribes everything) when
        the socket returns an empty read, i.e. the connection was closed.
        """
        while True:
            resp = self._socket_fs.readline()
            if resp:
                LOGGER.debug(f"Response recevied : {resp}")
                try:
                    resp_dict = json.loads(resp)
                except json.JSONDecodeError:
                    # Non-JSON keep-alive/echo lines are ignored (pre-existing
                    # best-effort behavior).
                    continue
                # Fix: the old dispatch mixed `if`/`elif` chains and indexed
                # __requestsList directly, so an unknown streaming_type or a
                # packet arriving after an unsubscribe raised NameError/KeyError
                # and silently killed this reader thread before the reconnect
                # logic below could run.
                response_block = resp_dict.get('response') if isinstance(resp_dict, dict) else None
                stream_type = response_block.get('streaming_type') if isinstance(response_block, dict) else None
                entry = self.__requestsList.get(self._STREAM_TYPE_TO_REQ_CODE.get(stream_type))
                if entry is None:
                    LOGGER.debug("No subscriber registered for packet: %s", resp)
                    continue
                entry['callback'](resp)
            else:
                LOGGER.error("Response Blank. Socket Connection seems to be closed. Trying to reconnect...")
                break
        self.__sub(action = "all")

    def __is_connection_alive(self) -> bool:
        """Return True when both the socket and its file wrapper are open."""
        alive = False
        status = f"Socket is null : {self._sock is None}, socket file stream is null : {self._socket_fs is None}, "
        if (self._sock is not None) and (self._socket_fs is not None) :
            LOGGER.debug(status + f"Socket is closed : {self._sock._closed}, socket file stream is closed : {self._socket_fs.closed}")
            if (not self._sock._closed) and (not self._socket_fs.closed):
                alive = True
        return alive

    def __do_connection(self):
        ''' Create connection; if it fails inititate retry logic '''
        try :
            self.__create_connection()
        except OSError:
            self.__retry_connection()

    def __create_connection(self):
        """Open the TCP socket and wrap it in a read/write text stream.

        Raises OSError when the connection cannot be established.
        """
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.settimeout(100)
        self._sock.connect((self.__host, self.__port))  # raises OSError
        self._sock.setblocking(True)
        self._socket_fs = self._sock.makefile('rw')
        LOGGER.info("Connection established with subscriber.")

    def __retry_connection(self):
        """Retry the connection with a capped exponential backoff.

        ~17000 attempts at up to 5s delay covers roughly 24 hours; the final
        attempt re-raises the OSError after closing the socket.
        """
        times = 17000  # ~17000 for ~24 hours with delay of 5 seconds
        initalDelay = 1  # seconds
        maxDelay = 5  # seconds
        delayFactor = 2.0
        currentDelay = initalDelay
        for currentTry in range(times, 0, -1):
            try :
                self.__create_connection()
            except OSError as e:
                LOGGER.error(f"Error : {e}. Failed to establish connection with the streaming socket. Retrying socket connection... Max tries left {currentTry}")
                sleep(currentDelay)
                currentDelay = currentDelay*delayFactor if currentDelay*delayFactor < maxDelay else maxDelay
            else:
                break
        else:
            # last attempt
            try :
                self.__create_connection()
            except OSError as e:
                LOGGER.error(f"Failed to connect with streaming socket after {times} unsuccessful retry attempts. Error : {e}")
                self._sock.close()
                raise e
import json
import logging
from typing import Any, Callable, List
from APIConnect.validator import Validator
from constants.streaming_constants import StreamingConstants
from feed.feed import Feed
LOGGER = logging.getLogger(__name__)
class MiniQuoteFeed():
    """Manage subscribe/unsubscribe for the mini-quote ("miniquote") stream."""

    @Validator.ValidateInputDataTypes
    def __init__(self, feedObj : Feed) -> None:
        # Shared Feed object that owns the streaming socket.
        self.__feed_obj = feedObj

    @Validator.isRequired(["symbols", "callBack"])
    def subscribeMiniQuoteFeed(self, symbols: List[str], callBack: Callable[[str], Any]) -> None:
        """Subscribe *symbols* to the mini-quote stream.

        :param symbols: streaming symbols to subscribe.
        :param callBack: invoked with every raw JSON packet received.
        """
        miniQuote = self.__create_miniQuote_request(symbols)
        LOGGER.debug("Subscribing miniQuote feed with request: %s", miniQuote)
        self.__feed_obj._subscribe(miniQuote, callBack, StreamingConstants.MINI_QUOTE_STREAM_REQ_CODE)

    def unsubscribeMiniQuoteFeed(self) -> None:
        '''
        This method will unsubscribe from the streamer. After successful invokation, this will stop the streamer packets of the symbols subscribed.
        '''
        # Fix: dropped @Validator.isRequired(["symbols"]) -- this method takes
        # no "symbols" parameter (decorator was copy-pasted from subscribe).
        unsub_miniQuote = self.__create_miniQuote_request(subscribe = False)
        LOGGER.debug("Unsubscribing miniQuote feed with request: %s", unsub_miniQuote)
        self.__feed_obj._unsubscribe(unsub_miniQuote, StreamingConstants.MINI_QUOTE_STREAM_REQ_CODE)

    def __create_miniQuote_request(self, symbols = (), subscribe: bool = True) -> str:
        """Build the newline-terminated JSON request for the "miniquote" stream.

        :param symbols: iterable of symbols (empty for an unsubscribe request).
        :param subscribe: True builds a "subscribe" request, False "unsubscribe".
        """
        # Immutable default tuple replaces the old mutable [] default argument.
        symset = [{"symbol": syms} for syms in symbols]
        request_type = "subscribe" if subscribe else "unsubscribe"
        req = {
            "request":
                {
                    "streaming_type": "miniquote",
                    "data":
                        {
                            "accType": "EQ",
                            "symbols": symset
                        },
                    "formFactor": "P",
                    "appID": self.__feed_obj._appID,
                    "response_format": "json",
                    "request_type": request_type
                },
            "echo": {}
        }
        return json.dumps(req) + "\n"
import json
import logging
from typing import Any, Callable
from APIConnect.validator import Validator
from constants.streaming_constants import StreamingConstants
from feed.feed import Feed
LOGGER = logging.getLogger(__name__)
class OrdersFeed():
    """Manage subscribe/unsubscribe for order/trade updates ("orderFiler")."""

    def __init__(self, feedObj : Feed, acc_id : str, user_id : str) -> None:
        # Account and user identifiers echoed in every stream request.
        self.acc_id = acc_id
        self.user_id = user_id
        # Shared Feed object that owns the streaming socket.
        self.__feed_obj = feedObj

    @Validator.isRequired(["callBack"])
    def subscribeOrdersFeed(self, callBack: Callable[[str], Any]) -> None:
        """Subscribe to ORDER_UPDATE and TRADE_UPDATE packets for this account.

        :param callBack: invoked with every raw JSON packet received.
        """
        order_req = self.__create_order_request()
        LOGGER.debug(f"Subscribing orders feed with request: {order_req}")
        self.__feed_obj._subscribe(order_req, callBack, StreamingConstants.ORDER_STREAM_REQ_CODE)

    def unsubscribeOrdersFeed(self) -> None:
        """Stop receiving order/trade update packets for this account."""
        unsub_order_req = self.__create_order_request(subscribe=False)
        LOGGER.debug(
            f"Unsubscribing orders feed with request: {unsub_order_req}")
        self.__feed_obj._unsubscribe(unsub_order_req, StreamingConstants.ORDER_STREAM_REQ_CODE)

    def __create_order_request(self, subscribe: bool = True) -> str:
        """Build the newline-terminated JSON request for the order stream.

        :param subscribe: True builds a "subscribe" request, False "unsubscribe".
        """
        req_type = "subscribe" if subscribe else "unsubscribe"
        req = {
            "request":
                {
                    "streaming_type": "orderFiler",
                    "data":
                        {
                            "accType": "EQ",
                            "userID": self.user_id,
                            "accID": self.acc_id,
                            "responseType": ["ORDER_UPDATE", "TRADE_UPDATE"]
                        },
                    "formFactor": "P",
                    "appID": self.__feed_obj._appID,
                    "response_format": "json",
                    "request_type": req_type,
                },
            "echo": {}
        }
        return json.dumps(req) + "\n"
import json
import logging
from typing import Tuple, Union
from APIConnect.http import Http
from APIConnect.validator import Validator
from constants.results_stocks_news_category import ResultsAndStocksNewsCategoryEnum
from constants.router import Router
from exceptions.api_exception import APIError
from exceptions.validation_exception import ValidationException
from resources.live_news_resource import LiveNewsResource
LOGGER = logging.getLogger(__name__)
class LiveNewsService():
    """Fetches live-news categories and news items (general, holdings, results)."""

    def __init__(self, routerObj, httpObj, fileName) -> None:
        LOGGER.debug("LiveNewsService object is being created.")
        self.__routerObj : Router = routerObj
        self.__http : Http = httpObj
        # Session file used to cache the categories API response.
        self.__fileName = fileName
        # Categories served by dedicated endpoints, excluded from the generic list.
        self.__excludeCategories = ['Results', 'Stocks in News', 'My Holdings']
        self.__allCategoriesDataDict = {}
        self.__validCategoriesList = []

    def _getNewsCategories(self) -> str :
        """Return all valid (non-excluded) news categories, formatted."""
        LOGGER.debug("inside _getNewsCategories method")
        all_categories_resp = self.__getAllNewsCategories()
        if all_categories_resp != "":
            filtered_response = LiveNewsResource(all_categories_resp)._getCategoriesFormatted()._filterCategories(self.__excludeCategories)._response
            self.__validCategoriesList = json.loads(filtered_response)['data']['categories']
            return filtered_response
        else:
            return all_categories_resp

    @Validator.isRequired(['category'])
    def _getGeneralNews(self, category : str, searchText : str = None, pageNumber : int = None) -> str :
        """Return general news for *category*, optionally filtered/paginated."""
        LOGGER.debug("inside _getGeneralNews method")
        response = ""
        self._getNewsCategories()
        if self.__validCategoriesList:
            specific_cat_dict, special = self.__getSpecificCategoryData(category)
            if specific_cat_dict :
                request_body = {
                    "exclCategory": [] if special else specific_cat_dict["exc"],
                    "inclCategory": [specific_cat_dict["cat"]] if special else specific_cat_dict["inc"],
                    "validRequest": None if special else specific_cat_dict["lgrq"],
                    "page": pageNumber,
                    "group": specific_cat_dict['uiTyp'],
                    "searchText": searchText
                }
                response = self.__getGeneralNewsAPI(request_body)
                if response != '""':
                    response = LiveNewsResource(response, specific_cat_dict['uiTyp'])._getNewsFormatted()._response
        return response

    def _getHoldingsNews(self, category : Union[str, list] = None, searchText : str = None, pageNumber : int = None) -> str :
        """Return news for the user's holdings, optionally filtered by category."""
        LOGGER.debug("inside _getHoldingsNews method")
        response = ""
        category_list = []
        if category:
            category_list.extend(category) if type(category) is list else category_list.append(category)
        self._getNewsCategories()
        if self.__validCategoriesList:
            holdings_cat_data, _ = self.__getSpecificCategoryData("My Holdings", True)
            request_body = {
                "exclCategory": holdings_cat_data["exc"],
                "inclCategory": holdings_cat_data["inc"],
                "validRequest": holdings_cat_data["lgrq"],
                "group": holdings_cat_data["uiTyp"],
                "page": pageNumber,
                "searchText": searchText
            }
            response = self.__getHoldingsNewsAPI(request_body)
            if response != '""':
                response = LiveNewsResource(response, holdings_cat_data["uiTyp"])._getNewsFormatted()._response
                if category and category != "All" :  # if category is All, no filtering required
                    # When every Results/Stocks category was requested, treat them all as valid.
                    if sorted(category) == sorted([e.value for e in ResultsAndStocksNewsCategoryEnum]):
                        self.__validCategoriesList = [e.value for e in ResultsAndStocksNewsCategoryEnum]
                    filter_categories = []
                    for cat in category_list :
                        cat_dict, special = self.__getSpecificCategoryData(cat)
                        filter_categories.append(cat_dict["cat"]) if special else filter_categories.extend(cat_dict["inc"])
                    response = LiveNewsResource(response, category_dp_name=category)._filterCategories(filter_categories, exclude=False)._response
        return response

    def _getResultsAndStocksNews(self, searchText : str = None, pageNumber : int = None) -> str :
        """Return combined 'Results' and 'Stocks in News' items."""
        LOGGER.debug("inside _getResultsAndStocksNews method")
        response = ""
        request_body = {
            "exclCategory": [],
            "inclCategory": ["Result", "STOCK_IN_NEWS"],
            "validRequest": False,
            "page": pageNumber,
            "group": "G",
            "searchText": searchText
        }
        response = self.__getGeneralNewsAPI(request_body)
        if response != '""':
            response = LiveNewsResource(response, "G")._getNewsFormatted()._response
        return response

    def __getSpecificCategoryData(self, category : str, holdings : bool = False) -> Tuple[dict, bool]:
        """Return ``(category_dict, special)`` for *category*.

        *special* is True when the match comes from the 'ctLst' bucket.
        Bug fix: the previous implementation attached ``else`` to the *inner*
        loop, raising ValidationException as soon as any single list lacked
        the category — even after a successful match in an earlier list. The
        exception is now raised only if no list contains the category.
        """
        LOGGER.debug("inside __getSpecificCategoryData method")
        cat_data = {}
        special = False
        if self.__allCategoriesDataDict :
            if holdings or category in self.__validCategoriesList :
                found = False
                for key, value in self.__allCategoriesDataDict['data'].items():
                    if type(value) is list:
                        for cat_dict in value:
                            if 'dpNm' in cat_dict.keys() and cat_dict['dpNm'] == category:
                                if key == "ctLst":
                                    special = True
                                cat_data = cat_dict
                                found = True
                                break
                    if found:
                        break
                if not found:
                    raise ValidationException(f"'{category}' is not a valid category. Please call getNewsCategories function to retrieve all valid categories.")
        return cat_data, special

    def __getAllNewsCategories(self) -> str:
        '''method to either read all categories from user session file or get all categories from categories API'''
        LOGGER.debug("inside __getAllNewsCategories method")
        try:
            with open(self.__fileName, 'r+') as fs:
                read = fs.read()
                user_data_dict = json.loads(read)
                # if categories saved in data_ApiKey.txt file
                if 'newsCategories' in user_data_dict.keys():
                    LOGGER.debug(f"Reading categories response from user data file." )
                    self.__allCategoriesDataDict = user_data_dict['newsCategories']
                    newsCategoriesResponse = json.dumps(self.__allCategoriesDataDict)
                # if categories not saved in file, call api and save response in data_ApiKey.txt
                else :
                    newsCategoriesResponse = self.__getAllNewsCategoriesAPI()
                    if newsCategoriesResponse != "":
                        self.__allCategoriesDataDict = json.loads(newsCategoriesResponse)
                        user_data_dict['newsCategories'] = self.__allCategoriesDataDict
                        # deleting contents of data_ApiKey.txt and writing categories-appended-data
                        fs.seek(0)
                        fs.truncate()
                        fs.write(json.dumps(user_data_dict))
                        LOGGER.debug("Categories response saved to user data file.")
                return newsCategoriesResponse
        except FileNotFoundError:
            LOGGER.error(f"Session file {self.__fileName} not found. Kindly login again.")
            raise APIError("Session file not found. Kindly login again.")
        except OSError as e:
            LOGGER.error(f"Error in reading/writing {self.__fileName} : {e}")
            raise e

    def __getAllNewsCategoriesAPI(self) -> str:
        """Fetch the raw categories payload from the content API."""
        LOGGER.debug("inside __getAllNewsCategoriesAPI method")
        url = self.__routerObj._LiveNewsCategoriesURL()
        reply = self.__http._GetMethod(url)
        LOGGER.debug(f"Response received: {reply}")
        return json.dumps(reply)

    def __getGeneralNewsAPI(self, request_body) -> str:
        """POST *request_body* to the general-news endpoint."""
        LOGGER.debug("inside __getGeneralNewsAPI method")
        url = self.__routerObj._GeneralNewsURL()
        body = json.dumps(request_body)
        LOGGER.debug("__getGeneralNewsAPI method is called with data: %s", body)
        reply = self.__http._PostMethod(url, body)
        LOGGER.debug(f"Response received: {reply}")
        return json.dumps(reply)

    def __getHoldingsNewsAPI(self, request_body ) -> str:
        """POST *request_body* to the holdings-news endpoint."""
        LOGGER.debug("inside __getHoldingsNewsAPI method")
        url = self.__routerObj._HoldingsNewsURL()
        body = json.dumps(request_body)
        LOGGER.debug("__getHoldingsNewsAPI method is called with data: %s", body)
        reply = self.__http._PostMethod(url, body)
        LOGGER.debug(f"Response received: {reply}")
        return json.dumps(reply)

    def _getLatestCorpActionsAPI(self, symbol : str) -> str:
        """Fetch the latest corporate actions for *symbol*."""
        LOGGER.debug("inside __getLatestCorpActionsAPI method")
        url = self.__routerObj._LatestCorpActionsURL().format(symbol = symbol)
        reply = self.__http._GetMethod(url)
        LOGGER.debug(f"Response received: {reply}")
        return json.dumps(reply)
import json
import logging
import sys
import time
from typing import List
from APIConnect.api_constants import ApiConstants
from APIConnect.http import Http
from constants.router import Router
from resources.watchlist_resource import WatchlistResource
LOGGER = logging.getLogger(__name__)
class WatchlistService:
    """Watchlist (scrip group) management: list, create, modify, rename, delete.

    Consolidation: six copy-pasted private API wrappers (one per endpoint,
    differing only in HTTP verb) are replaced by ``__callAPI``; the repeated
    response-formatting tail is replaced by ``__formatResponse``.
    """

    def __init__(self, routerObj, httpObj, constantsObj) -> None:
        LOGGER.debug("WatchlistService object is being created.")
        self.__routerObj : Router = routerObj
        self.__http : Http = httpObj
        self.__constants : ApiConstants = constantsObj
        self.__accId = None
        self.__accType = None
        self.__profileId = None

    def _getWatchlistGroups(self) -> str :
        """Return all watchlist groups for the logged-in account."""
        LOGGER.debug("inside _getWatchlistGroups method")
        self.__getUserAccData()
        url = self.__routerObj._WatchlistBaseGroupsURL()
        queryParams = {"accId" : self.__accId, "accTyp" : self.__accType}
        response = self.__callAPI(self.__http._GetMethod, url, queryParams)
        return self.__formatResponse(response)

    def _getScripsOfGroup(self, GroupName : str) -> str :
        """Return the scrips contained in *GroupName*."""
        LOGGER.debug("inside _getScripsOfGroup method")
        self.__getUserAccData()
        url = self.__routerObj._WatchlistGetScripsURL()
        queryParams = {"accId" : self.__accId, "accTyp" : self.__accType, "prfId" : self.__profileId, "grpNm" : GroupName}
        response = self.__callAPI(self.__http._GetMethod, url, queryParams)
        return self.__formatResponse(response)

    def _createGroup(self, GroupName : str, Symbols : List[str]) -> str :
        """Create a new group containing *Symbols*."""
        LOGGER.debug("inside _createGroup method")
        self.__getUserAccData()
        url = self.__routerObj._WatchlistGroupNameURL().format(groupName = GroupName)
        request_body = {
            "accId": self.__accId,
            "accTyp": self.__accType,
            "prfId": self.__profileId,
            "grpNm": GroupName,
            "symLst": Symbols
        }
        LOGGER.debug(f"_createGroup method is called with data : {request_body}")
        response = self.__callAPI(self.__http._PostMethod, url, json.dumps(request_body))
        return self.__formatResponse(response)

    def _addSymbols(self, GroupName : str, Symbols : List[str]) -> str :
        """Add *Symbols* to an existing group."""
        LOGGER.debug("inside _addSymbol method")
        self.__getUserAccData()
        currentUnixTimeStamp = int(time.time()*1000)
        url = self.__routerObj._WatchlistGroupNameURL().format(groupName = GroupName)
        request_body = {
            "accId": self.__accId,
            "accTyp": self.__accType,
            "act": "add",
            "grpNm": GroupName,
            "symLst": Symbols,
            "updatedOn" : currentUnixTimeStamp
        }
        LOGGER.debug(f"_addSymbol method is called with data : {request_body}")
        response = self.__callAPI(self.__http._PutMethod, url, json.dumps(request_body))
        return self.__formatResponse(response)

    def _deleteSymbols(self, GroupName : str, Symbols : List[str]) -> str :
        """Remove *Symbols* from an existing group."""
        LOGGER.debug("inside _deleteSymbols method")
        self.__getUserAccData()
        currentUnixTimeStamp = int(time.time()*1000)
        url = self.__routerObj._WatchlistGroupNameURL().format(groupName = GroupName)
        request_body = {
            "accId": self.__accId,
            "accTyp": self.__accType,
            "act": "del",
            "grpNm": GroupName,
            "symLst": Symbols,
            "updatedOn" : currentUnixTimeStamp
        }
        LOGGER.debug(f"_deleteSymbols method is called with data : {request_body}")
        response = self.__callAPI(self.__http._PutMethod, url, json.dumps(request_body))
        return self.__formatResponse(response)

    def _deleteGroups(self, GroupNames : List[str]) -> str :
        """Delete one or more groups."""
        LOGGER.debug("inside _deleteGroups method")
        self.__getUserAccData()
        url = self.__routerObj._WatchlistBaseGroupsURL()
        request_body = {
            "accId": self.__accId,
            "accTyp": self.__accType,
            "prfId": self.__profileId,
            "delGrp" : GroupNames
        }
        LOGGER.debug(f"_deleteGroups method is called with data : {request_body}")
        response = self.__callAPI(self.__http._DeleteMethod, url, json.dumps(request_body))
        return self.__formatResponse(response)

    def _renameGroup(self, GroupName : str, NewGroupName : str) -> str :
        """Rename a group, preserving its existing symbols."""
        LOGGER.debug("inside _renameGroup method")
        oldSymbols = None
        oldGroupSymbolsResponse = self._getScripsOfGroup(GroupName)
        if oldGroupSymbolsResponse :
            oldSymRespDict = json.loads(oldGroupSymbolsResponse)
            symList = oldSymRespDict.get('data').get('syLst')
            if symList:
                oldSymbols = [scrip.get('sym') for scrip in symList if scrip.get('sym')]
        if not oldSymbols:
            # NOTE(review): raising SystemExit from library code is unusual and
            # non-recoverable for callers — confirm whether APIError is preferable.
            LOGGER.error("Failed to retrieve old group data. Please try again.")
            print("Failed to retrieve old group data. Please try again.")
            raise SystemExit(1)
        currentUnixTimeStamp = int(time.time()*1000)
        url = self.__routerObj._WatchlistGroupNameURL().format(groupName = GroupName)
        request_body = {
            "accId": self.__accId,
            "accTyp": self.__accType,
            "act": "modify",
            "grpNm": GroupName,
            "newGrpNm": NewGroupName,
            "symLst": oldSymbols,
            "updatedOn" : currentUnixTimeStamp
        }
        LOGGER.debug(f"_renameGroup method is called with data : {request_body}")
        response = self.__callAPI(self.__http._PutMethod, url, json.dumps(request_body))
        return self.__formatResponse(response)

    def __getUserAccData(self) :
        """Resolve account id/type (EQ/COMEQ takes precedence over CO) and profile id."""
        # if user has eq acc id (acc type is EQ or COMEQ)
        if self.__constants.eqAccId :
            self.__accId = self.__constants.eqAccId
            self.__accType = 'EQ'
        # if user has co acc id (acc type is CO)
        elif self.__constants.coAccId :
            self.__accId = self.__constants.coAccId
            self.__accType = 'CO'
        self.__profileId = self.__constants.ProfileId

    def __callAPI(self, httpMethod, url : str, payload = None) -> str:
        """Invoke *httpMethod* (a verb helper of Http) and JSON-encode a truthy reply."""
        LOGGER.debug(f"inside __callAPI method for {url}")
        reply = httpMethod(url, payload)
        if reply :
            reply = json.dumps(reply)
            LOGGER.debug(f"Response received : {reply}")
        return reply

    def __formatResponse(self, response) -> str:
        """Run a truthy reply through WatchlistResource formatting; pass falsy through."""
        if response :
            return WatchlistResource(response)._getWatchlistFormatted()._response
        return response
import logging
from constants.constants import BaseUrl
import urllib
class Router:
    """Central builder for every REST endpoint URL used by the SDK.

    Base URLs default to the values in ``BaseUrl`` and may be overridden via
    the optional ``config_obj`` (a mapping with a 'GLOBAL' section).
    Bug fix: ``_PlaceGtcGtdTradeURL_comm`` previously joined against the
    equity base URL instead of the commodity one (copy-paste defect — every
    other ``_comm`` method uses ``baseurlcomm``).
    """

    def __init__(self, config_obj=None):
        self.LOGGER = logging.getLogger(__name__)
        self.LOGGER.info("Router object is being created.")
        self.baseurleq = BaseUrl.BASE_EQ.value
        self.baseurlcomm = BaseUrl.BASE_COMM.value
        self.baseurlcontent = BaseUrl.BASE_CONTENT.value
        self.baseurllogin = BaseUrl.BASE_LOGIN.value
        self.basemflogin = BaseUrl.BASE_MF_LOGIN.value
        self.EquityContractURL = BaseUrl.EQ_CONTRACT.value
        self.MFContractURL = BaseUrl.MF_CONTRACT.value
        # Optional overrides from the user-supplied configuration file.
        if config_obj and 'GLOBAL' in config_obj:
            if config_obj['GLOBAL'].get('BasePathLogin'):
                self.baseurllogin = config_obj['GLOBAL']['BasePathLogin']
            if config_obj['GLOBAL'].get('BasePathEq'):
                self.baseurleq = config_obj['GLOBAL']['BasePathEq']
            if config_obj['GLOBAL'].get('BasePathComm'):
                self.baseurlcomm = config_obj['GLOBAL']['BasePathComm']
            if config_obj['GLOBAL'].get('BasePathMf'):
                self.basemflogin = config_obj['GLOBAL']['BasePathMf']
            if config_obj['GLOBAL'].get('BasePathContent'):
                self.baseurlcontent = config_obj['GLOBAL']['BasePathContent']
            if config_obj['GLOBAL'].get('EquityContractURL'):
                self.EquityContractURL = config_obj['GLOBAL']['EquityContractURL']
            if config_obj['GLOBAL'].get('MFContractURL'):
                self.MFContractURL = config_obj['GLOBAL']['MFContractURL']
            if config_obj['GLOBAL'].get('AppIdKey'):
                self._AppIdKey = config_obj['GLOBAL']['AppIdKey']
            self.LOGGER.info("URL constants loaded with provided configuration file.")

    def _CheckUpdateURl(self):
        return urllib.parse.urljoin(self.baseurlcontent, "adhoc/lib/version/")

    def _OrderBookURL(self):
        return urllib.parse.urljoin(self.baseurleq, "order/book/{userid}/v1/")

    def _OrderBookURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "orderbook/{userid}?rTyp={reqtype}/")

    def _TradeBookURL(self):
        return urllib.parse.urljoin(self.baseurleq, "tradebook/v1/{userid}/")

    def _TradeBookURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "tradebook/{userid}/")

    def _NetPositionURL(self):
        return urllib.parse.urljoin(self.baseurleq, "positions/net/{userid}/")

    def _NetPositionURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "positions/{userid}/")

    def _PlaceTradeURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/placetrade/{userid}/")

    def _PlaceTradeURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "trade/placetrade/{userid}/")

    def _PlaceBracketTradeURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/placebrackettrade/{userid}/")

    def _PlaceBasketTradeURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/basketorder/{userid}/")

    def _ExitBracketTradeURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/exitbrackettrade/{userid}/")

    def _PlaceGtcGtdTradeURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/placegtcgtdtrade/{userid}/")

    def _PlaceGtcGtdTradeURL_comm(self):
        # Fixed: previously used self.baseurleq, unlike every other _comm method.
        return urllib.parse.urljoin(self.baseurlcomm, "trade/placegtcgtdtrade/{userid}/")

    def _OrderDetailsURL(self):
        return urllib.parse.urljoin(self.baseurleq, "order/details/{userid}?nOID={orderid}")

    def _OrderDetailsURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "orderdetails/{userid}?oID={orderid}")

    def _OrderHistoryURL(self):
        return urllib.parse.urljoin(self.baseurleq, "order/history/{userid}?sDt={StartDate}&eDt={EndDate}/")

    def _OrderHistoryURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "orderhistory/{userid}?sDt={StartDate}&eDt={EndDate}/")

    def _ModifyTradeURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/modifytrade/{userid}/")

    def _ModifyTradeURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "trade/modifytrade/{userid}/")

    def _CancelTradeURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/canceltrade/v1/{userid}/")

    def _CancelTradeURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "trade/canceltrade/v1/{userid}/")

    def _HoldingURL(self):
        return urllib.parse.urljoin(self.baseurleq, "holdings/v1/rmsholdings/{userid}/")

    def _HoldingURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "reports/detail/{userid}/")

    def _LimitsURL(self):
        return urllib.parse.urljoin(self.baseurleq, "limits/rmssublimits/{userid}/")

    def _LimitsURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "limits/{userid}/")

    def _GetAMOFlag(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/amoflag/")

    def _GetAMOFlag_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "trade/amostatus/{exch}")

    def _PositionSqOffURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/position/sqroff/{userid}/")

    def _ConvertPositionURL(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/convertposition/v1/{userid}/")

    def _ConvertPositionURL_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "trade/positionconversion/{userid}/")

    def _PlaceAMOTrade(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/amo/placetrade/{userid}/")

    def _PlaceAMOTrade_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "trade/amo/placetrade/{userid}/")

    def _ModifyAMOTrade(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/amo/modifytrade/{userid}/")

    def _ModifyAMOTrade_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "trade/amo/modifytrade/{userid}/")

    def _CancelAMOTrade(self):
        return urllib.parse.urljoin(self.baseurleq, "trade/amo/canceltrade/v1/{userid}/")

    def _CancelAMOTrade_comm(self):
        return urllib.parse.urljoin(self.baseurlcomm, "trade/amo/canceltrade/v1/{userid}/")

    # MF Related APIs
    def _PlaceMFURL(self):
        return urllib.parse.urljoin(self.basemflogin, "trade/{userid}/")

    def _ModifyMFURL(self):
        return urllib.parse.urljoin(self.basemflogin, "trade/{userid}/")

    def _CancelMFURL(self):
        return urllib.parse.urljoin(self.basemflogin, "trade/{userid}/")

    def _HoldingsMFURL(self):
        return urllib.parse.urljoin(self.basemflogin, "holding/{userid}/")

    def _OrderBookMFURL(self):
        return urllib.parse.urljoin(self.basemflogin, "order/{userid}?frDt={fromDate}&toDt={toDate}/")

    # Charts Related APIs
    def _ChartsURL(self):
        return urllib.parse.urljoin(self.baseurlcontent, "charts/v2/main/{interval}/{exc}/{aTyp}/{symbol}")

    # Live News related APIs
    def _LiveNewsCategoriesURL(self) -> str:
        return urllib.parse.urljoin(self.baseurlcontent, "liveNews/getfiltersandcatagories")

    def _GeneralNewsURL(self) -> str:
        return urllib.parse.urljoin(self.baseurlcontent, "liveNews/general")

    def _HoldingsNewsURL(self) -> str :
        return urllib.parse.urljoin(self.baseurleq, "news/eqholdings")

    def _LatestCorpActionsURL(self) -> str :
        return urllib.parse.urljoin(self.baseurlcontent, "events/latestcorpactions/{symbol}")

    # Watchlist related APIs
    def _WatchlistBaseGroupsURL(self):
        return urllib.parse.urljoin(self.baseurlcontent, "accounts/groups")

    def _WatchlistGetScripsURL(self):
        return urllib.parse.urljoin(self.baseurlcontent, "accounts/groups/symbols")

    def _WatchlistGroupNameURL(self):
        return urllib.parse.urljoin(self.baseurlcontent, "accounts/groups/{groupName}/")

    # Login related APIs
    def _LoginURL(self):
        return urllib.parse.urljoin(self.baseurllogin, "accounts/loginvendor/{vendorId}/")

    def _TokenURL(self):
        return urllib.parse.urljoin(self.baseurllogin, "accounts/logindata/")

    def _LogoutURL(self):
        return urllib.parse.urljoin(self.baseurllogin, "account/logoff/{userid}/")

    # SnapQuote related APIs
    def _MarketDepthURL(self):
        return urllib.parse.urljoin(self.baseurlcontent, "quote/scrip/{symbol}/")
# django_api_deal
#### 打包命令
```angular2html
python setup.py sdist
```
#### 上传命令
```angular2html
twine upload dist/*
```
#### 主要功能
1. 简化常见列表类接口
1. get、post、put、delete都有对应的my_get、my_post、my_put、my_delete方法。
2. 简化参数的校验及检验失败的提示信息
1. 参数类型
1. StrArgument 字符串参数
2. StrOfIntArgument 整形的字符串参数
3. EmailArgument 邮箱参数 必须有@
4. UrlArgument url参数 必须http 或 https开头
5. ListArgument 列表参数
6. DictArgument 字典参数
7. ListNestDictArgument 列表嵌套字典参数 可多层嵌套
8. ChoiceArgument 固定可选值参数 一般用来 True, False,或者其他固定可选项
9. BoolArgument bool参数
10. StrOfBoolArgument bool类型的字符串参数
11. DateStrArgument 日期参数,可自定义日期格式 datetime_format
12. IntArgument FloatArgument DecimalArgument
    2. 简化参数的配置项(desc、name、must 等)
13. desc 名称
14. name 上传字段名称
15. must 是否必填
16. query_type 查询时的方式:icontains
17. relate_name 查询字段与name字段不一致时使用或关联查询时使用
3. CORS 跨域解决
1. 在settings中的中间件增加下面的代码
```angular2html
'api_deal.middlewares.cors.MyCorsMiddleware',
```
4. 通用的异常处理
1. 在settings中的中间件增加下面的代码
```angular2html
'api_deal.middlewares.error.MyErrorMiddleware',
```
代码示例:
```angular2html
class BookListView(ApiListView):
model = Book
id_arg = IntArgument("ID", "id", must=True)
name_arg = StrArgument("名称", "name", must=True, query_type="icontains", )
get_params_list = [
name_arg,
]
def get(self, request):
return self.my_get(request)
post_params_list = [
name_arg,
]
def post(self, request):
return self.my_post(request)
select_put_params_list = [
id_arg,
]
put_params_list = [
name_arg,
]
def put(self, request):
return self.my_put(request)
delete_params_list = [
id_arg
]
def delete(self, request):
return self.my_delete(request)
```
正确返回示例
```angular2html
{
"errmsg": "ok"
"data": {...}
}
```
错误返回示例
```
{
"errmsg": "参数名称是必填项"
"data": null
}
``` | APIDjango | /APIDjango-1.3.tar.gz/APIDjango-1.3/README.md | README.md |
import json
from django.db.models import F
from django.http import QueryDict
from django.utils.decorators import method_decorator
from django.views import View
from . import error
from .args import BaseArgument
from .error import METHOD_NOT_ALLOW
from .page import get_start_end_inx
from .res import SuccessReturn, ErrorReturn
def handle_params(request, *args, **kwargs):
    """Extract the request payload and attach it to ``request.data``."""
    if request.method == "GET":
        request.data = request.GET
        return
    content_type = request.content_type
    if content_type == "application/json":
        try:
            parsed = json.loads(request.body)
        except ValueError:
            raise error.ARGUMENT_ERROR("参数解析失败,不是合法的json字符串")
        request.data = parsed
    elif content_type == "multipart/form-data":
        request.data = request.POST
    else:
        # Form-encoded bodies (e.g. PUT/DELETE) are parsed into a QueryDict.
        request.data = QueryDict(request.body)
def decorator(func):
    """View decorator: normalize request parameters, then invoke *func*.

    The previous version wrapped the call in ``try/except Exception as exc:
    raise exc``, which only re-raised the same exception while adding a
    useless traceback frame; the call is now made directly.
    """
    def wrapper(request, *args, **kwargs):
        handle_params(request, *args, **kwargs)
        return func(request, *args, **kwargs)
    return wrapper
@method_decorator(decorator, name='dispatch')
class ApiView(View):
    """Base API view: declares parameter specs per HTTP verb and validates them.

    Each checked value is stored on the instance under the argument's
    ``get_key_name()`` so subclasses can collect them as ORM kwargs.
    """
    get_params_list = []
    post_params_list = []
    select_put_params_list = []
    put_params_list = []
    delete_params_list = []

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.request = None

    def http_method_not_allowed(self, request, *args, **kwargs):
        # Return a JSON error instead of Django's default 405 response.
        return ErrorReturn(METHOD_NOT_ALLOW)

    def _check_params(self, params_list, data):
        """Validate *data* against *params_list*, storing each value on self.

        Shared implementation for the four duplicated check_* methods below.
        """
        for param in params_list:
            if isinstance(param, BaseArgument):
                setattr(self, param.get_key_name(), param.base_check(data))

    def check_get_params(self, data):
        self._check_params(self.get_params_list, data)

    def check_post_params(self, data):
        self._check_params(self.post_params_list, data)

    def check_put_params(self, data):
        # PUT validates the selection filter first, then the update fields.
        self._check_params(self.select_put_params_list, data)
        self._check_params(self.put_params_list, data)

    def check_delete_params(self, data):
        self._check_params(self.delete_params_list, data)
class ApiListView(ApiView):
    """Generic CRUD list view backed by a Django model.

    Subclasses set ``model``, ``field_list`` (response column specs) and the
    per-verb parameter lists inherited from ApiView.
    """
    # NOTE(review): `extra`, `field_list` and `field_res` are mutable class
    # attributes shared across subclasses unless each subclass overrides them.
    model = None
    extra = {}
    field_list = []
    field_res = {}
    order_by = None
    def get_query_kwargs(self):
        """Build ORM filter kwargs from validated GET params, dropping empty values."""
        query_kwargs, _ = self.base_get_kwargs(self.get_params_list)
        real_query_kwargs = {}
        for key, value in query_kwargs.items():
            if value not in ["", None]:
                real_query_kwargs[key] = value
        return real_query_kwargs
    def get_field_kwargs(self):
        """Split field_list into plain column names and F()-aliased related columns."""
        field_list = []
        field_res = {}
        for field in self.field_list:
            relate_field = field.get("relate_field", None)
            if relate_field is None:
                field_list.append(field['prop'])
            else:
                # Alias the related column under the response key 'prop'.
                field_res[field['prop']] = F(relate_field)
        return field_list, field_res
    def get_data_list(self, data):
        """Return (queryset of value-dicts, total row count) for the GET filters."""
        self.check_get_params(data)
        query_kwargs = self.get_query_kwargs()
        field_list, field_res = self.get_field_kwargs()
        data_count = self.model.objects.filter(**query_kwargs).count()
        data_list = self.model.objects.filter(**query_kwargs).extra(**self.extra).values(*field_list, **field_res)
        if self.order_by is not None:
            data_list = data_list.order_by(*self.order_by)
        return data_list, data_count
    def get_header_list(self):
        """Return the field specs that should be rendered as table headers."""
        return [header for header in self.field_list if header.get("is_show", True)]
    def my_get(self, request):
        """List endpoint: filtered, paginated rows plus header metadata."""
        res = {}
        data_list, data_count = self.get_data_list(request.data)
        start_inx, end_inx = get_start_end_inx(request.data, res, data_count)
        data_list = data_list[start_inx: end_inx]
        res['data_list'] = list(data_list)
        res['header_list'] = self.get_header_list()
        return SuccessReturn(res)
    def base_get_kwargs(self, params_list):
        """Collect validated values from self into (model kwargs, other kwargs).

        Values flagged ``is_other_arg`` are routed to the second dict for
        caller-specific handling instead of being passed to the model.
        """
        base_kwargs = {}
        other_kwargs = {}
        for post_param in params_list:
            key = post_param.get_key_name()
            value = getattr(self, post_param.get_key_name(), None)
            is_other_arg = getattr(post_param, "is_other_arg", False)
            if is_other_arg is True:
                other_kwargs[key] = value
            else:
                base_kwargs[key] = value
        return base_kwargs, other_kwargs
    def get_post_kwargs(self):
        return self.base_get_kwargs(self.post_params_list)
    def get_put_select_kwargs(self):
        return self.base_get_kwargs(self.select_put_params_list)
    def get_put_kwargs(self):
        return self.base_get_kwargs(self.put_params_list)
    def my_post(self, request):
        """Create endpoint: validate POST params and insert one row."""
        self.check_post_params(request.data)
        create_kwargs, other_kwargs = self.get_post_kwargs()
        self.model.objects.create(**create_kwargs)
        return SuccessReturn()
    def my_put(self, request):
        """Update endpoint: select rows via select_put params, apply put params."""
        self.check_put_params(request.data)
        select_put_kwargs, _ = self.get_put_select_kwargs()
        put_kwargs, other_put_kwargs = self.get_put_kwargs()
        self.model.objects.filter(**select_put_kwargs).update(**put_kwargs)
        return SuccessReturn()
    def my_delete(self, request):
        """Delete endpoint: refuses empty filters and zero-row deletions."""
        self.check_delete_params(request.data)
        delete_kwargs, other_kwargs = self.base_get_kwargs(self.delete_params_list)
        if not delete_kwargs:
            raise error.ARGUMENT_ERROR("删除条件为空")
        delete_obj_list = self.model.objects.filter(**delete_kwargs)
        count = delete_obj_list.count()
        if count == 0:
            raise error.ARGUMENT_ERROR("删除数量为0")
        delete_obj_list.delete()
        return SuccessReturn()
import datetime
from decimal import Decimal
from . import error
class BaseArgument(object):
    """Declarative description of a single request parameter.

    Subclasses set ``type`` (and optionally ``value_type``) to enforce the
    expected payload type; ``base_check`` validates raw request data.
    """

    must = False          # whether the parameter is mandatory
    name = ""             # field name expected in the payload
    type = None           # expected payload type (None = accept anything)
    value_type = None     # type of the value after conversion
    desc = ""             # human-readable description (used by __str__)
    is_other_arg = False  # True when the value is not a direct model field
    value = None
    f = {}                # scratch dict used to format type-error messages

    def __init__(self, desc, name, must=False, query_type=None, relate_name=None, is_other_arg=False):
        self.desc = desc
        self.name = name
        self.must = must
        self.query_type = query_type
        self.relate_name = relate_name
        self.is_other_arg = is_other_arg
        # Default the converted-value type to the declared payload type.
        if self.value_type is None:
            self.value_type = self.type

    def base_check(self, data):
        """Validate ``data[self.name]`` and return the value."""
        raw = data.get(self.name)
        if self.must:
            if self.name not in data.keys():
                raise error.ARGUMENT_ERROR(f"缺少{self.name}参数")
            # False, [] and {} are legitimate "present" values despite being falsy.
            allowed_empty = raw is False or raw == [] or raw == {}
            if not raw and not allowed_empty:
                raise error.ARGUMENT_ERROR(f"{self.name}值不能为空")
        self.f = {"field_name": self.name, "field_type": self.type, "value_type": type(raw)}
        if self.type is None:
            return raw
        if raw and not isinstance(raw, self.type):
            raise error.ARGUMENT_ERROR("{field_name}期望是: {field_type},实际是: {value_type}".format(**self.f))
        return raw

    def get_key_name(self):
        """Return the ORM lookup key: relate_name overrides name; query_type is appended."""
        key = self.relate_name if self.relate_name else self.name
        if self.query_type:
            key = f"{key}__{self.query_type}"
        return key

    def __str__(self):
        return f"{self.desc}-{self.name}-{self.must}"
class IntArgument(BaseArgument):
    # Integer parameter: payload value must already be an int.
    type = int
class FloatArgument(BaseArgument):
    """Float parameter: the value is converted and rounded to ``decimal_places``."""
    type = float

    def __init__(self, desc, name, must=False, decimal_places=2):
        super().__init__(desc, name, must)
        self.decimal_places = decimal_places

    def base_check(self, data):
        value = data.get(self.name)
        if value:
            try:
                value = float(value)
            except (TypeError, ValueError):
                # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
                raise error.ARGUMENT_ERROR("{field_name}参数类型错误".format(field_name=self.name))
            data[self.name] = value
        super().base_check(data)
        if not value:
            # Absent/empty optional value: nothing to round (the old code
            # crashed here with round(None, n)).
            return value
        return round(value, self.decimal_places)
class DecimalArgument(FloatArgument):
    """Decimal parameter: the value is quantized to ``decimal_places``."""
    type = Decimal

    def base_check(self, data):
        value = data.get(self.name)
        if value:
            try:
                value = Decimal(value).quantize(Decimal("0." + "0" * self.decimal_places))
            except (TypeError, ValueError, ArithmeticError):
                # decimal raises InvalidOperation (an ArithmeticError subclass)
                # on bad input; the previous bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit.
                raise error.ARGUMENT_ERROR("{field_name}参数类型错误".format(field_name=self.name))
            data[self.name] = value
        # Call BaseArgument directly: routing through FloatArgument.base_check
        # re-converted the Decimal to float, making BaseArgument's
        # isinstance(value, Decimal) check raise for every non-empty value.
        BaseArgument.base_check(self, data)
        return value
class StrArgument(BaseArgument):
    # String parameter.
    type = str
class StrOfIntArgument(BaseArgument):
    """String parameter that must hold an integer; returns the parsed int."""
    type = str
    value_type = int

    def base_check(self, data):
        value = super().base_check(data)
        if value:
            try:
                value = int(value)
            except (TypeError, ValueError):
                # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
                raise error.ARGUMENT_ERROR("{field_name}类型错误".format(field_name=self.name))
        return value
class EmailArgument(StrArgument):
    """String parameter that must look like an email address (contain '@')."""

    def base_check(self, data):
        value = super().base_check(data)
        # Membership test replaces the old `value.find("@") == -1` spelling.
        if value and "@" not in value:
            raise error.ARGUMENT_ERROR("邮箱格式不正确")
        return value
class UrlArgument(StrArgument):
    """String parameter that must look like a URL ("http…" or protocol-relative "//…")."""

    def base_check(self, data):
        value = super().base_check(data)
        if value and not value.startswith(("http", "//")):
            raise error.ARGUMENT_ERROR(f"{self.name}格式不正确")
        return value
class ListArgument(BaseArgument):
type = list
class DictArgument(BaseArgument):
type = dict
def __init__(self, desc, name, must=False, dict_arg_list=None, ):
super().__init__(desc, name, must)
self.dict_arg_list = dict_arg_list
def base_check(self, data):
value = super().base_check(data)
result = {}
for dict_arg in self.dict_arg_list:
v_v = dict_arg.base_check(value)
result[dict_arg.get_key_name()] = v_v
return result
class ListNestDictArgument(ListArgument):
dict_arg_list = None # 只判断最外层key值
def __init__(self, desc, name, must=False, is_other_arg=False, dict_arg_list=None, ):
super().__init__(desc=desc, name=name, must=must, is_other_arg=is_other_arg)
self.dict_arg_list = dict_arg_list
def base_check(self, data):
value = super().base_check(data)
result = []
if not [False for i in [self.dict_arg_list, value] if not isinstance(i, self.type)]:
for v in value:
one = {}
for dict_arg in self.dict_arg_list:
v_v = dict_arg.base_check(v)
one[dict_arg.get_key_name()] = v_v
result.append(one)
return result
class ChoiceArgument(BaseArgument):
type = str
choice_list = None
def __init__(self, desc, name, must=False, choice_list=None, relate_name=None):
super().__init__(desc, name, must, relate_name=relate_name)
self.choice_list = choice_list
def base_check(self, data):
value = super().base_check(data)
if isinstance(self.choice_list, tuple) or isinstance(self.choice_list, list):
if self.must and value not in self.choice_list:
self.f['choices'] = ";".join(self.choice_list)
raise error.ARGUMENT_ERROR("{field_name}超出允许的范围:{choices}".format(**self.f))
return value
class BoolArgument(ChoiceArgument):
type = bool
choice_list = (True, False)
def __init__(self, desc, name, must=False, relate_name=None):
super().__init__(desc, name, must=must, choice_list=self.choice_list, relate_name=relate_name)
class StrOfBoolArgument(BoolArgument):
type = str
value_type = bool
choice_list = ("true", "false")
def base_check(self, data):
value = super().base_check(data)
if value == "true":
value = True
elif value == "false":
value = False
else:
value = None
return value
class DateStrArgument(StrArgument):
type = str
datetime_format = None
def __init__(self, desc, name, must=False, query_type=None, relate_name=None, datetime_format="%Y-%m-%d"):
super().__init__(desc, name, must, query_type, relate_name)
self.datetime_format = datetime_format
def base_check(self, data):
value = super().base_check(data)
if value:
try:
_ = datetime.datetime.strptime(value, self.datetime_format)
except Exception as e:
raise error.ARGUMENT_ERROR("日期格式不正确")
return value | APIDjango | /APIDjango-1.3.tar.gz/APIDjango-1.3/api_deal/args.py | args.py |
from django.db import models
# Create your models here.
class MyForeignKey(models.ForeignKey):
def __init__(self, to, on_delete=models.DO_NOTHING, related_name=None, related_query_name=None,
limit_choices_to=None, parent_link=False, to_field=None,
db_constraint=False, **kwargs):
super().__init__(to=to, on_delete=on_delete, related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to, parent_link=parent_link,
to_field=to_field, db_constraint=db_constraint, **kwargs) # 使用super函数
class MyManyToManyField(models.ManyToManyField):
def __init__(self, to, related_name=None, related_query_name=None,
limit_choices_to=None, symmetrical=None, through=None,
through_fields=None, db_constraint=False, db_table=None,
swappable=False, **kwargs):
super().__init__(to, related_name=related_name, related_query_name=related_query_name,
limit_choices_to=limit_choices_to, symmetrical=symmetrical, through=through,
through_fields=through_fields, db_constraint=db_constraint, db_table=db_table,
swappable=swappable, **kwargs) # 使用super函数
class MyOneToOneField(models.OneToOneField):
def __init__(self, to, on_delete=models.DO_NOTHING, to_field=None, **kwargs):
kwargs['db_constraint'] = False
super().__init__(to=to, on_delete=on_delete, to_field=to_field, **kwargs) # 使用super函数
class MyModel(models.Model):
create_time = models.DateTimeField(verbose_name="创建时间", auto_now_add=True)
class Meta:
abstract = True
# 重写QuerySet
class WithDeleteQuerySet(models.QuerySet):
def delete(self):
return super().update(is_delete=True)
class WithDeleteManager(models.Manager):
# 自定义模型管理器中的方法
def get_queryset(self):
return WithDeleteQuerySet(self.model, using=self._db).filter(is_delete=False)
class WithDeleteModel(MyModel):
is_delete = models.BooleanField(verbose_name="是否删除", default=False)
class Meta:
abstract = True
objects = WithDeleteManager()
def delete(self, using=None, keep_parents=False):
self.is_delete = True
self.save() | APIDjango | /APIDjango-1.3.tar.gz/APIDjango-1.3/api_deal/models.py | models.py |
# APIDocTest
A lightweight Python WebAPI's documents and tests framework based on `__doc__`, VanillaJS-AJAX and Flask, but not limited to Flask.
## Details
Each of API handlers' `__doc__` line which starts with "@" will be treated as API's notes, excludes the leading spaces.
The "::" is the separator between key and value of each APIs='s note.
blanks in both ends of key and value are ignored.
|key|value(samples)|comments|
|:--|:----|:-------|
|@path-parameters|variablex=literalx & variabley=literaly & variablez=literalz|placeholders in router's url, such as "/path/to/<containeruuid>/status".|
|@request-parameters|variablex=literalx & variabley=literaly & variablez=literalz|like things after "?" in url, such as "http://nagexiucai.com/topic.php?title=ml-in-ten-pictures&md=ml-in-ten-pictures".|
|@request-headers|content-type:text/json;charset=utf-8 & user-agent:mozilla/5.0 gecko/20100101 firefox/38.0||
|@request-body|{"author":"bob-nx"}||
|@response-headers|content-type:text/html;charset=utf-8 & server:werkzeug/0.14.1 python/2.7.5||
|@response-body|"hi"||
|@norm|`==[caseignored]` or `~=[regular expression]` or ##|"==" for "same", "~=" for "similar", "##" for "silent"; if `"[extra]"` supplied, "extra" works.|
## Examples
The core is use `APIDocTest('router', ['methods'], name='name', description='description' version="version")` as handlers' first decorator.
```
from flask import blueprints
from apidoctest import APIDocTest
bapidoctest = blueprints.Blueprint("apidoctest", __name__)
@bapidoctest.route("/fuck", methods=["GET", "POST"])
@APIDocTest("/fuck", ["GET", "POST"], name="fuck", description="uncourteous.")
def Fuck():
'''
@path-parameters::
@request-parameters::
@request-headers:: Content-Type:application/json
@request-body:: {"who":"unknown"}
@response-headers:: Content-Type:text/html;charset=utf-8
@response-body:: "fuck"
@norm:: ==
'''
return "fuck"
import sys
reload(sys)
sys.setdefaultencoding("utf8")
from flask import Flask, request, render_template_string, json
from apidoctest import APIDocTestTemplateString
serv = Flask("APIDocTestDemo")
@serv.route("/apidoctest")
def documents():
return render_template_string(APIDocTestTemplateString, data=json.dumps(APIDocTest.apidocs, encoding="utf-8"))
serv.register_blueprint(bapidoctest, url_prefix="/what")
serv.run(debug=False, host="localhost", port=9527)
```
## Result
Use browser(Google Chrome recommended), access `http://localhost:9527/apidoctest`(in examples above).

- 2xx: <font color='black'>black</font>
- 3xx: <font color='orange'>orange</font>
- 4xx: <font color='blue'>blue</font>
- 5xx: <font color='red'>red</font>
# ToDo
- I18n(Chinese friendly now)
- Parameters validation
- Input/Output grouped by test cases
- Input/Output type adaption
| APIDocTest | /APIDocTest-0.0.3.tar.gz/APIDocTest-0.0.3/README.md | README.md |
from __future__ import annotations
import typing as t
from apispec import APISpec
from .exceptions import _bad_schema_message
from .schemas import EmptySchema
from .schemas import FileSchema
from .security import HTTPBasicAuth
from .security import HTTPTokenAuth
from .types import HTTPAuthType
from .types import OpenAPISchemaType
from .types import SchemaType
if t.TYPE_CHECKING: # pragma: no cover
from .blueprint import APIBlueprint
default_bypassed_endpoints: t.List[str] = [
'static',
'openapi.spec',
'openapi.docs',
'openapi.redoc',
'openapi.swagger_ui_oauth_redirect',
'_debug_toolbar.static', # Flask-DebugToolbar
]
default_response = {
'schema': {},
'status_code': 200,
'description': None,
'example': None,
'examples': None,
'links': None,
'content_type': 'application/json',
}
def get_tag(
blueprint: APIBlueprint,
blueprint_name: str
) -> t.Dict[str, t.Any]:
"""Get tag from blueprint object."""
tag: t.Dict[str, t.Any]
if blueprint.tag is not None:
if isinstance(blueprint.tag, dict):
tag = blueprint.tag
else:
tag = {'name': blueprint.tag}
else:
tag = {'name': blueprint_name.title()}
return tag
def get_operation_tags(
blueprint: APIBlueprint,
blueprint_name: str
) -> t.List[str]:
"""Get operation tag from blueprint object."""
tags: t.List[str]
if blueprint.tag is not None:
if isinstance(blueprint.tag, dict):
tags = [blueprint.tag['name']]
else:
tags = [blueprint.tag]
else:
tags = [blueprint_name.title()]
return tags
def get_auth_name(
auth: HTTPAuthType,
auth_names: t.List[str]
) -> str:
"""Get auth name from auth object."""
name: str = ''
if hasattr(auth, 'security_scheme_name'):
name = auth.security_scheme_name # type: ignore
if not name:
if isinstance(auth, HTTPBasicAuth):
name = 'BasicAuth'
elif isinstance(auth, HTTPTokenAuth):
if auth.scheme.lower() == 'bearer' and auth.header is None:
name = 'BearerAuth'
else:
name = 'ApiKeyAuth'
else:
raise TypeError('Unknown authentication scheme.')
if name in auth_names:
v = 2
new_name = f'{name}_{v}'
while new_name in auth_names:
v += 1
new_name = f'{name}_{v}'
name = new_name
return name
def get_security_scheme(auth: HTTPAuthType) -> t.Dict[str, t.Any]:
"""Get security scheme from auth object."""
security_scheme: t.Dict[str, t.Any]
if isinstance(auth, HTTPTokenAuth):
if auth.scheme.lower() == 'bearer' and auth.header is None:
security_scheme = {
'type': 'http',
'scheme': 'bearer',
}
else:
security_scheme = {
'type': 'apiKey',
'name': auth.header,
'in': 'header',
}
else:
security_scheme = {
'type': 'http',
'scheme': 'basic',
}
return security_scheme
def get_security_and_security_schemes(
auth_names: t.List[str],
auth_schemes: t.List[HTTPAuthType]
) -> t.Tuple[t.Dict[HTTPAuthType, str], t.Dict[str, t.Dict[str, str]]]:
"""Make security and security schemes from given auth names and schemes."""
security: t.Dict[HTTPAuthType, str] = {}
security_schemes: t.Dict[str, t.Dict[str, str]] = {}
for name, auth in zip(auth_names, auth_schemes): # noqa: B905
security[auth] = name
security_schemes[name] = get_security_scheme(auth)
if hasattr(auth, 'description') and auth.description is not None:
security_schemes[name]['description'] = auth.description
return security, security_schemes
def get_path_summary(func: t.Callable, fallback: t.Optional[str] = None) -> str:
"""Get path summary from the name or docstring of the view function."""
summary: str
docs: list = (func.__doc__ or '').strip().split('\n')
if docs[0]:
# Use the first line of docstring
summary = docs[0]
else:
# Use the function name
summary = fallback or ' '.join(func.__name__.split('_')).title()
return summary
def get_path_description(func: t.Callable) -> str:
"""Get path description from the docstring of the view function."""
docs = (func.__doc__ or '').strip().split('\n')
if len(docs) > 1:
# use the remain lines of docstring as description
return '\n'.join(docs[1:]).strip()
return ''
def add_response(
operation: dict,
status_code: str,
schema: t.Union[SchemaType, dict],
description: str,
example: t.Optional[t.Any] = None,
examples: t.Optional[t.Dict[str, t.Any]] = None,
links: t.Optional[t.Dict[str, t.Any]] = None,
content_type: t.Optional[str] = 'application/json',
) -> None:
"""Add response to operation.
*Version changed: 1.3.0*
- Add parameter `content_type`.
*Version changed: 0.10.0*
- Add `links` parameter.
"""
operation['responses'][status_code] = {}
if status_code != '204':
if isinstance(schema, FileSchema):
schema = {'type': schema.type, 'format': schema.format}
elif isinstance(schema, EmptySchema):
schema = {}
operation['responses'][status_code]['content'] = {
content_type: {
'schema': schema
}
}
operation['responses'][status_code]['description'] = description
if example is not None:
operation['responses'][status_code]['content'][
content_type]['example'] = example
if examples is not None:
operation['responses'][status_code]['content'][
content_type]['examples'] = examples
if links is not None:
operation['responses'][status_code]['links'] = links
def add_response_with_schema(
spec: APISpec,
operation: dict,
status_code: str,
schema: OpenAPISchemaType,
schema_name: str,
description: str
) -> None:
"""Add response with given schema to operation."""
if isinstance(schema, type):
schema = schema()
add_response(operation, status_code, schema, description)
elif isinstance(schema, dict):
if schema_name not in spec.components.schemas:
spec.components.schema(schema_name, schema)
schema_ref = {'$ref': f'#/components/schemas/{schema_name}'}
add_response(operation, status_code, schema_ref, description)
else:
raise TypeError(_bad_schema_message)
def get_argument(argument_type: str, argument_name: str) -> t.Dict[str, t.Any]:
"""Make argument from given type and name."""
argument: t.Dict[str, t.Any] = {
'in': 'path',
'name': argument_name,
}
if argument_type == 'int:':
argument['schema'] = {'type': 'integer'}
elif argument_type == 'float:':
argument['schema'] = {'type': 'number'}
else:
argument['schema'] = {'type': 'string'}
return argument | APIFlask | /APIFlask-2.0.1-py3-none-any.whl/apiflask/openapi.py | openapi.py |
import typing as t
from flask import current_app
from flask import g
from flask_httpauth import HTTPBasicAuth as BaseHTTPBasicAuth
from flask_httpauth import HTTPTokenAuth as BaseHTTPTokenAuth
from .exceptions import HTTPError
from .types import ErrorCallbackType
from .types import ResponseReturnValueType
class _AuthBase:
"""Base class for `HTTPBasicAuth` and `HTTPBasicAuth`."""
def __init__(
self,
description: t.Optional[str] = None,
security_scheme_name: t.Optional[str] = None,
) -> None:
self.description = description
self.security_scheme_name = security_scheme_name
self.error_handler(self._auth_error_handler) # type: ignore
@property
def current_user(self) -> t.Union[None, t.Any]:
return g.get('flask_httpauth_user', None)
@staticmethod
def _auth_error_handler(
status_code: int
) -> ResponseReturnValueType:
"""The default error handler for Flask-HTTPAuth.
This handler will return JSON response when setting `APIFlask(json_errors=True)` (default).
*Version changed: 0.9.0*
- The default reason phrase is used for auth errors.
- It will call the `app.error_callback` for auth errors.
"""
error = HTTPError(status_code)
if current_app.json_errors: # type: ignore
return current_app.error_callback(error) # type: ignore
return error.message, status_code # type: ignore
def error_processor(
self,
f: ErrorCallbackType
) -> None:
"""A decorator to register an error callback function for auth errors (401/403).
The error callback function will be called when authentication errors happened.
It should accept an `HTTPError` instance and return a valid response. APIFlask will pass
the callback function you decorated to Flask-HTTPAuth's `error_handler` method internally.
Example:
```python
from apiflask import APIFlask, HTTPTokenAuth
app = APIFlask(__name__)
auth = HTTPTokenAuth()
@auth.error_processor
def my_auth_error_processor(error):
return {
'status_code': error.status_code,
'message': error.message
}, error.status_code
```
See more details of the error object in
[APIFlask.error_processor][apiflask.APIFlask.error_processor].
*Version added: 0.9.0*
"""
self.error_handler(lambda status_code: f(HTTPError(status_code))) # type: ignore
class HTTPBasicAuth(_AuthBase, BaseHTTPBasicAuth):
"""Flask-HTTPAuth's HTTPBasicAuth with some modifications.
- Add an authentication error handler that returns JSON response.
- Expose the `auth.current_user` as a property.
- Add a `description` attribute for OpenAPI Spec.
Examples:
```python
from apiflask import APIFlask, HTTPBasicAuth
app = APIFlask(__name__)
auth = HTTPBasicAuth()
```
*Version changed: 1.3.0*
- Add `security_scheme_name` parameter.
"""
def __init__(
self,
scheme: str = 'Basic',
realm: t.Optional[str] = None,
description: t.Optional[str] = None,
security_scheme_name: t.Optional[str] = None,
) -> None:
"""Initialize an `HTTPBasicAuth` object.
Arguments:
scheme: The authentication scheme used in the `WWW-Authenticate`
header. Defaults to `'Basic'`.
realm: The realm used in the `WWW-Authenticate` header to indicate
a scope of protection, defaults to use `'Authentication Required'`.
description: The description of the OpenAPI security scheme.
security_scheme_name: The name of the OpenAPI security scheme, default to `BasicAuth`.
"""
BaseHTTPBasicAuth.__init__(self, scheme=scheme, realm=realm)
super().__init__(description=description, security_scheme_name=security_scheme_name)
class HTTPTokenAuth(_AuthBase, BaseHTTPTokenAuth):
"""Flask-HTTPAuth's HTTPTokenAuth with some modifications.
- Add an authentication error handler that returns JSON response.
- Expose the `auth.current_user` as a property.
- Add a `description` attribute for OpenAPI Spec.
Examples:
```python
from apiflask import APIFlask, HTTPTokenAuth
app = APIFlask(__name__)
auth = HTTPTokenAuth()
```
"""
def __init__(
self,
scheme: str = 'Bearer',
realm: t.Optional[str] = None,
header: t.Optional[str] = None,
description: t.Optional[str] = None,
security_scheme_name: t.Optional[str] = None,
) -> None:
"""Initialize a `HTTPTokenAuth` object.
Arguments:
scheme: The authentication scheme used in the `WWW-Authenticate`
header. One of `'Bearer'` and `'ApiKey'`, defaults to `'Bearer'`.
realm: The realm used in the `WWW-Authenticate` header to indicate
a scope of protection, defaults to use `'Authentication Required'`.
header: The custom header where to obtain the token (instead
of from `Authorization` header). If a custom header is used,
the scheme should not be included. Example:
```
X-API-Key: this-is-my-token
```
description: The description of the OpenAPI security scheme.
security_scheme_name: The name of the OpenAPI security scheme,
defaults to `BearerAuth` or `ApiKeyAuth`.
*Version changed: 1.3.0*
- Add `security_scheme_name` parameter.
"""
BaseHTTPTokenAuth.__init__(self, scheme=scheme, realm=realm, header=header)
super().__init__(description=description, security_scheme_name=security_scheme_name) | APIFlask | /APIFlask-2.0.1-py3-none-any.whl/apiflask/security.py | security.py |
import sys
import typing as t
if sys.version_info >= (3, 8):
from typing import Protocol
else: # pragma: no cover
from typing_extensions import Protocol
if t.TYPE_CHECKING: # pragma: no cover
from flask.wrappers import Response # noqa: F401
from werkzeug.datastructures import Headers # noqa: F401
from _typeshed.wsgi import WSGIApplication # noqa: F401
from .fields import Field # noqa: F401
from .schemas import Schema # noqa: F401
from .security import HTTPBasicAuth # noqa: F401
from .security import HTTPTokenAuth # noqa: F401
from .exceptions import HTTPError # noqa: F401
from .views import View # noqa: F401
DecoratedType = t.TypeVar('DecoratedType', bound=t.Callable[..., t.Any])
RequestType = t.TypeVar('RequestType')
ResponseBodyType = t.Union[
str,
bytes,
t.List[t.Any],
# Only dict is actually accepted, but Mapping allows for TypedDict.
t.Mapping[str, t.Any],
t.Iterator[str],
t.Iterator[bytes],
'Response',
]
ResponseStatusType = t.Union[str, int]
_HeaderName = str
_HeaderValue = t.Union[str, t.List[str], t.Tuple[str, ...]]
ResponseHeaderType = t.Union[
t.Dict[_HeaderName, _HeaderValue],
t.Mapping[_HeaderName, _HeaderValue],
t.Sequence[t.Tuple[_HeaderName, _HeaderValue]],
'Headers'
]
ResponseReturnValueType = t.Union[
ResponseBodyType,
t.Tuple[ResponseBodyType, ResponseHeaderType],
t.Tuple[ResponseBodyType, ResponseStatusType],
t.Tuple[ResponseBodyType, ResponseStatusType, ResponseHeaderType],
'WSGIApplication',
]
SpecCallbackType = t.Callable[[t.Union[dict, str]], t.Union[dict, str]]
ErrorCallbackType = t.Callable[['HTTPError'], ResponseReturnValueType]
DictSchemaType = t.Dict[str, t.Union['Field', type]]
SchemaType = t.Union['Schema', t.Type['Schema'], DictSchemaType]
OpenAPISchemaType = t.Union['Schema', t.Type['Schema'], dict]
HTTPAuthType = t.Union['HTTPBasicAuth', 'HTTPTokenAuth']
TagsType = t.Union[t.List[str], t.List[t.Dict[str, t.Any]]]
ViewClassType = t.Type['View']
ViewFuncOrClassType = t.Union[t.Callable, ViewClassType]
ResponseObjectType = t.Dict[str, t.Union[str, t.Dict[str, t.Dict[str, t.Any]]]]
ResponsesObjectType = t.Dict[t.Union[int, str], ResponseObjectType]
ResponsesType = t.Union[t.List[int], t.Dict[int, str], ResponsesObjectType]
RouteCallableType = t.Union[
t.Callable[..., ResponseReturnValueType],
t.Callable[..., t.Awaitable[ResponseReturnValueType]],
]
class PaginationType(Protocol):
page: int
per_page: int
pages: int
total: int
next_num: int
has_next: bool
prev_num: int
has_prev: bool
class ViewFuncType(Protocol):
_spec: t.Any
_method_spec: t.Any | APIFlask | /APIFlask-2.0.1-py3-none-any.whl/apiflask/types.py | types.py |
import typing as t
from flask import Blueprint
from .helpers import _sentinel
from .route import route_patch
from .scaffold import APIScaffold
@route_patch
class APIBlueprint(APIScaffold, Blueprint):
"""Flask's `Blueprint` object with some web API support.
Examples:
```python
from apiflask import APIBlueprint
bp = APIBlueprint('foo', __name__)
```
*Version changed: 0.5.0*
- Add `enable_openapi` parameter.
*Version added: 0.2.0*
"""
def __init__(
self,
name: str,
import_name: str,
tag: t.Optional[t.Union[str, dict]] = None,
enable_openapi: bool = True,
static_folder: t.Optional[str] = None,
static_url_path: t.Optional[str] = None,
template_folder: t.Optional[str] = None,
url_prefix: t.Optional[str] = None,
subdomain: t.Optional[str] = None,
url_defaults: t.Optional[dict] = None,
root_path: t.Optional[str] = None,
cli_group: t.Union[t.Optional[str]] = _sentinel # type: ignore
) -> None:
"""Make a blueprint instance.
Arguments:
name: The name of the blueprint. Will be prepended to
each endpoint name.
import_name: The name of the blueprint package, usually
`__name__`. This helps locate the `root_path` for the
blueprint.
tag: The tag of this blueprint. If not set, the
`<blueprint name>.title()` will be used (`'foo'` -> `'Foo'`).
Accepts a tag name string or an OpenAPI tag dict.
Example:
```python
bp = APIBlueprint('foo', __name__, tag='Foo')
```
```python
bp = APIBlueprint('foo', __name__, tag={'name': 'Foo'})
```
enable_openapi: If `False`, will disable OpenAPI support for the
current blueprint.
Other keyword arguments are directly passed to `flask.Blueprint`.
"""
super().__init__(
name,
import_name,
static_folder=static_folder,
static_url_path=static_url_path,
template_folder=template_folder,
url_prefix=url_prefix,
subdomain=subdomain,
url_defaults=url_defaults,
root_path=root_path,
cli_group=cli_group,
)
self.tag = tag
self.enable_openapi = enable_openapi | APIFlask | /APIFlask-2.0.1-py3-none-any.whl/apiflask/blueprint.py | blueprint.py |
import typing as t
from collections.abc import Mapping as ABCMapping
from functools import wraps
from flask import current_app
from flask import jsonify
from flask import Response
from marshmallow import ValidationError as MarshmallowValidationError
from webargs.flaskparser import FlaskParser as BaseFlaskParser
from webargs.multidictproxy import MultiDictProxy
from .exceptions import _ValidationError
from .helpers import _sentinel
from .schemas import EmptySchema
from .schemas import Schema
from .types import DecoratedType
from .types import DictSchemaType
from .types import HTTPAuthType
from .types import OpenAPISchemaType
from .types import RequestType
from .types import ResponseReturnValueType
from .types import ResponsesType
from .types import SchemaType
from .views import MethodView
BODY_LOCATIONS = ['json', 'files', 'form', 'form_and_files', 'json_or_form']
class FlaskParser(BaseFlaskParser):
    """A webargs parser that reports validation failures as APIFlask errors.

    ``handle_error`` is overridden so that, when the ``input`` decorator did
    not pass an explicit status code, the status code and the error
    description come from the ``VALIDATION_ERROR_STATUS_CODE`` and
    ``VALIDATION_ERROR_DESCRIPTION`` configuration variables.
    """

    USE_ARGS_POSITIONAL = False

    def handle_error(  # type: ignore
        self,
        error: MarshmallowValidationError,
        req: RequestType,
        schema: Schema,
        *,
        error_status_code: int,
        error_headers: t.Mapping[str, str]
    ) -> None:
        # Prefer the status code supplied by the caller; otherwise fall back
        # to the app-wide configured one.
        status_code = error_status_code or \
            current_app.config['VALIDATION_ERROR_STATUS_CODE']
        raise _ValidationError(
            status_code,
            current_app.config['VALIDATION_ERROR_DESCRIPTION'],
            error.messages,
            error_headers
        )
# A single module-level parser instance; the custom location loaders below
# are registered on it and ``APIScaffold.input`` parses requests through it.
parser: FlaskParser = FlaskParser()
# webargs' ``use_args`` bound to the shared parser above.
use_args: t.Callable = parser.use_args
def _get_files_and_form(request, schema):
    """Build a multidict proxy over ``request.files`` merged with ``request.form``.

    Used by the ``files`` and ``form_and_files`` location loaders so that an
    upload schema can also declare plain form fields.
    """
    merged = request.files.copy()
    merged.update(request.form)
    return MultiDictProxy(merged, schema)
@parser.location_loader('form_and_files')
def load_form_and_files(request, schema):
    """Load data for the ``form_and_files`` location (form fields plus uploads)."""
    return _get_files_and_form(request, schema)
@parser.location_loader('files')
def load_files(request, schema):
    """Load data for the ``files`` location.

    Form fields are included as well, so ``files`` behaves like
    ``form_and_files``.
    """
    return _get_files_and_form(request, schema)
def _annotate(f: t.Any, **kwargs: t.Any) -> None:
if not hasattr(f, '_spec'):
f._spec = {}
for key, value in kwargs.items():
f._spec[key] = value
def _ensure_sync(f):
if hasattr(f, '_sync_ensured'):
return f
@wraps(f)
def wrapper(*args, **kwargs):
return current_app.ensure_sync(f)(*args, **kwargs)
wrapper._sync_ensured = True
return wrapper
def _generate_schema_from_mapping(
    schema: DictSchemaType,
    schema_name: t.Optional[str]
) -> Schema:
    """Create a schema instance from a dict mapping field names to fields.

    Arguments:
        schema: The dict schema, e.g. ``{'name': String(required=True)}``.
        schema_name: The name for the generated schema class; when ``None``
            the default name ``'GeneratedSchema'`` is used.
    """
    if schema_name is None:
        schema_name = 'GeneratedSchema'
    # ``Schema.from_dict`` returns a schema *class*; it is instantiated here
    # so the caller always receives a schema instance (hence the return
    # annotation is ``Schema``, not ``t.Type[Schema]``).
    return Schema.from_dict(schema, name=schema_name)()  # type: ignore
class APIScaffold:
"""A base class for [`APIFlask`][apiflask.app.APIFlask] and
[`APIBlueprint`][apiflask.blueprint.APIBlueprint].
This class contains the route shortcut decorators (i.e. `get`, `post`, etc.) and
API-related decorators (i.e. `auth_required`, `input`, `output`, `doc`).
*Version added: 1.0*
"""
def _method_route(self, method: str, rule: str, options: t.Any):
if 'methods' in options:
raise RuntimeError('Use the "route" decorator to use the "methods" argument.')
def decorator(f):
if isinstance(f, type(MethodView)):
raise RuntimeError(
'The route shortcuts cannot be used with "MethodView" classes, '
'use the "route" decorator instead.'
)
return self.route(rule, methods=[method], **options)(f)
return decorator
    def get(self, rule: str, **options: t.Any):
        """Shortcut decorator for ``app.route(rule, methods=['GET'])``."""
        return self._method_route('GET', rule, options)

    def post(self, rule: str, **options: t.Any):
        """Shortcut decorator for ``app.route(rule, methods=['POST'])``."""
        return self._method_route('POST', rule, options)

    def put(self, rule: str, **options: t.Any):
        """Shortcut decorator for ``app.route(rule, methods=['PUT'])``."""
        return self._method_route('PUT', rule, options)

    def patch(self, rule: str, **options: t.Any):
        """Shortcut decorator for ``app.route(rule, methods=['PATCH'])``."""
        return self._method_route('PATCH', rule, options)

    def delete(self, rule: str, **options: t.Any):
        """Shortcut decorator for ``app.route(rule, methods=['DELETE'])``."""
        return self._method_route('DELETE', rule, options)
    def auth_required(
        self,
        auth: HTTPAuthType,
        roles: t.Optional[list] = None,
        optional: t.Optional[bool] = None
    ) -> t.Callable[[DecoratedType], DecoratedType]:
        """Protect a view with provided authentication settings.

        > Be sure to put it under the routes decorators (i.e., `app.route`, `app.get`,
        `app.post`, etc.).

        Examples:

        ```python
        from apiflask import APIFlask, HTTPTokenAuth

        app = APIFlask(__name__)
        auth = HTTPTokenAuth()

        @app.get('/')
        @app.auth_required(auth)
        def hello():
            return 'Hello!'
        ```

        Arguments:
            auth: The `auth` object, an instance of
                [`HTTPBasicAuth`][apiflask.security.HTTPBasicAuth]
                or [`HTTPTokenAuth`][apiflask.security.HTTPTokenAuth].
            roles: The selected roles to allow to visit this view, accepts a list of role names.
                See [Flask-HTTPAuth's documentation][_role]{target:_blank} for more details.

                [_role]: https://flask-httpauth.readthedocs.io/en/latest/#user-roles
            optional: Set to `True` to allow the view to execute even the authentication
                information is not included with the request, in which case the attribute
                `auth.current_user` will be `None`.

        *Version changed: 2.0.0*

        - Remove the deprecated `role` parameter.

        *Version changed: 1.0.0*

        - The `role` parameter is deprecated.

        *Version changed: 0.12.0*

        - Move to `APIFlask` and `APIBlueprint` classes.

        *Version changed: 0.4.0*

        - Add parameter `roles`.
        """
        def decorator(f):
            f = _ensure_sync(f)
            # Record the auth object and roles so the OpenAPI spec generator
            # can document this view's security requirements.
            _annotate(f, auth=auth, roles=roles or [])
            return auth.login_required(role=roles, optional=optional)(f)
        return decorator
    def input(
        self,
        schema: SchemaType,
        location: str = 'json',
        arg_name: t.Optional[str] = None,
        schema_name: t.Optional[str] = None,
        example: t.Optional[t.Any] = None,
        examples: t.Optional[t.Dict[str, t.Any]] = None,
        **kwargs: t.Any
    ) -> t.Callable[[DecoratedType], DecoratedType]:
        """Add input settings for view functions.

        If the validation passed, the data will be injected into the view
        function as a keyword argument in the form of `dict` and named `{location}_data`.
        Otherwise, an error response with the detail of the validation result will be
        returned.

        > Be sure to put it under the routes decorators (i.e., `app.route`, `app.get`,
        `app.post`, etc.).

        Examples:

        ```python
        from apiflask import APIFlask

        app = APIFlask(__name__)

        @app.get('/')
        @app.input(PetIn, location='json')
        def hello(json_data):
            print(json_data)
            return 'Hello!'
        ```

        Arguments:
            schema: The marshmallow schema of the input data.
            location: The location of the input data, one of `'json'` (default),
                `'files'`, `'form'`, `'cookies'`, `'headers'`, `'query'`
                (same as `'querystring'`).
            arg_name: The name of the argument passed to the view function,
                defaults to `{location}_data`.
            schema_name: The schema name for dict schema, only needed when you pass
                a schema dict (e.g., `{'name': String(required=True)}`) for `json`
                location.
            example: The example data in dict for request body, you should use either
                `example` or `examples`, not both.
            examples: Multiple examples for request body, you should pass a dict
                that contains multiple examples. Example:

                ```python
                {
                    'example foo': {  # example name
                        'summary': 'an example of foo',  # summary field is optional
                        'value': {'name': 'foo', 'id': 1}  # example value
                    },
                    'example bar': {
                        'summary': 'an example of bar',
                        'value': {'name': 'bar', 'id': 2}
                    },
                }
                ```

        *Version changed: 2.0.0*

        - Always pass parsed data to view function as a keyword argument.
          The argument name will be in the form of `{location}_data`.

        *Version changed: 1.0*

        - Ensure only one input body location was used.
        - Add `form_and_files` and `json_or_form` (from webargs) location.
        - Rewrite `files` to act as `form_and_files`.
        - Use correct request content type for `form` and `files`.

        *Version changed: 0.12.0*

        - Move to APIFlask and APIBlueprint classes.

        *Version changed: 0.4.0*

        - Add parameter `examples`.
        """
        # A dict schema (e.g. {'name': String()}) is first turned into a
        # generated Schema class, then instantiated like any other schema.
        if isinstance(schema, ABCMapping):
            schema = _generate_schema_from_mapping(schema, schema_name)
        if isinstance(schema, type):  # pragma: no cover
            schema = schema()

        def decorator(f):
            f = _ensure_sync(f)
            # Only one request-body location is allowed per view; non-body
            # locations (query, headers, ...) may be combined freely.
            is_body_location = location in BODY_LOCATIONS
            if is_body_location and hasattr(f, '_spec') and 'body' in f._spec:
                raise RuntimeError(
                    'When using the app.input() decorator, you can only declare one request '
                    'body location (one of "json", "form", "files", "form_and_files", '
                    'and "json_or_form").'
                )
            if location == 'json':
                _annotate(f, body=schema, body_example=example, body_examples=examples)
            elif location == 'form':
                _annotate(
                    f,
                    body=schema,
                    body_example=example,
                    body_examples=examples,
                    content_type='application/x-www-form-urlencoded'
                )
            elif location in ['files', 'form_and_files']:
                _annotate(
                    f,
                    body=schema,
                    body_example=example,
                    body_examples=examples,
                    content_type='multipart/form-data'
                )
            else:
                # Non-body locations are collected in the "args" list for the
                # OpenAPI spec generator.
                if not hasattr(f, '_spec') or f._spec.get('args') is None:
                    _annotate(f, args=[])
                if location == 'path':
                    # The schema documents the path parameters itself, so the
                    # auto-generated default path parameters are omitted.
                    _annotate(f, omit_default_path_parameters=True)
                # TODO: Support set example for request parameters
                f._spec['args'].append((schema, location))
            return use_args(
                schema,
                location=location,
                arg_name=arg_name or f'{location}_data',
                **kwargs
            )(f)
        return decorator
    def output(
        self,
        schema: SchemaType,
        status_code: int = 200,
        description: t.Optional[str] = None,
        schema_name: t.Optional[str] = None,
        example: t.Optional[t.Any] = None,
        examples: t.Optional[t.Dict[str, t.Any]] = None,
        links: t.Optional[t.Dict[str, t.Any]] = None,
        content_type: t.Optional[str] = 'application/json',
    ) -> t.Callable[[DecoratedType], DecoratedType]:
        """Add output settings for view functions.

        > Be sure to put it under the routes decorators (i.e., `app.route`, `app.get`,
        `app.post`, etc.).

        The decorator will format the return value of your view function with
        provided marshmallow schema. You can return a dict or an object (such
        as a model class instance of ORMs). APIFlask will handle the formatting
        and turn your return value into a JSON response.

        P.S. The output data will not be validated; it's a design choice of marshmallow.
        marshmallow 4.0 may support output validation.

        Examples:

        ```python
        from apiflask import APIFlask

        app = APIFlask(__name__)

        @app.get('/')
        @app.output(PetOut)
        def hello():
            return the_dict_or_object_match_petout_schema
        ```

        Arguments:
            schema: The schemas of the output data.
            status_code: The status code of the response, defaults to `200`.
            description: The description of the response.
            schema_name: The schema name for dict schema, only needed when you pass
                a schema dict (e.g., `{'name': String()}`).
            example: The example data in dict for response body, you should use either
                `example` or `examples`, not both.
            examples: Multiple examples for response body, you should pass a dict
                that contains multiple examples. Example:

                ```python
                {
                    'example foo': {  # example name
                        'summary': 'an example of foo',  # summary field is optional
                        'value': {'name': 'foo', 'id': 1}  # example value
                    },
                    'example bar': {
                        'summary': 'an example of bar',
                        'value': {'name': 'bar', 'id': 2}
                    },
                }
                ```
            links: The `links` of response. It accepts a dict which maps a link name to
                a link object. Example:

                ```python
                {
                    'getAddressByUserId': {
                        'operationId': 'getUserAddress',
                        'parameters': {
                            'userId': '$request.path.id'
                        }
                    }
                }
                ```

                See the [docs](https://apiflask.com/openapi/#response-links) for more details
                about setting response links.
            content_type: The content/media type of the response. It defaults to `application/json`.

        *Version changed: 2.0.0*

        - Don't change the status code to 204 for EmptySchema.

        *Version changed: 1.3.0*

        - Add parameter `content_type`.

        *Version changed: 0.12.0*

        - Move to APIFlask and APIBlueprint classes.

        *Version changed: 0.10.0*

        - Add `links` parameter.

        *Version changed: 0.9.0*

        - Add base response customization support.

        *Version changed: 0.6.0*

        - Support decorating async views.

        *Version changed: 0.5.2*

        - Return the `Response` object directly.

        *Version changed: 0.4.0*

        - Add parameter `examples`.
        """
        # An empty dict schema means "no response body"; use the EmptySchema sentinel.
        if schema == {}:
            schema = EmptySchema
        if isinstance(schema, ABCMapping):
            # A plain dict of fields (e.g., {'name': String()}) -> build a Schema for it.
            schema = _generate_schema_from_mapping(schema, schema_name)
        if isinstance(schema, type):  # pragma: no cover
            # A schema class was passed; the dump calls below need an instance.
            schema = schema()

        def decorator(f):
            f = _ensure_sync(f)
            # Record the response metadata on the view function for spec generation.
            _annotate(f, response={
                'schema': schema,
                'status_code': status_code,
                'description': description,
                'example': example,
                'examples': examples,
                'links': links,
                'content_type': content_type,
            })

            def _jsonify(
                obj: t.Any,
                many: bool = _sentinel,  # type: ignore
                *args: t.Any,
                **kwargs: t.Any
            ) -> Response:  # pragma: no cover
                """From Flask-Marshmallow, see the NOTICE file for license information."""
                if many is _sentinel:
                    many = schema.many  # type: ignore
                # When a base response schema is configured, dump the payload under
                # the configured data key and wrap it with the base schema
                # (skipped for 204 responses, which carry no body).
                base_schema: OpenAPISchemaType = current_app.config['BASE_RESPONSE_SCHEMA']
                if base_schema is not None and status_code != 204:
                    data_key: str = current_app.config['BASE_RESPONSE_DATA_KEY']

                    if isinstance(obj, dict):
                        if data_key not in obj:
                            raise RuntimeError(
                                f'The data key {data_key!r} is not found in the returned dict.'
                            )
                        obj[data_key] = schema.dump(obj[data_key], many=many)  # type: ignore
                    else:
                        if not hasattr(obj, data_key):
                            raise RuntimeError(
                                f'The data key {data_key!r} is not found in the returned object.'
                            )
                        setattr(
                            obj,
                            data_key,
                            schema.dump(getattr(obj, data_key), many=many)  # type: ignore
                        )

                    data = base_schema().dump(obj)  # type: ignore
                else:
                    data = schema.dump(obj, many=many)  # type: ignore
                return jsonify(data, *args, **kwargs)

            @wraps(f)
            def _response(*args: t.Any, **kwargs: t.Any) -> ResponseReturnValueType:
                rv = f(*args, **kwargs)
                # A ready-made Response object is passed through untouched.
                if isinstance(rv, Response):
                    return rv
                if not isinstance(rv, tuple):
                    return _jsonify(rv), status_code
                # Tuple returns follow Flask's (body, status, headers) conventions.
                json = _jsonify(rv[0])
                if len(rv) == 2:
                    # 2-tuple is either (body, status) or (body, headers)
                    rv = (json, rv[1]) if isinstance(rv[1], int) else (json, status_code, rv[1])
                elif len(rv) >= 3:
                    rv = (json, rv[1], rv[2])
                else:
                    # 1-tuple: fall back to the decorator's status code
                    rv = (json, status_code)
                return rv  # type: ignore
            return _response
        return decorator
def doc(
self,
summary: t.Optional[str] = None,
description: t.Optional[str] = None,
tags: t.Optional[t.List[str]] = None,
responses: t.Optional[ResponsesType] = None,
deprecated: t.Optional[bool] = None,
hide: t.Optional[bool] = None,
operation_id: t.Optional[str] = None,
security: t.Optional[t.Union[str, t.List[t.Union[str, t.Dict[str, list]]]]] = None,
) -> t.Callable[[DecoratedType], DecoratedType]:
"""Set up the OpenAPI Spec for view functions.
> Be sure to put it under the routes decorators (i.e., `app.route`, `app.get`,
`app.post`, etc.).
Examples:
```python
from apiflask import APIFlask
app = APIFlask(__name__)
@app.get('/')
@app.doc(summary='Say hello', tags=['Foo'])
def hello():
return 'Hello'
```
Arguments:
summary: The summary of this endpoint. If not set, the name of the view function
will be used. If your view function is named with `get_pet`, then the summary
will be "Get Pet". If the view function has a docstring, then the first
line of the docstring will be used. The precedence will be:
```
@app.doc(summary='blah') > the first line of docstring > the view function name
```
description: The description of this endpoint. If not set, the lines after the empty
line of the docstring will be used.
tags: A list of tag names of this endpoint, map the tags you passed in the `app.tags`
attribute. If `app.tags` is not set, the blueprint name will be used as tag name.
responses: The other responses for this view function, accepts a list of status codes
(`[404, 418]`) or a dict in a format of either `{404: 'Not Found'}` or
`{404: {'description': 'Not Found', 'content': {'application/json':
{'schema': FooSchema}}}}`. If a dict is passed and a response with the same status
code is already present, the existing data will be overwritten.
deprecated: Flag this endpoint as deprecated in API docs.
hide: Hide this endpoint in API docs.
operation_id: The `operationId` of this endpoint. Set config `AUTO_OPERATION_ID` to
`True` to enable the auto-generating of operationId (in the format of
`{method}_{endpoint}`).
security: The `security` used for this endpoint. Match the security info specified in
the `SECURITY_SCHEMES` configuration. If you don't need specify the scopes, just
pass a security name (equals to `[{'foo': []}]`) or a list of security names (equals
to `[{'foo': []}, {'bar': []}]`).
*Version changed: 2.0.0*
- Remove the deprecated `tag` parameter.
- Expand `responses` to support additional structure and parameters.
*Version changed: 1.0*
- Add `security` parameter to support customizing security info.
- The `role` parameter is deprecated.
*Version changed: 0.12.0*
- Move to `APIFlask` and `APIBlueprint` classes.
*Version changed: 0.10.0*
- Add parameter `operation_id`.
*Version changed: 0.5.0*
- Change the default value of parameters `hide` and `deprecated` from `False` to `None`.
*Version changed: 0.4.0*
- Add parameter `tag`.
*Version changed: 0.3.0*
- Change the default value of `deprecated` from `None` to `False`.
- Rename parameter `tags` to `tag`.
*Version added: 0.2.0*
"""
def decorator(f):
f = _ensure_sync(f)
_annotate(
f,
summary=summary,
description=description,
tags=tags,
responses=responses,
deprecated=deprecated,
hide=hide,
operation_id=operation_id,
security=security,
)
return f
return decorator | APIFlask | /APIFlask-2.0.1-py3-none-any.whl/apiflask/scaffold.py | scaffold.py |
import typing as t
from .openapi import get_path_description
from .openapi import get_path_summary
from .types import ViewClassType
from .types import ViewFuncOrClassType
from .types import ViewFuncType
from .views import MethodView
def route_patch(cls):
    """A decorator to add a patched `add_url_rule` method for `APIFlask` and
    `APIBlueprint` objects.

    The patched `add_url_rule` method will create a view function if passed a
    view class, and then generate spec from it.

    *Version changed: 0.10.0*

    - Remove the `route` decorator, and move the logic into `add_url_rule`.

    *Version added: 0.5.0*
    """

    def record_spec_for_view_class(
        view_func: ViewFuncType,
        view_class: ViewClassType
    ) -> ViewFuncType:
        """Collect per-HTTP-method spec info from a MethodView class.

        Stores one `_spec` dict per HTTP method in `view_func._method_spec`
        so the spec generator can document each method separately.
        """
        # when the user call add_url_rule multiple times for one view class,
        # we only need to extract info from view class once since it will
        # loop all the methods of the class.
        if hasattr(view_func, '_method_spec'):
            return view_func
        view_func._method_spec = {}
        if not hasattr(view_func, '_spec'):
            view_func._spec = {}
        if not view_class.methods:  # no methods defined
            return view_func
        for method_name in view_class.methods:  # type: ignore
            # method_name: ['GET', 'POST', ...]
            method = view_class.__dict__[method_name.lower()]
            # collect spec info from class attribute "decorators"
            if hasattr(view_func, '_spec') and view_func._spec != {}:
                if not hasattr(method, '_spec'):
                    method._spec = view_func._spec
                else:
                    # merge: class-level values only fill gaps in the method spec
                    for key, value in view_func._spec.items():
                        if value is not None and method._spec.get(key) is None:
                            method._spec[key] = value
            else:
                if not hasattr(method, '_spec'):
                    method._spec = {'no_spec': True}
            if not method._spec.get('summary'):
                # fall back to a summary built from the method and class names
                method._spec['summary'] = get_path_summary(
                    method, f'{method_name.title()} {view_class.__name__}'
                )
                method._spec['generated_summary'] = True
            if not method._spec.get('description'):
                method._spec['description'] = get_path_description(method)
                method._spec['generated_description'] = True
            view_func._method_spec[method_name] = method._spec
        return view_func

    def add_url_rule(
        self,
        rule: str,
        endpoint: t.Optional[str] = None,
        view_func: t.Optional[ViewFuncOrClassType] = None,
        provide_automatic_options: t.Optional[bool] = None,
        **options: t.Any,
    ):
        """Record the spec for view classes before calling the actual `add_url_rule` method.

        When calling this method directly, the `view_func` argument can be a view function or
        a view function created by `ViewClass.as_view()`. It only accepts a view class when
        using the route decorator on a view class.
        """
        if isinstance(view_func, type):
            # call as_view() for MethodView passed with @route
            if endpoint is None:
                endpoint = view_func.__name__
            view_func = view_func.as_view(endpoint)  # type: ignore

        if hasattr(view_func, 'view_class'):
            # view function created with MethodViewClass.as_view()
            view_class = view_func.view_class  # type: ignore
            if not issubclass(view_class, MethodView):
                # skip View-based class
                view_func._spec = {'hide': True}  # type: ignore
            else:
                # record spec for MethodView class
                if hasattr(self, 'enable_openapi') and self.enable_openapi:
                    view_func = record_spec_for_view_class(view_func, view_class)  # type: ignore

        # delegate to the original Flask/Blueprint add_url_rule
        super(cls, self).add_url_rule(
            rule,
            endpoint,
            view_func,
            provide_automatic_options=provide_automatic_options,
            **options
        )

    cls.add_url_rule = add_url_rule
    return cls
import inspect
import json
import re
import typing as t
import warnings
from apispec import APISpec
from apispec import BasePlugin
from apispec.ext.marshmallow import MarshmallowPlugin
from flask import Blueprint
from flask import Flask
from flask import has_request_context
from flask import jsonify
from flask import render_template_string
from flask import request
from flask.config import ConfigAttribute
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from flask_marshmallow import fields
try:
from flask_marshmallow import sqla
except ImportError:
sqla = None
from werkzeug.exceptions import HTTPException as WerkzeugHTTPException
from .exceptions import HTTPError
from .exceptions import _bad_schema_message
from .helpers import get_reason_phrase
from .route import route_patch
from .schemas import Schema
from .types import ResponseReturnValueType, ResponsesType
from .types import ViewFuncType
from .types import ErrorCallbackType
from .types import SpecCallbackType
from .types import SchemaType
from .types import HTTPAuthType
from .types import TagsType
from .types import OpenAPISchemaType
from .openapi import add_response
from .openapi import add_response_with_schema
from .openapi import default_bypassed_endpoints
from .openapi import default_response
from .openapi import get_tag
from .openapi import get_operation_tags
from .openapi import get_path_summary
from .openapi import get_auth_name
from .openapi import get_argument
from .openapi import get_security_and_security_schemes
from .ui_templates import ui_templates
from .ui_templates import swagger_ui_oauth2_redirect_template
from .scaffold import APIScaffold
@route_patch
class APIFlask(APIScaffold, Flask):
"""The `Flask` object with some web API support.
Examples:
```python
from apiflask import APIFlask
app = APIFlask(__name__)
```
Attributes:
openapi_version: The version of OpenAPI Specification (openapi.openapi).
This attribute can also be configured from the config with the
`OPENAPI_VERSION` configuration key. Defaults to `'3.0.3'`.
servers: The servers information of the API (openapi.servers), accepts
multiple server dicts. Example value:
```python
app.servers = [
{
'name': 'Production Server',
'url': 'http://api.example.com'
}
]
```
This attribute can also be configured from the config with the
`SERVERS` configuration key. Defaults to `None`.
tags: The list of tags of the OpenAPI spec documentation (openapi.tags),
accepts a list of dicts. You can also pass a simple list contains the
tag name:
```python
app.tags = ['foo', 'bar', 'baz']
```
A standard OpenAPI tags list will look like this:
```python
app.tags = [
{'name': 'foo', 'description': 'The description of foo'},
{'name': 'bar', 'description': 'The description of bar'},
{'name': 'baz', 'description': 'The description of baz'}
]
```
If not set, the blueprint names will be used as tags.
This attribute can also be configured from the config with the
`TAGS` configuration key. Defaults to `None`.
external_docs: The external documentation information of the API
(openapi.externalDocs). Example:
```python
app.external_docs = {
'description': 'Find more info here',
'url': 'http://docs.example.com'
}
```
This attribute can also be configured from the config with the
`EXTERNAL_DOCS` configuration key. Defaults to `None`.
info: The info object (openapi.info), it accepts a dict contains following info fields:
`description`, `termsOfService`, `contact`, `license`. You can use separate
configuration variables to overwrite this dict. Example:
```python
app.info = {
'description': '...',
'termsOfService': 'http://example.com',
'contact': {
'name': 'API Support',
'url': 'http://www.example.com/support',
'email': '[email protected]'
},
'license': {
'name': 'Apache 2.0',
'url': 'http://www.apache.org/licenses/LICENSE-2.0.html'
}
}
```
description: The description of the API (openapi.info.description).
This attribute can also be configured from the config with the
`DESCRIPTION` configuration key. Defaults to `None`.
contact: The contact information of the API (openapi.info.contact). Example:
```python
app.contact = {
'name': 'API Support',
'url': 'http://www.example.com/support',
'email': '[email protected]'
}
```
This attribute can also be configured from the config with the
`CONTACT` configuration key. Defaults to `None`.
license: The license of the API (openapi.info.license). Example:
```python
app.license = {
'name': 'Apache 2.0',
'url': 'http://www.apache.org/licenses/LICENSE-2.0.html'
}
```
This attribute can also be configured from the config with the
`LICENSE` configuration key. Defaults to `None`.
terms_of_service: The terms of service URL of the API
(openapi.info.termsOfService). Example:
```python
app.terms_of_service = 'http://example.com/terms/'
```
This attribute can also be configured from the config with the
`TERMS_OF_SERVICE` configuration key. Defaults to `None`.
security_schemes: The security schemes of the API
(openapi.components.securitySchemes). Example:
```python
app.security_schemes = [
'ApiKeyAuth': {
'type': 'apiKey',
'in': 'header',
'name': 'X-API-Key'
}
]
```
This attribute can also be configured from the config with the
`SECURITY_SCHEMES` configuration key. Defaults to `None`.
spec_callback: It stores the function object registerd by
[`spec_processor`][apiflask.APIFlask.spec_processor]. You can also
pass a callback function to it directly without using `spec_processor`.
Example:
```python
def update_spec(spec):
spec['title'] = 'Updated Title'
return spec
app.spec_callback = update_spec
```
error_callback: It stores the function object registerd by
[`error_processor`][apiflask.APIFlask.error_processor]. You can also
pass a callback function to it directly without using `error_processor`.
See the docstring of `error_processor` for more details.
Example:
```python
def my_error_handler(error):
return {
'status_code': error.status_code,
'message': error.message,
'detail': error.detail
}, error.status_code, error.headers
app.error_processor = my_error_handler
```
schema_name_resolver: It stores the function that used to decided the schema name.
The schema name resolver should accept the schema object as argument and return
the name.
Example:
```python
# this is the default schema name resolver used in APIFlask
def schema_name_resolver(schema):
name = schema.__class__.__name__ # get schema class name
if name.endswith('Schema'): # remove the "Schema" suffix
name = name[:-6] or name
if schema.partial: # add a "Update" suffix for partial schema
name += 'Update'
return name
app.schema_name_resolver = schema_name_resolver
```
*Version changed: 1.0*
- Add instance attribute `security_schemes` as an alias of config `SECURITY_SCHEMES`.
*Version changed: 0.9.0*
- Add instance attribute `schema_name_resolver`.
"""
openapi_version: str = ConfigAttribute('OPENAPI_VERSION') # type: ignore
tags: t.Optional[TagsType] = ConfigAttribute('TAGS') # type: ignore
servers: t.Optional[t.List[t.Dict[str, str]]] = ConfigAttribute('SERVERS') # type: ignore
info: t.Optional[t.Dict[str, t.Union[str, dict]]] = ConfigAttribute('INFO') # type: ignore
description: t.Optional[str] = ConfigAttribute('DESCRIPTION') # type: ignore
contact: t.Optional[t.Dict[str, str]] = ConfigAttribute('CONTACT') # type: ignore
license: t.Optional[t.Dict[str, str]] = ConfigAttribute('LICENSE') # type: ignore
external_docs: t.Optional[t.Dict[str, str]] = ConfigAttribute('EXTERNAL_DOCS') # type: ignore
terms_of_service: t.Optional[str] = ConfigAttribute('TERMS_OF_SERVICE') # type: ignore
security_schemes: t.Optional[t.Dict[str, t.Any]] = \
ConfigAttribute('SECURITY_SCHEMES') # type: ignore
    def __init__(
        self,
        import_name: str,
        title: str = 'APIFlask',
        version: str = '0.1.0',
        spec_path: t.Optional[str] = '/openapi.json',
        docs_path: t.Optional[str] = '/docs',
        docs_oauth2_redirect_path: t.Optional[str] = '/docs/oauth2-redirect',
        docs_ui: str = 'swagger-ui',
        openapi_blueprint_url_prefix: t.Optional[str] = None,
        json_errors: bool = True,
        enable_openapi: bool = True,
        spec_plugins: t.Optional[t.List[BasePlugin]] = None,
        static_url_path: t.Optional[str] = None,
        static_folder: str = 'static',
        static_host: t.Optional[str] = None,
        host_matching: bool = False,
        subdomain_matching: bool = False,
        template_folder: str = 'templates',
        instance_path: t.Optional[str] = None,
        instance_relative_config: bool = False,
        root_path: t.Optional[str] = None
    ) -> None:
        """Make an app instance.

        Arguments:
            import_name: The name of the application package, usually
                `__name__`. This helps locate the `root_path` for the
                application.
            title: The title of the API (openapi.info.title), defaults to "APIFlask".
                You can change it to the name of your API (e.g., "Pet API").
            version: The version of the API (openapi.info.version), defaults to "0.1.0".
            spec_path: The path to OpenAPI Spec documentation. It
                defaults to `/openapi.json`, if the path ends with `.yaml`
                or `.yml`, the YAML format of the OAS will be returned.
            docs_path: The path to API UI documentation, defaults to `/docs`.
            docs_ui: The UI of API documentation, one of `swagger-ui` (default), `redoc`,
                `elements`, `rapidoc`, and `rapipdf`.
            docs_oauth2_redirect_path: The path to Swagger UI OAuth redirect.
            openapi_blueprint_url_prefix: The url prefix of the OpenAPI blueprint. This
                prefix will append before all the OpenAPI-related paths (`spec_path`,
                `docs_path`, etc.), defaults to `None`.
            json_errors: If `True`, APIFlask will return a JSON response for HTTP errors.
            enable_openapi: If `False`, will disable OpenAPI spec and API docs views.
            spec_plugins: List of apispec-compatible plugins (subclasses of `apispec.BasePlugin`),
                defaults to `None`. The `MarshmallowPlugin` for apispec is already included
                by default, so it doesn't need to be provided here.

        Other keyword arguments are directly passed to `flask.Flask`.

        *Version changed: 2.0.0*

        - Remove the deprecated `redoc_path` parameter.

        *Version changed: 1.2.0*

        - Add `spec_plugins` parameter.

        *Version changed: 1.1.0*

        - Add `docs_ui` parameter.

        *Version changed: 0.7.0*

        - Add `openapi_blueprint_url_prefix` parameter.
        """
        super().__init__(
            import_name,
            static_url_path=static_url_path,
            static_folder=static_folder,
            static_host=static_host,
            host_matching=host_matching,
            subdomain_matching=subdomain_matching,
            template_folder=template_folder,
            instance_path=instance_path,
            instance_relative_config=instance_relative_config,
            root_path=root_path
        )

        # Set default config
        self.config.from_object('apiflask.settings')

        # basic OpenAPI info and docs-related settings
        self.title = title
        self.version = version
        self.spec_path = spec_path
        self.docs_ui = docs_ui
        self.docs_path = docs_path
        self.docs_oauth2_redirect_path = docs_oauth2_redirect_path
        self.openapi_blueprint_url_prefix = openapi_blueprint_url_prefix
        self.enable_openapi = enable_openapi
        self.json_errors = json_errors

        # user-registerable hooks; error_callback defaults to the built-in handler
        self.spec_callback: t.Optional[SpecCallbackType] = None
        self.error_callback: ErrorCallbackType = self._error_handler
        self.schema_name_resolver = self._schema_name_resolver

        self.spec_plugins: t.List[BasePlugin] = spec_plugins or []
        # cached spec document, regenerated on demand (see _get_spec)
        self._spec: t.Optional[t.Union[dict, str]] = None
        # blueprint-level auth info collected for spec generation
        self._auth_blueprints: t.Dict[str, t.Dict[str, t.Any]] = {}

        self._register_openapi_blueprint()
        self._register_error_handlers()
def _register_error_handlers(self) -> None:
"""Register default error handlers for HTTPError and WerkzeugHTTPException.
*Version changed: 0.9.0*
- Always pass an `HTTPError` instance to error handlers.
"""
@self.errorhandler(HTTPError) # type: ignore
def handle_http_errors(
error: HTTPError
) -> ResponseReturnValueType:
return self.error_callback(error)
if self.json_errors:
self._apply_error_callback_to_werkzeug_errors()
def _apply_error_callback_to_werkzeug_errors(self) -> None:
@self.errorhandler(WerkzeugHTTPException) # type: ignore
def handle_werkzeug_errors(
e: WerkzeugHTTPException
) -> ResponseReturnValueType:
headers = dict(e.get_headers())
# remove the original MIME header
del headers['Content-Type']
error = HTTPError(
e.code,
message=e.name,
headers=headers
)
return self.error_callback(error)
@staticmethod
def _error_handler(
error: HTTPError
) -> ResponseReturnValueType:
"""The default error handler.
Arguments:
error: An instance of [`HTTPError`][apiflask.exceptions.HTTPError].
*Version changed: 0.10.0*
- Remove the `status_code` field from the response.
- Add `HTTPError.extra_data` to the reponse body.
"""
body = {
'detail': error.detail,
'message': error.message,
**error.extra_data
}
return body, error.status_code, error.headers
    def error_processor(
        self,
        f: ErrorCallbackType
    ) -> ErrorCallbackType:
        """A decorator to register a custom error response processor function.

        The decorated callback function will be called in the following situations:

        - Any HTTP exception is raised by Flask when handling request.
        - A validation error happened when parsing a request.
        - An exception triggered with [`HTTPError`][apiflask.exceptions.HTTPError]
        - An exception triggered with [`abort`][apiflask.exceptions.abort].

        You can still register a specific error handler for a specific error code
        or exception with the `app.errorhandler(code_or_exception)` decorator,
        in that case, the return value of the specific error handler will be used as the
        response when the corresponding error or exception happened.

        The callback function must accept an error object as argument and return a valid
        response.

        Examples:

        ```python
        @app.error_processor
        def my_error_processor(error):
            return {
                'status_code': error.status_code,
                'message': error.message,
                'detail': error.detail,
                **error.extra_data
            }, error.status_code, error.headers
        ```

        The error object is an instance of [`HTTPError`][apiflask.exceptions.HTTPError],
        so you can get error information via its attributes:

        - status_code: If the error is triggered by a validation error, the value will be
            422 (default) or the value you passed in config `VALIDATION_ERROR_STATUS_CODE`.
            If the error is triggered by [`HTTPError`][apiflask.exceptions.HTTPError]
            or [`abort`][apiflask.exceptions.abort], it will be the status code
            you passed. Otherwise, it will be the status code set by Werkzeug when
            processing the request.
        - message: The error description for this error, either you passed or grabbed from
            Werkzeug.
        - detail: The detail of the error. When the validation error happens, it will
            be filled automatically in the following structure:

            ```python
            "<location>": {
                "<field_name>": ["<error_message>", ...],
                "<field_name>": ["<error_message>", ...],
                ...
            },
            "<location>": {
                ...
            },
            ...
            ```

            The value of `location` can be `json` (i.e., request body) or `query`
            (i.e., query string) depending on the place where the validation error
            happened.
        - headers: The value will be `{}` unless you pass it in `HTTPError` or `abort`.
        - extra_data: Additional error information.

        If you want, you can rewrite the whole response body to anything you like:

        ```python
        @app.error_processor
        def my_error_processor(error):
            body = {'error_detail': error.detail, **error.extra_data}
            return body, error.status_code, error.headers
        ```

        However, I would recommend keeping the `detail` in the response since it contains
        the detailed information about the validation error when the validation error
        happened.

        *Version changed: 1.0*

        - Apply this error processor to normal HTTP errors even when
        `json_error` is set to `False` when creating `APIFlask` instance.

        *Version changed: 0.7.0*

        - Support registering an async callback function.
        """
        # ensure_sync lets async callback functions be registered as well
        self.error_callback = self.ensure_sync(f)
        # re-register the Werkzeug handler so normal HTTP errors also go
        # through the new callback (even when json_errors is False)
        self._apply_error_callback_to_werkzeug_errors()
        return f
    def _register_openapi_blueprint(self) -> None:
        """Register a blueprint for OpenAPI support.

        The name of the blueprint is "openapi". This blueprint will hold the view
        functions for spec file and API docs.

        *Version changed: 1.1.0*

        - Deprecate the redoc view at /redoc path.

        *Version changed: 0.7.0*

        - The format of the spec now rely on the `SPEC_FORMAT` config.
        """
        bp = Blueprint(
            'openapi',
            __name__,
            url_prefix=self.openapi_blueprint_url_prefix
        )

        if self.spec_path:
            @bp.route(self.spec_path)
            def spec():
                # serve the spec as JSON or YAML depending on the SPEC_FORMAT config
                if self.config['SPEC_FORMAT'] == 'json':
                    response = jsonify(self._get_spec('json'))
                    response.mimetype = self.config['JSON_SPEC_MIMETYPE']
                    return response
                return self._get_spec('yaml'), 200, \
                    {'Content-Type': self.config['YAML_SPEC_MIMETYPE']}

        if self.docs_path:
            # validate the chosen docs UI before registering the view
            if self.docs_ui not in ui_templates:
                valid_values = list(ui_templates.keys())
                raise ValueError(
                    f'Invalid docs_ui value, expected one of {valid_values!r}, '
                    f'got {self.docs_ui!r}.'
                )

            @bp.route(self.docs_path)
            def docs():
                # render the chosen API docs UI (Swagger UI, Redoc, etc.)
                return render_template_string(
                    ui_templates[self.docs_ui],
                    title=self.title,
                    version=self.version,
                    oauth2_redirect_path=self.docs_oauth2_redirect_path
                )

            if self.docs_ui == 'swagger-ui':
                # Swagger UI needs an extra view to complete the OAuth2 flow
                if self.docs_oauth2_redirect_path:
                    @bp.route(self.docs_oauth2_redirect_path)
                    def swagger_ui_oauth_redirect() -> str:
                        return render_template_string(swagger_ui_oauth2_redirect_template)

        # only register the blueprint when OpenAPI support is enabled and
        # at least one of the spec/docs paths is set
        if self.enable_openapi and (
            self.spec_path or self.docs_path
        ):
            self.register_blueprint(bp)
    def _get_spec(
        self,
        spec_format: t.Optional[str] = None,
        force_update: bool = False
    ) -> t.Union[dict, str]:
        """Get the current OAS document file.

        This method will return the cached spec on the first call. If you want
        to get the latest spec, set the `force_update` to `True` or use the
        public attribute `app.spec`, which will always return the newly generated
        spec when you call it.

        If the config `SYNC_LOCAL_SPEC` is `True`, the local spec
        specified in config `LOCAL_SPEC_PATH` will be automatically updated
        when the spec changes.

        Arguments:
            spec_format: The format of the spec file, one of `'json'`, `'yaml'`
                and `'yml'`, defaults to the `SPEC_FORMAT` config.
            force_update: If true, will generate the spec for every call instead
                of using the cache.

        *Version changed: 0.7.0*

        - The default format now relies on the `SPEC_FORMAT` config.
        - Support to sync local spec file.

        *Version changed: 0.7.1*

        - Rename the method name to `_get_spec`.
        - Add the `force_update` parameter.

        *Version changed: 1.3.0*

        - Add the `SPEC_PROCESSOR_PASS_OBJECT` config to control the argument type
        when calling the spec processor.
        """
        if spec_format is None:
            spec_format = self.config['SPEC_FORMAT']
        if self._spec is None or force_update:
            spec_object: APISpec = self._generate_spec()
            if self.spec_callback:
                # let a user-registered spec processor modify the spec first
                if self.config['SPEC_PROCESSOR_PASS_OBJECT']:
                    # the processor receives the apispec.APISpec object
                    self._spec = self.spec_callback(
                        spec_object  # type: ignore
                    ).to_dict()
                else:
                    # the processor receives the plain dict representation
                    self._spec = self.spec_callback(
                        spec_object.to_dict()
                    )
            else:
                self._spec = spec_object.to_dict()
            if spec_format in ['yml', 'yaml']:
                from apispec.yaml_utils import dict_to_yaml
                self._spec = dict_to_yaml(self._spec)  # type: ignore
        # sync local spec
        if self.config['SYNC_LOCAL_SPEC']:
            spec_path = self.config['LOCAL_SPEC_PATH']
            if spec_path is None:
                raise TypeError(
                    'The spec path (LOCAL_SPEC_PATH) should be a valid path string.'
                )
            spec: str
            if spec_format == 'json':
                spec = json.dumps(
                    self._spec, indent=self.config['LOCAL_SPEC_JSON_INDENT']
                )
            else:
                spec = str(self._spec)
            with open(spec_path, 'w') as f:
                f.write(spec)
        return self._spec  # type: ignore
    def spec_processor(self, f: SpecCallbackType) -> SpecCallbackType:
        """A decorator to register a spec handler callback function.

        You can register a function to update the spec. The callback function
        should accept the spec as an argument and return it in the end. The
        callback function will be called when generating the spec file.

        Examples:

        ```python
        @app.spec_processor
        def update_spec(spec):
            spec['info']['title'] = 'Updated Title'
            return spec
        ```

        Notice the format of the spec depends on the value of the configuration
        variable `SPEC_FORMAT` (defaults to `'json'`):

        - `'json'` -> dict
        - `'yaml'` -> string

        *Version Changed: 0.7.0*

        - Support registering an async callback function.
        """
        # ensure_sync lets async callback functions be used as processors
        self.spec_callback = self.ensure_sync(f)
        return f
    @property
    def spec(self) -> t.Union[dict, str]:
        """Get the current OAS document file.

        This property will call `app._get_spec()` method and set the
        `force_update` parameter to `True`, so the spec is regenerated
        (and the local spec file is re-synced, if enabled) on every access.

        *Version changed: 0.7.1*

        - Generate the spec on every call.
        """
        return self._get_spec(force_update=True)
@staticmethod
def _schema_name_resolver(schema: t.Type[Schema]) -> str:
"""Default schema name resovler."""
# some schema are passed through the `doc(responses=...)`
# we need to make sure the schema is an instance of `Schema`
if isinstance(schema, type): # pragma: no cover
schema = schema() # type: ignore
name = schema.__class__.__name__
if name.endswith('Schema'):
name = name[:-6] or name
if schema.partial:
name += 'Update'
return name
def _make_info(self) -> dict:
"""Make OpenAPI info object."""
info: dict
if self.info:
info = self.info
else:
info = {}
if self.contact:
info['contact'] = self.contact
if self.license:
info['license'] = self.license
if self.terms_of_service:
info['termsOfService'] = self.terms_of_service
if self.description:
info['description'] = self.description
return info
def _make_tags(self) -> t.List[t.Dict[str, t.Any]]:
    """Make OpenAPI tags object.

    Returns a list of OpenAPI tag dicts. When ``self.tags`` is set,
    simple string entries are converted to ``{'name': <tag>}`` objects;
    otherwise, when ``AUTO_TAGS`` is enabled, one tag is generated per
    OpenAPI-enabled blueprint.
    """
    tags: t.List[t.Dict[str, t.Any]] = []
    if self.tags is not None:
        # convert simple tag names into standard OpenAPI tag objects;
        # build a new list instead of mutating the user-provided
        # ``self.tags`` in place (the previous code rewrote the config
        # value and mishandled mixed str/dict tag lists)
        for tag in self.tags:
            tags.append({'name': tag} if isinstance(tag, str) else tag)  # type: ignore
    elif self.config['AUTO_TAGS']:
        # auto-generate tags from blueprints
        for blueprint_name, blueprint in self.blueprints.items():
            if blueprint_name == 'openapi' or \
                    not hasattr(blueprint, 'enable_openapi') or \
                    not blueprint.enable_openapi:  # type: ignore
                continue
            tags.append(get_tag(blueprint, blueprint_name))  # type: ignore
    return tags
def _collect_security_info(self) -> t.Tuple[t.List[str], t.List[HTTPAuthType]]:
    """Detect `auth_required` on blueprint before_request functions and view functions.

    Returns a tuple ``(auth_names, auth_schemes)``; the two lists are
    kept index-aligned (``auth_names[i]`` names ``auth_schemes[i]``).
    """
    # security schemes
    auth_names: t.List[str] = []
    auth_schemes: t.List[HTTPAuthType] = []

    def _update_auth_info(auth: HTTPAuthType) -> None:
        # update auth_schemes and auth_names
        auth_schemes.append(auth)
        auth_name: str = get_auth_name(auth, auth_names)
        auth_names.append(auth_name)

    # collect auth info on blueprint before_request functions
    for blueprint_name, funcs in self.before_request_funcs.items():
        # skip app-level before_request functions (blueprint_name is None)
        if blueprint_name is None or \
                not self.blueprints[blueprint_name].enable_openapi:  # type: ignore
            continue
        for f in funcs:
            if hasattr(f, '_spec'):  # pragma: no cover
                auth = f._spec.get('auth')  # type: ignore
                if auth is not None and auth not in auth_schemes:
                    # remember blueprint-level auth so operations under
                    # this blueprint can reference it later
                    self._auth_blueprints[blueprint_name] = {
                        'auth': auth,
                        'roles': f._spec.get('roles')  # type: ignore
                    }
                    _update_auth_info(auth)
    # collect auth info on view functions
    for rule in self.url_map.iter_rules():
        view_func: ViewFuncType = self.view_functions[rule.endpoint]  # type: ignore
        if hasattr(view_func, '_spec'):
            auth = view_func._spec.get('auth')
            if auth is not None and auth not in auth_schemes:
                _update_auth_info(auth)
        # method views
        if hasattr(view_func, '_method_spec'):
            for method_spec in view_func._method_spec.values():
                auth = method_spec.get('auth')
                if auth is not None and auth not in auth_schemes:
                    _update_auth_info(auth)
    return auth_names, auth_schemes
def _generate_spec(self) -> APISpec:
    """Generate the spec, return an instance of `apispec.APISpec`.

    The spec is assembled from the app config, the registered blueprints,
    and the ``_spec``/``_method_spec`` attributes that the APIFlask
    decorators attach to view functions.

    *Version changed: 1.3.0*

    - Support setting custom response content type.

    *Version changed: 1.2.1*

    - Set default `servers` value.

    *Version changed: 0.10.0*

    - Add support for `operationId`.
    - Add support for response `links`.

    *Version changed: 0.9.0*

    - Add base response customization support.

    *Version changed: 0.8.0*

    - Add automatic 404 response support.
    """
    kwargs: dict = {}
    if self.servers:
        kwargs['servers'] = self.servers
    else:
        # fall back to the current request's URL root as the server URL
        if self.config['AUTO_SERVERS'] and has_request_context():
            kwargs['servers'] = [{'url': request.url_root}]
    if self.external_docs:
        kwargs['externalDocs'] = self.external_docs
    ma_plugin: MarshmallowPlugin = MarshmallowPlugin(
        schema_name_resolver=self.schema_name_resolver  # type: ignore
    )
    spec_plugins: t.List[BasePlugin] = [ma_plugin, *self.spec_plugins]
    spec: APISpec = APISpec(
        title=self.title,
        version=self.version,
        openapi_version=self.config['OPENAPI_VERSION'],
        plugins=spec_plugins,
        info=self._make_info(),
        tags=self._make_tags(),
        **kwargs
    )
    # configure flask-marshmallow URL types
    ma_plugin.converter.field_mapping[fields.URLFor] = ('string', 'url')  # type: ignore
    ma_plugin.converter.field_mapping[fields.AbsoluteURLFor] = (  # type: ignore
        'string', 'url'
    )
    if sqla is not None:  # pragma: no cover
        ma_plugin.converter.field_mapping[sqla.HyperlinkRelated] = (  # type: ignore
            'string', 'url'
        )
    # security schemes: collected from auth decorators plus user config
    auth_names, auth_schemes = self._collect_security_info()
    security, security_schemes = get_security_and_security_schemes(
        auth_names, auth_schemes
    )
    if self.config['SECURITY_SCHEMES'] is not None:
        security_schemes.update(self.config['SECURITY_SCHEMES'])
    for name, scheme in security_schemes.items():
        spec.components.security_scheme(name, scheme)
    # paths
    paths: t.Dict[str, t.Dict[str, t.Any]] = {}
    # sort rules by URL length so shorter paths come first in the spec
    rules: t.List[t.Any] = sorted(
        list(self.url_map.iter_rules()), key=lambda rule: len(rule.rule)
    )
    for rule in rules:
        operations: t.Dict[str, t.Any] = {}
        view_func: ViewFuncType = self.view_functions[rule.endpoint]  # type: ignore
        # skip endpoints from openapi blueprint and the built-in static endpoint
        if rule.endpoint in default_bypassed_endpoints:
            continue
        blueprint_name: t.Optional[str] = None  # type: ignore
        if '.' in rule.endpoint:
            blueprint_name: str = rule.endpoint.rsplit('.', 1)[0]  # type: ignore
            blueprint = self.blueprints.get(blueprint_name)  # type: ignore
            if blueprint is None:
                # just a normal view with dots in its endpoint, reset blueprint_name
                blueprint_name = None
            else:
                if rule.endpoint == (f'{blueprint_name}.static') or \
                        not hasattr(blueprint, 'enable_openapi') or \
                        not blueprint.enable_openapi:  # type: ignore
                    continue
        # add a default 200 response for bare views
        if not hasattr(view_func, '_spec'):
            if not inspect.ismethod(view_func) and self.config['AUTO_200_RESPONSE']:
                view_func._spec = {'response': default_response}
            else:
                continue  # pragma: no cover
        # method views: skip the whole rule only when every method opted out
        # of the spec and no automatic 200 response is configured
        if hasattr(view_func, '_method_spec'):
            skip = True
            for method, method_spec in view_func._method_spec.items():
                if method_spec.get('no_spec'):
                    if self.config['AUTO_200_RESPONSE']:
                        view_func._method_spec[method]['response'] = default_response
                        skip = False
                else:
                    skip = False
            if skip:
                continue
        # skip views flagged with @app.doc(hide=True)
        if view_func._spec.get('hide'):
            continue
        # operation tags
        operation_tags: t.Optional[t.List[str]] = None
        if view_func._spec.get('tags'):
            operation_tags = view_func._spec.get('tags')
        else:
            # use blueprint name as tag
            if self.tags is None and self.config['AUTO_TAGS'] and \
                    blueprint_name is not None:
                blueprint = self.blueprints[blueprint_name]
                operation_tags = \
                    get_operation_tags(blueprint, blueprint_name)  # type: ignore
        for method in ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']:
            if method not in rule.methods:
                continue
            # method views: use the per-method spec for this HTTP method
            if hasattr(view_func, '_method_spec'):
                if method not in view_func._method_spec:
                    continue  # pragma: no cover
                view_func._spec = view_func._method_spec[method]
                if view_func._spec.get('no_spec') and \
                        not self.config['AUTO_200_RESPONSE']:
                    continue
                if view_func._spec.get('generated_summary') and \
                        not self.config['AUTO_OPERATION_SUMMARY']:
                    view_func._spec['summary'] = ''
                if view_func._spec.get('generated_description') and \
                        not self.config['AUTO_OPERATION_DESCRIPTION']:
                    view_func._spec['description'] = ''
                if view_func._spec.get('hide'):
                    continue
                if view_func._spec.get('tags'):
                    operation_tags = view_func._spec.get('tags')
                else:
                    if self.tags is None and self.config['AUTO_TAGS'] and \
                            blueprint_name is not None:
                        blueprint = self.blueprints[blueprint_name]
                        operation_tags = \
                            get_operation_tags(blueprint, blueprint_name)  # type: ignore
            operation: t.Dict[str, t.Any] = {
                'parameters': [
                    {'in': location, 'schema': schema}
                    for schema, location in view_func._spec.get('args', [])
                ],
                'responses': {},
            }
            if operation_tags:
                operation['tags'] = operation_tags
            # summary
            if view_func._spec.get('summary'):
                operation['summary'] = view_func._spec.get('summary')
            else:
                # auto-generate summary from docstring or view function name
                if self.config['AUTO_OPERATION_SUMMARY']:
                    operation['summary'] = get_path_summary(view_func)  # type: ignore
            # description
            if view_func._spec.get('description'):
                operation['description'] = view_func._spec.get('description')
            else:
                # auto-generate description from docstring
                if self.config['AUTO_OPERATION_DESCRIPTION']:
                    docs = [
                        line.strip() for line in (view_func.__doc__ or '').strip().split('\n')
                    ]
                    if len(docs) > 1:
                        # use the remaining lines of docstring as description
                        operation['description'] = '\n'.join(docs[1:]).strip()
            # deprecated
            if view_func._spec.get('deprecated'):
                operation['deprecated'] = view_func._spec.get('deprecated')
            # operationId
            operation_id = view_func._spec.get('operation_id')
            if operation_id is None:
                if self.config['AUTO_OPERATION_ID']:
                    operation['operationId'] = \
                        f"{method.lower()}_{rule.endpoint.replace('.', '_')}"
            else:
                operation['operationId'] = operation_id
            # responses
            if view_func._spec.get('response'):
                schema = view_func._spec.get('response')['schema']
                base_schema: OpenAPISchemaType = self.config['BASE_RESPONSE_SCHEMA']
                if base_schema is not None:
                    # wrap the output schema inside the base response schema
                    base_schema_spec: dict
                    if isinstance(base_schema, type):
                        base_schema_spec = \
                            ma_plugin.converter.schema2jsonschema(  # type: ignore
                                base_schema()
                            )
                    elif isinstance(base_schema, dict):
                        base_schema_spec = base_schema
                    else:
                        raise TypeError(_bad_schema_message)
                    data_key: str = self.config['BASE_RESPONSE_DATA_KEY']
                    if data_key not in base_schema_spec['properties']:
                        raise RuntimeError(
                            f'The data key {data_key!r} is not found in'
                            ' the base response schema spec.'
                        )
                    base_schema_spec['properties'][data_key] = schema
                    schema = base_schema_spec
                status_code: str = str(view_func._spec.get('response')['status_code'])
                description: str = view_func._spec.get('response')['description'] or \
                    self.config['SUCCESS_DESCRIPTION']
                example = view_func._spec.get('response')['example']
                examples = view_func._spec.get('response')['examples']
                links = view_func._spec.get('response')['links']
                content_type = view_func._spec.get('response')['content_type']
                add_response(
                    operation,
                    status_code,
                    schema,
                    description,
                    example,
                    examples,
                    links,
                    content_type,
                )
            else:
                # add a default 200 response for views without using @app.output
                # or @app.doc(responses={...})
                if not view_func._spec.get('responses') and self.config['AUTO_200_RESPONSE']:
                    add_response(
                        operation, '200', {}, self.config['SUCCESS_DESCRIPTION']
                    )
            # add validation error response
            if self.config['AUTO_VALIDATION_ERROR_RESPONSE'] and \
                    (view_func._spec.get('body') or view_func._spec.get('args')):
                status_code: str = str(  # type: ignore
                    self.config['VALIDATION_ERROR_STATUS_CODE']
                )
                description: str = self.config[  # type: ignore
                    'VALIDATION_ERROR_DESCRIPTION'
                ]
                schema: SchemaType = self.config['VALIDATION_ERROR_SCHEMA']  # type: ignore
                add_response_with_schema(
                    spec, operation, status_code, schema, 'ValidationError', description
                )
            # add authentication error response
            has_bp_level_auth = blueprint_name is not None and \
                blueprint_name in self._auth_blueprints
            view_func_auth = view_func._spec.get('auth')
            custom_security = view_func._spec.get('security')
            if self.config['AUTO_AUTH_ERROR_RESPONSE'] and \
                    (has_bp_level_auth or view_func_auth or custom_security):
                status_code: str = str(  # type: ignore
                    self.config['AUTH_ERROR_STATUS_CODE']
                )
                description: str = self.config['AUTH_ERROR_DESCRIPTION']  # type: ignore
                schema: SchemaType = self.config['HTTP_ERROR_SCHEMA']  # type: ignore
                add_response_with_schema(
                    spec, operation, status_code, schema, 'HTTPError', description
                )
            # add 404 error response (only for URL rules with arguments)
            if self.config['AUTO_404_RESPONSE'] and rule.arguments:
                description: str = self.config['NOT_FOUND_DESCRIPTION']  # type: ignore
                schema: SchemaType = self.config['HTTP_ERROR_SCHEMA']  # type: ignore
                add_response_with_schema(
                    spec, operation, '404', schema, 'HTTPError', description
                )
            if view_func._spec.get('responses'):
                responses: ResponsesType = view_func._spec.get('responses')
                # turn status_code list to dict {status_code: reason_phrase}
                if isinstance(responses, list):
                    responses: t.Dict[int, str] = {}  # type: ignore
                    for status_code in view_func._spec.get('responses'):
                        responses[  # type: ignore
                            status_code
                        ] = get_reason_phrase(int(status_code), '')
                for status_code, value in responses.items():  # type: ignore
                    status_code: str = str(status_code)  # type: ignore
                    # custom complete response spec
                    if isinstance(value, dict):
                        operation['responses'][status_code] = value
                        continue
                    else:
                        description = value
                    # overwrite existing response description
                    if status_code in operation['responses']:
                        if not isinstance(
                            view_func._spec.get('responses'), list
                        ):  # pragma: no cover
                            operation['responses'][status_code]['description'] = description
                        continue
                    # add error response schema for error responses
                    if status_code.startswith('4') or status_code.startswith('5'):
                        schema: SchemaType = self.config['HTTP_ERROR_SCHEMA']  # type: ignore
                        add_response_with_schema(
                            spec, operation, status_code, schema, 'HTTPError', description
                        )
                    else:  # add default response for other responses
                        add_response(operation, status_code, {}, description)
            # requestBody
            if view_func._spec.get('body'):
                content_type = view_func._spec.get('content_type', 'application/json')
                operation['requestBody'] = {
                    'content': {
                        content_type: {
                            'schema': view_func._spec['body'],
                        }
                    }
                }
                if view_func._spec.get('body_example'):
                    example = view_func._spec.get('body_example')
                    operation['requestBody']['content'][
                        content_type]['example'] = example
                if view_func._spec.get('body_examples'):
                    examples = view_func._spec.get('body_examples')
                    operation['requestBody']['content'][
                        content_type]['examples'] = examples
            # security
            if custom_security:  # custom security
                # TODO: validate the security name and the format
                operation['security'] = []
                operation_security = custom_security
                if isinstance(operation_security, str):  # 'A' -> [{'A': []}]
                    operation['security'] = [{operation_security: []}]
                elif isinstance(operation_security, list):
                    # ['A', 'B'] -> [{'A': []}, {'B': []}]
                    if isinstance(operation_security[0], str):
                        operation['security'] = [{name: []} for name in operation_security]
                    else:
                        operation['security'] = operation_security
                else:
                    raise ValueError(
                        'The operation security must be a string or a list.'
                    )
            else:
                if has_bp_level_auth:
                    bp_auth_info = self._auth_blueprints[blueprint_name]  # type: ignore
                    operation['security'] = [{
                        security[bp_auth_info['auth']]: bp_auth_info['roles']
                    }]
                # view-wide auth
                if view_func_auth:
                    operation['security'] = [{
                        security[view_func_auth]: view_func._spec['roles']
                    }]
            operations[method.lower()] = operation
        # parameters: derive path parameters from the URL rule placeholders
        path_arguments: t.Iterable = re.findall(r'<(([^<:]+:)?([^>]+))>', rule.rule)
        if (
            path_arguments
            and not (
                hasattr(view_func, '_spec')
                and view_func._spec.get('omit_default_path_parameters', False)
            )
        ):
            arguments: t.List[t.Dict[str, str]] = []
            for _, argument_type, argument_name in path_arguments:
                argument = get_argument(argument_type, argument_name)
                arguments.append(argument)
            for _method, operation in operations.items():
                operation['parameters'] = arguments + operation['parameters']
        # convert Flask's <converter:name> placeholders to OpenAPI {name}
        path: str = re.sub(r'<([^<:]+:)?', '{', rule.rule).replace('>', '}')
        if path not in paths:
            paths[path] = operations
        else:
            paths[path].update(operations)
    for path, operations in paths.items():
        # sort by method before adding them to the spec
        sorted_operations: t.Dict[str, t.Any] = {}
        for method in ['get', 'post', 'put', 'patch', 'delete']:
            if method in operations:
                sorted_operations[method] = operations[method]
        spec.path(path=path, operations=sorted_operations)
    return spec
import typing as t
from .schemas import http_error_schema
from .schemas import validation_error_schema
from .types import OpenAPISchemaType
from .types import TagsType
# OpenAPI fields
OPENAPI_VERSION: str = '3.0.3'
SERVERS: t.Optional[t.List[t.Dict[str, str]]] = None
TAGS: t.Optional[TagsType] = None
EXTERNAL_DOCS: t.Optional[t.Dict[str, str]] = None
INFO: t.Optional[t.Dict[str, t.Union[str, dict]]] = None
DESCRIPTION: t.Optional[str] = None
TERMS_OF_SERVICE: t.Optional[str] = None
CONTACT: t.Optional[t.Dict[str, str]] = None
LICENSE: t.Optional[t.Dict[str, str]] = None
SECURITY_SCHEMES: t.Optional[t.Dict[str, t.Any]] = None
# OpenAPI spec
SPEC_FORMAT: str = 'json'  # 'json' -> dict spec, 'yaml' -> string spec
YAML_SPEC_MIMETYPE: str = 'text/vnd.yaml'
JSON_SPEC_MIMETYPE: str = 'application/json'
LOCAL_SPEC_PATH: t.Optional[str] = None  # where to write the local spec file
LOCAL_SPEC_JSON_INDENT: int = 2
SYNC_LOCAL_SPEC: t.Optional[bool] = None
SPEC_PROCESSOR_PASS_OBJECT: bool = False
# Automation behavior control
AUTO_TAGS: bool = True
AUTO_SERVERS: bool = True
AUTO_OPERATION_SUMMARY: bool = True
AUTO_OPERATION_DESCRIPTION: bool = True
AUTO_OPERATION_ID: bool = False
AUTO_200_RESPONSE: bool = True
AUTO_404_RESPONSE: bool = True
AUTO_VALIDATION_ERROR_RESPONSE: bool = True
AUTO_AUTH_ERROR_RESPONSE: bool = True
# Response customization
SUCCESS_DESCRIPTION: str = 'Successful response'
NOT_FOUND_DESCRIPTION: str = 'Not found'
VALIDATION_ERROR_DESCRIPTION: str = 'Validation error'
AUTH_ERROR_DESCRIPTION: str = 'Authentication error'
VALIDATION_ERROR_STATUS_CODE: int = 422
AUTH_ERROR_STATUS_CODE: int = 401
VALIDATION_ERROR_SCHEMA: OpenAPISchemaType = validation_error_schema
HTTP_ERROR_SCHEMA: OpenAPISchemaType = http_error_schema
BASE_RESPONSE_SCHEMA: t.Optional[OpenAPISchemaType] = None
BASE_RESPONSE_DATA_KEY: str = 'data'
# API docs
DOCS_FAVICON: str = 'https://apiflask.com/_assets/favicon.png'
REDOC_USE_GOOGLE_FONT: bool = True
REDOC_STANDALONE_JS: str = 'https://cdn.redoc.ly/redoc/latest/bundles/\
redoc.standalone.js'  # TODO: rename to REDOC_JS
REDOC_CONFIG: t.Optional[dict] = None
SWAGGER_UI_CSS: str = 'https://unpkg.com/swagger-ui-dist/swagger-ui.css'
SWAGGER_UI_BUNDLE_JS: str = 'https://unpkg.com/swagger-ui-dist/\
swagger-ui-bundle.js'  # TODO: rename to SWAGGER_UI_JS
SWAGGER_UI_STANDALONE_PRESET_JS: str = 'https://unpkg.com/swagger-ui-dist/\
swagger-ui-standalone-preset.js'  # TODO: rename to SWAGGER_UI_STANDALONE_JS
SWAGGER_UI_LAYOUT: str = 'BaseLayout'
SWAGGER_UI_CONFIG: t.Optional[dict] = None
SWAGGER_UI_OAUTH_CONFIG: t.Optional[dict] = None
ELEMENTS_JS: str = 'https://unpkg.com/@stoplight/elements/web-components.min.js'
ELEMENTS_CSS: str = 'https://unpkg.com/@stoplight/elements/styles.min.css'
ELEMENTS_LAYOUT: str = 'sidebar'
ELEMENTS_CONFIG: t.Optional[dict] = None
RAPIDOC_JS: str = 'https://unpkg.com/rapidoc/dist/rapidoc-min.js'
RAPIDOC_THEME: str = 'light'
RAPIDOC_CONFIG: t.Optional[dict] = None
RAPIPDF_JS: str = 'https://unpkg.com/rapipdf/dist/rapipdf-min.js'
RAPIPDF_CONFIG: t.Optional[dict] = None
# Version changed: 1.2.0
# Change VALIDATION_ERROR_STATUS_CODE from 400 to 422.
# Version added: 1.3.0
# SPEC_PROCESSOR_PASS_OBJECT
import typing as t
from marshmallow import Schema as BaseSchema
from marshmallow.fields import Integer
from marshmallow.fields import URL
from marshmallow.orderedset import OrderedSet
# schema for the detail object of validation error response
# schema for the detail object of validation error response;
# `<location>` is the request location the errors came from, and
# `<field_name>` maps to the list of error messages for that field
validation_error_detail_schema: t.Dict[str, t.Any] = {
    'type': 'object',
    'properties': {
        '<location>': {
            'type': 'object',
            'properties': {
                '<field_name>': {
                    'type': 'array',
                    'items': {
                        'type': 'string'
                    }
                }
            }
        }
    }
}


# schema for validation error response
validation_error_schema: t.Dict[str, t.Any] = {
    'properties': {
        'detail': validation_error_detail_schema,
        'message': {
            'type': 'string'
        },
    },
    'type': 'object'
}


# schema for generic error response
http_error_schema: t.Dict[str, t.Any] = {
    'properties': {
        'detail': {
            'type': 'object'
        },
        'message': {
            'type': 'string'
        },
    },
    'type': 'object'
}
class Schema(BaseSchema):
    """A base schema for all schemas.

    The difference between marshmallow's `Schema` and APIFlask's `Schema` is that the latter
    sets `set_class` to `OrderedSet` by default.

    *Version Added: 1.2.0*
    """
    # use ordered set to keep the order of fields
    # can be removed when https://github.com/marshmallow-code/marshmallow/pull/1896 is merged
    set_class = OrderedSet
class EmptySchema(Schema):
    """An empty schema used to generate an empty response/schema.

    For a 204 response, you can use this schema to
    generate an empty response body. For a 200 response, you can use this schema
    to generate an empty response body schema.

    Example:

    ```python
    @app.delete('/foo')
    @app.output(EmptySchema, status_code=204)
    def delete_foo():
        return ''
    ```

    It equals to use `{}`:

    ```python
    @app.delete('/foo')
    @app.output({}, status_code=204)
    def delete_foo():
        return ''
    ```
    """
    pass
class PaginationSchema(Schema):
    """A schema for common pagination information.

    Matches the dict produced by `apiflask.pagination_builder`; the URL
    fields are empty strings when the corresponding page does not exist.
    """
    page = Integer()  # current page number
    per_page = Integer()  # number of items per page
    pages = Integer()  # total number of pages
    total = Integer()  # total number of items
    current = URL()  # URL of the current page
    next = URL()  # URL of the next page ('' when there is no next page)
    prev = URL()  # URL of the previous page ('' when there is no previous page)
    first = URL()  # URL of the first page
    last = URL()  # URL of the last page
class FileSchema(Schema):
    """A schema for file response.

    This is used to represent a file response in OpenAPI spec. If you want to
    embed a file as base64 string in the JSON body, you can use the
    `apiflask.fields.File` field instead.

    Example:

    ```python
    from apiflask.schemas import FileSchema
    from flask import send_from_directory

    @app.get('/images/<filename>')
    @app.output(
        FileSchema(type='string', format='binary'),
        content_type='image/png',
        description='An image file'
    )
    @app.doc(summary="Returns the image file")
    def get_image(filename):
        return send_from_directory(app.config['IMAGE_FOLDER'], filename)
    ```

    The output OpenAPI spec will be:

    ```yaml
    paths:
      /images/{filename}:
        get:
          summary: Returns the image file
          responses:
            '200':
              description: An image file
              content:
                image/png:
                  schema:
                    type: string
                    format: binary
    ```

    *Version Added: 2.0.0*
    """
    def __init__(
        self,
        *,
        type: str = 'string',
        format: str = 'binary'
    ) -> None:
        """
        Arguments:
            type: The type of the file. Defaults to `string`.
            format: The format of the file, one of `binary` and `base64`. Defaults to `binary`.
        """
        self.type = type
        self.format = format

    def __repr__(self) -> str:
        # NOTE(review): the repr is formatted as a YAML schema snippet —
        # presumably consumed when resolving this schema into the spec;
        # confirm before changing the format.
        return f'schema: \n type: {self.type}\n format: {self.format}'
redoc_template = """
<!DOCTYPE html>
<html>
<head>
<title>{{ title }} {{ version }} - Redoc</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
{% if config.REDOC_USE_GOOGLE_FONT %}
<link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">
{% endif %}
<link rel="icon" type="image/png"
href="{{ config.DOCS_FAVICON }}">
<style>
body {
margin: 0;
padding: 0;
}
</style>
</head>
<body>
<div id="redoc"></div>
<script src="{{ config.REDOC_STANDALONE_JS }}"> </script>
<script>
Redoc.init(
"{{ url_for('openapi.spec') }}",
{% if config.REDOC_CONFIG %}{{ config.REDOC_CONFIG | tojson }}{% else %}{}{% endif %},
document.getElementById("redoc")
)
</script>
</body>
</html>
"""
swagger_ui_template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{{ title }} {{ version }} - Swagger UI</title>
<link rel="stylesheet" type="text/css" href="{{ config.SWAGGER_UI_CSS }}">
<link rel="icon" type="image/png"
href="{{ config.DOCS_FAVICON }}">
<style>
html {
box-sizing: border-box;
overflow: -moz-scrollbars-vertical;
overflow-y: scroll;
}
*,
*:before,
*:after {
box-sizing: inherit;
}
body {
margin: 0;
background: #fafafa;
}
</style>
</head>
<body>
<div id="swagger-ui"></div>
<script src="{{ config.SWAGGER_UI_BUNDLE_JS }}"></script>
<script src="{{ config.SWAGGER_UI_STANDALONE_PRESET_JS }}"></script>
<script>
// we can get several config items of Function type
// referring to https://swagger.io/docs/open-source-tools/swagger-ui/usage/configuration/
var funcConfigItems = ['operationsSorter', 'tagsSorter', 'onComplete', 'requestInterceptor', 'responseInterceptor', 'modelPropertyMacro', 'parameterMacro']
function parseFunc(funcStr) {
return new Function('"use strict"; return ' + funcStr)()
}
var baseConfig = {
url: "{{ url_for('openapi.spec') }}",
dom_id: "#swagger-ui",
deepLinking: true,
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIStandalonePreset
],
plugins: [
SwaggerUIBundle.plugins.DownloadUrl
],
layout: "{{ config.SWAGGER_UI_LAYOUT }}",
{% if oauth2_redirect_path %} oauth2RedirectUrl: "{{ oauth2_redirect_path }}"{% endif %}
}
{% if config.SWAGGER_UI_CONFIG %}
var userConfig = {{ config.SWAGGER_UI_CONFIG | tojson }}
for (var attr in userConfig) {
baseConfig[attr] = funcConfigItems.includes(attr) ? parseFunc(userConfig[attr]) : userConfig[attr]
}
{% endif %}
window.onload = function () {
const ui = SwaggerUIBundle(baseConfig)
{% if config.SWAGGER_UI_OAUTH_CONFIG %}
oauthConfig = {}
var userOauthConfig = {{ config.SWAGGER_UI_OAUTH_CONFIG | tojson }}
for (var attr in userOauthConfig) {
oauthConfig[attr] = userOauthConfig[attr]
}
ui.initOAuth(oauthConfig)
{% endif %}
}
</script>
</body>
</html>
"""
swagger_ui_oauth2_redirect_template = """
<!doctype html>
<html lang="en-US">
<head>
<title>Swagger UI: OAuth2 Redirect</title>
</head>
<body>
<script>
'use strict';
function run() {
var oauth2 = window.opener.swaggerUIRedirectOauth2;
var sentState = oauth2.state;
var redirectUrl = oauth2.redirectUrl;
var isValid, qp, arr;
if (/code|token|error/.test(window.location.hash)) {
qp = window.location.hash.substring(1);
} else {
qp = location.search.substring(1);
}
arr = qp.split("&");
arr.forEach(function (v, i, _arr) { _arr[i] = '"' + v.replace('=', '":"') + '"'; });
qp = qp ? JSON.parse('{' + arr.join() + '}',
function (key, value) {
return key === "" ? value : decodeURIComponent(value);
}
) : {};
isValid = qp.state === sentState;
if ((
oauth2.auth.schema.get("flow") === "accessCode" ||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
oauth2.auth.schema.get("flow") === "authorization_code"
) && !oauth2.auth.code) {
if (!isValid) {
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "warning",
message: "Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"
});
}
if (qp.code) {
delete oauth2.state;
oauth2.auth.code = qp.code;
oauth2.callback({ auth: oauth2.auth, redirectUrl: redirectUrl });
} else {
let oauthErrorMsg;
if (qp.error) {
oauthErrorMsg = "[" + qp.error + "]: " +
(qp.error_description ? qp.error_description + ". " : "no accessCode received from the server. ") +
(qp.error_uri ? "More info: " + qp.error_uri : "");
}
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "error",
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server"
});
}
} else {
oauth2.callback({ auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl });
}
window.close();
}
window.addEventListener('DOMContentLoaded', function () {
run();
});
</script>
</body>
</html>
"""
elements_template = """
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<title>{{ title }} {{ version }} - Elements</title>
<link rel="icon" type="image/png"
href="{{ config.DOCS_FAVICON }}">
<script src="{{ config.ELEMENTS_JS }}"></script>
<link rel="stylesheet" href="{{ config.ELEMENTS_CSS }}">
</head>
<body>
<elements-api
apiDescriptionUrl="{{ url_for('openapi.spec') }}"
layout="{{ config.ELEMENTS_LAYOUT }}"
{% if config.ELEMENTS_CONFIG and 'router' in config.ELEMENTS_CONFIG %}
{% set router = config.ELEMENTS_CONFIG['router'] %}
{% else %}
{% set router = 'hash' %}
{% endif %}
router={{ router | tojson }}
{% if config.ELEMENTS_CONFIG %}
{% for key, value in config.ELEMENTS_CONFIG.items() %}
{{ key }}={{ value | tojson }}
{% endfor %}
{% endif %}
/>
</body>
</html>
"""
rapidoc_template = """
<!doctype html> <!-- Important: must specify -->
<html>
<head>
<meta charset="utf-8"> <!-- Important: rapi-doc uses utf8 characters -->
<title>{{ title }} {{ version }} - RapiDoc</title>
<link rel="icon" type="image/png"
href="{{ config.DOCS_FAVICON }}">
<script type="module" src="{{ config.RAPIDOC_JS }}"></script>
</head>
<body>
<rapi-doc
spec-url="{{ url_for('openapi.spec') }}"
theme="{{ config.RAPIDOC_THEME }}"
{% if config.RAPIDOC_CONFIG and 'show-header' in config.RAPIDOC_CONFIG %}
{% set show_header = config.RAPIDOC_CONFIG['show-header'] %}
{% else %}
{% set show_header = False %}
{% endif %}
show-header="{{ show_header | tojson }}"
{% if config.RAPIDOC_CONFIG %}
{% for key, value in config.RAPIDOC_CONFIG.items() %}
{{ key }}={{ value | tojson }}
{% endfor %}
{% endif %}
> </rapi-doc>
</body>
</html>
"""
rapipdf_template = """
<!doctype html>
<html>
<head>
<title>{{ title }} {{ version }} - RapiPDF</title>
<link rel="icon" type="image/png"
href="{{ config.DOCS_FAVICON }}">
<script src="{{ config.RAPIPDF_JS }}"></script>
</head>
<body>
<rapi-pdf
spec-url="{{ url_for('openapi.spec') }}"
{% if config.RAPIPDF_CONFIG %}
{% for key, value in config.RAPIPDF_CONFIG.items() %}
{{ key }}={{ value | tojson }}
{% endfor %}
{% endif %}
> </rapi-pdf>
</body>
</html>
"""
# registry of the available API docs UIs, keyed by UI name
ui_templates = {
    'swagger-ui': swagger_ui_template,
    'redoc': redoc_template,
    'elements': elements_template,
    'rapidoc': rapidoc_template,
    'rapipdf': rapipdf_template,
}
import typing as t
from flask import request
from flask import url_for
from werkzeug.http import HTTP_STATUS_CODES
from .types import PaginationType
_sentinel = object()  # unique marker to distinguish "argument not provided" from None
def get_reason_phrase(status_code: int, default: str = 'Unknown') -> str:
    """Return the reason phrase of the given HTTP status code.

    Arguments:
        status_code: A standard HTTP status code.
        default: The fallback phrase used when the code is unknown,
            defaults to "Unknown".

    *Version Changed: 0.6.0*

    - Add `default` parameter.
    """
    try:
        return HTTP_STATUS_CODES[status_code]
    except KeyError:
        return default
def pagination_builder(pagination: PaginationType, **kwargs: t.Any) -> dict:
    """Build a pagination data dict from a pagination object.

    This function is designed based on Flask-SQLAlchemy's `Pagination` class.
    If you are using a different or custom pagination class, make sure the
    passed pagination object has the following attributes:

    - page
    - per_page
    - pages
    - total
    - next_num
    - has_next
    - prev_num
    - has_prev

    Or you can write your own builder function to build the pagination data.

    Examples:

    ```python
    from apiflask import PaginationSchema, pagination_builder

    ...

    class PetQuery(Schema):
        page = Integer(load_default=1)
        per_page = Integer(load_default=20, validate=Range(max=30))

    class PetsOut(Schema):
        pets = List(Nested(PetOut))
        pagination = Nested(PaginationSchema)

    @app.get('/pets')
    @app.input(PetQuery, location='query')
    @app.output(PetsOut)
    def get_pets(query):
        pagination = PetModel.query.paginate(
            page=query['page'],
            per_page=query['per_page']
        )
        pets = pagination.items
        return {
            'pets': pets,
            'pagination': pagination_builder(pagination)
        }
    ```

    See <https://github.com/apiflask/apiflask/blob/main/examples/pagination/app.py>
    for the complete example.

    Arguments:
        pagination: The pagination object.
        **kwargs: Additional keyword arguments that passed to the
            `url_for` function when generate the page-related URLs.

    *Version Added: 0.6.0*
    """
    endpoint: t.Optional[str] = request.endpoint
    per_page: int = pagination.per_page

    def page_url(page_number: int) -> str:
        # missing endpoint should not happen inside a real request
        if endpoint is None:  # pragma: no cover
            return ''
        return url_for(
            endpoint, page=page_number, per_page=per_page, _external=True, **kwargs
        )

    # next/prev fall back to an empty string when the page does not exist
    return {
        'total': pagination.total,
        'pages': pagination.pages,
        'per_page': per_page,
        'page': pagination.page,
        'next': page_url(pagination.next_num) if pagination.has_next else '',
        'prev': page_url(pagination.prev_num) if pagination.has_prev else '',
        'first': page_url(1),
        'last': page_url(pagination.pages),
        'current': page_url(pagination.page),
    }
import typing as t
from werkzeug.exceptions import default_exceptions
from .helpers import get_reason_phrase
from .types import ResponseHeaderType
# shared error message for config values that are neither a marshmallow
# schema class nor an OpenAPI schema dict
_bad_schema_message = 'The schema must be a marshmallow schema class or an OpenAPI schema dict.'
class HTTPError(Exception):
    """The exception to end the request handling and return a JSON error response.

    Examples:

    ```python
    from apiflask import APIFlask, HTTPError
    from markupsafe import escape

    app = APIFlask(__name__)

    @app.get('/<name>')
    def hello(name):
        if name == 'Foo':
            raise HTTPError(404, 'This man is missing.')
        return f'Hello, {escape(name)}!'
    ```
    """
    # class-level defaults; subclasses may override them to create
    # reusable error types.
    # NOTE(review): ``detail``, ``headers`` and ``extra_data`` are shared
    # mutable objects — they should be reassigned, never mutated in
    # place; TODO confirm no caller mutates them.
    status_code: int = 500
    message: t.Optional[str] = None
    detail: t.Any = {}
    headers: ResponseHeaderType = {}
    extra_data: t.Mapping[str, t.Any] = {}

    def __init__(
        self,
        status_code: t.Optional[int] = None,
        message: t.Optional[str] = None,
        detail: t.Optional[t.Any] = None,
        headers: t.Optional[ResponseHeaderType] = None,
        extra_data: t.Optional[t.Mapping[str, t.Any]] = None
    ) -> None:
        """Initialize the error response.

        Arguments:
            status_code: The status code of the error (4XX and 5xx), defaults to 500.
            message: The simple description of the error. If not provided,
                the reason phrase of the status code will be used.
            detail: The detailed information of the error, you can use it to
                provide the additional information such as custom error code,
                documentation URL, etc.
            headers: A dict of headers used in the error response.
            extra_data: A dict of additional fields (custom error information) that will
                added to the error response body.

        *Version changed: 0.9.0*

        - Set `detail` and `headers` to empty dict if not set.

        *Version changed: 0.10.0*

        - Add `extra_data` parameter to accept additional error information.

        *Version changed: 0.11.0*

        - Change `status_code` from position argument to keyword argument, defaults to 500.
          Add class attributes with default values to support error subclasses.
        """
        super().__init__()
        if status_code is not None:
            # TODO: support use custom error status code?
            if status_code not in default_exceptions:
                raise LookupError(
                    f'No exception for status code {status_code!r},'
                    ' valid error status code are "4XX" and "5XX".'
                )
            self.status_code = status_code
        if detail is not None:
            self.detail = detail
        if headers is not None:
            self.headers = headers
        if message is not None:
            self.message = message
        if extra_data is not None:
            self.extra_data = extra_data
        if self.message is None:
            # make sure the error message is not empty
            self.message: str = get_reason_phrase(self.status_code, 'Unknown error')
class _ValidationError(HTTPError):
    """Raised internally when the request data fails schema validation."""
def abort(
    status_code: int,
    message: t.Optional[str] = None,
    detail: t.Optional[t.Any] = None,
    headers: t.Optional[ResponseHeaderType] = None,
    extra_data: t.Optional[dict] = None
) -> t.NoReturn:
    """A function to raise HTTPError exception.

    Similar to Flask's `abort`, but returns a JSON response.

    Examples:

    ```python
    from apiflask import APIFlask, abort
    from markupsafe import escape

    app = APIFlask(__name__)

    @app.get('/<name>')
    def hello(name):
        if name == 'Foo':
            abort(404, 'This man is missing.')
            # or just `abort(404)`
        return f'Hello, {escape(name)}!'
    ```

    P.S. When `app.json_errors` is `True` (default), Flask's `flask.abort` will also
    return JSON error response.

    Arguments:
        status_code: The status code of the error (4XX and 5XX).
        message: The simple description of the error. If not provided,
            the reason phrase of the status code will be used.
        detail: The detailed information of the error, you can use it to
            provide additional information such as a custom error code,
            documentation URL, etc.
        headers: A dict of headers used in the error response.
        extra_data: A dict of additional fields (custom error information) that will
            be added to the error response body.

    Raises:
        HTTPError: Always; this function never returns normally.

    *Version changed: 0.4.0*

    - Rename the function name from `abort_json` to `abort`.

    *Version changed: 0.10.0*

    - Add new parameter `extra_data`.
    """
    # Annotated `t.NoReturn` (instead of the misleading `None`) because this
    # always raises; lets type checkers narrow control flow after an abort().
    raise HTTPError(status_code, message, detail, headers, extra_data)
import typing as t
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from flask_marshmallow.fields import AbsoluteURLFor as AbsoluteURLFor
from flask_marshmallow.fields import Hyperlinks as Hyperlinks
from flask_marshmallow.fields import URLFor as URLFor
from marshmallow.fields import AwareDateTime as AwareDateTime
from marshmallow.fields import Boolean as Boolean
from marshmallow.fields import Constant as Constant
from marshmallow.fields import Date as Date
from marshmallow.fields import DateTime as DateTime
from marshmallow.fields import Decimal as Decimal
from marshmallow.fields import Dict as Dict
from marshmallow.fields import Email as Email
from marshmallow.fields import Field as Field
from marshmallow.fields import Float as Float
from marshmallow.fields import Function as Function
from marshmallow.fields import Integer as Integer
from marshmallow.fields import IP as IP
from marshmallow.fields import IPv4 as IPv4
from marshmallow.fields import IPv6 as IPv6
from marshmallow.fields import List as List
from marshmallow.fields import Mapping as Mapping
from marshmallow.fields import Method as Method
from marshmallow.fields import NaiveDateTime as NaiveDateTime
from marshmallow.fields import Nested as Nested
from marshmallow.fields import Number as Number
from marshmallow.fields import Pluck as Pluck
from marshmallow.fields import Raw as Raw
from marshmallow.fields import String as String
from marshmallow.fields import Time as Time
from marshmallow.fields import TimeDelta as TimeDelta
from marshmallow.fields import Tuple as Tuple
from marshmallow.fields import URL as URL
from marshmallow.fields import UUID as UUID
from marshmallow.fields import Enum as Enum
from webargs.fields import DelimitedList as DelimitedList
from webargs.fields import DelimitedTuple as DelimitedTuple
class File(Field):
    """A binary file upload field; it should only be used in an input schema.

    Examples:

    ```python
    import os

    from werkzeug.utils import secure_filename
    from apiflask.fields import File

    class Image(Schema):
        image = File()

    @app.post('/images')
    @app.input(Image, location='files')
    def upload_image(files):
        f = files['image']
        # use `secure_filename` to clean the filename, notice it will only keep ascii characters
        filename = secure_filename(f.filename)
        f.save(os.path.join(the_path_to_uploads, filename))
        return {'message': f'file {filename} saved.'}
    ```

    The deserialized value is an instance of `werkzeug.datastructures.FileStorage`, see more
    details in the
    [docs](https://werkzeug.palletsprojects.com/datastructures/#werkzeug.datastructures.FileStorage). # noqa: B950, E501

    Use the `form_and_files` location if you want to put both files
    and other normal fields in one schema.

    ```python
    import os

    from werkzeug.utils import secure_filename
    from apiflask.fields import String, File

    class ProfileIn(Schema):
        name = String()
        avatar = File()

    @app.post('/profiles')
    @app.input(ProfileIn, location='form_and_files')
    def create_profile(form_and_files_data):
        avatar_file = form_and_files_data['avatar']
        name = form_and_files_data['name']
        # use `secure_filename` to clean the filename, notice it will only keep ascii characters
        avatar_filename = secure_filename(avatar_file.filename)
        avatar_file.save(os.path.join(the_path_to_uploads, avatar_filename))
        profile = Profile(name=name, avatar_filename=avatar_filename)
        # ...
        return {'message': 'profile created.'}
    ```

    In the current implementation, `files` location data will also include
    the form data (equals to `form_and_files`).

    *Version Added: 1.0*

    This field accepts the same keyword arguments that `Field` receives.
    """

    default_error_messages: t.Dict[str, str] = {
        'invalid': 'Not a valid file.'
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Advertise the field as a binary string in the generated OpenAPI spec.
        self.metadata.update({'type': 'string', 'format': 'binary'})

    def _deserialize(self, value, attr, data, **kwargs) -> t.Any:
        # Imported locally so werkzeug is only required once the field is used.
        from werkzeug.datastructures import FileStorage

        if isinstance(value, FileStorage):
            return value
        raise self.make_error('invalid')
class Config(Field):
    """A field that serializes a value taken from the Flask app configuration.

    Examples:

    ```python
    from apiflask import APIFlask, Schema
    from apiflask.fields import String, Config

    app = APIFlask(__name__)
    app.config['API_TITLE'] = 'Pet API'

    class FooSchema(Schema):
        user = String()
        title = Config('API_TITLE')

    @app.get('/foo')
    @app.output(FooSchema)
    def foo():
        return {'user': 'test'}
    ```

    This field should only be used in an output schema. A `ValueError` will
    be raised if the config key is not found in the app config.

    *Version Added: 2.0.1*
    """

    # The value comes from the app config, not from the serialized object,
    # so marshmallow's attribute lookup on the object is skipped.
    _CHECK_ATTRIBUTE = False

    def __init__(self, key, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.key = key

    def _serialize(self, value, attr, obj, **kwargs) -> t.Any:
        # Imported locally to avoid a module-level dependency on the app context.
        from flask import current_app

        if self.key in current_app.config:
            return current_app.config[self.key]
        raise ValueError(f'The key {self.key} is not found in the app config.')
# ApiFred
## Introduzione
Il progetto propone la realizzazione di una libreria python che permette di scaricare, memorizzare in un database e analizzare tramite grafici delle informazioni ottenute dall'API Fred. La classe più importante della libreria è senza dubbio 'Fred'. Nel momento in cui viene creata un'istanza di questa classe viene istanziato anche un oggetto della classe 'Datamanager' che si occupa di gestire le interazioni con il database. Tale classe infatti ha come obiettivo quello di leggere e scrivere informazioni in opportune tabelle MySQL. La struttura di queste tabelle è descritta nella documentazione della classe Fred.
L'idea di base è che tutti i dati scaricati debbano essere memorizzati in un database: in questo modo, quando l'utente ha bisogno nuovamente di queste informazioni, esse verranno prelevate dal database, a meno che non venga chiesto esplicitamente un aggiornamento dei dati tramite API.
## Esempi di codice
Per poter utilizzare un'istanza della classe Fred è necessario fornire due parametri che sono la connessione con il database e la chiave dell'API. Una volta creata l'istanza è possibile scaricare e graficare i dati.
### Scaricare categorie figlie di una categoria data
fred= Fred(connection,apy_key)
fred.dowloadSubCategoryByCategory(1)
### Scaricare serie da una categoria data
fred= Fred(connection,apy_key)
fred.dowloadSeriesByCategory(1)
### Scaricare Osservazioni di una serie data
fred= Fred(connection,apy_key)
fred.dowloadObservationBySeries('BABANAICS11NSAUS')
## Grafici
I grafici realizzati sono principalmente di 7 tipologie:
1. Andamento delle osservabili di una serie
2. Grafico della media mobile delle osservabili di una data serie
3. Grafico delle differenze prime delle osservabili di una data serie
4. Grafico delle differenze prime percentuali delle osservabili di una data serie
5. Matrice delle covarianze di un gruppo di serie
6. Matrice delle correlazioni di un gruppo di serie
7. Regressione lineare di una data serie
Di seguito verranno mostrati alcuni esempi.
### Andamento delle osservabili di una serie
fred= Fred(connection,apy_key)
fred.plotOservationBySeries('BABANAICS11NSAUS').show()
### Grafico della media mobile delle osservabili di una data serie
fred= Fred(connection,apy_key)
fred.plotMovingAverage(11,'BABANAICS11NSAUS').show()
### Grafico delle differenze prime delle osservabili di una data serie
fred= Fred(connection,apy_key)
fred.plotPrimeDifferenceSeries('BABANAICS11NSAUS').show()
## Combinazioni grafici
La classe Fred mette a disposizione una serie di metodi che restituiscono dati grezzi utili per personalizzare e comporre i propri grafici. In particolare questi metodi sono utili per creare dei 'collage' di grafici. È possibile combinare più grafici in un'unica immagine in maniera molto agevole. Ad esempio nel codice che segue è possibile combinare i grafici di 3 serie differenti in uno.
### Esempio
x1,y1 = fred.giveDataPlotOservationBySeries('BABANAICS11NSAUS')
x2,y2 = fred.giveDataPlotOservationBySeries('BABANAICS23SAUS')
x3,y3 = fred.giveDataPlotOservationBySeries('BFBF8QNAICS71SAUS')
fig, axs = plt.subplots(3,sharex=True)
step = int(len(x1) / 8)
axs[0].set_title('BABANAICS11NSAUS')
axs[0].plot(x1, y1,'tab:red')
axs[1].set_title('BABANAICS23SAUS')
axs[1].plot(x2, y2,'tab:green')
axs[2].set_title('BFBF8QNAICS71SAUS')
axs[2].plot(x3, y3)
plt.xticks(x1[::step], x1[::step],rotation=-45)
plt.gcf().set_size_inches(16, 9)
| APIFredTorVergata | /APIFredTorVergata-0.1.0.tar.gz/APIFredTorVergata-0.1.0/README.md | README.md |
from apifuzzer.utils import pretty_print, get_logger
class JsonSectionAbove:
    """Lift the content of a named sub-section (``schema`` by default) one
    level up in a JSON-like API definition.

    This normalization lets OpenAPI v3 and Swagger v2 API definitions be
    processed in a similar way.
    """

    def __init__(self, api_definition, section_to_up="schema"):
        """
        :param api_definition: parsed API definition (nested dicts/lists)
        :param section_to_up: key whose content should replace the key itself
        """
        self.logger = get_logger(f"{self.__class__.__name__}-{section_to_up}")
        self.api_definition = api_definition
        self.section_to_up = section_to_up

    def _resolve(self, data):
        """Perform a single lifting pass over ``data``.

        Returns a two-element list ``[processed_data, found_flag]`` where the
        flag reports whether the target section was lifted during this pass;
        ``resolve`` unpacks it to decide whether another pass is needed.
        """
        # Tracks whether the target section was found (and lifted) this pass.
        schema_fount = False
        self.logger.debug(f"Processing {pretty_print(data, 50)}")
        if isinstance(data, dict):
            return_data = dict()
            for key, value in data.items():
                self.logger.debug(f"Checking {key} - {pretty_print(value, 50)}")
                if key == self.section_to_up and value:
                    # Target section found: merge its content directly into the
                    # parent instead of keeping it nested under `key`.
                    schema_fount = True
                    if isinstance(value, dict):
                        return_data.update(value)
                    else:
                        # Non-dict section content replaces the whole parent.
                        # NOTE(review): this discards keys collected so far and
                        # any that follow -- presumably intended; confirm.
                        return_data = value
                    self.logger.debug(f"Processed {key} -> {pretty_print(return_data)}")
                elif isinstance(value, dict):
                    # Recurse through resolve() so nested occurrences deeper in
                    # this subtree are fully lifted before we store it.
                    self.logger.debug(f"Process dict {key}")
                    return_data[key] = self.resolve(value)
                elif isinstance(value, list):
                    # Process each list element independently.
                    if not return_data.get(key):
                        return_data[key] = list()
                    for _iter, val in enumerate(value):
                        self.logger.debug(f"Process {key} list elem: {_iter}")
                        return_data[key].append(self.resolve(data=val))
                else:
                    # Scalar value: copy through unchanged.
                    return_data[key] = value
                    self.logger.debug(
                        f"Processed: {key} -> {pretty_print(return_data, 100)}"
                    )
        else:
            # Non-dict input is returned as-is.
            return_data = data
        return [return_data, schema_fount]

    def resolve(self, data=None):
        """Repeatedly lift the target section until no occurrence remains.

        :param data: structure to process; defaults to the stored definition
        :return: the processed structure
        """
        self.logger.info("Resolving schema references")
        if data is None:
            data = self.api_definition
        resolved_in_this_iteration = True
        iteration = 1
        # Keep passing over the data until a full pass lifts nothing.
        while resolved_in_this_iteration:
            self.logger.debug(f"{iteration} resolving reference")
            data, resolved_in_this_iteration = self._resolve(data)
            iteration += 1
        return data
import six
from bitstring import Bits
from kitty.core import kassert
from kitty.model import RandomBits, String, BaseField, Group
from kitty.model.low_level.encoder import ENC_BITS_DEFAULT, strToBytes
from apifuzzer.utils import secure_randint, get_logger
class APIFuzzerGroup(Group):
    """A ``Group`` field that is always fuzzable.

    Thin wrapper fixing ``fuzzable=True`` and mapping the ``value``
    argument onto the base class's ``values`` parameter.
    """

    def __init__(self, name, value):
        super().__init__(name=name, values=value, fuzzable=True)

    @staticmethod
    def accept_list_as_value():
        # Indicates that this field type takes a list as its value.
        return True
class Utf8Chars(BaseField):
    """
    This custom fuzzer iterates through the UTF8 chars and gives back a random
    section between min and max length.
    Highly relies on random numbers, so it will most probably give you
    different values each time you run it.

    You can generate the chars like this:

    :example:

    >>>for st in range(0, 1114111):
    >>>    try:
    >>>        print(f'{st}-> {chr(st)}')
    >>>    except (UnicodeEncodeError, ValueError):
    >>>        pass

    Code points above 1114111 cannot be processed, so this is the upper
    limit for now.
    """

    # Highest code point generated; chr() rejects anything above 0x10FFFF.
    MAX = 1114111

    def __init__(
        self,
        value,
        name,
        fuzzable=True,
        min_length=20,
        max_length=None,
        num_mutations=80,
    ):
        """
        :param value: sample value; its length derives the default ``max_length``
        :param name: field name
        :param fuzzable: whether the field should be mutated
        :param min_length: minimum length of a generated section
        :param max_length: maximum length of a generated section, defaults
            to twice the length of ``value``
        :param num_mutations: number of mutations to perform
        """
        # NOTE(review): super(BaseField, self) deliberately skips
        # BaseField.__init__ and calls its parent instead; the attributes
        # BaseField would normally set are assigned manually below -- confirm.
        super(BaseField, self).__init__(name=name)  # pylint: disable=E1003
        # Fixed: was a duplicated `self.logger = self.logger = ...` assignment.
        self.logger = get_logger(self.__class__.__name__)
        self.name = name
        self.value = value
        self.min_length = min_length
        self.max_length = max_length if max_length else len(value) * 2
        self._num_mutations = num_mutations
        self.position = self.init_position()
        self._initialized = False
        self._default_value = self.to_bits(chr(self.MAX))
        self._encoder = ENC_BITS_DEFAULT
        self._default_rendered = self._encode_value(self._default_value)
        self._hash = None
        self._fuzzable = fuzzable
        self._need_second_pass = False
        self._controlled = False

    def init_position(self):
        """Pick a random starting code point within the valid range."""
        return secure_randint(0, self.MAX)

    @staticmethod
    def str_to_bytes(value):
        """Encode a text value to UTF-8 bytes; byte values pass through.

        :type value: ``str``
        :param value: value to encode
        """
        kassert.is_of_types(value, (bytes, bytearray, six.string_types))
        if isinstance(value, six.string_types):
            return value.encode(encoding="utf-8")
        if isinstance(value, bytearray):
            return bytes(value)
        return value

    def to_bits(self, val):
        """Convert a value to a ``Bits`` instance via its byte form."""
        return Bits(self.str_to_bytes(val))

    def _mutate(self):
        """Generate the next run of consecutive code points as the value."""
        current_mutation_length = secure_randint(self.min_length, self.max_length)
        # chr() only accepts code points up to MAX, so clamp the window to
        # avoid a ValueError when it would run past the last valid char
        # (the previous implementation crashed when position was near MAX).
        stop = min(self.position + current_mutation_length, self.MAX + 1)
        self._current_value = self.to_bits(
            "".join(chr(code_point) for code_point in range(self.position, stop))
        )
        self.position += current_mutation_length
        if self.position > self.MAX:
            # Wrap around by re-seeding the position at a new random place.
            self.position = self.init_position()

    def __str__(self):
        return f"{self.name}->{self.value}"

    def __repr__(self):
        return f"{self.name}->{self.value}"
class RandomBitsField(RandomBits):
    """
    A fuzzable field producing random bit strings, compatible with the
    ``String`` and ``Delimiter`` fields.
    https://lcamtuf.blogspot.hu/2014/08/binary-fuzzing-strategies-what-works.html
    """

    def __init__(self, value, name, fuzzable=True):
        """
        :param value: sample value; its length derives the maximum mutation length
        :param name: field name
        :param fuzzable: whether the field should be mutated
        """
        self.name = name
        self.value = value
        super(RandomBitsField, self).__init__(
            name=name,
            value=value,
            min_length=0,
            max_length=len(value) * 2,
            fuzzable=fuzzable,
            num_mutations=80,
        )

    def not_implemented(self, func_name):
        """No-op placeholder kept for interface compatibility."""
        _ = func_name

    def _mutate(self):
        """Generate a random bit string of fixed-step or random length."""
        if self._step:
            length = self._min_length + self._step * self._current_index
        else:
            length = self._random.randint(self._min_length, self._max_length)
        # Build the random characters in one pass with join() instead of
        # quadratic string concatenation.
        current_bytes = "".join(
            chr(self._random.randint(0, 255)) for _ in range(length // 8 + 1)
        )
        self._current_value = Bits(bytes=strToBytes(current_bytes))[:length]

    def __str__(self):
        return f"{self.name}->{self.value}"

    def __repr__(self):
        return f"{self.name}->{self.value}"
class UnicodeStrings(String):
    """A ``String``-based fuzz field parameterized with length limits.

    NOTE(review): ``min_length``, ``max_length`` and ``_num_mutations`` are
    stored on the instance but not forwarded to ``String.__init__`` (which
    runs afterwards and may overwrite them) -- confirm intended behavior.
    """

    def __init__(
        self,
        value,
        name,
        min_length=0,
        max_length=None,
        num_mutations=80,
        fuzzable=True,
    ):
        """
        :param value: sample value to mutate
        :param name: field name
        :param min_length: minimum length of a generated value
        :param max_length: maximum length, defaults to twice the length of ``value``
        :param num_mutations: number of mutations to perform
        :param fuzzable: whether the field should be mutated
        """
        self.min_length = min_length
        self.max_length = max_length if max_length else len(value) * 2
        self._num_mutations = num_mutations
        self.name = name
        self.value = value
        # Base class initialization happens last; see class-level NOTE(review).
        super(UnicodeStrings, self).__init__(name=name, value=value, fuzzable=fuzzable)

    def not_implemented(self, func_name):
        # No-op placeholder; the argument is intentionally ignored.
        _ = func_name
        pass

    def __str__(self):
        return f"{self.name}->{self.value}"

    def __repr__(self):
        return f"{self.name}->{self.value}"
from kitty.interfaces import WebInterface
from apifuzzer.fuzz_model import APIFuzzerModel
from apifuzzer.fuzzer_target.fuzz_request_sender import FuzzerTarget
from apifuzzer.openapi_template_generator import OpenAPITemplateGenerator
from apifuzzer.server_fuzzer import OpenApiServerFuzzer
from apifuzzer.utils import set_logger
from apifuzzer.version import get_version
class Fuzzer(object):
    """Top-level coordinator: compiles fuzz templates from an API definition
    and drives the Kitty-based fuzzing session against the target.
    """

    def __init__(
        self,
        report_dir,
        test_level,
        log_level,
        basic_output,
        alternate_url=None,
        test_result_dst=None,
        auth_headers=None,
        api_definition_url=None,
        api_definition_file=None,
        junit_report_path=None,
    ):
        self.base_url = None
        self.templates = None
        self.alternate_url = alternate_url
        self.test_level = test_level
        self.report_dir = report_dir
        self.test_result_dst = test_result_dst
        self.auth_headers = auth_headers or {}
        self.junit_report_path = junit_report_path
        self.api_definition_url = api_definition_url
        self.api_definition_file = api_definition_file
        self.logger = set_logger(log_level, basic_output)
        self.logger.info("%s initialized", get_version())

    def prepare(self):
        """Parse the API definition and compile templates and base URL."""
        # Only Swagger / OpenAPI is supported today; branch here when other
        # template generators are added.
        generator = OpenAPITemplateGenerator(
            api_definition_url=self.api_definition_url,
            api_definition_file=self.api_definition_file,
        )
        try:
            generator.process_api_resources()
        except Exception as e:
            self.logger.error(f"Exception: {e}", exc_info=True)
            raise e
        self.templates = generator.templates
        self.base_url = generator.compile_base_url(self.alternate_url)

    def run(self):
        """Execute the fuzzing session using the prepared templates."""
        fuzz_target = FuzzerTarget(
            name="target",
            base_url=self.base_url,
            report_dir=self.report_dir,
            auth_headers=self.auth_headers,
            junit_report_path=self.junit_report_path,
        )
        fuzz_model = APIFuzzerModel()
        for template in self.templates:
            fuzz_model.connect(template.compile_template())
            fuzz_model.content_type = template.get_content_type()
        server_fuzzer = OpenApiServerFuzzer()
        server_fuzzer.set_model(fuzz_model)
        server_fuzzer.set_target(fuzz_target)
        server_fuzzer.set_interface(WebInterface())
        server_fuzzer.start()
        server_fuzzer.stop()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.