content
stringlengths 5
1.05M
|
---|
from allmydata.storage.backends.cloud.u1.u1_container import configure_u1_container
configure_container = configure_u1_container
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Generated from Agtype.g4 by ANTLR 4.9.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .AgtypeParser import AgtypeParser
else:
from AgtypeParser import AgtypeParser
# This class defines a complete listener for a parse tree produced by AgtypeParser.
class AgtypeListener(ParseTreeListener):
# Enter a parse tree produced by AgtypeParser#agType.
def enterAgType(self, ctx:AgtypeParser.AgTypeContext):
pass
# Exit a parse tree produced by AgtypeParser#agType.
def exitAgType(self, ctx:AgtypeParser.AgTypeContext):
pass
# Enter a parse tree produced by AgtypeParser#agValue.
def enterAgValue(self, ctx:AgtypeParser.AgValueContext):
pass
# Exit a parse tree produced by AgtypeParser#agValue.
def exitAgValue(self, ctx:AgtypeParser.AgValueContext):
pass
# Enter a parse tree produced by AgtypeParser#StringValue.
def enterStringValue(self, ctx:AgtypeParser.StringValueContext):
pass
# Exit a parse tree produced by AgtypeParser#StringValue.
def exitStringValue(self, ctx:AgtypeParser.StringValueContext):
pass
# Enter a parse tree produced by AgtypeParser#IntegerValue.
def enterIntegerValue(self, ctx:AgtypeParser.IntegerValueContext):
pass
# Exit a parse tree produced by AgtypeParser#IntegerValue.
def exitIntegerValue(self, ctx:AgtypeParser.IntegerValueContext):
pass
# Enter a parse tree produced by AgtypeParser#FloatValue.
def enterFloatValue(self, ctx:AgtypeParser.FloatValueContext):
pass
# Exit a parse tree produced by AgtypeParser#FloatValue.
def exitFloatValue(self, ctx:AgtypeParser.FloatValueContext):
pass
# Enter a parse tree produced by AgtypeParser#TrueBoolean.
def enterTrueBoolean(self, ctx:AgtypeParser.TrueBooleanContext):
pass
# Exit a parse tree produced by AgtypeParser#TrueBoolean.
def exitTrueBoolean(self, ctx:AgtypeParser.TrueBooleanContext):
pass
# Enter a parse tree produced by AgtypeParser#FalseBoolean.
def enterFalseBoolean(self, ctx:AgtypeParser.FalseBooleanContext):
pass
# Exit a parse tree produced by AgtypeParser#FalseBoolean.
def exitFalseBoolean(self, ctx:AgtypeParser.FalseBooleanContext):
pass
# Enter a parse tree produced by AgtypeParser#NullValue.
def enterNullValue(self, ctx:AgtypeParser.NullValueContext):
pass
# Exit a parse tree produced by AgtypeParser#NullValue.
def exitNullValue(self, ctx:AgtypeParser.NullValueContext):
pass
# Enter a parse tree produced by AgtypeParser#ObjectValue.
def enterObjectValue(self, ctx:AgtypeParser.ObjectValueContext):
pass
# Exit a parse tree produced by AgtypeParser#ObjectValue.
def exitObjectValue(self, ctx:AgtypeParser.ObjectValueContext):
pass
# Enter a parse tree produced by AgtypeParser#ArrayValue.
def enterArrayValue(self, ctx:AgtypeParser.ArrayValueContext):
pass
# Exit a parse tree produced by AgtypeParser#ArrayValue.
def exitArrayValue(self, ctx:AgtypeParser.ArrayValueContext):
pass
# Enter a parse tree produced by AgtypeParser#obj.
def enterObj(self, ctx:AgtypeParser.ObjContext):
pass
# Exit a parse tree produced by AgtypeParser#obj.
def exitObj(self, ctx:AgtypeParser.ObjContext):
pass
# Enter a parse tree produced by AgtypeParser#pair.
def enterPair(self, ctx:AgtypeParser.PairContext):
pass
# Exit a parse tree produced by AgtypeParser#pair.
def exitPair(self, ctx:AgtypeParser.PairContext):
pass
# Enter a parse tree produced by AgtypeParser#array.
def enterArray(self, ctx:AgtypeParser.ArrayContext):
pass
# Exit a parse tree produced by AgtypeParser#array.
def exitArray(self, ctx:AgtypeParser.ArrayContext):
pass
# Enter a parse tree produced by AgtypeParser#typeAnnotation.
def enterTypeAnnotation(self, ctx:AgtypeParser.TypeAnnotationContext):
pass
# Exit a parse tree produced by AgtypeParser#typeAnnotation.
def exitTypeAnnotation(self, ctx:AgtypeParser.TypeAnnotationContext):
pass
# Enter a parse tree produced by AgtypeParser#floatLiteral.
def enterFloatLiteral(self, ctx:AgtypeParser.FloatLiteralContext):
pass
# Exit a parse tree produced by AgtypeParser#floatLiteral.
def exitFloatLiteral(self, ctx:AgtypeParser.FloatLiteralContext):
pass
del AgtypeParser |
# -*- coding: utf-8 -*-
import calendar
from copy import deepcopy
from datetime import date, datetime, time
from typing import Any, Dict, List
from uuid import UUID
from croniter import croniter
from flask import abort, jsonify
from marshmallow import ValidationError
import simplejson as json
from chaosplt_account.model import User
from chaosplt_account.schemas import new_org_schema, org_schema, \
link_workspace_schema, org_schema_short, orgs_schema_tiny, \
workspaces_schema, org_members_schema, org_member_schema, \
org_name_schema, org_settings_schema, experiments_schema, \
schedules_schema
from chaosplt_account.service import Services
from chaosplt_account.storage.model.org import OrgType, DEFAULT_ORG_SETTINGS
__all__ = ["list_all_orgs", "create_org", "get_org", "delete_org",
"get_org_workspaces", "link_workspace_to_org",
"unlink_workspace_from_org", "lookup_org", "get_members",
"get_member", "set_org_name", "set_org_infos",
"get_schedulings"]
def list_all_orgs(services: Services, authed_user: User):
orgs = services.account.org.list_all()
return orgs_schema_tiny.jsonify(orgs)
def create_org(services: Services, authed_user: User, payload: Dict[str, Any]):
try:
payload = new_org_schema.load(payload)
except ValidationError as err:
return jsonify(err.messages), 422
org_name = payload["name"]
has_org = services.account.org.has_org_by_name(org_name)
if has_org:
return jsonify({
"name": ["Name already used"]
}), 409
user_id = authed_user.id
org = services.account.org.create(org_name, user_id)
services.activity.event.record(
authenticated_user_id=user_id, user_id=user_id,
org_id=org.id, event_type="organization", phase="create")
return org_schema.jsonify(org), 201
def get_org(services: Services, authed_user: User, org_id: UUID):
org = services.account.org.get(org_id)
if not org:
return abort(404)
if authed_user.is_authenticated:
user_id = authed_user.id
org.owner = services.account.org.is_owner(org.id, user_id)
for w in org.workspaces:
w.owner = services.account.workspace.is_owner(w.id, user_id)
return org_schema_short.jsonify(org)
def delete_org(services: Services, authed_user: User, org_id: UUID):
org = services.account.org.get(org_id)
if org:
user_id = authed_user.id
owns_org = services.account.org.is_owner(org.id, user_id)
if not owns_org:
return abort(404)
# cannot delete your own org
if org.kind == OrgType.personal.value:
return jsonify({
"error": "Cannot delete your personal organization"
}), 422
services.account.org.delete(org_id)
services.activity.event.record(
authenticated_user_id=user_id, user_id=user_id,
org_id=org.id, event_type="organization", phase="delete")
return "", 204
def get_org_workspaces(services: Services, authed_user: User, org_id: UUID):
org = services.account.org.get(org_id)
if not org:
return abort(404)
return workspaces_schema.jsonify(org.workspaces)
def link_workspace_to_org(services: Services, authed_user: User, org_id: UUID,
workspace_id: UUID, payload: Dict[str, Any]):
try:
payload = link_workspace_schema.load(payload)
except ValidationError as err:
return jsonify(err.messages), 422
org = services.account.org.get(org_id)
if not org:
return abort(404)
workspace = services.account.workspace.get(workspace_id)
if not workspace:
return abort(404)
services.account.org.add_workspace(
org_id, workspace_id, owner=payload["owner"])
user_id = authed_user.id
services.activity.event.record(
authenticated_user_id=user_id, user_id=user_id,
org_id=org.id, event_type="organization", phase="link-workspace")
return "", 204
def unlink_workspace_from_org(services: Services, authed_user: User,
org_id: UUID, workspace_id: UUID):
org = services.account.org.get(org_id)
if not org:
return abort(404)
workspace = services.account.workspace.get(workspace_id)
if not workspace:
return abort(404)
services.account.org.remove_org(org_id, workspace_id)
user_id = authed_user.id
services.activity.event.record(
authenticated_user_id=user_id, user_id=user_id,
org_id=org.id, event_type="organization", phase="unlink-workspace")
return "", 204
def lookup_org(services: Services, authed_user: User, org_name: str,
workspaces: List[str] = None):
org = services.account.org.get_by_name(org_name)
if not org:
return abort(404)
org.owner = services.account.org.is_owner(org.id, authed_user.id)
if org.owner:
org.member = True
else:
org.member = services.account.org.is_member(org.id, authed_user.id)
workspaces = workspaces or []
for workspace_name in workspaces:
workspace = services.account.workspace.get_by_name(
org.id, workspace_name)
if not workspace:
return abort(404)
if authed_user.is_authenticated:
for w in org.workspaces:
w.owner = services.account.workspace.is_owner(w.id, authed_user.id)
return org_schema.jsonify(org)
def get_members(services: Services, authed_user: User, org_id: UUID):
org = services.account.org.get(org_id)
if not org:
return abort(404)
members = services.account.org.get_members(org.id)
return org_members_schema.jsonify(members)
def get_member(services: Services, authed_user: User, org_id: UUID,
user_id: UUID):
org = services.account.org.get(org_id)
if not org:
return abort(404)
member = services.account.org.get_member(org.id, user_id)
if not member:
return abort(404)
return org_member_schema.jsonify(member)
def add_member(services: Services, authed_user: User, org_id: UUID,
user_id: UUID):
org = services.account.org.get(org_id)
if not org:
return abort(404)
owns_org = services.account.org.is_owner(org.id, authed_user.id)
if not owns_org:
return abort(404)
member = services.account.org.add_member(org.id, user_id)
if not member:
return abort(404)
return org_member_schema.jsonify(member)
def set_org_name(services: Services, authed_user, org_id: UUID,
payload: Dict[str, Any]):
org = services.account.org.get(org_id)
if not org:
return abort(404)
owns_org = services.account.org.is_owner(org.id, authed_user.id)
if not owns_org:
return abort(404)
try:
payload = org_name_schema.load(payload)
except ValidationError as err:
return jsonify(err.messages), 422
old_name = org.name
new_name = payload["name"]
existing_org = services.account.org.get_by_name(new_name)
if existing_org:
return jsonify({
1: {'name': ['Name not available']}
}), 422
org.name = new_name
services.account.org.save(org)
user_id = authed_user.id
services.activity.event.record(
authenticated_user_id=user_id, user_id=user_id,
org_id=org.id, event_type="organization", phase="rename",
payload=json.dumps({"old_name": old_name}))
return jsonify(""), 204
def set_org_infos(services: Services, authed_user, org_id: UUID,
payload: Dict[str, Any]):
org = services.account.org.get(org_id)
if not org:
return abort(404)
owns_org = services.account.org.is_owner(org.id, authed_user.id)
if not owns_org:
return abort(404)
try:
payload = org_settings_schema.load(payload)
except ValidationError as err:
return jsonify(err.messages), 422
if org.settings is None:
org.settings = deepcopy(DEFAULT_ORG_SETTINGS)
old_settings = deepcopy(org.settings)
org.settings["email"] = payload.get("email")
org.settings["url"] = payload.get("url")
org.settings["logo"] = payload.get("logo")
org.settings["description"] = payload.get("description")
services.account.org.save(org)
user_id = authed_user.id
services.activity.event.record(
authenticated_user_id=user_id, user_id=user_id,
org_id=org.id, event_type="organization", phase="edit",
payload=json.dumps({"old_settings": old_settings}))
return jsonify(""), 204
def get_org_experiments(services: Services, authed_user: User,
org_id: UUID):
org = services.account.org.get(org_id)
if not org:
return abort(404)
experiments = services.experiment.get_by_org(org_id)
for experiment in experiments:
workspace = services.account.workspace.get(experiment.workspace_id)
experiment.org_name = org.name
experiment.workspace_name = workspace.name
user = services.account.user.get(experiment.user_id)
if user:
experiment.username = user.username
experiment.user_org_name = user.org_name
return experiments_schema.jsonify(experiments)
def get_schedulings(services: Services, authed_user: User,
org_id: UUID):
org = services.account.org.get(org_id)
if not org:
return abort(404)
schedules = services.scheduling.get_by_org(org_id)
for schedule in schedules:
workspace = services.account.workspace.get(schedule.workspace_id)
schedule.org_name = org.name
schedule.workspace_name = workspace.name
user = services.account.user.get(schedule.user_id)
if user:
schedule.username = user.username
schedule.user_org_name = user.org_name
experiment = services.experiment.get(schedule.experiment_id)
if experiment:
schedule.experiment_name = experiment.payload["title"]
if schedule.cron:
schedule.plan = []
today = date.today()
candidates = list(calendar.Calendar().itermonthdates(
today.year, today.month
))
start = max(
schedule.active_from,
datetime.combine(candidates[0], time.min))
g = croniter(schedule.cron, start_time=start).all_next(datetime)
repeat = None if not schedule.repeat else schedule.repeat - 1
for i, d in enumerate(g):
if repeat and i > repeat:
break
if d.date() not in candidates:
break
schedule.plan.append(d)
return schedules_schema.jsonify(schedules)
|
instructions = input()
h_count = 0
v_count = 0
for letter in instructions:
if letter == "H":
h_count += 1
elif letter == "V":
v_count += 1
if h_count%2 == 0:
if v_count%2 == 0:
print("1 2\n3 4")
elif v_count%2 == 1:
print("2 1\n4 3")
elif h_count%2 == 1:
if v_count%2 == 0:
print("3 4\n1 2")
elif v_count%2 == 1:
print("4 3\n2 1") |
import FWCore.ParameterSet.Config as cms
XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource",
geomXMLFiles = cms.vstring('Geometry/CMSCommonData/data/materials.xml',
'Geometry/CMSCommonData/data/rotations.xml',
'Geometry/CMSCommonData/data/normal/cmsextent.xml',
'Geometry/CMSCommonData/data/cms.xml',
'Geometry/CMSCommonData/data/cmsMother.xml',
'Geometry/CMSCommonData/data/caloBase.xml',
'Geometry/CMSCommonData/data/cmsCalo.xml',
'Geometry/EcalCommonData/data/eregalgo.xml',
'Geometry/EcalCommonData/data/ebalgo.xml',
'Geometry/EcalCommonData/data/ebcon.xml',
'Geometry/EcalCommonData/data/ebrot.xml',
'Geometry/EcalCommonData/data/eecon.xml',
'Geometry/EcalCommonData/data/eehier.xml',
'Geometry/EcalCommonData/data/eefixed.xml',
'Geometry/EcalCommonData/data/eealgo.xml',
'Geometry/EcalCommonData/data/escon.xml',
'Geometry/EcalCommonData/data/esalgo.xml',
'Geometry/EcalCommonData/data/eeF.xml',
'Geometry/EcalCommonData/data/eeB.xml',
'Geometry/EcalSimData/data/ecalsens.xml',
'Geometry/HcalSimData/data/CaloUtil.xml',
'Geometry/EcalSimData/data/EcalProdCuts.xml',
'Geometry/CMSCommonData/data/FieldParameters.xml',
'Geometry/TrackerCommonData/data/trackermaterial.xml'),
rootNodeName = cms.string('cms:OCMS')
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import asyncio
import logging
import uuid
import os
import time
logging.basicConfig(format=u'%(filename)s[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG)
import os.path
import sys
SOCKET_PING_INTERVAL = 30
_DEFAULT_LIMIT = 2**30
class SocketClient:
"""
Client socket object for sending and receiving JSON messages
"""
def __init__(self, address, handler_functions, storage_dir=None, reader=None, writer=None):
"""
Initialize client object
:param address: array
:param handler_functions: dict, keys - handling action, values - function to handle message, must be coroutine
:param storage_dir: string, directory for storing messages on terminate
:param reader: socket stream reader
:param writer: socket stream writer
"""
self._host = address[0]
self._port = address[1]
self._reader = reader
self._writer = writer
self._wait_tasks = None
self._storage_dir = storage_dir
self._handler_functions = handler_functions
self._loop = asyncio.get_event_loop()
self._message_queue = asyncio.Queue(loop=self._loop)
self._incoming_message_queue = asyncio.Queue(loop=self._loop)
self.load_saved_messages()
self._connected = False
self._terminate = False
self._handler_started = False
self.ponged = True
self.last_ping_at = time.time()
self.last_ping_time = 0
self.wait_responses = {}
@asyncio.coroutine
def message_handler(self):
"""
Get messages from queue and handle it
:return:
"""
logging.info("start handler on %s" % self._host)
self._handler_started = True
while not self._terminate:
# I made this so that sender will correctly finish on terminate
if self._incoming_message_queue.qsize() > 0:
logging.debug("messages in handler queue %d" % self._incoming_message_queue.qsize())
message = yield from self._incoming_message_queue.get()
try:
result = yield from self._handler_functions[message["action"]](message)
if result:
if "action_id" in message.keys():
result["action_id"] = message["action_id"]
self.append_message(result)
except Exception as ex:
logging.error("handling error in line %s: %s" % (str(sys.exc_info()[-1].tb_lineno), str(ex)))
self._incoming_message_queue.put_nowait(message)
else:
yield from asyncio.sleep(0.1)
logging.info("end handler on %s" % self._host)
@asyncio.coroutine
def wait_for_disconnect(self):
"""
Waiting for finish tasks
:return:
"""
if self._wait_tasks is not None:
yield from self._wait_tasks
@asyncio.coroutine
def connector(self, limit=_DEFAULT_LIMIT):
"""
Endless function for connect to remote server
Auto-reconnect on socket disconnections
"""
while not self._terminate:
if not self._connected:
yield from self.wait_for_disconnect()
logging.debug("connecting to server %s" % self._host)
try:
asyncio.set_event_loop(self._loop)
reader, writer = yield from asyncio.open_connection(self._host, self._port,
loop=self._loop, limit=limit)
self.start(reader, writer, True)
except OSError:
logging.error("connection to server %s failed!" % self._host)
self.disconnect()
yield from asyncio.sleep(2.0)
def start(self, reader, writer, with_pinger=False):
"""
Function for start sender and receiver tasks
:param reader: socket stream reader
:param writer: socket stream writer
:param with_pinger: whether to start pinger task or not
:return: boolean, whether reader and writer start
"""
if not self.connected():
self._connected = True
self._reader = reader
self._writer = writer
tasks = [self._loop.create_task(self._sender()), self._loop.create_task(self._receiver())]
if with_pinger:
tasks.append(self._loop.create_task(self.pinger()))
if not self._handler_started:
self._loop.create_task(self.message_handler())
self._wait_tasks = asyncio.wait(tasks)
return True
else:
logging.warning("connection to server %s already exists!" % self._host)
return False
@asyncio.coroutine
def _receiver(self):
"""
Loop for receive and handle data from remote host
Run while connection opened
"""
logging.info("start receiver on %s" % self._host)
while self.connected():
try:
message = yield from self.receive_json()
if message:
if "action_id" in message.keys():
if message["action_id"] in self.wait_responses.keys():
future = self.wait_responses.pop(message["action_id"])
future.set_result(message)
if message["action"] == 'pong':
self.last_ping_time = time.time() - self.last_ping_at
self.ponged = True
if message["action"] == 'ping':
logging.debug("received ping")
self.append_message({"action": "pong", "status": "success"})
if message["action"] in self._handler_functions.keys():
self._incoming_message_queue.put_nowait(message)
except Exception as ex:
logging.error("receiver error in line %s: %s" % (str(sys.exc_info()[-1].tb_lineno), str(ex)))
logging.info("end receiver on %s" % self._host)
@asyncio.coroutine
def _sender(self):
"""
Loop for read data from message queue and send it to remote host
Run while connection opened
"""
logging.info("start sender on %s" % self._host)
while self.connected():
# I made this so that sender will correctly finish on disconnect
if self._message_queue.qsize() > 0:
message = yield from self._message_queue.get()
try:
logging.debug("Send: %r" % message)
message = message + "\n"
data = message.encode()
self._writer.write(data)
yield from self._writer.drain()
except Exception as ex:
logging.error("sending error in line %s: %s" % (str(sys.exc_info()[-1].tb_lineno), str(ex)))
self._message_queue.put_nowait(message)
self.disconnect()
else:
yield from asyncio.sleep(1)
logging.info("end sender on %s" % self._host)
@asyncio.coroutine
def pinger(self):
"""
Loop for send ping actions to remote host and waiting for pong
Run while connection opened
"""
while self.connected():
self.ponged = False
self.last_ping_at = time.time()
self.append_message({"action": "ping"})
yield from asyncio.sleep(SOCKET_PING_INTERVAL)
if not self.ponged and self.connected():
logging.error("pong from %s not received in %d seconds, disconnect" % (self._host,
SOCKET_PING_INTERVAL))
self.disconnect()
@asyncio.coroutine
def receive_json(self):
"""
Receive json string and decode it to object
:return: message object or False if remote host disconnected
"""
try:
data = yield from self._reader.readline()
message = data.decode()
if message != '':
logging.debug("Received %r from %r" % (message, self._host))
message_object = json.loads(message)
return message_object
else:
self.disconnect()
return None
except Exception as ex:
logging.error("receiver error in line %s: %s" % (str(sys.exc_info()[-1].tb_lineno), str(ex)))
self.disconnect()
return None
def connected(self):
return self._connected
def append_message(self, message_object, wait_for_response=False, response_timeout=30.0, action_id=None):
"""
Append message to message queue and wait for message response if wait_for_response is True
:param message_object: json-serializable object, message to send
:param wait_for_response: boolean, whether wait for response or not
:param response_timeout: unsigned float
:param action_id: message action id for tracking response (if not specified, it will be generated automatically)
:return: boolean or response message if wait_for_response is True and response accepted
"""
if action_id is None and "action_id" not in message_object.keys():
action_id = str(uuid.uuid4())
message_object["action_id"] = action_id
result = True
message = json.dumps(message_object)
future = asyncio.Future(loop=self._loop)
if wait_for_response:
self.wait_responses[message_object["action_id"]] = future
self._message_queue.put_nowait(message)
if wait_for_response:
time_passed = 0.0
while not future.done():
time.sleep(0.1)
time_passed += 0.1
if time_passed > response_timeout:
future.cancel()
self.wait_responses.pop(message_object["action_id"])
if future.cancelled():
# TODO: maybe should throw new exception
result = False
else:
result = future.result()
return result
def save_all_messages(self):
"""
Save all messages in queue to file
"""
if self._storage_dir:
with open(self._storage_dir + 'queries_%s.txt' % self._host, "a") as queries_file:
while self._message_queue.qsize() > 0:
message = yield from self._message_queue.get()
queries_file.write("%s\n" % message)
def load_saved_messages(self):
"""
Load all messages from file to queue
"""
if self._storage_dir:
if os.path.isfile(self._storage_dir + 'queries_%s.txt' % self._host):
with open(self._storage_dir + 'queries_%s.txt' % self._host, "r") as queries_file:
messages = queries_file.read()
messages = messages.split('\n')
for message in messages:
if message:
self._message_queue.put_nowait(message)
# Truncate queries file
open(self._storage_dir + 'queries_%s.txt' % self._host, 'w').close()
def disconnect(self):
if self._connected:
self._connected = False
if not self._loop.is_closed():
self._writer.close()
def __del__(self):
self.disconnect()
self.save_all_messages()
self._terminate = True
|
import numpy as np
import os
import seas.video
from seas.video import load, dfof, rotate, rescale
from seas.filemanager import sort_experiments, get_exp_span_string, read_yaml
from seas.rois import roi_loader, make_mask, get_masked_region, insert_masked_region, draw_bounding_box
from seas.hdf5manager import hdf5manager
from seas.ica import project, filter_mean
from seas.signalanalysis import sort_noise, lag_n_autocorr
from seas.waveletAnalysis import waveletAnalysis
class Experiment:
'''
A class to store mesoscale calcium imaging experiment information and provide functions used for common experiment and video manipulations.
Includes functionality for loading and rotating videos, cropping to a specific region of interest (defined by user input and/or roi files, loading and storing yaml metadata, etc.)
Attributes:
downsample:
The spatial downsampling factor
downsample_t:
The temporal downsampling factor
movie:
The loaded raw movie file
path:
The pathlist of loaded videos
n_rotations:
The number of times the video was rotated
rotate_rois_with_video:
Whether rois are rotated with the video or not
bounding_box:
The bounding coordinates selected for the content of interest from the video file
shape:
The video shape
name:
The detected name of the experiment from the input files
dir:
The directory the video files reside in
The following attributes are also available if rois are loaded:
n_roi_rotations:
The number of times the rois were rotated
rois:
The roi dictionary loaded from FIJI RoiSet.zip file
roimask:
A binary masked array denoting where the movie should be masked
meta:
The experiment metadata loaded from a yaml file
Functions:
load_rois:
Load rois from a FIJI RoiSet.zip file
load_meta:
Load metadata from a yaml file
rotate:
Rotate the video CCW, adjust mask and bounding box accordingly
define_mask_boundaries:
Auto detect the mask boundaries from the loaded roimask
draw_bounding_box:
Launch a GUI to draw a bounding box to crop the movie
bound_mask:
Returns the mask bound to the bounding box
bound_movie:
Returns the movie bound to the bounding box
ica_project:
Perform an ICA projection to the movie
Initialization Arguments:
pathlist:
The list of paths to load raw video data from, in order. To sort, use seas.filemanager functions.
downsample:
An integer factor to spatially downsample frames with. Implements an integer averaging spatial downsample where downsample x downsample pixels are reduced to 1.
downsample_t:
An integer factor to spatially downsample frames with. Takes the mean between sets of downsample_t frames.
n_rotations:
The number of ccw rotations to rotate the video.
rotate_rois_with_video:
If true, rotate all loaded rois by n_rotations as well.
'''
def __init__(self,
pathlist,
downsample=False,
downsample_t=False,
n_rotations=0,
rotate_rois_with_video=False):
'''
Arguments:
pathlist:
The list of paths to load raw video data from, in order. To sort, use seas.filemanager functions.
downsample:
An integer factor to spatially downsample frames with. Implements an integer averaging spatial downsample where downsample x downsample pixels are reduced to 1.
downsample_t:
An integer factor to spatially downsample frames with. Takes the mean between sets of downsample_t frames.
n_rotations:
The number of ccw rotations to rotate the video.
rotate_rois_with_video:
If true, rotate all loaded rois by n_rotations as well. This parameter overrides roi rotations set by load_rois.
'''
print('\nInitializing Experiment\n-----------------------')
if isinstance(pathlist, str):
pathlist = [pathlist]
movie = seas.video.load(pathlist, downsample, downsample_t)
assert (len(movie.shape) == 3), 'File was not a 3 dimensional video.\n'
if np.any(np.isnan(movie)):
# if the video was already masked
roimask = np.zeros(movie[0].shape, dtype='uisnt8')
roimask[np.where(~np.isnan(movie[0]))] = 1
self.roimask = roimask
self.downsample = downsample
self.downsample_t = downsample_t
self.movie = movie
self.path = pathlist
self.n_rotations = n_rotations
self.rotate_rois_with_video = rotate_rois_with_video
# define default bounding box as full size video
self.bounding_box = np.array([[0, self.movie.shape[1]],
[0, self.movie.shape[2]]])
self.shape = self.bound_movie().shape
# if multiple experiments included, get a span string
# (i.e. 01, 02, 03, 04 - > 01-04)
experiments = sort_experiments(pathlist, verbose=False).keys()
spanstring = get_exp_span_string(experiments)
self.name = spanstring
self.dir = os.path.dirname(pathlist[0])
self.rotate()
def rotate(self):
'''
Rotates movie by self.n_rotations, and updates the shape and bounding box to reflect this change.
'''
if self.n_rotations > 0:
self.movie = rotate(self.movie, self.n_rotations)
self.bounding_box = np.array([[0, self.movie.shape[1]],
[0, self.movie.shape[2]]
]) #resets to whole movie
self.shape = self.bound_movie().shape
def load_rois(self, path, n_roi_rotations=0):
'''
Load rois set in an FIJI/ImageJ RoiSet.zip file to the experiment file, and creates a roimask based on the rois.
Arguments:
path:
The path to the .zip file.
n_roi_rotations:
The number of CCW rotations to apply to the roimask after loading. This argument is not used if rotate_rois_with_video was True when loading the experiment.
'''
if self.rotate_rois_with_video:
n_roi_rotations = self.n_rotations
rois = roi_loader(path)
self.n_roi_rotations = n_roi_rotations
# Store in class file
print(len(rois), 'ROIs found')
# resize (and flip) if necessary
if self.downsample is not False:
print('video was downsampled.. downsampling rois.')
for roi in rois:
rois[roi] = rois[roi] // self.downsample
self.rois = rois
# Initialize Empty Mask
roimask = np.zeros(self.shape[1:3], dtype='uint8')
# Add mask region from all rois
for i, roi in enumerate(rois):
roimask += make_mask(rois[roi], self.shape[1:3])
roimask[np.where(roimask > 1)] = 1
if roimask.sum().sum() == 0:
print('Roimask contains no ROI regions. Not storing..')
return
self.roimask = rotate(roimask, n_roi_rotations)
print('')
def load_meta(self, meta_path):
'''
Load metadata to the experiment file. This is not used in any decomposition, but provides a convenient way to save metadata along with the processed file.
Arguments:
meta_path:
The path to the metadata .yaml file.
'''
print('\nLoading Metadata\n-----------------------\n')
assert metapath.endswith('.yaml'), 'Metadata was not a valid yaml file.'
meta = read_yaml(meta_path)
self.meta = meta
def draw_bounding_box(self):
'''
Launches an opencv GUI to click and define a bounding box for the video. Click and drag to assign the bounding box borders.
'''
frame = self.movie[0, :, :].copy()
frame = rescale(frame, cap=False).astype('uint8')
ROI = draw_bounding_box(frame, required)
if ROI is not None:
self.bounding_box = ROI
self.shape = (self.shape[0], ROI[0][1] - ROI[0][0],
ROI[1][1] - ROI[1][0])
def define_mask_boundaries(self):
'''
Updates the experiment bounding_box to go up to the edge of the rois previously loaded by load_rois.
'''
assert hasattr(self, 'roimask'), ('Define roimask before '
'finding boundaries')
row, cols = np.nonzero(self.roimask)
ROI = np.array([[np.min(row), np.max(row)],
[np.min(cols), np.max(cols)]])
self.bounding_box = ROI
def bound_movie(self, movie=None, bounding_box=None):
'''
Returns the movie cropped by the bounding box.
'''
if bounding_box == None:
bounding_box = self.bounding_box
if movie is None:
movie = self.movie
ROI = bounding_box
return movie[:, ROI[0][0]:ROI[0][1], ROI[1][0]:ROI[1][1]]
def bound_mask(self, bounding_box=None):
'''
Returns the roimask cropped by the bounding box.
'''
try:
if bounding_box == None:
bounding_box = self.bounding_box
ROI = bounding_box
return self.roimask[ROI[0][0]:ROI[0][1], ROI[1][0]:ROI[1][1]]
except:
return None
def ica_project(self,
movie=None,
savedata=True,
calc_dfof=True,
del_movie=True,
n_components=None,
svd_multiplier=None,
suffix='',
output_folder=None,
mean_filter_method='wavelet',
low_cutoff=0.5):
'''
Apply an ica decomposition to the experiment. If rois and/or a bounding box have been defined, these will be used to crop the movie before filtration.
By default, results are all saved to a [experiment]_[parameters]_ica.hdf5 file in the same directory as the original video files.
Arguments:
movie:
The movie to apply ica decomposition to. If left blank, the movie cropped by the roimask and bounding box is used.
save_data:
Whether to save components to a file, or just return as a variable.
calc_dfof:
If true, calculate the dFoF before applying ICA decomposition. If false, ICA is computed on the raw movie.
del_movie:
If true, delete the original full movie array before decomposition to save memory.
n_components:
A specified number of components to project. If left as None, the svd_multiplier auto component selection is used.
svd_multiplier:
The factor to multiply by the detected SVD noise threshold while estimating the number of ICA components to identify. When left blank, the automatic value set in seas.ica.project is used.
suffix:
Optional suffix to append to the ica processed file.
output_folder:
By default, the results are saved to an [experiment]_ica.hdf5 file, in the same folder as the original video. If a different folder is specified by output_folder, the ica file will be saved there instead.
mean_filter_method:
Which method to use while filtering the mean. Default is highpass wavelet filter.
low_cutoff:
The lower cutoff for a highpass filter. Default is 0.5Hz.
Returns:
components: A dictionary containing all the results, metadata, and information regarding the filter applied.
mean:
the original video mean
roimask:
the mask applied to the video before decomposing
shape:
the original shape of the movie array
eig_mix:
the ICA mixing matrix
timecourses:
the ICA component time series
eig_vec:
the eigenvectors
n_components:
the number of components in eig_vec (reduced to only have 25% of total components as noise)
project_meta:
The metadata for the ica projection
expmeta:
All metadata created for this class
lag1:
the lag-1 autocorrelation
noise_components:
a vector (n components long) to store binary representation of which components were detected as noise
cutoff:
the signal-noise cutoff value
mean_filtered:
the filtered mean
mean_filter_meta:
metadata on how the mean filter was applied
if the n_components was automatically set, the following additional keys are also returned in components
svd_cutoff:
the number of components originally decomposed
lag1_full:
the lag-1 autocorrelation of the full set of components decomposed before cropping to only 25% noise components
svd_multiplier:
the svd multiplier value used to determine cutoff
'''
print('\nICA Projecting\n-----------------------')
if savedata:
suffix_list = []
if len(suffix) > 0:
suffix_list.append(suffix)
if self.downsample:
suffix_list.append(str(self.downsample) + 'xds')
if self.downsample_t:
suffix_list.append(str(self.downsample_t) + 'xtds')
if not calc_dfof:
suffix_list.append('raw')
if svd_multiplier is not None:
suffix_list.append(str(svd_multiplier) + 'svdmult')
if output_folder is None:
output_folder = os.path.dirname(self.path[0])
suffix_list.append('ica.hdf5')
suffix = '_'.join(suffix_list)
savepath = os.path.join(output_folder, self.name + '_' + suffix)
print('Saving ICA data to:', savepath)
else:
savepath = None
if savedata:
f = hdf5manager(savepath)
components = f.load() # should be empty if it didn't exist yet.
else:
components = {}
# Load all attributes of experiment class into expmeta dictionary
# to keep info in ica and filtered files.
ignore = ['movie', 'filtered', 'notifications']
expdict = self.__dict__
expmeta = {}
for key in expdict:
if key not in ignore:
expmeta[key] = expdict[key]
components['expmeta'] = expmeta
print('Saving keys under expmeta in PC components:')
for key in expmeta:
print(key)
if savedata:
f.save(components)
# calculate decomposition:
if 'eig_vec' and 'eig_mix' in components:
# if data was already in the save path, use it
print('Found ICA decomposition in components')
else:
if hasattr(self, 'roimask'):
roimask = self.bound_mask()
else:
roimask = None
if movie is None:
movie = self.bound_movie()
if calc_dfof:
movie = dfof(movie)
if del_movie:
print('Deleting original movie to save memory..')
del self.movie
#drop dimension and flip to prepare timecourse for ICA
shape = movie.shape
t, x, y = shape
vector = movie.reshape(t, x * y)
vector = vector.T # now vector is (x*y, t) for ICA along x*y dimension
print('M has been reshaped from {0} to {1}\n'.format(
movie.shape, vector.shape))
# run ICA projection
ica_project_kwargs = {'vector': vector, 'shape': shape}
if svd_multiplier is not None:
ica_project_kwargs['svd_multiplier'] = svd_multiplier
if roimask is not None:
ica_project_kwargs['roimask'] = roimask
if n_components is not None:
ica_project_kwargs['n_components'] = n_components
components = project(**ica_project_kwargs)
components['expmeta'] = expmeta
if savedata:
f.save(components)
# Calculate other relevant parameters
components['mean_filtered'] = filter_mean(
components['mean'],
filter_method=mean_filter_method,
low_cutoff=low_cutoff)
components['mean_filter_meta'] = {
'mean_filter_method': mean_filter_method,
'low_cutoff': low_cutoff
}
if savedata:
f.save({
'noise_components': components['noise_components'],
'cutoff': components['cutoff'],
'lag1': components['lag1'],
'mean_filtered': components['mean_filtered'],
'mean_filter_meta': components['mean_filter_meta'],
})
print('Saved all data to file:')
f.print()
return components
|
import os
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# transforms
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# datasets
trainset = torchvision.datasets.FashionMNIST('./data',
download=True,
train=True,
transform=transform)
testset = torchvision.datasets.FashionMNIST('./data',
download=True,
train=False,
transform=transform)
# dataloaders
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
# constant for classes
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot')
# helper function to show an image
# (used in the `plot_classes_preds` function below)
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
|
from django.urls import reverse
from service_catalog.models.documentation import Doc
from tests.test_service_catalog.base_test_request import BaseTestRequest
class TestCustomerCatalogViews(BaseTestRequest):
def setUp(self):
super(TestCustomerCatalogViews, self).setUp()
self.client.login(username=self.standard_user, password=self.common_password)
self.new_doc = Doc.objects.create(title="test_doc", content="# tittle 1")
self.new_doc.services.add(self.service_test)
def _test_can_list_doc(self):
url = reverse('service_catalog:doc_list')
response = self.client.get(url)
self.assertEqual(200, response.status_code)
self.assertTrue("table" in response.context)
self.assertEqual(response.context["table"].data.data.count(), 1)
def test_customer_can_list_doc(self):
self.client.login(username=self.standard_user, password=self.common_password)
self._test_can_list_doc()
def test_admin_can_list_doc(self):
self._test_can_list_doc()
def test_cannot_get_doc_list_when_logout(self):
self.client.logout()
url = reverse('service_catalog:doc_list')
response = self.client.get(url)
self.assertEqual(302, response.status_code)
def test_admin_can_list_admin_doc_list(self):
self.client.login(username=self.superuser, password=self.common_password)
url = reverse('admin:service_catalog_doc_changelist')
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_admin_can_edit_admin_doc(self):
self.client.login(username=self.superuser, password=self.common_password)
url = reverse('admin:service_catalog_doc_change', args=[self.new_doc.id])
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_customer_cannot_edit_admin_doc(self):
self.client.login(username=self.standard_user, password=self.common_password)
url = reverse('admin:service_catalog_doc_change', args=[self.new_doc.id])
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self.assertTrue("next=/admin", response.url)
def test_cannot_edit_admin_doc_when_logout(self):
self.client.login(username=self.standard_user, password=self.common_password)
url = reverse('admin:service_catalog_doc_change', args=[self.new_doc.id])
response = self.client.get(url)
self.assertEqual(302, response.status_code)
def test_customer_cannot_list_admin_doc(self):
self.client.login(username=self.standard_user, password=self.common_password)
url = reverse('admin:service_catalog_doc_changelist')
response = self.client.get(url)
self.assertEqual(302, response.status_code)
self.assertTrue("next=/admin", response.url)
def test_get_doc_page(self):
response = self.client.get(reverse('service_catalog:doc_show', args=[self.new_doc.id]))
self.assertEqual(200, response.status_code)
|
#
# Copyright 2022 European Centre for Medium-Range Weather Forecasts (ECMWF)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#
import logging
import polytope_server.common.config as polytope_config
from polytope_server import worker
from polytope_server.common.request import Request
class TestWorker:
def setup_method(self, method):
self.request = Request()
self.request.collection = "debug"
self.request.user_request = "hello_world" # all default
self.worker = worker.Worker(polytope_config.global_config)
def test_worker(self):
self.worker.process_request(self.request)
data = self.worker.staging.read(self.request.id)
logging.info("Size of data is {}".format(len(data)))
assert len(data) == 11
def test_worker_failed(self):
self.request.user_request = {"abcdef": 789}
try:
self.worker.process_request(self.request)
except Exception as e:
self.worker.on_request_fail(self.request, e)
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict
import threading
from towhee.dag.operator_repr import OperatorRepr
from towhee.utils import HandlerMixin
from towhee.engine.task import Task
# from towhee.engine.graph_context import GraphContext
from towhee.dataframe import DataFrame
from towhee.engine._operator_io import create_reader, create_writer
class OperatorContext(HandlerMixin):
"""
The OperatorContext manages an operator's input data and output data at runtime,
as well as the operators' dependency within a GraphContext.
The abstraction of OperatorContext hides the complexity of Dataframe management,
input iteration, and data dependency between Operators. It offers a Task-based
scheduling context.
Args:
op_repr: (OperatorRepr)
The operator representation
dataframes: (`dict` of `DataFrame`)
All the `DataFrames` in `GraphContext`
is_schedulable: (`bool`)
Whether the `OperatorContext` is schedulable.
There are special `OperatorContext`s that are not schedulable, such as
`_start_op`, `_end_op`.
"""
def __init__(
self,
op_repr: OperatorRepr,
dataframes: Dict[str, DataFrame],
is_schedulable: bool = True
):
self._repr = op_repr
self._is_schedulable = is_schedulable
# todo: GuoRentong, issue #114
inputs = list({dataframes[input['df']] for input in op_repr.inputs})
input_iter_type = op_repr.iter_info['type']
inputs_index = dict((item['name'], item['col'])
for item in op_repr.inputs)
self.inputs = inputs
self._reader = create_reader(inputs, input_iter_type, inputs_index)
outputs = list({dataframes[output['df']]
for output in op_repr.outputs})
self._writer = create_writer(outputs)
self.outputs = outputs
self._finished = False
self._has_tasks = True
self._taskid = 0
self._finished_task_count = 0
self._lock = threading.Lock()
self.add_handler_methods('op_start', 'op_finish', 'task_ready', 'task_start', 'task_finish')
self.add_task_finish_handler(self._write_outputs)
@property
def name(self):
return self._repr.name
@property
def is_schedulable(self) -> bool:
return self._is_schedulable
def pop_ready_tasks(self, n_tasks: int = 1) -> List:
"""
Pop n ready Tasks if any. The number of returned Tasks may be less than n
if there are not enough Tasks.
Return: a list of ready Tasks.
"""
ready_tasks = []
task_num = n_tasks
while task_num > 0:
op_input_params = self._reader.read()
if op_input_params:
task = self._create_new_task(op_input_params)
ready_tasks.append(task)
task_num -= 1
continue
if op_input_params is None:
self._has_tasks = False
if not self._has_tasks and self._taskid == self._finished_task_count:
self._writer.close()
self._finished = True
break
return ready_tasks
@property
def is_finished(self) -> bool:
# todo: GuoRentong. see issue #124
return self._finished
@property
def has_tasks(self) -> bool:
# todo: GuoRentong. see issue #124
return self._has_tasks
@property
def num_ready_tasks(self) -> int:
"""
Get the number of ready Tasks.
"""
return self._reader.size
@property
def num_finished_tasks(self) -> int:
"""
Get the number of finished tasks.
"""
raise NotImplementedError
# consider the thread-safe read write. This OperatorContext should be
# self._finished_tasks' only monifier.
def _write_outputs(self, task: Task):
with self._lock:
self._finished_task_count += 1
self._writer.write(task.outputs)
def _next_task_inputs(self):
"""
Manage the preparation works for an operator's inputs
Returns one task's inputs on each call.
Return: a list of inputs, list element can be scalar or Array.
"""
raise NotImplementedError
def _create_new_task(self, inputs: Dict[str, any]):
with self._lock:
t = Task(self.name, self._repr.function,
self._repr.init_args, inputs, self._taskid)
self._taskid += 1
t.add_task_finish_handler(self.task_finish_handlers)
return t
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import math
from . import utilities
from . import validations
from . import activations
from . import initializers
from .utilities import *
from .validations import *
from .math import *
from .find_neighbors import *
|
#! /usr/bin/python3
r'''###############################################################################
###################################################################################
#
#
# GPT-2 with Relative Global Attention
# Version 0.5
#
# PLEASE NOTE THAT THIS IS A WORK IN PROGRESS
# CHECK BACK FOR UPDATES SOON
#
# Based upon a source-code of Sashmark97:
# https://github.com/Sashmark97/midigen
#
# Project Los Angeles
# Tegridy Code 2021
#
# https://github.com/Tegridy-Code/Project-Los-Angeles
#
#
###################################################################################
###################################################################################
# Copyright 2021 Project Los Angeles / Tegridy Code
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###################################################################################
###################################################################################'''
########################################################
#
# Critical dependencies/requirements:
#
# pip install torch
# pip install tqdm
#
########################################################
print('Loading GPT2-RGA Module...')
########################################################
import glob
import os
import sys
import math
import time
import random
import pickle
import joblib
from tqdm import tqdm
import torch
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
import torch.nn as nn
from torch.nn import functional as F
from torch.optim.lr_scheduler import LambdaLR
from torch.nn.modules.normalization import LayerNorm
from torch.nn.parameter import Parameter
from torch.nn.modules.linear import Linear
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.normalization import LayerNorm
from torch.nn.init import *
from torch.nn.functional import linear, softmax, dropout
########################################################
# Constants
SEQUENCE_START = 0
RANGE_NOTE_ON = 128
RANGE_NOTE_OFF = 128
RANGE_VEL = 32
RANGE_TIME_SHIFT = 100
# Taken from the paper
ADAM_BETA_1 = 0.9
ADAM_BETA_2 = 0.98
ADAM_EPSILON = 10e-9
LR_DEFAULT_START = 1.0
SCHEDULER_WARMUP_STEPS = 4000
# LABEL_SMOOTHING_E = 0.1
# DROPOUT_P = 0.1
TOKEN_END = RANGE_NOTE_ON + RANGE_NOTE_OFF + RANGE_VEL + RANGE_TIME_SHIFT
TOKEN_PAD = TOKEN_END + 1
VOCAB_SIZE = TOKEN_PAD + 1
TORCH_FLOAT = torch.float32
TORCH_INT = torch.int32
TORCH_LABEL_TYPE = torch.long
PREPEND_ZEROS_WIDTH = 4
TORCH_CPU_DEVICE = torch.device("cpu")
USE_CUDA = 1
TORCH_CUDA_DEVICE = torch.device("cuda:0")
#====
weight_modulus = 1
print_modulus = 1
n_workers = 1
lr = None
ce_smoothing = None
batch_size = 4
random_seq = True
epochs = 5
rpr = False #'store_true'
max_seq = 1024
n_layers = 6
num_heads = 8
d_model = 512
dim_feedforward = 512
dropout_prob = 0.1
########################################################
def cpu_device():
return TORCH_CPU_DEVICE
def get_device():
if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):
return TORCH_CPU_DEVICE
else:
return TORCH_CUDA_DEVICE
def train(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, num_iters=-1):
best_eval_acc = 0.0
best_eval_acc_epoch = -1
best_eval_loss = float("inf")
best_eval_loss_epoch = -1
loss_hist = []
out = -1
model.train()
with tqdm(total=len(dataloader)) as bar_train:
for batch_num, batch in enumerate(dataloader):
time_before = time.time()
opt.zero_grad()
x = batch[0].to(get_device())
tgt = batch[1].to(get_device())
y, _ = model(x)
y = y.reshape(y.shape[0] * y.shape[1], -1)
tgt = tgt.flatten()
out = loss.forward(y, tgt)
out.backward()
opt.step()
if(lr_scheduler is not None):
lr_scheduler.step()
time_after = time.time()
time_took = time_after - time_before
lr = opt.param_groups[0]['lr']
bar_train.set_description(f'Epoch: {cur_epoch} Loss: {float(out):.4} LR: {float(lr):.8}')
bar_train.update(1)
loss_hist.append(out.item())
if batch_num == num_iters:
break
return loss_hist
def compute_epiano_accuracy(out, tgt):
softmax = nn.Softmax(dim=-1)
out = torch.argmax(softmax(out), dim=-1)
out = out.flatten()
tgt = tgt.flatten()
mask = (tgt != TOKEN_PAD)
out = out[mask]
tgt = tgt[mask]
if(len(tgt) == 0):
return 1.0
num_right = (out == tgt)
num_right = torch.sum(num_right).type(TORCH_FLOAT)
acc = num_right / len(tgt)
return acc
def eval_model(model, dataloader, loss, num_iters=-1):
model.eval()
avg_acc = -1
avg_loss = -1
with torch.set_grad_enabled(False):
n_test = len(dataloader)
sum_loss = 0.0
sum_acc = 0.0
with tqdm(total=len(dataloader)) as bar_eval:
for batch in dataloader:
x = batch[0].to(get_device())
tgt = batch[1].to(get_device())
y, _ = model(x)
sum_acc += float(compute_epiano_accuracy(y, tgt))
y = y.reshape(y.shape[0] * y.shape[1], -1)
tgt = tgt.flatten()
out = loss.forward(y, tgt)
sum_loss += float(out)
bar_eval.set_description(f'Loss val: {float(out):.4} Acc: {float(sum_acc / (bar_eval.n + 1)):.4}')
bar_eval.update(1)
if bar_eval.n == num_iters:
break
avg_loss = sum_loss / n_test
avg_acc = sum_acc / n_test
return avg_loss, avg_acc
class LrStepTracker:
def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):
# Store Values
self.warmup_steps = warmup_steps
self.model_dim = model_dim
self.init_steps = init_steps
# Begin Calculations
self.invsqrt_dim = (1 / math.sqrt(model_dim))
self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))
# step
def step(self, step):
step += self.init_steps
if(step <= self.warmup_steps):
return self.invsqrt_dim * self.invsqrt_warmup * step
else:
invsqrt_step = (1 / math.sqrt(step))
return self.invsqrt_dim * invsqrt_step
# get_lr
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
########################################################
#@title Functions
class EPianoDataset(Dataset):
"""
----------
Author: Damon Gwinn
----------
Pytorch Dataset for the Maestro e-piano dataset (https://magenta.tensorflow.org/datasets/maestro).
Recommended to use with Dataloader (https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader)
Uses all files found in the given root directory of pre-processed (preprocess_midi.py)
Maestro midi files.
----------
"""
def __init__(self, midi_list, max_seq=2048, random_seq=True):
self.max_seq = max_seq
self.random_seq = random_seq
self.data_files = midi_list
def __len__(self):
"""
----------
Author: Damon Gwinn
----------
How many data files exist in the given directory
----------
"""
return len(self.data_files)
def __getitem__(self, idx):
"""
----------
Author: Damon Gwinn
----------
Gets the indexed midi batch. Gets random sequence or from start depending on random_seq.
Returns the input and the target.
----------
"""
raw_mid = torch.tensor(self.data_files, dtype=TORCH_LABEL_TYPE, device=cpu_device())
x, tgt = process_midi(raw_mid, self.max_seq, self.random_seq)
return x, tgt
def process_midi(raw_mid, max_seq, random_seq):
"""
----------
Author: Damon Gwinn
----------
Takes in pre-processed raw midi and returns the input and target. Can use a random sequence or
go from the start based on random_seq.
----------
"""
x = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
tgt = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device())
raw_len = len(raw_mid)
full_seq = max_seq + 1 # Performing seq2seq
if(raw_len == 0):
return x, tgt
start = 0
end = 0
# Randomly selecting a range
if (random_seq):
end_range = raw_len - full_seq
start = random.randint(abs(SEQUENCE_START), abs(end_range))
# Always taking from the start to as far as we can
else:
start = SEQUENCE_START
end = start + full_seq
data = raw_mid[start:end]
x = data[:max_seq]
tgt = data[1:full_seq]
return x, tgt
########################################################
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
It is possible to use torch.nn.MultiheadAttention here but I am including an
explicit implementation here to show that there is nothing too scary here.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
# causal mask to ensure that attention is only applied to the left in the input sequence
self.register_buffer("mask", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.enable_rpr = config.enable_rpr
if config.enable_rpr:
self.attn = MultiheadAttentionRPR(config.n_embd, config.n_head, config.attn_pdrop, er_len=config.er_len)
else:
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, config.dim_feedforward),
nn.GELU(),
nn.Linear(config.dim_feedforward, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x, mask=None):
if self.enable_rpr:
x = x + self.attn(self.ln1(x), self.ln1(x), self.ln1(x), attn_mask=mask)[0]
else:
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
class MultiheadAttentionRPR(nn.Module):
"""
----------
Author: Pytorch
Modified: Damon Gwinn
----------
For Relative Position Representation support (https://arxiv.org/abs/1803.02155)
https://pytorch.org/docs/1.2.0/_modules/torch/nn/modules/activation.html#MultiheadAttention
Modification to add RPR embedding Er and call custom multi_head_attention_forward_rpr
----------
"""
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
add_zero_attn=False, kdim=None, vdim=None, er_len=None):
super(MultiheadAttentionRPR, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
# Adding RPR embedding matrix
if(er_len is not None):
self.Er = Parameter(torch.rand((er_len, self.head_dim), dtype=torch.float32))
else:
self.Er = None
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def forward(self, query, key, value, key_padding_mask=None,
need_weights=True, attn_mask=None):
if hasattr(self, '_qkv_same_embed_dim') and self._qkv_same_embed_dim is False:
# return F.multi_head_attention_forward(
# query, key, value, self.embed_dim, self.num_heads,
# self.in_proj_weight, self.in_proj_bias,
# self.bias_k, self.bias_v, self.add_zero_attn,
# self.dropout, self.out_proj.weight, self.out_proj.bias,
# training=self.training,
# key_padding_mask=key_padding_mask, need_weights=need_weights,
# attn_mask=attn_mask, use_separate_proj_weight=True,
# q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
# v_proj_weight=self.v_proj_weight)
return multi_head_attention_forward_rpr(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, rpr_mat=self.Er)
else:
if not hasattr(self, '_qkv_same_embed_dim'):
warnings.warn('A new version of MultiheadAttention module has been implemented. \
Please re-train your model with the new module',
UserWarning)
# return F.multi_head_attention_forward(
# query, key, value, self.embed_dim, self.num_heads,
# self.in_proj_weight, self.in_proj_bias,
# self.bias_k, self.bias_v, self.add_zero_attn,
# self.dropout, self.out_proj.weight, self.out_proj.bias,
# training=self.training,
# key_padding_mask=key_padding_mask, need_weights=need_weights,
# attn_mask=attn_mask)
return multi_head_attention_forward_rpr(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, rpr_mat=self.Er)
# multi_head_attention_forward_rpr
def multi_head_attention_forward_rpr(query, # type: Tensor
key, # type: Tensor
value, # type: Tensor
embed_dim_to_check, # type: int
num_heads, # type: int
in_proj_weight, # type: Tensor
in_proj_bias, # type: Tensor
bias_k, # type: Optional[Tensor]
bias_v, # type: Optional[Tensor]
add_zero_attn, # type: bool
dropout_p, # type: float
out_proj_weight, # type: Tensor
out_proj_bias, # type: Tensor
training=True, # type: bool
key_padding_mask=None, # type: Optional[Tensor]
need_weights=True, # type: bool
attn_mask=None, # type: Optional[Tensor]
use_separate_proj_weight=False, # type: bool
q_proj_weight=None, # type: Optional[Tensor]
k_proj_weight=None, # type: Optional[Tensor]
v_proj_weight=None, # type: Optional[Tensor]
static_k=None, # type: Optional[Tensor]
static_v=None, # type: Optional[Tensor]
rpr_mat=None
):
'''
print('Query: ', query.shape, 'Key: ', key.shape, 'Value: ', value.shape)
print('Equal: ', torch.equal(query, key) and torch.equal(key, value))
print('embed_dim_to_check: ', embed_dim_to_check)
print('num_heads:', num_heads)
print('in_proj_weight: ', in_proj_weight.shape)
print('in_proj_bias: ', in_proj_bias.shape)
print('bias_k:', bias_k, 'bias_v', bias_v)
print('add_zero_attn:', add_zero_attn)
print('dropout_p: ', dropout_p)
print('out_proj_weight: ', out_proj_weight.shape)
print('out_proj_bias:', out_proj_bias.shape)
print('training:', training)
print('need_weights:', need_weights)
print('use_separate_proj_weight:', use_separate_proj_weight)
print('key_padding_mask:', key_padding_mask)
print('attn_mask:', attn_mask.shape)
print('q_proj_weight:', q_proj_weight)
print('k_proj_weight:', k_proj_weight)
print('v_proj_weight:', v_proj_weight)
print('static_k:', static_k)
print('static_v:', static_v)
print('rpr_mat:', rpr_mat.shape)
'''
"""
----------
Author: Pytorch
Modified: Damon Gwinn
----------
For Relative Position Representation support (https://arxiv.org/abs/1803.02155)
https://pytorch.org/docs/1.2.0/_modules/torch/nn/functional.html
Modification to take RPR embedding matrix and perform skew optimized RPR (https://arxiv.org/abs/1809.04281)
----------
"""
# type: (...) -> Tuple[Tensor, Optional[Tensor]]
qkv_same = torch.equal(query, key) and torch.equal(key, value)
kv_same = torch.equal(key, value)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if use_separate_proj_weight is not True:
if qkv_same:
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif kv_same:
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask,
torch.zeros((attn_mask.size(0), 1),
dtype=attn_mask.dtype,
device=attn_mask.device)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device)], dim=1)
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, torch.zeros((attn_mask.size(0), 1),
dtype=attn_mask.dtype,
device=attn_mask.device)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
dtype=key_padding_mask.dtype,
device=key_padding_mask.device)], dim=1)
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
######### ADDITION OF RPR ###########
if(rpr_mat is not None):
rpr_mat = _get_valid_embedding(rpr_mat, q.shape[1], k.shape[1])
qe = torch.einsum("hld,md->hlm", q, rpr_mat)
srel = _skew(qe)
attn_output_weights += srel
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(
attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
def _get_valid_embedding(Er, len_q, len_k):
"""
----------
Author: Damon Gwinn
----------
Gets valid embeddings based on max length of RPR attention
----------
"""
len_e = Er.shape[0]
start = max(0, len_e - len_q)
return Er[start:, :]
def _skew(qe):
"""
----------
Author: Damon Gwinn
----------
Performs the skew optimized RPR computation (https://arxiv.org/abs/1809.04281)
----------
"""
sz = qe.shape[1]
mask = (torch.triu(torch.ones(sz, sz).to(qe.device)) == 1).float().flip(0)
qe = mask * qe
qe = F.pad(qe, (1,0, 0,0, 0,0))
qe = torch.reshape(qe, (qe.shape[0], qe.shape[2], qe.shape[1]))
srel = qe[:, 1:, :]
return srel
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
# input embedding stem
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.softmax = nn.Softmax(dim=-1)
self.enable_rpr = config.enable_rpr
self.block_size = config.block_size
self.apply(self._init_weights)
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
no_decay.add('pos_emb')
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
return optimizer
def forward(self, idx, targets=None):
b, t = idx.size()
if self.enable_rpr:
mask = generate_square_subsequent_mask(t).to(get_device())
else:
mask = None
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
if self.enable_rpr:
x = x.permute(1,0,2)
for module in self.blocks:
x = module(x, mask=mask)
x = x.permute(1,0,2)
else:
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
if self.enable_rpr:
del mask
return logits, loss
def generate(self, primer=None, target_seq_length=1024, beam=0, beam_chance=1.0, temperature=0):
assert (not self.training), "Cannot generate while in training mode"
print("Generating sequence of max length:", target_seq_length)
gen_seq = torch.full((1,target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())
num_primer = len(primer)
gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())
cur_i = num_primer
while(cur_i < target_seq_length):
logits, _ = self.forward(gen_seq[..., :cur_i])
y = self.softmax(logits)[..., :TOKEN_END]
token_probs = y[:, cur_i-1, :] / (temperature if temperature > 0 else 1.)
if(beam == 0):
beam_ran = 2.0
else:
beam_ran = random.uniform(0,1)
if(beam_ran <= beam_chance):
token_probs = token_probs.flatten()
top_res, top_i = torch.topk(token_probs, beam)
beam_rows = top_i // VOCAB_SIZE
beam_cols = top_i % VOCAB_SIZE
gen_seq = gen_seq[beam_rows, :]
gen_seq[..., cur_i] = beam_cols
else:
distrib = torch.distributions.categorical.Categorical(probs=token_probs)
next_token = distrib.sample()
gen_seq[:, cur_i] = next_token
# Let the transformer decide to end if it wants to
if(next_token == TOKEN_END):
print("Model called end of sequence at:", cur_i, "/", target_seq_length)
break
cur_i += 1
if(cur_i % 50 == 0):
print(cur_i, "/", target_seq_length)
return gen_seq[:, :cur_i]
def generate_square_subsequent_mask(sz: int) -> Tensor:
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.1
resid_pdrop = 0.1
attn_pdrop = 0.1
def __init__(self, vocab_size, block_size, dim_feedforward, enable_rpr=False, er_len=None, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
self.dim_feedforward = dim_feedforward
self.enable_rpr = enable_rpr
self.er_len = er_len
for k,v in kwargs.items():
setattr(self, k, v)
import logging
logger = logging.getLogger(__name__)
########################################################
def Plot_Losses(losses):
pass
########################################################
print('GPT2-RGA loading complete!')
print('Enjoy!')
########################################################
########################################################
|
#!/usr/bin/env python3
import numpy as np
from sklearn import preprocessing
print('Label Encoding')
# Sample input labels
input_labels = ['red','black','red','green','black','yellow','white']
# Creating the label encoder
encoder = preprocessing.LabelEncoder()
encoder.fit(input_labels)
# encoding a set of labels
test_labels = ['green','red','black']
encoded_values = encoder.transform(test_labels)
print("\nLabels =", test_labels)
print("Encoded values =", list(encoded_values))
# decoding a set of values
encoded_values = [3,0,4,1]
decoded_list = encoder.inverse_transform(encoded_values)
print("\nEncoded values =", encoded_values)
print("\nDecoded labels =", list(decoded_list))
|
'''
Code for the PyTorch implementation of
"DeepFocus: a Few-Shot Microscope Slide Auto-Focus using a Sample-invariant CNN-based Sharpness Function"
Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
Written by Adrian Shajkofci <[email protected]>,
All rights reserved.
This file is part of DeepFocus.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of mosquitto nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import numpy as np
from toolbox import gaussian_kernel
from toolbox import convolve
from toolbox import rand_int
from toolbox import center_crop_pixel, scale
from toolbox import noisy
from toolbox import random_crop
from toolbox import plot_images
from toolbox import pickle_save
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
import random
import scipy
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from scipy.constants import golden_ratio
from numba import jit
from lmfit.models import GaussianModel, LinearModel, MoffatModel
from lmfit.model import ModelResult
from scipy import optimize
import pywt
from skimage.filters import sobel, rank, laplace
from skimage.morphology import square
from skimage.transform import resize
from unet_detector import *
from sklearn.cluster import MeanShift
import logging
logging.basicConfig(
format="%(asctime)s [FIT] %(message)s".format(00),
handlers=[
logging.FileHandler("output_log_{}.log".format(00)),
logging.StreamHandler()
])
log = logging.getLogger('')
log.setLevel(logging.INFO)
learn = get_learner()
class Calibration:
def __init__(self):
self.gaussian2_center = None
self.peak_center = None
self.gaussian2_sigma = None
self.peak_sigma = None
self.c = None
self.calibration = None
self.params = None
self.focus_map_1d = None
self.z = None
self.mode = "model"
def load(self, input):
self.peak_center = input[0]
self.peak_sigma = input[1]
self.peak_beta = input[8]
self.gaussian2_center = input[2]
self.gaussian2_sigma = input[3]
self.c = input[4]
self.params = input[5]
self.focus_map_1d = input[6]
self.z = input[7]
#self.params['peak_center'].value = 0
#self.params['gaussian2_center'].value = 0
self.calibration = ModelResult(get_model(), self.params)
def get_width(self):
# for moffat function
return 2.0 * self.peak_sigma * np.sqrt(2**(1/self.peak_beta) - 1)
def save(self):
return [self.peak_center, self.peak_sigma, self.gaussian2_center, self.gaussian2_sigma, self.c, self.params, self.focus_map_1d, self.z, self.peak_beta]
def eval(self, x, mode = None):
if self.mode == 'model' and (mode is None or mode == 'model'):
return self.calibration.eval(self.calibration.params, x=x+self.peak_center*1.0) # I don't know if we should center it here and remove the calculations about that elsewhere
else:
inter_func = interp1d(self.z, self.focus_map_1d[:], kind='linear', bounds_error=False,
fill_value=(self.focus_map_1d[0], self.focus_map_1d[-1]))
return inter_func(x + self.peak_center*1.0)#inter_func(x + self.z[np.argmin(self.focus_map_1d)])
class ImageStack:
def _init_(self):
self.image_stack = None
self.width = None
self.height = None
self.z_positions = None
self.focus_map = None
self.downsample = None
def add_image_to_stack(self, image, z_position, update_focus_map=True):
self.image_stack = np.concatenate((self.image_stack, image), axis=0)
self.z_positions = np.concatenate((self.z_positions, [z_position]), axis=0)
log.info('z positions shape {} (last z = {})'.format(self.z_positions.shape, self.z_positions[-1]))
if update_focus_map:
log.info('Set focus map from new image ...')
focus_map = get_focus_map_from_stack(image, downsample=self.downsample, num_iterations=1, gain_sigma=0)
log.info('Format = {} Score mean of all map = {}'.format( focus_map.shape, focus_map.mean()))
self.focus_map = np.concatenate((self.focus_map, focus_map), axis=0)
def set_image_stack(self, image_stack, width, height, downsample, z_positions):
self.image_stack = image_stack
self.width = width
self.height = height
self.z_positions = np.atleast_1d(np.asarray(z_positions))
log.info('z position shape {}'.format(self.z_positions.shape))
self.downsample = downsample
log.info('Set focus map from stack ({} images)...'.format(self.get_num_z()))
self.focus_map = get_focus_map_from_stack(self.image_stack, downsample=downsample, num_iterations=1, gain_sigma=0)
def get_max_z(self):
return np.max(self.z_positions)
def get_min_z(self):
return np.min(self.z_positions)
def get_num_z(self):
return self.image_stack.shape[0]
def get_image_stack(self):
return self.image_stack, np.linspace(self.get_min_z(), self.get_max_z(), self.get_num_z()), self.focus_map
def get_focus_map(self):
return self.focus_map
def get_z_positions(self):
return self.z_positions
def get_resized_focus_map(self):
return np.asarray([resize(self.focus_map[i], (self.width, self.height), order=0) for i in range(self.get_num_z())])
def is_in_roi(self, roi, _x, _y):
coeff_x = self.width / (self.focus_map.shape[0]+1)
roi_transformed = roi // coeff_x
#print('roi trans {} x {} y {}'.format(roi_transformed, _x, _y))
return (_x >= roi_transformed[0] and _x <= roi_transformed[2]) and (_y >= roi_transformed[1] and _y <=roi_transformed[3])
def print_focus_map(self):
plot_images(self.focus_map)
def print_data(self):
plot_images(self.image_stack)
def CMSL(img, window_size):
"""
Contrast Measure based on squared Laplacian according to
'Robust Automatic Focus Algorithm for Low Contrast Images
Using a New Contrast Measure'
by Xu et Al. doi:10.3390/s110908281
window: window size= window X window"""
ky1 = np.array(([0.0, -1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]))
ky2 = np.array(([0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]))
kx1 = np.array(([0.0, 0.0, 0.0], [-1.0, 1.0, 0.0], [0.0, 0.0, 0.0]))
kx2 = np.array(([0.0, 0.0, 0.0], [0.0, 1.0, -1.0], [0.0, 0.0, 0.0]))
dst = np.abs(scipy.ndimage.filters.convolve(img, kx1, mode='reflect')) + np.abs(scipy.ndimage.filters.convolve(img, kx2, mode='reflect'))\
+ np.abs(scipy.ndimage.filters.convolve(img, ky1, mode='reflect')) + np.abs(scipy.ndimage.filters.convolve(img, ky2, mode='reflect'))
return rank.mean(dst//dst.max(), selem=square(window_size))
def wavelet(img):
#http://tonghanghang.org/pdfs/icme04_blur.pdf
c = pywt.wavedec2(img, 'db2', mode='periodization', level=1)
# normalize each coefficient array independently for better visibility
d = np.sqrt((c[1][0]/np.abs(c[1][0]).max())**2 + (c[1][1]/np.abs(c[1][1]).max())**2 + (c[1][2]/np.abs(c[1][2]).max())**2)
return resize(d, (img.shape[0], img.shape[1]))
def wavelet_liebling(img):
#https://www.osapublishing.org/josaa/abstract.cfm?uri=josaa-21-12-2424
c = pywt.wavedec2(img, 'db2', mode='symmetric', level=pywt.dwtn_max_level(img.shape, 'db2'))
c, slices = pywt.coeffs_to_array(c)
c = np.abs(c.flatten())
c /= c.sum()
c = np.sort(c)[::-1]
_sum = 0
for i in range(c.shape[0]):
_sum += c[i]
if _sum > 0.95:
break
#i = i/c.shape[0]
i = float(i)
return np.ones((img.shape[0], img.shape[1]))*i
def LAPV(img):
"""Implements the Variance of Laplacian (LAP4) focus measure
operator. Measures the amount of edges present in the image.
:param img: the image the measure is applied to
:type img: numpy.ndarray
:returns: numpy.float32 -- the degree of focus
"""
return np.std(laplace(img)) ** 2
def SML(img, window_size, threshold):
"""
Sum of modified Laplacian according to
'Depth Map Estimation Using Multi-Focus Imaging'
by Mendapara
"""
# kernels in x- and y -direction for Laplacian
ky = np.array(([0.0, -1.0, 0.0], [0.0, 2.0, 0.0], [0.0, -1.0, 0.0]))
kx = np.array(([0.0, 0.0, 0.0], [-1.0, 2.0, -1.0], [0.0, 0.0, 0.0]))
dst = np.abs(scipy.ndimage.filters.convolve(img, ky, mode='reflect')) + np.abs(scipy.ndimage.filters.convolve(img, kx, mode='reflect'))
# sum up all values that are bigger than threshold in window
dst = np.clip(dst, threshold, dst.max())
# return thresholded image summed up in each window:
return dst
def tenengrad1(img, window_size, threshold):
"""
Tenengrad2b: squared gradient absolute thresholded and
summed up in each window
according to
'Autofocusing Algorithm Selection in Computer Microscopy'
by Sun et Al.
"""
# calculate gradient magnitude:
dst = sobel(img)
dst = np.clip(dst, threshold, dst.max())
# return thresholded image summed up in each window:
return rank.mean(dst, selem=square(window_size))
def get_hpf_image(image=None, size=128, method = 'hpf'):
if method is 'hpf':
kernel = [1, 0, -1]
output = scipy.ndimage.filters.convolve1d(image, kernel, mode='reflect')**2
elif method is 'tenengrad1':
output = tenengrad1(image, 7, 0)
elif method is 'CMSL':
output = CMSL(image, 3)
elif method is 'wavelet':
output = wavelet(image)
elif method is 'SML':
output = 1/SML(image,4,0)
elif method is 'wavelet_liebling':
output = wavelet_liebling(image)
else:
output = image
x = size
y = size
im = output[0:output.shape[0]//size * size, 0:output.shape[1]//size * size]
tile_dataset = []
y_size = 0
i = 0
while x <= im.shape[0]:
x_size = 0
while y <= im.shape[1]:
a = im[x - size:x, y - size:y]
score = a[:].sum().sum()
if method == 'LAPV':
score = 1/LAPV(a)
#elif method == 'wavelet_liebling':
# score = wavelet_liebling(a)
tile_dataset.append(score)
y += size
x_size += 1
i += 1
y = size
y_size += 1
x += size
final = np.reshape(tile_dataset, (x_size, y_size))
#plt.imshow(final)
#plt.figure()
#plt.imshow(output)
return final
def get_synthetic_image(flip = False):
image = io.imread('data/texture.png', as_gray=True)
square_size = np.min(image.shape) // 2
image = random_crop(image, square_size)
#image = image [:square_size, :square_size]
if flip:
image = np.flip(image, axis=0)
return image
def blur_image_stack(image, num_z, min_z_calib = None, max_z_calib = None, z_focus=0, noise_sigma=0.0, input_noise = 0.0, width_coeff = 1.0):
im_size = image.shape[0]
#kernels = np.zeros((im_size, num_z, num_z))
log.info('Generating a blurred stack from {} to {} with {} images and centered at z={}.'.format(min_z_calib, max_z_calib, num_z, z_focus))
kernels = []
z_coeff = 1.7*width_coeff
noise = np.random.normal(0, noise_sigma, num_z)
kernel_size = im_size // 2 + 1
if kernel_size % 2 == 0:
kernel_size += 1
if num_z == 1:
dist = abs(float(max_z_calib-z_focus) * z_coeff)
dist += noise[0]
kernels.append(gaussian_kernel(kernel_size, fwhmx=dist, fwhmy=dist) * (im_size ** 2))
else:
z_list = np.linspace (min_z_calib-z_focus+1, max_z_calib-z_focus, num_z).tolist()
for z_idx, z in enumerate(z_list):
if not isinstance(z, float):
z = z[0]
dist = np.abs(z*z_coeff)
dist += noise[z_idx]
kernels.append(gaussian_kernel(kernel_size, fwhmx=dist, fwhmy=dist) * (im_size ** 2))
#plot_images(kernels)
all_images = []
i = 0
uni = np.random.uniform(input_noise // 2, input_noise * 2, len(kernels))
for kernel in kernels:
c = convolve(image, kernel, padding='reflect')
c = noisy(c, 'gauss', uni[i])
c = c.clip(0.01,0.95)
i +=1
all_images.append(center_crop_pixel(c,image.shape[0]))
#plot_images(all_images)
#plt.show()
return np.asarray(all_images), np.linspace(min_z_calib, max_z_calib, num_z)
def get_focus_map_from_stack(stack=None, downsample=8, num_iterations=1, gain_sigma=0.0):
'''
From a stack of image, get the focus map and return it as a stack
'''
focusmap = test_unet(learn, stack, downsample=downsample)
return focusmap[:,0,:,:]
def get_model():
return MoffatModel(prefix='peak_') + GaussianModel(prefix='gaussian2_') + LinearModel(prefix='constant_')
def create_calibration_curve_stack(image_stack, z_size_high = 200, std_deviation = None, center_method = 'gaussian'):
'''
Creates the calibration curve from the input focus map
:param focus_map_4d: # z * x * y * c focus map
:param min_z:
:param max_z:
:param z_calibration:
:param z_size_high:
:param std_deviation:
:param center_method:
:return:
'''
focus_map_4d = image_stack.get_focus_map()
min_z = image_stack.get_min_z()
max_z = image_stack.get_max_z()
z_calibration = image_stack.get_z_positions()
return create_calibration_curve(focus_map_4d, min_z, max_z, z_calibration, z_size_high, std_deviation, center_method)
def create_calibration_curve(focus_map_4d, min_z, max_z, z_calibration, z_size_high = 200, std_deviation = None, center_method = 'gaussian'):
'''
Creates the calibration curve from the input focus map
:param focus_map_4d: # z * x * y * c focus map
:param min_z:
:param max_z:
:param z_calibration:
:param z_size_high:
:param std_deviation:
:param center_method:
:return:
'''
if len(focus_map_4d.shape) == 3:
focus_map_4d = focus_map_4d[:,:,:,np.newaxis]
focus_map_4d = focus_map_4d.reshape(focus_map_4d.shape[0], focus_map_4d.shape[1]*focus_map_4d.shape[2], focus_map_4d.shape[3])
log.info('Focus map 4D shape :{}'.format(focus_map_4d.shape))
## REMOVE OUTLIERS
std = focus_map_4d.std(axis=1)
mean = focus_map_4d.mean(axis=1)
focus_map_3d = []
# We filter pixels to use for the mean.
for x in range(focus_map_4d.shape[1]):
if np.all(np.abs(focus_map_4d[:,x,:] - mean) < 3*std):
focus_map_3d.append(focus_map_4d[:,x,:])
focus_map_3d = np.asarray(focus_map_3d)
focus_map_3d = focus_map_3d.swapaxes(0,1)
# we average all points in the image
focus_map_2d = np.median(focus_map_3d, axis=(1))
# We average over the features
data = focus_map_2d.mean(axis=1)
#std_deviation = np.std(focus_map_3d, axis=(1)).mean(axis=1)
log.info('Data shape: {}'.format(data.shape))
#inter_func = interp1d(z_calibration, data, kind='linear', bounds_error=False, fill_value=(data[0],data[-1]))
calibration = Calibration()
calibration.z = z_calibration
calibration.focus_map_1d = data
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
if center_method == 'gaussian':
model = get_model()
start_center = z_calibration[np.argmin(data)]
weights = np.ones(data.shape[0]) * np.array([gaussian(z_calibration[x], start_center, (max_z-min_z)/10.0) for x in range(data.shape[0])])# * 1.0/(1 + np.abs(np.arange(0, data.shape[0]) - float(np.argmin(data)))**0.5)
#weights[np.argmin(focus_map_2d[:,0])-3:np.argmin(focus_map_2d[:,0])+4] *= 3
if std_deviation is not None:
weights /= np.clip(scale(std_deviation), 0.01, 1.0)
weights = scale(weights)
#weights = np.ones(data.shape[0])
log.info('Center started at {}'.format(start_center))
params = model.make_params(constant_intercept=data.max(), constant_slope = 0.0, weights = weights,
peak_center=start_center,
peak_sigma=(max_z-min_z)/5.0,
peak_amplitude=(data.min()-data.max()),
gaussian2_sigma=(max_z-min_z),
gaussian2_amplitude = 0.0, gaussian2_center = start_center)
log.info\
('min z : {}, max z = {}'.format(min_z, max_z))
params['peak_center'].min = min_z
params['peak_center'].max = max_z
params['peak_amplitude'].max = 0.0#(data.min()-data.max())
params['gaussian2_amplitude'].max = 0.0
#params['peak_center'].vary = False
params['gaussian2_center'].min = min_z
params['gaussian2_center'].max = max_z
params['peak_sigma'].min = 10.0
params['peak_sigma'].max = max_z-min_z
#params['gaussian2_sigma'].min = 10.0
params['gaussian2_sigma'].max = max_z-min_z
params['constant_slope'].vary = False
params['constant_intercept'].min = 0
params['constant_intercept'].max = data.max()
params['gaussian2_amplitude'].vary = False
#params['peak_amplitude']
#mi = lmfit.minimize(model, params, method='Nelder', reduce_fcn='neglogcauchy')
result = model.fit(data, params, x=z_calibration, method='nelder')
log.info(result.fit_report())
sigma_1 = result.params['peak_amplitude'].value
sigma_2 = result.params['gaussian2_amplitude'].value
calibration.params = result.params
calibration.calibration = result
if abs(sigma_1) > abs(sigma_2):
calibration.peak_center = result.params['peak_center'].value
calibration.peak_beta = result.params['peak_beta'].value
calibration.gaussian2_center = result.params['gaussian2_center'].value
calibration.peak_sigma = result.params['peak_sigma'].value
calibration.gaussian2_sigma = result.params['gaussian2_sigma'].value
calibration.peak_amplitude = result.params['peak_amplitude'].value
calibration.gaussian2_amplitude = result.params['gaussian2_amplitude'].value
else:
log.info('Gaussian 2 is chosen as the peak !!')
exit()
calibration.peak_center = result.params['gaussian2_center'].value
calibration.gaussian2_center = result.params['peak_center'].value
calibration.peak_sigma = result.params['gaussian2_sigma'].value
calibration.gaussian2_sigma = result.params['peak_sigma'].value
calibration.peak_amplitude = result.params['gaussian2_amplitude'].value
calibration.gaussian2_amplitude = result.params['peak_amplitude'].value
calibration.c = result.params['constant_intercept'].value
log.info('Found mu = {}, sigma = {}, c = {}, width= {}'.format(calibration.peak_center, calibration.peak_sigma, calibration.c, calibration.get_width()))
elif center_method == 'polynomial':
yp = np.linspace(min_z, max_z, z_size_high)
fitted_curve = np.polyfit(z_calibration, focus_map_2d[:, 0], 5)
p = np.poly1d(fitted_curve)
calibration.peak_center = yp[np.argmin(p(yp))]
elif center_method == 'minimum':
calibration.peak_center = np.min(focus_map_2d[:,0])
yp = np.linspace(min_z, max_z, z_size_high)
plt.figure()
plt.plot(z_calibration, data, '.')
plt.plot(yp, result.eval(result.params, x=yp))
plt.plot(z_calibration, weights)
plt.plot(yp, calibration.eval(yp-calibration.peak_center))
plt.legend(['original calibration curve', 'gaussian fitted curve', 'weights', 'calibration'])
log.info('Calibration curve shifted by {}'.format(calibration.peak_center))
#plt.show()
return calibration
def detect_4d_acquisition_synthetic(image, min_z, max_z, num_z, noise_sigma, num_iterations, gain_sigma, best_focus=-20):
'''
From an original image, get a few "points" and blur them accordingly
:return:
'''
focus_map = []
rand_z = rand_int(min_z, max_z, num_z)
for z in rand_z:
blurred,_ = blur_image_stack(image, 1, min_z_calib=z, max_z_calib=z, z_focus=best_focus)
focus = get_focus_map_from_stack(blurred, num_iterations=num_iterations, gain_sigma=gain_sigma)[0]
log.info('Z generated : {} , focus found {} '.format(z, focus))
focus_map.append(focus)
focus_map = np.asarray(focus_map)
noise = np.random.normal(0, noise_sigma, focus_map.size)
noise = noise.reshape(focus_map.shape)
focus_map += noise
return np.asarray(focus_map) + noise, rand_z
def plot_focus_acquisition(calibration_curve, two_z, two_aqu, best_shift, values_shift):
plt.figure()
yp = np.linspace(np.min(two_z), np.max(two_z), 1000)
plt.plot(yp, calibration_curve.eval(yp-best_shift))
plt.plot(yp, calibration_curve.eval(yp-best_shift, mode='fir'))
plt.plot(yp, calibration_curve.eval(yp-values_shift, mode='fir'))
plt.scatter(two_z, two_aqu[:, 0, 0])
plt.scatter(two_z, two_aqu[:, -1, -1])
plt.legend(['Calibration curve', 'Calibration acquisition', 'Calibration with found fit', 'Acquisition for pixel (0,0)', 'acquisition for pixel (n,n)'])
plt.xlabel('Physical distance (im)')
plt.ylabel('Focus unit')
plt.title('Acquisition and fit')
def find_nearest(array, value):
'''
FIND NEAREST ARRAY INDEX FROM A VALUE
'''
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx], idx
@jit(nopython=True, cache=True)
def correlate_all_points(x_max, y_max, calib, two_acqu_p, two_acqu_p_clipped, shift_idx, correlated):
'''
Todo: use signal processing methods (i.e. convolution) to make this correlation happening.
'''
for x in range(x_max):
for y in range(y_max):
weights = two_acqu_p_clipped[x, y] # / (1 + two_acqu_p[x, y])
distance_matrix = (calib - two_acqu_p[x, y])**2
distance_to_sum = distance_matrix * weights
distance = np.mean(distance_to_sum)
correlated[shift_idx, x, y] = distance
return correlated
def fit_to_calibration_correlation(search_min_x, search_max_x, points_to_correlate_value, points_to_correlate_z, calibration_curve_y, z_size_correlation = None):
'''
USING THE CALIBRATION CURVE, SLIDE IT AND MEASURE DISTANCE (CORRELATION)
:return:
'''
if z_size_correlation is None:
z_size_correlation = z_size_high
calib_min_x = np.min(calibration_curve_y.z)
calib_max_x = np.max(np.min(calibration_curve_y.z))
yp = np.linspace(search_min_x, search_max_x, z_size_correlation)
yp_with_padding = np.linspace(search_min_x - (calib_max_x - calib_min_x) //2 , search_max_x + (calib_max_x - calib_min_x) //2, z_size_correlation)
two_acqu_p = np.zeros((points_to_correlate_value.shape[1], points_to_correlate_value.shape[2], yp.shape[0]))
two_acqu_p_clipped = np.zeros((points_to_correlate_value.shape[1], points_to_correlate_value.shape[2], yp.shape[0]))
for z_idx in range(points_to_correlate_z.shape[0]):
value_z, index_z = find_nearest(yp_with_padding, points_to_correlate_z[z_idx])
for x in range(points_to_correlate_value.shape[1]):
for y in range(points_to_correlate_value.shape[2]):
two_acqu_p[x,y,index_z] = points_to_correlate_value[z_idx, x, y]
two_acqu_p_clipped[x,y,index_z] = 1.0
## CORRELATE
correlated = np.zeros((yp.shape[0], points_to_correlate_value.shape[1], points_to_correlate_value.shape[2]))
log.info('start correlation')
calib = []
for shift_idx, shift in enumerate(yp):
calib.append(calibration_curve_y.eval(yp_with_padding-shift))
#calib = np.asarray(calib)
#print('shift {}, idx {}'.format(shift, shift_idx))
#dirac = np.zeros(yp.shape[0])
#dirac[shift_idx] = 1.0
#calib = np.asarray([convolve(dirac, augmented_curve, 'same')]).transpose().squeeze()
#plt.figure()
#plt.plot(yp_with_padding, calib)
#plt.show()
#start = time.time()
correlated = correlate_all_points(points_to_correlate_value.shape[1], points_to_correlate_value.shape[2],
calib[-1], two_acqu_p,two_acqu_p_clipped, shift_idx, correlated)
#end = time.time()
#runtime = end - start
#print('runtime {}'.format(runtime))
return correlated, yp_with_padding
def plot_correlation(py, correlated, minimum_arg, minimums):
plt.figure()
plt.plot(py, correlated[:,0,0])
plt.plot(py, correlated[:,-1,-1])
# plt.scatter(py[minimum_arg[0,0]], minimums[0,0])
# plt.scatter(py[minimum_arg[-1,-1]], minimums[-1,-1])
plt.legend(['pixel 0,0', 'pixel n,n', 'minimum 0 0', 'minimum n n'])
plt.title('cross correlation between calibration curve and pixel values')
plt.xlabel('Physical distance (im)')
plt.ylabel('Focus unit')
def plot_final_best_values(final_best_values):
plt.figure()
plt.imshow(final_best_values)
plt.title('best shift values per pixel')
plt.colorbar()
def get_best_focus_from_image_stack(image_stack, calibration_curve, research_boundaries, z_size_correlation = 5000):
################################## START OF DETECTION PART ###########################################
################ CORRELATE WITH THE CALIBRATION CURVE AND DRAW THE CORRLELATION CURVE ################
log.info('Correlate...')
correlation_results, py = fit_to_calibration_correlation(research_boundaries[0], research_boundaries[1], image_stack.get_focus_map(), image_stack.get_z_positions(), calibration_curve, z_size_correlation=z_size_correlation)
######################### GET THE MOST CORRELATED POINT AND SET THE SHIFT ############################
minimum_arg = np.argmin(correlation_results,axis=0)
#bornes_moyenne = np.asarray(research_boundaries).mean()
final_best_values = py[minimum_arg]
log.info('Minimums for px 0,0 {}, px -1,-1 {}'.format(final_best_values[0,0], final_best_values[-1,-1]))
##################################### END OF DETECTION PART ##########################################
return final_best_values, correlation_results, py
def get_gss_points(xL, xR):
log.info('xL: {}'.format(xL))
log.info('xR: {}'.format(xR))
delta = (golden_ratio - 1) * (xR - xL)
a = xR - delta
log.info('a: {}'.format(a))
b = xL + delta
log.info('b: {}'.format(b))
return a, b
def golden_ratio_search_step(gss_data_stack):
# GSS
if gss_data_stack[-4]:
d_xL_xR = (gss_data_stack[1] - gss_data_stack[0]) / (2 * golden_ratio - 3) * (golden_ratio - 1)
if gss_data_stack[3] < gss_data_stack[2]:
gss_data_stack[2] = gss_data_stack[3]
gss_data_stack[0], gss_data_stack[1] = get_gss_points(gss_data_stack[0],
gss_data_stack[0] + d_xL_xR)
gss_data_stack[4] = 1.
else:
xL = gss_data_stack[1]
gss_data_stack[3] = gss_data_stack[2]
gss_data_stack[0], gss_data_stack[1] = get_gss_points(gss_data_stack[1] - d_xL_xR,
gss_data_stack[1])
gss_data_stack[4] = 0.
return gss_data_stack
def get_focus_mean(focus_map):
return focus_map.min()
def synth_data():
'''
Generate synthetic data (triangle wave) and correlate
Unused
'''
# Generate triangle curve
search_min_x = 500
search_max_x = 1100
calib_min_x = -200
calib_max_x = 200
calib_min = -100
calib_max = 100
num_points = 200
z_size_correlation = 500
calibration_curve_x = np.linspace(calib_min, calib_max, num_points)
dy = np.linspace(calib_min_x, calib_max_x, num_points)
calibration_curve_y_data = np.abs(calibration_curve_x)/50.0 + 0.01
calibration_curve_y = interp1d(calibration_curve_x, calibration_curve_y_data, kind='linear', bounds_error=False,
fill_value=(calibration_curve_y_data[0], calibration_curve_y_data[-1]))
ypp = np.linspace(-(search_max_x-search_min_x)+calib_min, (search_max_x-search_min_x)+calib_max, z_size_correlation)
plt.figure()
plt.plot(dy, calibration_curve_y(dy))
plt.title("Calibration curve")
# generate 3 points
#points_to_correlate_z = np.asarray([600, 1000, 800])
#points_to_correlate_value = np.asarray([2.0, 2.0, 0])[:, np.newaxis, np.newaxis]
points_to_correlate_z = np.asarray([850])
points_to_correlate_value = np.asarray([0])[:, np.newaxis, np.newaxis]
yp = np.linspace(search_min_x, search_max_x, z_size_correlation)
yp_with_padding = np.linspace(search_min_x - (calib_max_x - calib_min_x) //2 , search_max_x + (calib_max_x - calib_min_x) //2, z_size_correlation)
two_acqu_p = np.zeros((points_to_correlate_value.shape[1], points_to_correlate_value.shape[2], yp.shape[0]))
two_acqu_p_clipped = np.zeros((points_to_correlate_value.shape[1], points_to_correlate_value.shape[2], yp.shape[0]))
for z_idx in range(points_to_correlate_z.shape[0]):
value_z, index_z = find_nearest(yp_with_padding, points_to_correlate_z[z_idx])
for x in range(points_to_correlate_value.shape[1]):
for y in range(points_to_correlate_value.shape[2]):
two_acqu_p[x,y,index_z] = points_to_correlate_value[z_idx, x, y]
two_acqu_p_clipped[x,y,index_z] = 1.0
# ADD BORDERS TO CURVE
#augmented_curve = calibration_curve_y(ypp).flatten()
augmented_curve = calibration_curve_y(ypp).flatten()
plt.figure()
plt.plot(ypp, augmented_curve)
plt.title('Smooth calibration curve centered at zero and with augmented boundaries')
## CORRELATE
correlated = np.zeros((yp.shape[0], points_to_correlate_value.shape[1], points_to_correlate_value.shape[2]))
for shift_idx, shift in enumerate(yp):
#print('shift {}, idx {}'.format(shift, shift_idx))
#dirac = np.zeros(yp.shape[0])
#dirac[shift_idx] = 1.0
#calib = np.asarray([convolve(dirac, augmented_curve, 'same')]).transpose().squeeze()
calib = calibration_curve_y(yp_with_padding-shift)
#plt.figure()
#plt.plot(yp_with_padding, calib)
#plt.show()
correlated = correlate_all_points(points_to_correlate_value.shape[1], points_to_correlate_value.shape[2],calib,
two_acqu_p,two_acqu_p_clipped, shift_idx, correlated)
minimums = np.min(correlated,axis=0)
minimum_arg = np.argmin(correlated,axis=0)
plot_correlation(yp, correlated, minimum_arg, minimums)
plt.show()
def synth_image(method='cnn', min_points_acquisition = 3, max_points_acquisition = 6):
'''
Comparison between different scoring function and simulation of autofocus.
'''
################################ BOUNDARIES AND PARAMETERS ###########################################
num_calibration_acquisitions = 1 # number of times the calibration curve is computer with a bit of random error
num_iterations = 1 # number of detections with different random light
bornes_research = 600, 1000 # RESEARCH BOUNDARIES (THE POINTS WILL BE GUESSED THERE
bornes_calibration = 750, 850 # CALIBRATION BOUNDARIES (THE TRUE FOCUS IS IN THIS RANGE)
num_z_points_calibration = 111 # NUMBER OF POINTS FOR INITIAL CALIBRATION
noise_sigma=0.05
gain_sigma=0.1
downsample = 40
z_size_high = 1500
range_param = 2.0
criterion = 2.0
absolute_z_limit_min = bornes_research[0]
absolute_z_limit_max = bornes_research[1]
################################## CREATE A STACK OF BLURRY IMAGES ###################################
real_focus = rand_int(bornes_research[0]+150, bornes_research[1]-150)
calibration_focus = rand_int(bornes_calibration[0]+80, bornes_calibration[1]-80)
log.info('Aquisition focus : {}. Calibration focus : {}'.format(real_focus, calibration_focus))
log.info('The real best shift point is {}'.format(real_focus-calibration_focus))
# GET SYNTHETIC BLURRED IMAGES
log.info('Get image stack...')
image = get_synthetic_image()
stack, z_calibration = blur_image_stack(image, num_z_points_calibration, min_z_calib=bornes_calibration[0], max_z_calib = bornes_calibration[1], z_focus=calibration_focus, noise_sigma=0.2, width_coeff=0.9)
################################# START OF CALIBRATION PART ##########################################
##################### DETECT THE FOCUS MAP FOR THE CALIBRATION CURVE #################################
# GET A FEW FOCUS MAPS WITH DIFFERENT GAINS
if method is 'cnn':
log.info('Get focus maps with {} different gains...'.format(num_calibration_acquisitions))
focus_maps = []
for i in range(num_calibration_acquisitions):
log.info('Calibration ...')
rn = np.random.normal(0,gain_sigma,1)
focus_map = get_focus_map_from_stack(stack+rn, num_iterations=1, gain_sigma=0, downsample=downsample)
focus_maps.append(focus_map)
# AVERAGE ALL THE FOCUS MAPS
focus_map_mean = np.asarray(focus_maps).mean(axis=0)
#focus_map_mean = focus_map_mean[:,np.newaxis]
focus_map_std = np.asarray(focus_maps).std(axis=0)
# PLOT
#plt.figure()
#py = np.linspace(bornes_research[0], bornes_research[1],z_size_high)
#plt.plot(z_calibration, focus_map_mean[:, 0, 0],'-o')
#plt.plot(z_calibration, focus_map_mean[:, -1, -1], '-o')
#plt.fill_between(z_calibration, focus_map_mean[:, 0, 0] - focus_map_std[:,0,0], focus_map_mean[:, 0, 0] + focus_map_std[:,0,0], alpha=0.5)
#plt.fill_between(z_calibration, focus_map_mean[:, -1, -1] - focus_map_std[:,-1, -1], focus_map_mean[:, -1, -1] + focus_map_std[:,-1, -1], alpha=0.5)
#plt.legend(['focus map X+Y 0 0', 'focus map X+Y 32 32'])
#plt.xlabel('Physical distance (im)')
#plt.ylabel('Focus unit')
#plt.title('Calibration curve for 2 pixels')
##################### SHIFT AND CREATE INTERPOLATED CALIBRATION CURVE FUNCTION #########################
log.info('Create calibration curve...')
focus_map_mean = focus_map_mean[:,:,:,np.newaxis]
std = focus_map_std.mean(axis=1).mean(axis=1)
calibration_curve_real = create_calibration_curve(focus_map_mean, bornes_research[0], bornes_research[1], z_calibration, z_size_high, std_deviation = std)
################################# END OF CALIBRATION PART ##############################################
################################## START OF DETECTION PART #############################################
#plt.show()
################### GET A FEW POINTS FOR ACQUISITION WITHIN THE RESEARCH BOUNDARIES ####################
log.info('Get {} acquisitions points...'.format(min_points_acquisition))
image = get_synthetic_image(flip=True)
image_stack = None
current_z = random.choice([real_focus-20, real_focus+20])[0]
while i < min_points_acquisition:
i+=1
#two_aqu_real,two_z_real = detect_4d_acquisition_synthetic(image, bornes_research[0], bornes_research[1],num_points_acquisition, noise_sigma, num_iterations, gain_sigma, real_focus)
blurred, _ = blur_image_stack(image, 1, min_z_calib=current_z, max_z_calib=current_z, z_focus=real_focus, width_coeff=0.9, noise_sigma=noise_sigma)
if image_stack is None:
image_stack = ImageStack()
image_stack.set_image_stack(blurred, blurred.shape[0], blurred.shape[1], downsample=downsample, z_positions = current_z)
else:
image_stack.add_image_to_stack(blurred, current_z)
#plot_focus_acquisition(bornes_calibration[0], bornes_calibration[1], focus_map_mean, two_z_real,two_aqu_real, real_focus)
#plt.show()
################ CORRELATE WITH THE CALIBRATION CURVE AND DRAW THE CORRLELATION CURVE ##################
log.info('Correlate...')
correlated, ypp = fit_to_calibration_correlation(bornes_research[0], bornes_research[1], image_stack.get_focus_map(), image_stack.get_z_positions(),
calibration_curve_real, z_size_correlation=z_size_high)
######################### GET THE MOST CORRELATED POINT AND SET THE SHIFT ##############################
minimums = np.min(correlated,axis=0)
minimum_arg = np.argmin(correlated,axis=0)
final_best_values = ypp[minimum_arg]
log.info('Minimums for px 0,0 {}, px -1,-1 {}'.format(final_best_values[0,0], final_best_values[-1,-1]))
#plot_correlation(ypp, correlated, minimum_arg, minimums)
#plot_focus_acquisition(calibration_curve_real, image_stack.get_z_positions(), image_stack.get_focus_map(), real_focus, final_best_values.mean())
#plt.show()
message = 1
# For the first image
if image_stack.get_num_z() == 1:
init_half_range = range_param * calibration_curve_real.get_width()
xL, xR = image_stack.get_min_z() - init_half_range, image_stack.get_min_z() + init_half_range
optimizer_data = [0, 0, 0, 0, 0, False, True, 0, 0, 0]
optimizer_data[0], optimizer_data[1] = get_gss_points(xL=xL, xR=xR)
new_point = optimizer_data[0]
elif image_stack.get_num_z() == 2:
optimizer_data[2] = get_focus_mean(image_stack.get_focus_map()[-1])
new_point = optimizer_data[1]
elif not optimizer_data[5]:
if image_stack.get_num_z() == 3:
optimizer_data[3] = get_focus_mean(image_stack.get_focus_map()[-1])
else:
focus_mean = get_focus_mean(image_stack.get_focus_map()[-1])
if optimizer_data[4] == 1:
optimizer_data[3] = focus_mean
else:
optimizer_data[2] = focus_mean
if not (optimizer_data[-2] == optimizer_data[-1] - 1):
optimizer_data = golden_ratio_search_step(optimizer_data)
if optimizer_data[4] == 1:
new_point = optimizer_data[1]
else:
new_point = optimizer_data[0]
if ((optimizer_data[1] - optimizer_data[0]) / (2 * golden_ratio - 3) < criterion):
log.info('Criterion Satisfied => Best Focus Point Found')
new_point = (optimizer_data[0] + optimizer_data[1]) / 2
optimizer_data[5] = True
elif image_stack.get_num_z() >= min_points_acquisition and not optimizer_data[5]:
log.info('Criterion not Satisfied but too many images yet. Convexity tests...')
score = 0.0
i = 0
for _x in range(image_stack.get_focus_map().shape[1]):
for _y in range(image_stack.get_focus_map().shape[2]):
x = image_stack.get_focus_map()[:, _x, _y]
y = image_stack.get_z_positions().reshape((-1, 1))
x_lin = PolynomialFeatures(degree=1, include_bias=True).fit_transform(y)
x_poly = PolynomialFeatures(degree=2, include_bias=True).fit_transform(y)
model_lin = LinearRegression().fit(x_lin, x)
model_poly = LinearRegression().fit(x_poly, x)
#plt.plot(y[:, 0], model_poly.predict(PolynomialFeatures(degree=2, include_bias=True).fit_transform(y)))
#plt.plot(y[:, 0], model_lin.predict(PolynomialFeatures(degree=1, include_bias=True).fit_transform(y)))
score += model_poly.score(x_poly,y) - model_lin.score(x_lin, y)
i += 1
plt.plot(ypp, model_poly.predict(PolynomialFeatures(degree=2, include_bias=True).fit_transform(ypp.reshape((-1, 1)))))
plt.plot(ypp, model_lin.predict(PolynomialFeatures(degree=1, include_bias=True).fit_transform(ypp.reshape((-1, 1)))))
score /= i
log.info('Final score = {}'.format(score))
if score > 0:
log.info('convex function found')
optimizer_data[5] = True
message = 2
elif image_stack.get_num_z() <= max_points_acquisition:
log.info('not convex function found, add one point')
min_points_acquisition += 1
else:
log.info('not convex function found, but too many images')
optimizer_data[5] = True
message = 2
#fb = final_best_values[
# (final_best_values > bornes_research[0]) & (final_best_values < bornes_research[1])]
#clustering = MeanShift(bandwidth=calibration_curve_real.get_width())
#clustering.fit(fb.reshape(-1, 1))
#print('new point center available : {}'.format(clustering.cluster_centers_))
#new_point = clustering.cluster_centers_[np.argmax(np.bincount(clustering.labels_))]
new_point = np.clip(new_point, absolute_z_limit_min, absolute_z_limit_max)
else:
log.info("Comparing focus values")
best_focus = get_focus_mean(image_stack.get_focus_map()[0])
best_focus_idx = 0
log.info("Index: ", best_focus_idx)
log.info("Focus: {}".format(best_focus))
for i, focus_map in enumerate(image_stack.get_focus_map()[1::]):
temp = get_focus_mean(focus_map)
log.info("Index: ", i + 1)
log.info("Focus: {}".format(temp))
if temp < best_focus:
best_focus = temp
best_focus_idx = i + 1
log.info("Current Best")
log.info("Index: ", best_focus_idx)
log.info("Focus: {}".format(best_focus))
#new_point = image_stack.get_z_positions()[best_focus_idx]
message = 2
current_z = new_point#np.random.normal(image_stack.get_z_positions()[np.argmin(image_stack.get_focus_map().mean(axis=(1,2,3)))],calibration_curve_real.peak_sigma / 5.0)
current_z = float(np.round(current_z*4.0))/4.0
log.info('Current points {}'.format(image_stack.get_z_positions()))
else:
# brent method for other
################### GET A FEW POINTS FOR ACQUISITION WITHIN THE RESEARCH BOUNDARIES ####################
log.info('Get {} acquisitions points...'.format(min_points_acquisition))
image = get_synthetic_image(flip=True)
image_stack = None
def f(current_z):
nonlocal image_stack
current_z = float(np.round(current_z*4.0))/4.0
blurred, _ = blur_image_stack(image, 1, min_z_calib=current_z, max_z_calib=current_z, z_focus=real_focus,
width_coeff=0.9, noise_sigma=noise_sigma)
if image_stack is None:
image_stack = ImageStack()
image_stack.set_image_stack(blurred, blurred.shape[0], blurred.shape[1], 128, current_z)
else:
image_stack.add_image_to_stack(blurred, current_z, update_focus_map=False)
score = 1/get_hpf_image(image_stack.image_stack[-1],size=128, method = method).mean()
log.info('score for z={} is {}'.format(current_z, score))
return score
result = optimize.minimize_scalar(f, method='bounded', bounds = bornes_research, options= {'maxiter':max_points_acquisition})
final_best_values = result.x
log.info('Found : {}'.format(result.x))
log.info('Current points {}'.format(image_stack.get_z_positions()))
#plot_final_best_values(final_best_values)
#plt.figure()
#plt.imshow(image)
#plt.title('Original image')
#image_stack.print_data()
#image_stack.print_focus_map()
if not np.isscalar(final_best_values):
final_best_values = final_best_values[(final_best_values > bornes_research[0]) & (final_best_values < bornes_research[1])]
log.info('Best values {}'.format(final_best_values))
clustering = MeanShift(bandwidth=calibration_curve_real.get_width())
clustering.fit(final_best_values.reshape(-1,1))
log.info('Centers available : {}'.format(clustering.cluster_centers_))
center = clustering.cluster_centers_[np.argmax(np.bincount(clustering.labels_))]
#stds = []
#for i in range(len(clustering.cluster_centers_)):
# point = final_best_values[clustering.labels_ == i]
# stds.append(point.std())
## au lieu de choisir le cluster le plus gros il faudrait peut être regarder le cluster le plus compact = avec la moins de std
else:
center = final_best_values
log.info('Found value = {}, Real value = {}'.format(center, real_focus))
error = (np.abs(real_focus-center)).mean()
log.info('Error : {}'.format(error))
##################################### END OF DETECTION PART #############################################
#plt.show(block=True)
return error
if __name__ == '__main__':
methods = ['cnn', 'hpf', 'tenengrad1']
number_of_images = 100
range_of_acquisitions = np.arange(3, 8, 1).tolist()
for method in methods:
all_errors = []
for idx, max_acqu in enumerate(range_of_acquisitions):
errors = []
for i in range(number_of_images):
errors.append((synth_image(method=method, max_points_acquisition=max_acqu, min_points_acquisition=max_acqu)))
all_errors.append(errors)
for idx, max_acqu in enumerate(range_of_acquisitions):
log.info('For {} acquisitions, the error is {} +- {}'.format(max_acqu, np.mean(all_errors[idx]), np.std(all_errors[idx])))
pickle_save('errors_simulations_{}.pkl'.format(method), all_errors)
|
import datetime
dt_utc = datetime.datetime(2018, 12, 31, 5, 0, 30, 1000,
tzinfo=datetime.timezone.utc)
print(dt_utc.tzinfo)
# UTC
print(type(dt_utc.tzinfo))
# <class 'datetime.timezone'>
print(dt_utc.utcoffset())
# 0:00:00
print(type(dt_utc.utcoffset()))
# <class 'datetime.timedelta'>
dt_jst = datetime.datetime(2018, 12, 31, 5, 0, 30, 1000,
tzinfo=datetime.timezone(datetime.timedelta(hours=9)))
print(dt_jst.tzinfo)
# UTC+09:00
print(type(dt_jst.tzinfo))
# <class 'datetime.timezone'>
print(dt_jst.utcoffset())
# 9:00:00
print(type(dt_jst.utcoffset()))
# <class 'datetime.timedelta'>
|
from System import Random
rand = Random()
def randomBool():
if rand.Next(0,2) < 1:
return "True"
else:
return "False"
def run_script():
print("(" + randomBool() + ", " + randomBool() + ", " + randomBool() + ")" )
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.14.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x12\xe4\
\x00\
\x00\x66\x71\x78\x9c\xcd\x1d\x6b\x73\xdb\x36\xf2\xf3\xe9\x57\xa0\
\xce\x97\x38\x27\xc5\x92\xfc\x66\x2e\x37\x23\xdb\x72\xac\x39\xdb\
\x72\x64\xa5\xb9\x4e\xa7\x93\x21\x25\xc8\x62\x43\x93\x2a\x49\xc5\
\x76\x3b\xfd\xef\xb7\x00\x01\x10\x2f\xbe\x64\x37\x3d\x3b\xb1\x23\
\x12\x58\xec\x0b\x8b\xdd\xc5\x02\xd9\x79\xd3\x42\x6f\xd0\x74\x89\
\xd1\xd5\x68\x8a\x2e\xfd\x19\x0e\x13\x8c\x5e\xc3\x87\x6d\x78\x41\
\xde\x9d\x46\xab\xa7\xd8\xbf\x5b\xa6\xe8\xf5\x6c\x1b\xfd\xab\xdf\
\xed\xed\x76\xe0\xc7\xde\xbf\xd1\xbf\x4e\xa3\xc0\x0f\xd1\xd9\xfa\
\xb7\x35\x4e\xc2\xe8\xe9\xdf\xac\xc7\x0d\x8e\xef\xfd\x24\xf1\xa3\
\x10\xf9\x09\x5a\xe2\x18\x7b\x4f\xe8\x2e\x76\xc3\x14\xcf\xdb\x68\
\x11\x63\x8c\xa2\x05\x9a\x2d\xdd\xf8\x0e\xb7\x51\x1a\x21\x37\x7c\
\x42\x2b\x1c\x27\xd0\x21\xf2\x52\xd7\x0f\xfd\xf0\x0e\xb9\x68\x06\
\x23\x13\x78\xd0\x38\x5d\x02\xa4\x24\x5a\xa4\x0f\x6e\x8c\xa1\xfd\
\x1c\xb9\x49\x12\xcd\x7c\x17\x40\xa2\x79\x34\x5b\xdf\xe3\x30\x75\
\x53\x32\xe4\xc2\x0f\x70\x82\x5e\xa7\x40\xd2\xd6\x2d\xeb\xb1\xb5\
\x4d\xc7\x99\x63\x37\x20\x00\x01\x69\xf2\x9a\xbf\x45\x0f\x7e\xba\
\x8c\xd6\x29\x8a\x71\x92\xc6\xfe\x8c\x80\x69\x43\xa3\x59\xb0\x9e\
\x13\x4c\xf8\xeb\xc0\xbf\xf7\xd9\x20\xa4\x3b\x65\x4a\x42\xe0\x01\
\xe8\x75\x02\xa4\x10\x84\xdb\xe8\x3e\x9a\xfb\x0b\xf2\x1b\x53\xfa\
\x56\x6b\x2f\xf0\x93\x65\x1b\xcd\x7d\x02\xdd\x5b\xa7\xf0\x30\x21\
\x0f\x29\xaf\xdb\x84\x9a\x9d\x28\x46\x09\x0e\x28\x72\x00\xc4\x07\
\x02\x28\xd1\x39\x8e\xb4\x19\x19\x68\x45\x98\x9b\x32\x76\x25\xe4\
\xc9\xc3\x32\xba\x57\xe9\xf1\x29\x56\x8b\x75\x1c\xc2\xc0\x98\x76\
\x9b\x47\xc0\x3e\x3a\xee\xaf\x78\x96\x92\x27\xa4\xc7\x22\x0a\x82\
\xe8\x81\xd0\x38\x8b\xc2\xb9\x4f\x48\x4b\x9c\x16\xd7\x08\xd7\x8b\
\xbe\x61\x4a\x54\x26\xff\x30\x4a\x01\xe7\x0c\x11\x22\x8f\x55\x2e\
\x67\xf6\x2a\x59\xba\x41\x80\x3c\xcc\x98\x07\x43\xfb\x21\x81\x46\
\x9e\x72\xba\x62\x82\x44\x92\x82\x36\xf8\x6e\x80\x56\x51\x4c\x47\
\xd5\xe9\x7d\x9b\x61\x71\x31\x44\xb7\xe3\xf3\xe9\xe7\xc1\x64\x88\
\x46\xb7\xe8\x66\x32\xfe\x71\x74\x36\x3c\x43\x5b\x83\x5b\xf8\xbc\
\xd5\x46\x9f\x47\xd3\x8b\xf1\xa7\x29\x82\x16\x93\xc1\xf5\xf4\x27\
\x34\x3e\x47\x83\xeb\x9f\xd0\x7f\x46\xd7\x67\x6d\x34\xfc\xef\xcd\
\x64\x78\x7b\x8b\xc6\x13\x02\x6d\x74\x75\x73\x39\x1a\xc2\xe3\xd1\
\xf5\xe9\xe5\xa7\xb3\xd1\xf5\x07\x74\x02\x5d\xaf\xc7\xa0\xf8\x23\
\xd0\x78\x80\x3b\x1d\xd3\x31\x19\xb4\xd1\xf0\x96\xc0\xbb\x1a\x4e\
\x4e\x2f\xe0\xe3\xe0\x64\x74\x39\x9a\xfe\xd4\x26\xb0\xce\x47\xd3\
\x6b\x02\xf9\x7c\x3c\x41\x03\x74\x33\x98\x4c\x47\xa7\x9f\x2e\x07\
\x13\x74\xf3\x69\x72\x33\xbe\x1d\x02\x12\x67\x00\xf9\x7a\x74\x7d\
\x3e\x81\x81\x86\x57\xc3\xeb\xe9\x5b\x18\x18\x9e\xa1\xe1\x8f\xf0\
\x01\xdd\x5e\x0c\x2e\x2f\xc9\x68\x04\xdc\xe0\x13\x90\x31\x21\x88\
\xa2\xd3\xf1\xcd\x4f\x93\xd1\x87\x8b\x29\xba\x18\x5f\x9e\x0d\xe1\
\xe1\xc9\x10\xf0\x1b\x9c\x5c\x0e\xb3\xd1\x80\xba\xd3\xcb\xc1\xe8\
\xaa\x8d\xce\x06\x57\x83\x0f\x43\xda\x6b\x0c\x80\x28\x91\xa4\x65\
\x86\x26\xfa\x7c\x31\x24\x4f\xc9\xa8\x03\xf8\x73\x3a\x1d\x8d\xaf\
\x09\x3d\xa7\xe3\xeb\xe9\x04\x3e\xb6\x81\xdc\xc9\x54\xf4\xfe\x3c\
\xba\x1d\xb6\xd1\x60\x32\xba\x25\x9c\x39\x9f\x8c\xaf\x28\xa5\x84\
\xbb\xd0\x69\x4c\xe1\x40\xd7\xeb\x61\x06\x88\x70\x5e\x15\x10\x34\
\x21\x9f\x3f\xdd\x0e\x05\x4c\x74\x36\x1c\x5c\x02\x38\x90\xd6\xb5\
\x2e\xd0\xb7\xf0\x60\xa7\xd5\xfa\x78\x13\x47\x77\x30\xf3\x92\x13\
\x37\x76\x96\x51\xec\xff\x1e\xc1\x54\x0e\xd0\x1f\x2d\x04\x5f\x5e\
\x14\xcf\x71\xec\xa0\xde\xea\x11\x14\x38\xf0\xe7\xe8\xd5\xee\x60\
\xf7\x78\xf7\xf8\x1d\x7d\x9d\xe2\xc7\xb4\xe3\x06\xfe\x5d\xe8\x20\
\x98\x4e\x29\x8e\xb3\xe7\x2b\x77\x4e\xa6\x2e\xed\x97\x3d\xf1\xdc\
\xd9\xd7\xbb\x38\x5a\x87\x73\x07\xbd\x02\xfb\x75\xde\x3b\x7f\xd7\
\xfa\x53\x1d\xdd\x99\x2d\xd7\xe1\x57\x0b\x12\xa2\x6f\x67\x16\x05\
\x11\xa0\xf3\x1b\x18\x3e\x0c\xf6\x2b\x76\xe7\x3e\x0c\xfb\x3a\x59\
\xc5\xd8\x9d\x3b\x31\x5e\x04\x30\xbd\xda\xe8\xb1\xe7\xf4\xda\xe8\
\xa9\xe7\x74\xdf\xee\xef\xed\xc3\xe7\x3e\xfd\xdc\x77\xba\x30\x07\
\xd3\x68\xe5\x74\x51\x7c\xe7\xb9\xaf\xfb\x47\x6d\x74\x70\xd0\x46\
\xbd\x1e\xbc\xee\xef\xef\x6f\xb3\xd7\xbd\xec\xf5\xee\x61\x1b\x1d\
\xc1\xdf\xde\xde\x41\xf6\x7a\x9b\xe0\xdc\xfa\x38\x8d\xa2\x60\xea\
\xaf\x5a\xb5\x78\x64\x62\x0f\xb0\x5f\x1f\x03\x26\xbd\x6e\x9f\x0c\
\x7d\xb8\xfd\x2e\x6b\xc9\x5e\x3f\x2c\xfd\x14\x17\xf1\x31\x5a\xb9\
\x33\x3f\x7d\x72\x50\xbf\xdb\xcd\x90\xf9\xec\xcf\xef\x70\xca\x70\
\x61\x20\x12\x3f\xf8\xc6\x65\x61\x8e\xff\x6a\xb7\xdb\x3f\xef\x9f\
\x67\xaf\xc1\xee\x61\x6a\x72\x3b\x46\xc3\x57\xbb\xf3\x23\x3c\x3b\
\xd6\xdb\x31\x28\x5e\x00\xed\xcd\x21\x02\x7f\xe5\x30\x96\xbc\x93\
\xd8\xd3\xf1\xef\xdd\x3b\xec\x80\xa5\x0a\x19\x6d\x60\xd2\x89\x14\
\x1d\xa4\xd0\xe1\x00\xed\xf7\xa0\x02\x80\x7e\xab\x48\xfa\xaf\x0e\
\x8f\x8e\x0e\x8f\x3d\x85\x69\x0c\x1b\x1d\x50\x86\x35\x9e\x17\xc3\
\xe2\x34\x92\x9e\xa7\x4b\x3c\xfb\x7a\x12\x3d\xb2\xd6\x09\xe1\x35\
\xe1\xfe\xbe\xe0\x3e\xc7\x39\x27\x83\xc3\xf1\x3c\x86\xd0\x3d\xe8\
\xa5\x0f\xdc\x8c\xd2\x34\xba\x07\x39\x91\xbe\x32\x70\x07\x16\x22\
\xd7\x0b\x04\x4e\x82\x28\xfa\x45\x27\x85\x68\xea\xf8\xb0\x2c\xcc\
\xdc\x34\x8a\xdb\xad\x8f\x1f\x00\xf1\x95\xfa\x94\x81\x78\xf0\xe7\
\xe9\x12\x94\xe4\x88\xe3\xb9\xc4\x64\xc5\xe0\x4f\xfe\x2c\xeb\xcb\
\xd0\x0d\xf0\x22\xb5\x21\x9b\xb7\x77\xd6\xe1\x8c\x3c\x05\x27\xa2\
\xe2\x7d\x26\x3e\x3b\xc6\x0a\x94\xd2\xf7\x8a\x12\x30\xed\x59\xc7\
\xc1\x6b\x67\x67\xee\xc6\x5f\xbf\x78\xc1\x1a\xef\xf8\xf7\x77\x3b\
\xb4\xb9\x17\x3d\x7e\x11\x3d\xdf\xae\xc2\xbb\xed\x1a\x74\x38\x0b\
\xf0\x5c\x92\x4a\x6a\x56\xc4\x3e\xd5\xc0\x97\x43\x2b\x6f\xc5\xa0\
\x51\xba\xb8\xe5\xc8\xb5\xa9\x11\x9d\x5f\xe8\x88\x15\xd4\x96\xcb\
\xac\x8e\xc4\xca\xe5\xb5\xa9\xb4\xea\xc9\xaa\x8e\xa4\xea\xc9\xa9\
\x8e\x94\x5e\x4e\x46\x4d\x24\x04\xff\xc2\x29\xf1\xe5\x42\x70\xa7\
\x0b\x68\x54\xda\x08\x69\x55\xb7\xe4\x94\x58\xc9\xd5\x07\xae\x6c\
\x53\xaa\x26\xf6\x81\xeb\x2b\x84\xd2\xbf\x09\xcb\x4a\x45\x6a\x69\
\xb9\x29\x4e\x4d\xa6\x9b\x30\xf2\x15\x9a\xa6\xad\x05\x4d\x54\x8b\
\x77\xad\x6d\xed\x2a\x50\x32\x1b\x6e\x62\x7b\x6d\x68\x4d\xc0\x4b\
\x8b\x4e\xd6\xb0\x1e\x86\x7f\xc9\xca\x2a\xc1\xaf\xb1\xb8\xca\xad\
\x8b\x56\xd2\x7e\x4f\x5f\x49\xb3\x27\xfa\x68\x05\xeb\x5a\x55\x93\
\xba\xc6\x92\xb8\xb7\x91\x75\x5d\xab\x1c\x81\xcf\x89\xca\x86\x65\
\xb6\xce\x22\x94\xfa\xd8\x1a\xd3\xa5\x00\x95\x4a\xb6\x99\x4c\x7b\
\x09\x3c\x6b\xf3\xb4\x16\x47\x1d\xe7\x1e\x87\xeb\x8e\x1b\xc7\xd1\
\x43\xbd\x75\xe4\x19\x38\xd7\xe5\xac\x6e\xdf\xeb\x34\x13\x26\xbe\
\x56\x63\xd5\xca\xd7\xa2\xa1\xc8\xcc\x57\x70\x5f\x99\xd7\x06\xef\
\x6a\x73\xae\xdc\x3a\x3d\xd7\x1c\xea\x33\xc0\x1c\xad\xf5\xf1\x0a\
\xf4\x04\x82\xdd\x92\x80\x44\x0a\xce\xb4\x70\xee\xcf\xbc\xbf\x43\
\xe3\x1b\x03\x8a\x83\xd2\xd8\x0d\xc1\xbc\xc6\x10\x16\x5b\x3a\x14\
\x07\x44\x5a\xcf\x8a\xc0\xd6\x04\xac\xaa\x42\xe3\x98\x58\x89\x35\
\xcd\x08\x53\x35\xfd\x1d\x61\x9e\x59\x80\xcc\x5f\x08\x23\x4d\x70\
\xab\x87\x8a\x25\x62\xce\x06\x63\x41\x37\x87\x06\x64\xce\xc4\xfa\
\x55\xd4\x22\x17\x89\x88\xdc\x61\x95\xca\xff\x8a\xe5\x4e\x89\xba\
\xc4\x53\x13\x55\x59\x26\x68\xe7\x0d\x49\x91\xe2\xf8\x1b\xa6\x0b\
\x28\xc9\x1f\xc6\x79\x5c\xce\x7a\x93\xa4\x8e\x8a\x91\x2e\x73\x33\
\x62\xce\xda\x26\x18\x06\x22\xba\xcf\xd2\x2e\x62\xe9\xb3\xe5\x70\
\x02\xf2\x8e\x28\xbe\x85\x9e\x5e\x57\x23\x33\xce\x00\xed\x6b\xdc\
\xe2\x73\x0d\xa9\x51\xec\x81\x11\xc5\x1e\xb0\x8e\xc0\x00\x98\xf1\
\x1d\xfc\x38\x0b\xd6\x89\xff\x8d\xe4\x39\x39\x88\xf7\x88\x4e\x3a\
\x60\x02\xb0\x2e\x7d\x0a\xa4\x77\x04\xd6\xeb\x04\x63\xf4\x71\x40\
\x19\x45\x9d\x1f\x42\x6e\x3a\xe4\x80\xb6\x09\xd7\x74\xb4\x1c\x65\
\xac\xdc\x20\xa0\xe7\x85\xa4\x35\x47\x11\x52\xdb\x64\x38\x9b\x9d\
\x2b\x1f\xb7\x39\x6d\x4d\x29\x7b\x06\x5d\xc5\x54\x81\x46\xd8\xb5\
\x81\x9a\x62\xe4\x51\xb3\xae\x2b\xc4\x66\xda\xb0\x91\x26\x14\x7b\
\x70\x35\xc0\x37\x60\x55\xf5\xca\x53\x32\x60\x43\x6a\x9a\xd0\xb2\
\x29\x25\x95\x74\x50\x93\x92\xf9\x5b\x48\xb5\xc9\xc2\x9a\x96\x0d\
\x42\x7a\x7f\xa1\xbd\x29\xe4\x6c\x69\x66\x89\xc3\x82\x10\xe2\xa8\
\x4b\xbe\x2b\x73\xaa\x04\xc5\x81\x97\x80\xe1\x9e\xa5\x23\xb0\xbe\
\x3f\xfa\xf8\x81\x41\x72\x03\x70\x7c\x88\xdb\x63\xa6\x5a\x2b\xd7\
\x24\x73\x69\x50\xd6\xd3\x2c\xc7\x4a\x78\xb7\x4e\x24\x8b\xad\xe6\
\x8f\xa5\xe4\x68\xe6\xcc\x22\xb1\x86\xcb\x81\xb1\x65\xc1\xe4\x09\
\x57\x9a\xfc\x76\x3d\x0d\x88\x08\x3b\xf9\x03\xd9\xa5\xe2\xcf\x6e\
\x01\x10\xb6\x0f\x94\x79\x71\x04\xf6\x25\x78\x75\xc3\xb9\x9f\x16\
\x7b\x47\x7c\xeb\x40\x5f\x63\x15\x3e\xd0\xf9\xee\x64\xc8\x57\xba\
\x32\x65\xfc\xb3\xf8\x5f\x3c\x74\x56\x37\x48\x1a\xc2\x65\xeb\x22\
\xd9\x6a\x40\xfd\xae\xb9\xbe\xd6\x75\x06\xf3\x38\x3e\xf5\x53\x30\
\x71\x2c\xba\x5e\x7b\xe0\xaa\xa4\x71\x14\x74\x22\xd0\x74\x32\x29\
\xb2\x01\xdf\xe9\xaf\x57\x51\x42\x77\x1d\xc1\xf9\x8b\x56\xb6\x1d\
\x1c\x63\x45\xe7\xcf\xd9\x92\x6e\xbe\xa0\x34\x65\x8f\xe5\xa9\x70\
\x3b\x83\xf1\x82\x41\x8c\x5d\x45\xf8\x26\x6b\x1a\x7b\x8e\x86\xc3\
\x9b\x0d\xa5\x6e\x67\xb5\x54\x97\xa6\xa7\x39\x62\x0e\xda\x85\xe1\
\xc8\x53\xf1\x0f\x13\x1b\x69\x20\xd0\xc3\x41\xff\xb8\x6f\x97\xf2\
\x5e\x89\x3c\x79\x37\x15\x4f\x67\xe9\x86\xf3\x00\x9b\xf8\x5a\x20\
\x1c\x74\xf7\xcf\xf7\x99\x46\x40\x10\xd5\x61\x5e\x93\x86\xb1\x8a\
\x8c\x36\x1a\x48\xaa\x43\xe3\x27\x63\x3c\xce\x8d\x2e\x63\x04\xfb\
\x6d\xdb\xc8\xa9\x30\xab\xba\xe5\x96\x1d\xbc\xae\xe1\xe0\x89\x27\
\x56\xdd\xa4\x60\x8d\xf7\xba\x6a\x6b\x44\x42\xcb\xbf\x86\x48\x32\
\x23\x8a\x69\x34\x49\x32\xa8\xb6\xd2\x48\xa0\x36\x25\xd1\x22\x47\
\x16\xbe\x57\xb5\x12\x81\x4c\x23\x99\xbe\x00\x99\xb5\x45\x59\x25\
\x4b\x46\x28\xaa\x6a\xd6\x80\xd2\x5c\xb0\x7f\x93\x3c\xd7\xab\xcc\
\xa1\x91\xf0\x57\x09\x9c\x47\x0f\xa1\xd1\xc4\x12\xc9\x8b\x05\xd5\
\x50\x97\x15\xa1\xbe\x08\x3c\xe1\x9f\xd6\xa0\x0c\xb8\xd4\x15\x44\
\x91\x82\xef\x57\x62\xb6\x64\x7b\xc9\x39\x68\x18\x61\xc5\x00\xeb\
\x73\x72\x03\x23\x6c\xb7\xb2\xd5\xc8\xea\x36\x96\x2b\x41\x13\x23\
\x2b\x94\x51\x1b\x4e\x5e\x70\x24\x0b\x54\xd7\xfa\xac\x57\x2f\x6f\
\x7b\x60\xc9\xde\xd8\xf4\xbc\x2c\x75\x44\xc1\x5f\x9e\xbe\x2c\x39\
\xb4\xf1\x02\xc2\x49\xb4\xd8\x56\xb3\x0d\xb5\x37\x0d\x65\xf9\xfd\
\x44\x58\x2e\x43\x9b\x4d\x35\x1b\x35\xb0\xa8\xb9\x38\xff\x36\x29\
\x0a\x9b\xca\xf1\x2f\xb4\xa8\x85\x66\xa1\xd2\x9e\xda\x41\x0b\x6b\
\x5a\x07\xf0\xc7\x29\x7e\x4c\xeb\x47\x3e\xb5\x02\x44\x25\x59\x7b\
\x13\xb8\x7e\x58\x7b\x90\x8a\x51\x1a\x79\xed\x64\xf4\x0b\xec\xc2\
\x6b\x12\x0a\x93\x4c\x0b\xcd\xbb\x94\x24\xc0\xcb\x23\x61\x35\x32\
\xd9\x2b\xc1\xe0\x60\x46\xbe\x99\x4a\xf8\xbf\xe3\x0f\xb1\xbf\xaa\
\x4e\x40\x24\xd0\xf2\x0e\x5a\x5a\x1c\xd7\xbe\xe1\xb8\x8a\x3d\xc8\
\x2b\x60\xef\x67\x3f\x04\x7d\xaa\xce\xec\x6b\x1d\xa4\xcc\x6b\xa3\
\x5d\x01\xb3\x4e\x4c\xe7\x89\xd8\x66\xb5\xca\x69\xee\xd2\xea\x54\
\x35\xa7\x6f\xc3\x4a\xec\xbc\x15\x21\x77\x78\x04\xdf\x07\x0d\x91\
\xab\x08\xf4\x54\xdc\xc5\xd6\x89\xc9\x2c\x21\x8b\xb2\x30\xda\x54\
\xa9\x6a\x04\x1b\xe5\xb4\x5b\x1f\xcf\x63\xf7\x1e\x37\x0f\x6e\xf7\
\xf6\xf6\x32\xd6\xd3\xfe\x3f\x2f\xc8\xcf\xdb\xa5\xbb\xc2\xef\xb7\
\xba\x5b\xbf\x34\x80\xa7\x78\x46\x02\xea\x6d\xea\x92\x44\x9a\x52\
\x24\x58\xa1\x5e\x45\x40\xa5\x0d\x03\x52\x00\x09\x46\xce\x52\x24\
\xaa\x60\x01\x4c\x3f\xda\x3d\xd2\xa5\xa2\x8a\x5c\x1a\x78\x01\x26\
\xbd\xf3\xc0\xc4\xe9\x45\xc1\x5c\x19\xcc\x12\x25\x57\x4f\xe6\x8b\
\x7b\x50\xdd\x14\x00\x78\x6e\x2c\x72\x88\x06\x44\x6e\xa1\xab\xe1\
\xfd\x58\x0e\x4f\x9e\x32\x0d\x90\xa4\xdd\xea\x40\x6d\x80\xa8\x0c\
\x53\xde\x2b\xbe\x59\x27\x4b\xa5\x16\xa3\x79\xc9\x28\xd3\x46\x5e\
\x2b\xa1\xb9\x77\xbc\xc7\xde\x60\xef\x78\x4f\x75\xce\xcd\xbc\x9c\
\x92\x35\xd2\xd3\x96\x6a\xa5\x87\x39\x53\x6b\x64\xa5\x8a\x52\x33\
\x3b\x6f\xb4\x4d\x64\x92\xf2\x67\x2f\xa4\xac\xca\x1e\x01\xc4\x77\
\xd4\x72\xce\xe9\x29\xe2\x17\xe1\x97\x25\x6f\xf8\xfd\xf9\xb5\xf3\
\xc6\x62\x6a\x18\x6b\xf4\x44\xb8\xc6\x13\x9a\xd9\x2d\xac\xd8\xb6\
\xed\xef\x32\x13\x4c\x2b\x97\xa2\x7b\x2f\x92\x2a\x6f\x8b\x8b\x92\
\x55\x48\x15\x7e\xd2\x4b\xe7\x83\xcd\x8c\xb3\xa4\x2c\x87\x62\x7f\
\x53\xe2\x0a\xdb\xc8\x28\x64\x8b\x65\x96\x88\x28\x74\x70\x70\x7c\
\x70\x6c\x40\xa4\x8b\xb1\x66\x79\xfb\x66\xaa\xde\x32\xb5\x65\x46\
\x0b\x5f\x5f\x24\x68\x57\x7e\x98\x3f\x17\x99\x78\xfe\x99\xbb\x8e\
\xfc\xb3\xe2\x4f\xf2\x87\x1c\x16\x75\xf4\x78\xc7\x18\xe3\xfc\x73\
\xf5\x16\x43\x15\xde\x65\x9e\xe3\x41\xff\xe0\xe8\x70\xd7\x32\x53\
\x76\xed\x13\x22\x77\x94\xca\xf4\x6d\xcf\x25\xdf\x2a\x1a\xa8\x68\
\x8f\xa7\x9e\x42\xd6\x76\x0c\x1a\x4d\x86\x32\xb6\x39\xf3\x38\x5a\
\x75\x48\xb8\xc3\xa7\x98\x19\x42\x31\xe6\x94\x87\x99\x72\x0a\x4f\
\x49\xe4\xc8\xf4\x11\xf6\xf2\x69\xa1\xdb\x62\xfa\x8e\xa1\x4a\xd6\
\xac\xbb\xd8\x7d\x32\x1b\x14\x4d\x59\x22\xcf\xcc\x6c\x09\x26\xea\
\xb9\xdb\xcc\x0a\xda\x1a\x69\x1c\x11\xb1\x5f\x75\x65\x4f\x71\x62\
\xa2\x08\xa6\x43\x0e\xb1\xd9\xdf\x88\xf2\x5d\xdb\xcb\x9a\x25\xaa\
\x46\x68\xad\x59\x09\xad\x1c\xc7\xa2\xd5\x47\xf0\x7d\xa0\x6d\xd2\
\x64\x36\x80\x19\x97\x86\x6b\x4d\xf3\x92\x9f\x5a\x11\xad\x65\xf3\
\xcc\x66\x72\x75\x33\x06\x71\xbf\x27\x7b\x39\x15\xfb\x46\x05\x33\
\x42\x3e\xc7\x52\x35\x21\x6c\x58\x50\xb9\x7e\x0f\x3c\x32\x69\x94\
\xa1\x22\x32\x21\xed\xe2\x57\x79\x89\x70\x49\x9b\x68\xb1\xa8\x76\
\x3f\xcb\xd2\x94\x15\xdb\x40\x7f\x96\xe0\x5e\xb7\x70\x56\xcb\xac\
\xd1\xf8\xcc\x00\x9a\x4f\x3a\x0b\xb9\xd2\x8c\x2c\x61\x8a\x3c\xdd\
\x17\x8b\xe7\x98\x91\x0d\x19\xa3\x5b\x95\xcd\x0c\x47\xeb\xe3\xa5\
\xeb\xe1\x40\x5b\x9c\xbb\x62\x22\xcb\x91\x1f\xdf\xfd\x2f\x0f\xfd\
\x6c\x3d\x1c\x67\xe5\x86\xb8\xf8\x60\xa1\x58\xf0\xb4\x89\xaf\x2f\
\xde\x92\x31\x07\xd8\x79\x2d\xe5\x6f\x2b\x58\xe2\x20\x40\x7a\xea\
\xcc\x63\xf7\xe1\xc4\x4d\xb2\xb3\x65\xe4\x55\x5e\x67\x47\x1d\x7c\
\x72\xae\x96\x1d\xc1\xcd\x8e\xd6\x7a\x4f\xe4\x25\x77\xf4\x33\xa8\
\xd6\x52\x85\x6e\x29\x9d\xd9\x21\xc6\x20\x4a\x30\x9b\xf4\xa8\x46\
\x91\x13\x69\x2e\x29\x41\x69\x2d\xa7\x6d\x8c\xda\x27\x6f\x68\x1f\
\xda\xf8\x59\xc3\xb1\x95\xa5\x26\x65\x1d\xd6\xbc\xe6\x90\x20\x9d\
\xe9\xf8\x06\x4d\x07\x27\xb7\xb4\x08\x8b\x63\x90\xba\x9e\x43\x6c\
\xad\x12\xb2\xbe\xf2\x7a\xe4\xbb\xd8\x83\x32\x7d\x6b\xbe\x6a\x15\
\xe8\x6b\xc1\x42\x25\x07\x73\x42\x09\xf5\xcd\x20\xe2\x98\x50\xd7\
\xa5\xc0\xb9\xb3\x78\x2e\x7d\x55\x8f\x05\x99\xce\x0f\xf6\x92\x4d\
\x95\xe0\x3a\x9e\xa6\x2d\x35\x54\x9f\x2b\xb6\xb6\x06\x99\xba\x7f\
\x67\x92\xd9\xad\x24\x53\x0d\x6a\x6c\xce\xca\xfe\xe1\x81\xc7\x35\
\xe4\x64\x3c\x9d\x8e\xaf\xec\x4a\xc2\x16\xc2\xe7\xea\x49\x56\xc5\
\xf2\xf2\x4a\xc2\x7c\xd3\x52\x3d\xb1\xfa\xaf\x56\x55\x61\x72\xfb\
\xce\xda\x62\xe5\x4d\xb1\x5a\x95\x6b\x8b\x95\x58\xab\xc2\xe8\xc4\
\x56\xe9\x8c\x54\xb3\x06\x3a\x73\x39\x3c\x9f\xda\x35\x86\xa0\xf7\
\x6c\x7d\x61\x09\x96\xbf\xc6\xaa\x58\x34\x61\x23\x75\xa1\x48\x7e\
\x67\x65\x89\x45\x72\xbe\x96\x65\xb1\xe8\xc1\x46\xca\xa2\x92\xda\
\xc0\xbc\x10\x5d\xc9\xae\x7f\xb0\x2a\x4b\xe6\x2e\x3c\x57\x5b\x0a\
\x98\xf2\x3d\x16\xa1\x42\x03\xa4\x73\x30\xc3\xf1\xff\x5b\x5b\xea\
\x58\x16\xb3\x4d\x05\xa5\x0d\x94\x85\x81\x41\x59\xaa\x9e\x1d\x21\
\x92\x2a\x93\x1d\x1c\x52\x1f\x9f\x01\x6b\x52\x91\x0c\xa4\xc0\x00\
\xc8\x3a\x02\x25\xa9\xe9\x00\x66\x75\x53\x1d\x02\x78\x94\xd2\x8c\
\x02\x23\xb8\xa9\x43\x4a\x83\xa1\x4a\x4a\xf0\x68\x34\x73\x16\xcd\
\xbe\x66\x31\x47\x49\xb4\xd1\xdd\x3d\xdf\x65\x2a\x4a\x8b\x67\x3d\
\x37\xee\x64\x2e\x2b\x39\xe8\x53\xcf\x59\x17\x1d\xc3\x28\xbe\x77\
\x83\x92\x9e\xa0\x41\x80\x94\x94\xab\xc9\x71\x54\xfd\xeb\x36\x52\
\x5e\x2d\x82\xc8\x4d\x79\x34\x51\x44\x4c\xc1\xc9\x2d\x8b\x11\x28\
\xf3\xf6\x8b\x30\x12\xa9\xe2\x22\xbc\x0a\x66\x8c\xc3\xee\x58\xd9\
\xdf\xa7\x97\xa7\xb0\x1f\xbd\x6e\x05\x03\xc4\x39\xfe\x92\x01\xd5\
\x18\x44\xae\x72\x47\x1d\xf1\xc3\x7a\xf7\x4c\x29\x4a\x3c\x57\x0d\
\x43\x5f\xfa\x49\xca\xfe\x49\xb2\xdc\x27\xa0\x6b\x09\x61\xc2\x20\
\xe5\x99\x71\xf9\xdf\x8e\x35\x2b\x6f\x8d\x6d\x0d\x83\x52\x92\xf2\
\x12\x91\xaf\xb2\x15\x2d\x9e\x9a\x07\xb9\x44\xb2\xdd\x03\xe1\xce\
\x96\xe2\xd8\x05\xa1\x42\x7b\x55\x24\x33\x9b\xf2\x4a\xaa\x22\x69\
\xb0\x80\x28\x40\xba\x49\x27\xf1\xbd\x00\x64\x91\x38\x3f\xb8\xf3\
\x5f\x23\x3f\x4c\x3a\xe4\x08\x9a\xc2\x98\xb2\x8a\xa1\x4d\x46\xfa\
\x2b\x07\xfa\x81\x8c\x34\x5b\xfa\xc1\x1c\x5a\x66\x9f\xbe\xcb\xb8\
\x25\xc3\xd2\xb9\x42\x8e\x4b\x1b\x9d\xb2\x37\x6a\x5f\xb9\x6b\x75\
\xc8\x9e\x01\xfa\x92\x01\x2a\xc3\x2f\x5a\x31\xd8\x76\x24\x6d\xd8\
\x99\x5d\x14\xdc\x6a\x23\x47\xe0\x3c\x8b\x75\x22\xff\xde\x98\x81\
\xca\xa4\xa9\xcb\xc6\x4e\x14\x4a\x2b\xc6\x26\xdc\x2c\x46\xb8\x9c\
\xa7\x0c\xdd\x46\x9c\x35\xb1\xe5\x96\x90\x1d\x26\xd5\xfc\x24\xc9\
\x54\x16\x36\xc8\xd1\xb6\x35\x40\x45\x0b\x47\xb7\x8d\xd8\x1f\x86\
\x8d\x7c\x63\x94\xec\x8c\x9e\xd3\xaf\x96\x05\xd9\x2a\x5c\xab\x50\
\xad\xc6\xd4\xb6\x01\xc7\x10\x62\x05\x38\xd9\xb9\x27\x07\x3a\x00\
\x8c\xc6\x37\xac\xf1\x14\xf0\x91\x6d\x31\x53\xfc\x6c\x5e\x7a\x4b\
\xf6\x82\xbb\xc5\x6e\x80\x8c\x53\x51\x41\x8d\x3c\x06\xdd\x09\x76\
\xe3\x0f\xfc\xaa\x35\x72\xb7\x1a\x11\xca\x53\xf6\xeb\xb1\x9f\x7d\
\x82\x5f\xbd\x76\xe6\xb8\x65\x57\xa8\xa1\xee\xdb\x2e\x5b\xdb\xda\
\xe2\x49\x1f\xbd\x72\x8f\xc8\x37\x7f\xd4\x83\x38\xbd\x4f\xbe\xb7\
\xdf\xd5\xe1\x06\xcf\x90\xef\x19\x19\x72\xad\x6a\xcb\x41\x9d\xbd\
\xfa\x7c\x60\xb2\xd1\xaa\x6b\xea\xe1\x52\x5f\x30\x5d\x23\x44\xa9\
\x12\x8c\x8e\xd0\x86\x62\x31\x05\xd2\x92\xc5\xf4\x1d\x85\xd2\xa5\
\x62\x29\xe7\x41\x1e\x1e\x30\xb2\x21\x22\xaf\xd8\x36\xe3\xc5\x3b\
\x72\x39\x44\x65\xa8\x59\x7d\x5a\x6b\x0e\xb6\x51\x6c\x4e\x4b\xd3\
\x4b\x8b\xc1\x2d\x98\xff\xbc\x8a\x56\xeb\xd5\x55\x34\xc7\xef\xb7\
\x7a\x5b\xbf\xa0\x3f\x68\xc1\x51\x18\x3c\xd1\x23\xfb\xf4\xd0\x25\
\x6d\x77\x43\x9a\x51\xec\xb5\x8a\x1c\x7a\x28\x90\xee\x55\xb8\x5f\
\x31\x7a\x70\xb3\x8e\x64\xc3\x82\x42\xe6\x07\x9b\x49\xcf\x6a\xa2\
\x8b\x4e\x2b\xd8\xf1\xed\x1b\xf8\x8e\x42\x7a\x5f\x68\x21\xb2\xbd\
\x17\x44\x96\xc6\x6f\x52\x84\x28\x0c\xb3\x14\x34\xd2\xab\x56\x6c\
\xa1\x47\x71\x35\x8c\x4a\xae\xb8\x73\x46\x81\x2b\xee\x0f\x23\xb0\
\x0a\x07\x54\x43\x8f\x5a\x15\x3d\x45\x18\x01\xc7\x08\x93\xf2\x0d\
\x5d\xe4\xe1\x20\x7a\x20\xb7\xf6\xae\xc9\x10\x54\x00\xec\xb2\x5c\
\x45\x06\xc0\xdd\x33\x1c\xb8\x4f\x78\x9e\x7d\xbe\x07\xc1\x65\xa9\
\x2a\x1d\x69\xfd\x8e\x85\xa6\xc5\xfa\xd4\x1e\x74\x0e\x89\x7c\xb3\
\xe0\xa3\xd3\x67\xb2\x4e\x96\xfe\x22\x45\x7e\x8a\x5c\xe4\xc1\xcf\
\x6c\xeb\xcc\xa0\x28\x61\x24\x91\x6b\x71\x0d\x9a\xf4\x79\x50\x4c\
\x46\x61\xfc\xdb\x3c\xa7\x78\x50\x27\xa7\x28\x1a\x01\x41\xe4\xf6\
\x89\xcc\xc6\xa1\x7f\x92\xb9\x43\x95\x9b\xdd\xb5\xf1\x9e\x4e\x54\
\x72\xbf\x6e\x34\xa3\x77\x21\x67\x77\xf6\x32\x73\x64\x5c\x68\xa1\
\xdd\x5b\xa3\x29\xa5\x74\x85\xd0\x06\xd2\x2a\x06\x46\xbd\x44\x06\
\x91\xe7\xee\xb9\x38\x7b\x85\xd2\xac\x30\xfc\x5a\xb9\x89\xae\x6d\
\xc5\x27\x91\xab\x8b\x8c\xd4\x5a\x06\xd2\x4c\xaa\xd6\x62\x49\xab\
\x00\xcb\x35\x57\x25\xf1\xf6\x5d\xec\xcf\x09\xcf\xf3\x1a\x31\x76\
\x28\xa0\x60\xf6\xe6\x57\xd3\xca\x03\x81\xa1\xc8\x8f\x30\xd8\x6b\
\xb2\xbb\x06\x76\xea\xc5\x38\xa6\xf3\x99\xbf\xd0\xbc\x4e\x6e\x62\
\x6c\xee\xa6\xa5\x4c\x4e\x71\x37\xf5\xd1\x85\x07\xeb\xce\x52\xff\
\x1b\x2e\x71\x71\x45\x83\x22\x1f\x39\x6b\xb0\x89\x13\xdc\x2a\xe2\
\x9e\x31\x89\xcb\xd3\x57\xd2\xc9\x5d\x6d\x0d\x16\xdc\xb7\x9c\x34\
\x31\x31\x6e\x78\xd4\xa4\xde\x29\x93\x22\x9d\xa0\xd3\xce\x72\x4f\
\x72\x01\xb6\x4e\x7e\xd2\xca\x59\xf8\x71\x92\x2a\xba\x67\x6d\x46\
\x6c\x2a\xc4\x6d\xda\xe1\x03\xb1\x4d\x67\x39\x0d\x53\x0a\xd0\x02\
\xc6\xc8\x15\x5a\x01\x48\xc7\x6e\xcb\x51\x97\x1b\xda\x91\xcf\xf7\
\xd3\xea\x62\x6f\x9e\x59\x95\x21\xe9\x04\xd8\x81\x30\xb7\xa0\xa5\
\x6e\xf0\x48\x27\x45\x2c\x2a\xb4\x3f\xd8\x3f\xde\x3f\x66\x39\x6e\
\x62\x49\xe9\x5d\x33\x74\x15\x8c\xe2\x54\xba\x74\x86\x2c\x6c\xf2\
\xa0\x79\xe9\xce\x86\xd6\x5e\x06\xc6\x0b\xa4\xea\x57\x66\xc9\xd5\
\x3f\xd4\x68\x9c\x46\x71\x88\x63\x6e\xcf\xf9\xdc\xa9\x35\x75\x36\
\xa8\xa6\x16\xcb\x55\xf4\xc8\xe7\xa7\x5a\xde\x53\xb6\xd2\xeb\x27\
\x51\xe8\x05\x18\xae\x57\xb6\x2f\x57\xff\xc8\x4b\xf3\xe2\x11\x5b\
\xb1\xbf\xb1\x3d\x55\xbd\xb1\xcb\x6b\x19\x91\x42\x94\x74\x75\x0e\
\x51\x2f\x1f\x14\xdc\x9f\xf9\xbf\x63\x24\x9e\x43\xa3\x84\xaf\xd8\
\xe4\x08\x8d\xc3\x1a\x35\xa0\xdc\x2c\x27\x66\x67\x87\xd2\x75\x22\
\xae\x78\xab\x17\x16\x5b\x6d\x77\x7e\x40\xea\xe7\x2c\x38\x7c\xbf\
\xb5\xbb\xf5\x0b\x98\x87\xec\x19\x75\x94\xe8\xa3\x62\x7d\x1b\x0c\
\x06\xfc\x08\xe6\x2a\xf0\xd3\x34\x0f\x93\x2d\x78\xd9\x8e\xb6\xe9\
\xdd\xaa\xeb\x07\xf2\x43\x6d\x15\x8e\x90\x05\xb4\x96\x51\xe1\xae\
\xa0\x08\xf4\xf5\x1e\x5a\xa8\xaf\x1c\x6a\xe3\x89\x2d\x75\x4b\xab\
\xc9\x46\x42\xc9\xbd\x73\x2a\x70\x45\xd2\xb6\x8b\xe2\x92\x04\x2c\
\x4b\x5e\x23\x7c\x9f\x3d\xf0\xa2\xc7\xce\x2c\xf6\x29\x05\x74\x13\
\xec\x1f\x05\xfb\x67\xac\x8d\x14\x61\x48\x10\xfc\x70\x41\xb6\xd1\
\x68\xa9\x7b\x09\x10\xa9\x99\x1d\x0e\xf9\xff\x50\xaa\x80\xf0\x36\
\x76\x08\x0f\x6e\x4c\xfe\x13\x14\xb6\x9f\x07\x5f\x76\x20\xac\x59\
\x6e\x4a\x4f\x09\xc7\xb8\x09\x55\xf7\x66\x8b\x5c\x03\xbb\xf3\xca\
\xcf\xef\x29\x00\xb5\x1d\xd2\x06\x10\x07\xf4\x8b\x40\xfc\x1f\xe6\
\xe4\x2c\xaf\
\x00\x00\x01\xe3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x60\x49\x44\
\x41\x54\x58\x85\xed\xd7\x3b\x4e\x42\x61\x10\x86\xe1\x77\x40\x45\
\xbc\x2c\x00\xdd\x82\x98\x90\x00\x46\x05\x57\x21\xa5\x17\x62\x2c\
\xb0\x43\x12\x5d\x83\x0d\xd8\x88\x15\xde\x62\x89\x7b\x30\x39\x60\
\x14\x49\x4e\x08\x7b\x10\x7b\x2f\x08\x08\x63\xa1\x87\x40\xa0\x3c\
\xc4\x44\xcf\xd7\xfd\x53\xfc\xdf\x53\xce\xc0\x7f\x8f\xf4\xbd\x54\
\x25\x92\x79\xd8\x16\x95\x04\x82\x1f\x98\xb4\xa9\xe7\x03\xa5\x0a\
\x92\x35\xf6\x43\x97\x88\xe8\x20\x40\x55\xd6\x8e\x4b\x17\xaa\x6c\
\x02\x0d\x01\x53\xd1\x57\x3b\xda\x05\x99\x51\x08\x00\x1e\x90\x73\
\x23\x19\xda\xb1\x10\x5d\x40\x24\x7d\x1f\x17\xe4\x0c\xb4\x88\x8c\
\xc5\x8c\x64\xb0\x66\x47\xb9\x95\x68\xa6\xec\x43\xdb\x79\x60\x45\
\x95\xad\x42\x6a\xe9\x0a\xc0\xd5\x55\xaa\x24\x80\x86\xfb\xd3\xb5\
\x6e\x77\x39\x80\x91\x0c\xd6\x3a\xad\x56\x0c\x68\x8a\xb0\x67\xcd\
\xbb\x00\x84\x05\x01\xf3\xf6\x20\xfc\x6c\x77\xb9\x95\xe2\x61\xe4\
\x09\x30\x01\xff\x20\x00\xbc\x0a\xef\xa3\x2a\xef\xc9\x1b\x30\x35\
\x0c\xf0\x2b\x71\x00\x0e\xc0\x01\x38\x00\x07\xe0\x00\x1c\x80\x03\
\xe8\x05\xd4\xa5\x67\x53\x19\x61\xa6\x81\xfa\x10\x80\x56\x15\x02\
\xd1\x4c\xd9\x37\xaa\xe6\xe5\xf4\xdd\x3c\x10\x10\xa8\x0c\x02\xd4\
\x75\x0a\x78\xd0\xf6\xcd\xea\x51\x61\x6e\x14\xe5\xe3\xb8\xf3\xc0\
\x44\x47\x34\x6b\xcd\xfb\x0e\x93\x68\xe6\x31\x07\x1a\x07\x9a\x80\
\x09\xfa\x62\x4f\xbd\xcc\xf2\x7d\x98\x4c\x28\xe4\x0a\xc9\xf0\xee\
\xc0\x61\x62\x21\x22\xe9\xd2\xc6\xcf\xde\xbe\x08\x78\xed\x01\x50\
\x17\xa8\xa8\xca\x89\x91\x0a\x5f\xdb\xf4\xe7\x1f\xc9\x17\xa4\x29\
\x70\x23\xfc\x8b\x13\x87\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x00\xa5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce\x7c\x4e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\x9c\x53\x34\xfc\x5d\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x0b\x02\x04\x6d\
\x98\x1b\x69\x00\x00\x00\x29\x49\x44\x41\x54\x08\xd7\x63\x60\xc0\
\x00\x8c\x0c\x0c\xff\xcf\xa3\x08\x18\x32\x32\x30\x20\x0b\x32\x1a\
\x32\x30\x30\x42\x98\x10\x41\x46\x43\x14\x13\x50\xb5\xa3\x01\x00\
\xd6\x10\x07\xd2\x2f\x48\xdf\x4a\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x01\xeb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x68\x49\x44\
\x41\x54\x58\x85\xed\x97\x4d\x4e\xc2\x40\x18\x86\x9f\xaf\x10\x14\
\xd4\x03\xa0\x57\x10\x13\xb6\x9e\x43\x76\xc8\x58\x8c\x26\x70\x1f\
\x31\x31\xa1\x74\x48\x97\x78\x0c\xd7\xc4\x78\x07\x71\xef\x0f\x02\
\x91\xcf\x85\x94\x20\xa0\x2c\x1c\x5c\x68\xdf\xdd\x4c\xdf\xf4\x79\
\xa6\x4d\xd3\x19\xf8\xef\x91\xf9\xb1\x6f\xcc\x09\x50\x03\x0a\xc0\
\xa6\x23\xce\x2b\x70\x27\x22\x8d\x20\x0c\x2d\xa0\xcb\x04\xc4\x37\
\x26\x04\x2a\xc0\x00\xe8\x02\x4f\x8e\x04\xb6\x81\x22\xb0\x01\xb4\
\x5a\xd6\x9e\xc6\x12\x53\x01\xdf\x18\x1f\x08\x04\x6e\xd2\x6f\x6f\
\xa5\xab\x28\xea\x39\x82\x03\x70\x5e\x2e\xe7\x47\x9e\xd7\x41\xe4\
\x50\xc0\x04\xd6\xb6\x01\xbc\x99\x4e\x0d\x18\x8c\x45\x8e\x5c\xc3\
\x01\xae\xa2\xa8\x27\xe9\x74\x09\x18\xaa\x48\x3d\x9e\x9f\x15\xd8\
\x07\xba\x61\x18\x3e\xb8\x86\xc7\x09\x82\xe0\x1e\x91\x2e\xaa\x85\
\x65\x02\x59\x54\x5f\xd6\x05\x9f\x66\x3c\x7e\x06\x72\xf1\x30\xbd\
\xaa\xef\x1b\xa3\xab\x3a\xdf\xa5\x65\xed\xfc\x97\xf6\x29\xde\x77\
\x17\x7f\x23\x89\x40\x22\x90\x08\x24\x02\x89\x40\x22\x90\x08\xac\
\xdc\x0f\xac\xfa\x9f\xff\x34\xb3\x4f\xa0\x8f\x48\xee\xcb\xa6\x33\
\xa2\xb7\x05\xf4\x17\x04\x14\xee\x80\xe2\x79\xb9\x9c\x5f\x17\xbb\
\x52\xa9\xec\xa1\x5a\x04\x6e\x17\x04\x3c\x91\x4b\x60\x63\x94\x4a\
\x5d\x57\xab\xd5\xdd\x75\xc0\x53\x22\x1d\x20\xa3\x22\x8d\x78\x7e\
\xfe\x60\xd2\x04\x7c\x60\x38\xd9\xbd\x3e\x3a\xa1\x8b\xec\x4c\x56\
\x9e\x51\x68\x86\xd6\x9e\x31\x7f\x30\x89\xab\x55\x63\x8e\x55\xa4\
\x8e\xea\x01\x90\x75\x22\xf0\xf1\xce\x6f\x51\xbd\x68\xb5\xdb\x91\
\xa3\x7b\xfe\x91\xbc\x03\x16\x71\x6a\x27\x44\x74\xfe\x4f\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xec\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x69\x49\x44\
\x41\x54\x58\x85\xed\x97\x3b\x4e\xc3\x40\x10\x86\xbf\xb1\xa2\x84\
\xe7\x01\x02\x57\x00\xa4\xdc\x85\x94\x8e\xed\x44\x14\x70\x1f\x42\
\x65\x2d\x1b\x53\x86\x3b\xd0\x50\x47\x51\xee\x40\xe8\x79\x84\x3c\
\xe4\xa1\x70\x8c\x8c\x2c\x25\x05\x36\x05\xf8\xaf\x76\xb5\x23\x7f\
\x9f\xad\x95\x3c\x03\xff\x3d\x92\xdd\xa8\xaa\x58\x63\x7c\x47\xe4\
\x52\xe1\x14\xd8\x29\x88\xf3\x21\x30\x01\xfa\xae\xef\x5b\x11\xd1\
\x9c\x80\xaa\x4a\x64\xcc\xad\x8a\x74\x80\x39\x30\x42\xe4\xb5\x10\
\xbc\xea\x01\xd0\x02\x1a\x88\x98\x8e\xe7\xf5\x52\x89\x5a\x5a\x63\
\x8d\xf1\x25\x81\x3f\x3a\xb5\x5a\xdb\x75\xdd\x69\x21\xf0\x75\xa2\
\x28\x6a\xc6\xab\xd5\x10\xd5\xc0\x5a\xfb\x00\x0c\x00\x9c\xb4\xc0\
\x11\xb9\x04\xe6\x31\x9c\x17\x0d\x07\x70\x5d\x77\xba\x8a\xe3\x36\
\xb0\x10\xd5\xab\x2f\x6e\xba\x50\x38\x01\x46\x41\x10\x3c\x17\x0d\
\x4f\xd3\xeb\xf5\x9e\x80\x11\xc9\xfd\xfa\x2e\x00\xec\x02\xef\x65\
\xc1\x33\x79\x03\xf6\xd2\x4d\x6d\x43\x21\x00\xd6\x18\xdd\x56\xb3\
\x29\x5e\x10\xc8\xa6\x73\x67\xd3\xe1\x6f\xa4\x12\xa8\x04\x2a\x81\
\x4a\xa0\x12\xa8\x04\x2a\x81\xad\xfd\xc0\xb6\xff\xf9\x4f\x93\xfd\
\x02\x33\x32\x9d\x4a\x89\xd9\x5f\xb3\x72\x02\x13\xa0\x15\x45\x51\
\xb3\x2c\xb2\xb5\xf6\x98\xa4\x3d\x1f\xe7\x04\x04\x6e\x80\x46\xbc\
\x5c\xde\x87\x61\x78\x54\x0a\x3c\x8e\x87\x40\x5d\xa0\x9f\xe1\x26\
\x51\x55\x19\x58\x1b\xa2\x1a\x00\x0b\x92\xc1\xe4\xa5\x10\xba\xea\
\x21\xc9\x9b\xd7\x15\x42\xcf\xf7\x2f\xd2\xc1\x24\x3f\x9a\x59\xeb\
\xae\xfb\xf6\x33\x92\x4e\xb9\x88\xcc\x80\x31\xaa\xd7\x5e\xb7\x7b\
\x57\xd0\x33\xff\x48\x3e\x01\xac\x18\x7a\x56\x83\xd7\xe8\x6e\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xa0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x14\x1c\x1f\x24\
\xc6\x09\x17\x00\x00\x00\x24\x49\x44\x41\x54\x08\xd7\x63\x60\x40\
\x05\xff\xcf\xc3\x58\x4c\xc8\x5c\x26\x64\x59\x26\x64\xc5\x70\x0e\
\xa3\x21\x9c\xc3\x68\x88\x61\x1a\x0a\x00\x00\x6d\x84\x09\x75\x37\
\x9e\xd9\x23\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xa6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce\x7c\x4e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x08\x15\x3b\xdc\
\x3b\x0c\x9b\x00\x00\x00\x2a\x49\x44\x41\x54\x08\xd7\x63\x60\xc0\
\x00\x8c\x0c\x0c\x73\x3e\x20\x0b\xa4\x08\x30\x32\x30\x20\x0b\xa6\
\x08\x30\x30\x30\x42\x98\x10\xc1\x14\x01\x14\x13\x50\xb5\xa3\x01\
\x00\xc6\xb9\x07\x90\x5d\x66\x1f\x83\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x00\xbb\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x3f\x00\x00\x00\x07\x08\x06\x00\x00\x00\xbf\x76\x95\x1f\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\
\x09\x35\x2b\x55\xca\x52\x6a\x00\x00\x00\x3b\x49\x44\x41\x54\x38\
\xcb\x63\x60\x18\x05\x23\x13\x30\x12\xa3\xa8\xbe\x7d\x2a\x25\x76\
\xfc\xa7\x97\x3b\xd1\xc1\xaa\xa5\x73\x18\xae\x5f\x39\x8f\x53\x9e\
\x69\x34\xe6\x09\x00\x4d\x1d\xc3\x21\x19\xf3\x0c\x0c\x0c\x78\x63\
\x7e\x14\x8c\x54\x00\x00\x69\x64\x0b\x05\xfd\x6b\x58\xca\x00\x00\
\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xd4\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\x51\x49\x44\
\x41\x54\x58\x85\xed\x96\x41\x4b\x54\x51\x14\xc7\x7f\xe7\x8d\xb8\
\xd0\x26\x30\x77\x69\x84\xe1\xaa\x29\x90\x41\xc7\x92\x5e\xa0\x1b\
\xa1\x8d\x0a\xf5\x19\x5a\x3b\x33\xda\xd8\x6a\x16\x41\x36\x83\xf3\
\xbe\x87\x41\x8d\xad\xc2\x4d\xf6\x14\xf4\x0d\x99\x48\x0e\x11\xe2\
\xaa\x11\xdb\x18\x34\xa8\x0b\xc3\x77\x5a\xcc\x48\x10\xf3\x74\xee\
\xe8\xae\xf9\x6f\xef\x39\xfc\x7f\xf7\xdc\x7b\xcf\x3d\xd0\x54\x53\
\xff\xbb\xc4\x24\x38\x92\x2e\xb6\x76\x86\x0f\x27\x54\x18\x07\x8d\
\x02\x5d\xd5\xa5\x12\xca\x67\x11\xc9\xef\x97\xdb\xf3\xc5\x74\xe4\
\xf8\xd2\x01\x6c\x67\xed\x31\x2a\x19\xa0\x07\xe4\x0b\xaa\x4b\x58\
\x94\x00\x44\xb5\x4b\xb1\x86\x41\xef\x22\xec\x08\x32\xed\x4e\xc6\
\xde\x5c\x0a\xc0\x93\xf9\xf9\xd0\x8f\xdd\x9b\x19\x94\x38\xf0\x5e\
\x95\xd4\x4a\x62\x70\xb3\x56\xec\x90\x53\xe8\x0b\xf9\x3a\x8b\x30\
\x0a\x64\x97\xcb\xb1\x14\x69\xf1\xeb\xdd\x64\x4d\xd9\x8e\x37\x67\
\xe7\xbc\x93\x87\xce\x5a\xb2\xee\x9c\x9c\x37\x65\xe7\xbc\x13\x3b\
\xe7\x65\xce\x8b\x3d\xb3\x02\xd5\xb2\xbf\x16\x24\xe9\xc6\x63\x73\
\xf5\x02\x54\x72\xbd\x69\x94\x57\x08\x13\xcb\x93\x83\x79\x63\x80\
\x48\xba\xd8\x7a\xed\xea\xc1\x57\x41\xbf\xb9\xf1\x7b\x8f\x4c\xcc\
\x4f\xf5\xc0\x29\x2c\x8a\x6a\xcf\xcf\xf2\x95\x48\xd0\xc5\xb4\x82\
\x92\x3b\xc3\x87\x13\xc0\x2d\x5f\x65\xa6\x11\x73\x00\xcb\x97\x67\
\x40\x6f\x47\xf8\x60\x2c\x30\x26\x68\xa1\xf2\xd4\xd8\x0c\xba\x70\
\xf5\xc8\x4d\x0c\x6c\xa8\xb2\x25\x60\x0e\x00\x1a\x15\xf4\x63\xa3\
\xe6\xa7\x12\xf8\x80\xd0\xdf\x00\x00\xd7\x15\x29\x5d\x14\x40\x61\
\x97\xbf\x0d\xcb\x08\x00\xc4\xac\x53\xd6\x34\x10\x11\x20\xb0\x17\
\x9c\x05\xb0\x87\x4f\xf7\x45\x01\x14\xed\x02\xf6\xcc\x01\x94\x4f\
\x0a\xc3\x17\x05\x00\x46\x80\x82\x31\x80\x88\xe4\x45\xb8\x33\xe4\
\x14\xfa\x1a\x75\xb6\x9d\xd5\x28\x70\x1b\xd1\x77\xc6\x00\xfb\xe5\
\xf6\x3c\xc2\x4e\xc8\xd7\xd9\x86\xdc\x55\x05\xb5\x32\xc0\xf6\x51\
\x5b\xcb\x82\x31\x40\x31\x1d\x39\x56\x65\x0a\x61\xd4\xce\x79\x53\
\xa6\xfe\x76\xce\x4b\x01\x23\xa2\x7e\x72\xfd\x69\xff\x6f\x63\x00\
\x80\x95\xf8\xe0\x5b\x20\x0b\xcc\xd6\x0d\xa1\x2a\xf6\xdc\xda\x0c\
\x22\x2f\x44\xc8\xb8\x89\xfb\x81\xe5\x87\x7a\xe6\x81\xb4\x5a\x76\
\xb8\xf0\x12\x61\x1a\x58\x14\xb5\x52\x6e\x62\x60\xa3\x56\xa8\xed\
\xac\x46\xab\x65\x1f\x11\x21\xe3\xfe\x8a\x3d\x3f\xef\x3b\x36\x18\
\x48\xbc\x71\x94\x2c\xd0\xab\xca\x96\x08\x4b\x08\xdf\x01\x50\x6e\
\x50\x79\x31\x11\x60\x5b\xd4\x4f\x9e\xb7\x73\x63\x00\xa8\xfc\x90\
\x1d\xe1\x83\x31\xaa\x23\x99\x20\xdd\x15\x7f\x2d\x89\xca\x3a\x96\
\xe6\x8f\xda\x5a\x16\xce\x3a\xf3\xa6\x9a\x6a\xea\x5f\xfd\x01\xd3\
\x1c\xd9\x7f\x5e\xb9\x33\xcd\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x00\xe0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x51\x00\x00\x00\x3a\x08\x06\x00\x00\x00\xc8\xbc\xb5\xaf\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\
\x0b\x29\x1c\x08\x84\x7e\x56\x00\x00\x00\x60\x49\x44\x41\x54\x78\
\xda\xed\xd9\xb1\x0d\x00\x20\x08\x00\x41\x71\x50\x86\x63\x51\xed\
\x8d\x85\x25\x89\x77\xa5\x15\xf9\x48\x45\x8c\xa6\xaa\x6a\x9d\x6f\
\x99\x19\x1d\x67\x9d\x03\x11\x45\x14\x11\x11\x45\x14\x51\x44\x44\
\x14\x51\x44\x11\x11\x51\x44\x11\x45\x44\x44\x11\x45\x14\x11\x11\
\x45\x14\xf1\x5b\xd1\x75\xb0\xdb\xdd\xd9\x4f\xb4\xce\x88\x28\x22\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x36\xce\x69\x07\x1e\xe9\
\x39\x55\x40\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xcc\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\x49\x49\x44\
\x41\x54\x58\x85\xed\x96\xcd\x6b\x5c\x55\x18\xc6\x7f\xcf\x9d\x99\
\x98\xe9\x64\x16\xd2\x9d\xa9\x92\x0e\xa1\x0b\xd3\xd8\x76\xf0\x1f\
\x68\x11\x14\x2b\x34\x81\xde\x55\xca\xcc\xbd\xa5\x54\x5c\x04\x44\
\x6d\x3a\xd5\x4d\x16\x2e\xe2\x44\x57\xb3\x1b\xea\x78\xa7\x18\xb2\
\x08\xc8\x54\xb0\x88\x1b\xeb\xc6\x85\x68\xf3\x55\x53\xa4\xb4\x55\
\x9a\x52\x70\x25\x99\x30\xa5\x36\xb9\xaf\x8b\xf9\x68\xc1\xcc\x0c\
\x53\xba\x6b\x9e\xdd\x39\xe7\x39\xef\xfb\xbb\xef\x7d\xef\x39\x17\
\x76\xb5\xab\xe7\x5d\xea\xc5\xec\xba\x6e\xdf\x40\x3c\x3e\x2e\x69\
\x0c\x48\x1b\x0c\x02\x60\xb6\x8e\x74\x4d\x50\xa9\xd6\x6a\x95\x85\
\x85\x85\x7f\x9f\x39\x80\x9f\xc9\x9c\x34\x29\x2f\xd8\x0f\xac\xca\
\xec\xaa\x49\xeb\x8d\xe5\x41\xe0\x28\x30\x0a\xdc\x32\x69\x2a\x08\
\x82\x6f\x9e\x09\x80\xeb\xba\x91\x64\x22\x91\x37\xb3\x0f\x04\xdf\
\x13\x89\xe4\x4a\xa5\xd2\xf2\x4e\x5e\xcf\xf3\x0e\x0b\x66\x30\x7b\
\xd3\xcc\x66\x87\x52\xa9\xdc\xf4\xf4\x74\xd8\x29\x7e\xb4\x1b\x40\
\x23\xf9\xfb\xc0\xb9\x52\xb9\xfc\x79\x27\x6f\x10\x04\x4b\xc0\x5b\
\xa7\x3d\xef\x1c\x30\xf3\xe7\xed\xdb\x00\x53\x9d\xf6\x74\xac\x80\
\x9f\xc9\x9c\x44\x5a\x10\x7c\x54\x2a\x97\xbf\x00\x98\x9c\x9c\x7c\
\x61\x73\x63\xe3\x5d\x83\x09\xd5\x4b\x0e\x66\x2b\xe6\x38\x73\xc9\
\x64\xb2\x58\x28\x14\x1e\x02\xf8\xd9\xec\x14\xf0\x99\x49\xe3\x41\
\x10\x54\x7a\x06\x70\x5d\xb7\x6f\x60\xcf\x9e\x1b\xc0\x1f\x5f\x95\
\xcb\x6f\x03\x9c\x99\x98\xd8\xb7\x1d\x8b\x5d\xc1\x6c\x14\x08\x01\
\xa7\x61\x0f\x01\x47\xb0\xe2\x6c\x6d\x1d\xbf\x38\x37\xb7\xde\x80\
\xf8\x01\xd8\xbf\x59\xab\x8d\xb4\x6b\x4c\x67\xa7\x49\x80\x81\x78\
\x7c\x1c\x48\x29\x12\xb9\xd0\x7c\xf2\xed\x58\xec\x8a\x99\x1d\xdc\
\x61\xaf\xd3\xa0\x18\x0d\xa3\xd1\xef\x5c\xd7\xed\x03\x70\xcc\xce\
\x03\xc3\x89\x44\xe2\x44\xbb\x3c\x6d\x01\x24\x8d\x61\xb6\xdc\x6c\
\xb8\x6a\xb5\x7a\x16\xb3\x51\x75\xa8\x9a\x40\x06\xaf\x0d\xc4\xe3\
\x67\x01\xbe\xbc\x74\x69\x11\xb8\x8e\x59\xef\x00\x40\x1a\xe9\xa7\
\xd6\xc8\xec\x14\xf5\x52\x77\x96\x14\x02\xa7\x5a\x43\xb3\x1f\x65\
\xf6\x7a\xcf\x00\x06\x2f\xe9\xf1\x77\x8e\x60\xa4\x0b\x70\x13\xd4\
\x91\x34\xd2\x1c\x86\x70\x0f\x69\xb0\x67\x80\x7a\x2c\xeb\xe9\xa4\
\xdc\x31\x81\xe3\x88\x0e\x95\xeb\x04\x70\x5f\x66\xfb\x5a\x30\xf0\
\x7b\xa7\x40\x2d\x49\x61\x08\xd7\x5b\xfb\xcc\x06\x31\xbb\xff\x34\
\x00\xbf\x9a\x74\xf4\x89\xc0\x5f\x77\xf1\x37\x33\x3a\x32\x9b\x7b\
\x62\xe6\x98\xe0\x97\x9e\x01\x04\x15\xe0\xa0\xe7\x79\x87\x01\x92\
\xc9\x64\x51\xb0\x62\x60\x6d\x73\x83\x21\x2d\x6d\x3e\x78\x50\x04\
\xf0\x7d\x3f\x0d\xbc\x6a\xf0\x6d\xcf\x00\xd5\x5a\xad\x02\xdc\x12\
\xcc\x00\x14\x0a\x85\x87\xce\xd6\xd6\x71\x07\x56\x1b\x96\xc7\xaf\
\xa3\xde\xf9\x48\x5a\xde\x0e\xc3\x77\x1a\x87\x8e\x14\x86\x79\xe0\
\x66\xac\xbf\xff\x72\xbb\x3c\x91\x76\x0b\x6b\x6b\x6b\xdb\xe9\x43\
\x87\xee\x02\x9f\xa4\x8f\x1c\xa9\x2d\x2e\x2d\xfd\x7c\x6d\x75\x75\
\x63\xf8\xc0\x81\x52\x5f\x34\xfa\xb7\x49\x7b\x05\x2f\x02\x8f\x0c\
\x16\x1d\x98\xd9\xac\xd5\xde\x9b\x9f\x9f\xff\x07\xc0\xcf\x66\x2f\
\x00\x67\x04\xa7\x2f\x96\x4a\x37\xda\xe5\xe9\xda\xe5\x5e\x26\x93\
\x97\xf4\xa1\xa4\x5c\x29\x08\x66\xbb\xf9\x01\xf9\xd9\x6c\x0e\xf8\
\x54\xd2\x6c\x29\x08\x72\x9d\xcc\x5d\x6f\xc3\xa1\x54\x2a\xf7\xd7\
\x9d\x3b\x66\x66\x79\x2f\x9b\x7d\x23\x62\x96\x6b\x9c\x70\xff\x93\
\xef\xfb\x69\x85\x61\xde\xe0\x98\xa4\xfc\x2b\x43\x43\x1f\x77\xa5\
\xed\x66\x68\xca\xf3\xbc\x31\x99\xcd\x02\xc3\xd4\x3f\xb3\xab\xc0\
\xdd\xc6\xf2\xcb\xd4\x7f\x48\x46\x80\x9b\x8d\xdb\xb3\x6d\xe3\x3d\
\x15\x00\xd4\x6f\xc8\x44\x22\x71\x42\x61\x38\x86\x94\x06\x9a\xe7\
\xc4\xba\xc1\x6f\x32\xab\xc4\xfa\xfb\x2f\x17\x8b\xc5\x47\xbd\xc4\
\xdd\xd5\xae\x9e\x6f\xfd\x07\xb0\xd0\x3c\xea\x1c\xa0\xa5\x5f\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xef\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x51\x00\x00\x00\x3a\x08\x06\x00\x00\x00\xc8\xbc\xb5\xaf\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\
\x0b\x2a\x32\xff\x7f\x20\x5a\x00\x00\x00\x6f\x49\x44\x41\x54\x78\
\xda\xed\xd0\xb1\x0d\x00\x30\x08\x03\x41\xc8\xa0\x0c\xc7\xa2\x49\
\xcf\x04\x28\xba\x2f\x5d\x59\x97\xb1\xb4\xee\xbe\x73\xab\xaa\xdc\
\xf8\xf5\x84\x20\x42\x84\x28\x88\x10\x21\x42\x14\x44\x88\x10\x21\
\x0a\x22\x44\x88\x10\x05\x11\x22\x44\x88\x82\x08\x11\x22\x44\x41\
\x84\x08\x51\x10\x21\x42\x84\x28\x88\x10\x21\x42\x14\x44\x88\x10\
\x21\x0a\x22\x44\x88\x10\x05\x11\x22\x44\x88\x82\x08\x11\x22\x44\
\x41\x84\x08\x51\x10\x21\x42\xfc\xaa\x07\x12\x55\x04\x74\x56\x9e\
\x9e\x54\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x56\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x19\x10\x14\x2d\x80\x7a\x92\xdf\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x01\xba\x49\x44\x41\x54\x78\xda\xed\x9b\x5b\
\x92\x02\x21\x0c\x45\x4d\x16\xa6\x1b\xd0\xd5\x8e\x1b\xd0\x8d\xe9\
\x9f\x65\x39\xda\x3c\x92\x7b\x13\x68\xf2\x3d\x95\xe6\x1c\x1e\x43\
\x10\x0e\x87\x15\x2b\x56\xec\x39\x84\xf9\xb1\xbf\xe3\xf1\x51\xf3\
\x77\x97\xfb\x5d\xa6\x10\x50\x0b\x1c\x29\x44\xb2\x42\xb3\x64\xc8\
\x28\xe0\x28\x11\x32\x22\xbc\xa7\x04\x19\x11\xdc\x53\x84\x8c\x0e\
\x6f\x95\x20\xa3\x83\x5b\x45\xc8\x4c\xf0\x3d\x12\x64\x36\xf8\x56\
\x09\xba\xb6\xc2\x13\xf6\x7e\xcb\x28\x10\x2b\xfc\xf9\x76\x7b\xe5\
\xb8\x9e\x4e\x14\x51\xef\xdf\x2c\x7d\xb7\x24\x41\xbd\x1b\xf6\xd9\
\x38\x34\xbc\x35\x14\x31\xf4\x51\x12\x7a\xf2\x96\x18\x14\x35\xef\
\xbd\x25\x58\xf2\x6d\xb1\x98\xa7\xc0\xd6\xfc\xf3\x92\xb0\x95\xc7\
\xba\xee\x88\x57\xef\xa3\x1a\xe9\x99\xf7\xdb\x82\xe8\xb6\x08\x22\
\x46\x02\xb2\xe7\x21\xff\x05\x3c\x25\x30\xe0\xbf\x4e\x01\x8f\x4d\
\x8f\xb5\xf1\x48\xf8\xcf\x69\x00\xd9\x0a\x5b\x46\x02\xab\xe7\xe1\
\xb5\x40\x8f\x04\x36\x3c\xbc\x18\x6a\x91\x10\x01\xff\x6f\x0d\x40\
\x15\x3d\x25\x38\x36\xfc\xfb\x3a\x40\x29\x87\x7b\xd7\x04\x46\x71\
\x45\x3b\x0f\x68\x85\x61\x55\x96\xd4\x03\x91\x5a\x28\x16\x3c\x5d\
\x40\x0d\x1c\x13\x3e\x44\x80\x65\x1f\x30\xbc\x80\x5a\x38\xa6\x04\
\xcd\x06\xcf\x96\xa0\xd1\xf0\x8c\xf3\x84\x50\x01\x35\xf0\x91\x12\
\x20\xd5\x60\x6f\xcf\x33\x36\x45\x94\x6a\xb0\x17\x26\x62\x24\x68\
\xa6\x39\x1f\x21\x41\x33\xc1\x47\x48\x70\x3b\x14\x45\xcc\x61\xef\
\x7c\xd0\x43\x51\xc4\x02\xc6\x18\x09\x9a\x15\x9e\x25\xe1\x67\x82\
\xda\x69\xc0\xaa\xe7\xad\xdf\xf9\xf5\x23\x69\xc8\x99\x60\x86\x7c\
\x45\x01\x96\x9b\x57\xa8\xc6\xf6\xe6\xdd\x62\xd1\xec\x3d\x8f\xce\
\x6f\xbe\x20\x91\x3d\x4a\x23\x79\x5d\x91\xa9\x4d\xb6\x6e\x89\x4d\
\x1a\xeb\xa2\x64\x6b\xf2\x5d\x5f\x95\xcd\x2c\x82\x76\x59\x3a\xa3\
\x84\x90\xeb\xf2\x59\x24\x58\x1f\x4d\xac\x27\x33\xde\x0d\xdb\xed\
\xa3\x29\xa4\x8c\xa1\x9e\xcd\x79\x08\x61\x3e\x9c\x5c\xb1\xf7\x78\
\x02\x51\xa0\x5a\x91\x77\xd2\x02\x23\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x03\xa5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\x22\x49\x44\
\x41\x54\x58\x85\xed\x96\x4d\x6c\x54\x55\x14\xc7\x7f\xe7\x0d\xa9\
\x09\xcc\x90\x50\x76\xb6\xc6\x60\x60\xe3\xa3\x86\x34\xf4\xc3\xc6\
\x67\xa4\x1b\xa2\x98\x40\x13\x5d\xc9\x1a\x36\xda\x84\x7e\x59\x5c\
\xcd\xce\x3a\xa4\x33\x09\xcb\xae\x65\x83\x89\x19\x4c\x04\xc3\xc6\
\x3a\x98\xb4\x6f\x22\x62\x4b\x27\xc6\x34\xac\x9c\x06\x37\x94\x74\
\x98\x92\x50\x3a\xef\xef\xe2\x4d\xa7\x35\x99\xe9\xcc\x43\x76\xf4\
\xbf\xba\xe7\xbd\x73\xef\xf9\xdd\x73\xee\x17\xec\x69\x4f\xaf\xba\
\x2c\x8a\xb3\x9b\x2c\xb4\x1d\x4e\xac\x0f\xc9\x38\x07\xea\x06\x3a\
\xaa\xbf\x8a\x88\xdf\xcd\x2c\xfb\xa8\x74\x20\x5b\x48\xba\x1b\x2f\
\x1d\xc0\xcb\xcc\x7f\x82\x2c\x05\x1c\x01\xbb\x8f\x34\x8b\x43\x11\
\xc0\xa4\x0e\xe1\x9c\x02\x75\x61\x3c\x30\x6c\x22\x77\xa9\xf7\xfb\
\x97\x02\xf0\xe9\xf5\xeb\xb1\x7f\x56\xde\x4c\x21\x46\x80\x9f\x24\
\x26\x7f\x1d\xed\x5b\xa8\xe7\x3b\x90\xc9\x9f\x88\x05\x9a\xc2\x38\
\x0d\x5c\xb9\x53\xea\x9d\x24\x69\x41\xab\x93\xac\x2b\x2f\xe3\x4f\
\x7b\x69\xbf\xf2\x7e\x66\x7e\xac\xe5\x3e\x69\x7f\xdc\x4b\xfb\x15\
\x2f\xed\xa7\x9a\xf9\xee\x9a\x81\x6a\xda\xbf\x33\x6c\x2c\x37\xd2\
\x3b\x0d\xf0\xe1\xd5\xe5\xd7\x9e\x3c\x7f\x7c\xd1\xe0\x33\x59\xd0\
\x15\x0e\x62\x8b\x18\xd7\xe2\xb1\xf6\x99\x5b\xc3\xc7\x9e\x55\xc1\
\x27\x10\xdf\x60\x0c\xdd\xb9\xd4\x97\x8d\x0c\xe0\x26\x0b\x6d\xed\
\x07\xcb\x7f\x1a\xfa\x2b\x37\xd2\xff\x11\xc0\x07\x57\xe7\x3b\x2b\
\x9b\xce\x4d\x50\x17\x58\x00\x72\xaa\xc3\x84\x6d\x63\x31\x16\xd3\
\x99\xd9\xe1\xfe\x22\xc0\x7b\x99\xfc\x6d\x93\x8e\xac\x96\xe2\x6e\
\xa3\x85\xe9\x34\x02\x38\x9c\x58\x1f\x02\xde\x0a\x64\x97\xb7\x66\
\x5e\xd9\x74\x6e\x62\x3a\x1e\x7a\x68\x47\xdf\x5a\xbb\xab\xb2\xc9\
\x8f\x6e\xb2\xd0\x06\xe0\x04\xf6\x25\x70\xf4\x50\xa2\x7c\xb6\x51\
\x9c\x86\x00\xe1\x56\x63\x61\x6b\xc1\x95\x2b\xab\x17\x40\x5d\x68\
\x97\xb2\x09\x03\x7b\xa7\xfd\x60\xf9\x02\x40\x6e\xb4\xe7\x9e\xc4\
\x92\x41\x74\x00\x50\xb7\xa1\x5f\x6a\x66\x60\xe7\xc3\x54\xef\x2e\
\x41\x00\x9c\xdf\xb2\x0d\x7e\xc6\x38\xf9\x02\x00\xbc\x2e\xac\x58\
\xb3\x4c\xee\x7f\xd3\x5e\x5f\x06\x0e\xc8\xdd\x01\xb4\xc2\xf6\x81\
\x15\x09\x00\x2c\xda\x49\x59\x37\x80\x99\x11\x66\x25\x32\xc0\x43\
\x02\x3a\x6b\x96\xac\xd0\x6a\x09\x24\x96\xb6\x6d\x75\x00\x0f\xa3\
\x03\x88\xdf\x04\xa7\xb6\x3d\xf5\x6d\xab\x25\x30\xb3\x6b\x3b\x3e\
\x0d\x02\xf9\xc8\x00\x66\x96\x35\xe3\xf8\x40\x26\x7f\x02\x20\x1e\
\x6b\x9f\xc1\x58\xc4\xd0\x2e\xd1\x25\xe3\x8f\xd5\x52\x7c\x06\xc0\
\xcb\xcc\x75\x03\x6f\x63\xfa\x21\x32\xc0\xa3\xd2\x81\x2c\xc6\x83\
\x58\xa0\x29\x80\x5b\xc3\xc7\x9e\xc5\x62\x3a\x03\xdc\xaf\x46\xab\
\x95\xa3\xba\xf2\x11\x2c\x54\x54\xf9\xb8\x90\x74\x37\x90\x0c\x39\
\x29\x60\xf9\xe9\xfe\x7d\x37\x22\x03\x14\x92\xee\x86\xc4\x38\xc6\
\x69\x2f\xed\x8f\x03\xcc\x0e\xf7\x17\x57\xd7\xe2\x3d\xc0\x17\x52\
\x90\x07\xd6\x81\x75\xa4\xbc\x99\x3e\x7f\xbc\x16\xef\x9b\x1b\x19\
\x58\x01\xf0\xd2\xfe\x24\x30\x68\x0a\xc6\xee\x5e\x3c\xf9\xbc\x51\
\x9c\xa6\xf2\xd2\x7e\xaa\x7a\xb1\x8c\xb7\xd4\x41\x32\x6f\x7a\xfe\
\x72\x78\x81\xf9\x53\xcd\xdc\x9b\x6f\xb3\xa4\x1c\x2f\x91\xff\x1a\
\x63\x02\xb8\x6d\x72\x26\x73\xa3\x3d\xf7\xea\xc2\x66\xe6\xba\xab\
\x69\x1f\x34\x23\x95\x5b\xeb\xfd\xaa\xd9\x75\x1c\xe1\x41\xe2\x9f\
\x43\x5c\x01\x8e\x4a\x2c\x99\x31\x8b\xf1\x37\x00\xe2\x0d\xc2\x1d\
\xe3\x02\xcb\xa6\x60\x2c\x37\xfa\x6e\xc3\x85\xf7\x42\x00\x10\xde\
\x90\x87\x12\xe5\xb3\x54\x9f\x64\x86\x75\x86\xf1\x55\x34\xd9\x5d\
\x1c\x65\x9f\xee\xdf\x77\xe3\x7f\xd5\x7c\x4f\x7b\x7a\xe5\xf4\x2f\
\x95\x3f\x47\xac\x6d\xe5\x30\x73\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x15\x75\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5c\x72\xa8\x66\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x15\x17\x49\x44\x41\x54\x78\xda\xec\
\x5d\xd1\x71\xdb\x48\x12\x1d\xb0\xf4\x6f\x64\x60\x38\x02\x53\x5f\
\xf7\x75\x25\x28\x02\x51\x11\x98\x8c\x40\xe2\x25\x20\x31\x81\x13\
\x15\x81\xa8\x08\x4c\x45\x20\xb8\xf6\xeb\xbe\x44\x47\x20\x38\x03\
\x3a\x83\x43\x9b\x8d\x35\x2d\xcb\x2b\x82\x9c\xe9\xe9\x1e\xbc\x57\
\xa5\x62\xdd\xd5\xee\x12\x9c\xe9\xf7\xe6\x75\xcf\xa0\xc7\x39\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcc\x23\xc3\x10\xa4\x8d\x7f\
\xff\xf7\x7f\x45\xf3\x51\xec\xf9\xaf\xaf\xfe\xfa\xcf\xbf\xd6\x18\
\x45\x08\x00\xa0\x8f\xd8\x79\xf3\x31\x6c\xfe\xda\xcf\x77\xfc\xe9\
\xb6\xfe\x7f\x9f\x20\x21\x58\xb5\xc2\xd0\xfc\x7d\xe7\xcf\x35\x84\
\x02\x02\x00\x84\x25\x7b\xc9\xa4\x7e\xcf\x9f\x21\x08\xee\x4b\x20\
\xe8\xef\x1b\x8b\x42\x85\xd9\x83\x00\x00\xdd\xc8\xde\x12\xfc\x64\
\x8b\xec\x96\xd1\x8a\xc2\x97\xe6\xaf\x6a\x44\xa1\xc6\x2c\x43\x00\
\x80\x5f\x73\x74\x5a\xe1\xcf\xf8\x33\x4f\xfc\x27\x93\x53\x20\x67\
\xf0\x00\x41\x80\x00\xf4\x95\xf4\x23\x5e\xe1\xe9\xb3\xe8\xf9\x70\
\x90\x00\x2c\xc9\x21\x34\x62\xb0\x44\x74\x40\x00\x52\x26\xfd\x19\
\x93\x3e\xc7\x88\xfc\xd1\x1d\x90\x08\x3c\x40\x0c\x20\x00\x29\x90\
\x9e\x2c\xfd\x27\x90\x1e\x62\x00\x01\xe8\x57\x4e\x3f\x66\xe2\x17\
\x18\x11\x6f\x69\xc2\x7d\xf3\xb7\x40\xcd\x00\x02\xa0\xd9\xe2\x5f\
\xb8\x4d\x21\x0f\x08\x87\xaa\xf9\xbb\x85\x2b\x80\x00\x68\x20\x7d\
\xce\xab\xfd\x05\x56\xfb\x28\xae\xe0\x96\x5d\x01\x0e\x21\x41\x00\
\xc4\x6d\xfe\x05\x93\x1f\xb9\x7d\xfc\x5a\xc1\x82\x5d\x01\xd2\x03\
\x08\x40\x70\xe2\x5f\x31\xf1\x01\x7d\x20\x21\x98\x41\x08\x20\x00\
\x20\x3e\x84\x00\x42\x00\x01\xf0\x92\xe3\xdf\x80\xf8\xa6\x85\x60\
\x8a\x1a\x01\x04\x60\x1f\xe2\x5f\x72\x9e\x8f\x1c\xdf\x7e\x8d\x80\
\x8a\x85\x73\x08\x01\x04\x60\x17\xf2\x8f\xd9\xee\x17\x18\x8d\xa4\
\x50\x73\x5a\xb0\xc0\x50\x40\x00\x5e\x23\xfe\x90\xed\x7e\x89\xd1\
\x48\x1a\x15\xa7\x05\x2b\x0c\x05\x04\xa0\xb5\xfb\x57\x6c\xf9\x81\
\xfe\x60\xce\x8e\x60\x0d\x01\xe8\x2f\xf9\x69\xb5\xbf\x83\xdd\xef\
\x75\x5a\x30\xe9\x73\xe3\x92\xac\xa7\xc4\xc7\xaa\x0f\xc0\x0d\xf4\
\x51\x00\xb0\xea\x03\x70\x03\x3f\x31\xe8\x19\xf9\xa9\xc8\xf7\x08\
\xf2\x03\xaf\x80\x62\xe2\x91\x63\x04\x0e\x20\x31\xe2\x0f\x79\xd5\
\x1f\x22\xce\x81\x1d\xb0\x62\x37\xb0\x82\x00\xd8\x27\xff\xd8\x6d\
\xb6\xf7\xf2\x1e\x07\xf3\xfa\x85\xd5\xfd\xf6\x87\x7f\xf6\xdd\x0b\
\x91\xcc\x7b\x2c\x9a\x34\x66\xd3\xd4\xcf\x0d\x64\x89\x93\x9f\x88\
\x7f\xd9\x03\x82\x13\xa9\xbf\xf2\x27\xfd\xad\x7d\xaf\x5e\xec\xa2\
\x72\xb6\xca\xf4\xf7\x91\x3f\x53\x17\x08\x7a\xdd\x78\x02\x01\xb0\
\x45\xfc\x9c\x73\xfd\x61\x82\x64\x6f\x5b\x6c\xd7\x5a\x0a\x56\x5c\
\x58\x25\x31\x48\xa5\x95\xf9\x6b\xe3\x7e\x9a\xe2\x2e\x41\x96\x20\
\xf9\x87\x4c\xfe\x3c\x91\xc0\xab\xdc\xcf\x9e\xfa\x6b\x23\x73\x40\
\x63\x5f\xb2\x20\x94\x89\x08\xc2\x9a\x45\x60\x05\x01\x40\xbe\x1f\
\x32\xc8\x88\xf0\xd4\x33\x7f\x99\xca\x8a\xc3\x82\xd0\x76\x45\x2e\
\x8d\xcf\x4f\x52\x75\x81\x2c\x21\xf2\x5f\xbb\xcd\xe1\x1e\x8b\x41\
\xd5\xab\xee\xb7\x09\xb4\x48\xa7\x43\x43\xd7\x10\x00\x3d\x01\x45\
\x5b\x7c\x63\x63\x8f\xbd\x4c\x6d\xa5\x3f\xc0\xb5\xb5\x62\x60\x09\
\x49\x14\x07\x33\xe3\xc1\x63\xad\x61\x47\xfb\x6e\x3a\xda\x5b\xff\
\x3e\x97\x85\xfb\xd9\x60\xd5\x8a\x2b\x58\x38\xe3\x0d\x47\x32\xe3\
\xe4\xb7\x52\xe9\xa7\xc2\xd1\x2d\xde\x45\xef\xe4\x0a\x2e\x0c\xcd\
\xad\xd9\x1d\x82\x0c\xe4\x0f\x8a\x8a\xf3\xc5\x0a\xb4\xde\x6b\x9e\
\x4b\xb7\xa9\xeb\x94\x10\x01\x08\x80\x25\xf2\x83\xf8\xfd\x13\x02\
\x93\x22\x90\x81\xfc\x20\x3e\x84\xa0\xbf\x22\x90\x81\xfc\x5e\x50\
\xbb\x4d\x31\x08\xd7\x55\xc9\x09\x81\xd6\x57\xba\x4d\x89\x40\x06\
\xf2\x1f\x84\x35\xaf\xf8\x73\xd0\x32\x4a\x5c\x5c\xb2\x23\xc8\x21\
\x02\x89\x0a\x80\x62\xf2\xd3\x6a\x3f\x41\xab\x69\x15\xf1\x41\x6e\
\x60\x04\x11\xe8\x8e\x23\x03\x73\x7c\xa3\x8c\xfc\xb5\xeb\x79\x1f\
\x39\x4d\x60\x82\x9d\x2b\x4c\x0b\xda\x1e\x14\xe7\x9a\xc7\x4f\x75\
\x47\x20\x85\x27\xfc\xc8\xea\x1f\x83\xfc\x2a\x85\x80\xe6\xe4\x98\
\xe7\x48\x0b\x46\x1c\xc3\x48\x01\xf6\x20\xff\xb5\xd3\x73\xb6\xff\
\xc7\x2a\x03\xe2\x9b\x49\x0b\xc8\x0d\x7c\x56\x54\x1b\x50\xfb\xee\
\x40\xa6\x74\x02\xc7\x6c\x9f\x90\xeb\x03\xa9\xd4\x06\x26\x1a\x4f\
\x82\x66\x0a\x27\x4e\xd3\xfb\xfc\x53\x54\xf8\xcd\x0b\x01\xed\x14\
\x68\x68\xf4\xa9\xb2\x9f\x40\xa6\x6c\xb2\x88\xf4\xcf\x0a\xc8\x5f\
\xb3\xe5\xc7\xf5\x51\x69\x88\xc0\x90\x53\x82\x42\x81\x08\x7c\xd0\
\xe4\x26\xb5\x15\x01\x35\xac\xfc\x44\xfa\x63\x90\x3f\x1d\xf0\x5c\
\x52\x81\xb0\x8a\xfc\x28\xed\x96\x36\x1c\xc0\x2b\x2a\xad\xa1\xe2\
\x9f\x74\x03\x48\x00\x71\xa6\xd2\x01\x70\xd1\x2f\xf6\xa4\x4c\x41\
\xfe\x5e\xb8\x01\x9a\xe3\xd8\xf3\x3c\xe6\x98\x87\x03\x50\x52\xf4\
\x9b\xa4\xf6\xae\x3e\x37\xd8\x68\x3b\xf4\xbe\xe7\xfc\x77\x97\x3e\
\xff\xed\x3d\x02\xb5\xdb\xdc\x1f\xf0\xa3\x13\x71\x6a\x0d\x4c\x14\
\xf4\x8f\x54\x51\x14\xd4\x20\x00\x4f\x2e\xde\x49\xbf\x64\x3a\xbd\
\x32\xe1\x69\xcb\xab\xed\xc4\x9b\x07\x18\x2b\xca\xa1\xa9\x43\xf1\
\x32\x05\x41\x50\xb0\xf8\x90\xb0\x1e\xf7\x56\x00\x22\x5f\xdc\x61\
\x9e\xfc\x5b\x6d\xb4\xce\x22\x88\x28\x8d\xdb\x83\x33\xde\xde\x4c\
\x81\x08\xcc\x9b\xf1\x9b\xf6\x4e\x00\xf8\xb4\xd6\x23\xc8\xbf\xb7\
\x7d\xfd\xe4\xf4\xbc\x17\x4f\xce\xe0\xde\x6a\x1a\xa5\x40\x04\x4e\
\x63\x9d\x32\xcd\x22\x0d\x38\x0d\xf4\x93\x8b\xb3\x2f\x6b\x92\xfc\
\x3c\x66\x97\x4c\xfc\x42\xe9\x63\x92\x13\xb8\xe7\x55\x6d\x0d\x11\
\xe8\x34\x6e\xc7\x31\xc6\x2c\xd6\x2e\xc0\x15\xc8\xdf\x29\x38\x89\
\xf8\xcf\x11\xc7\x6d\x57\x14\xfc\x8c\xcf\xfc\xcc\x66\xc0\x31\x71\
\xea\x7e\xbd\x48\x55\x72\xdc\xa2\x1c\x7d\xcf\x22\x04\x73\x2c\xeb\
\x6f\x8e\xfc\xca\x3b\xdf\xec\xba\xb2\x99\xea\x94\x14\xd9\x09\x88\
\xa7\x02\x99\xf0\xe0\xc6\xb4\xfe\x66\x4e\xf7\x29\x6e\x72\xb1\x2f\
\x4c\xbd\x50\x15\x51\x04\xc4\x53\x01\xe9\x14\x20\x96\x85\x9d\x18\
\x22\xff\x88\xed\x7e\x2a\xe4\x77\xfc\x5b\x9e\xf9\xb7\x59\x49\x07\
\x62\x54\xe6\xdb\x14\x2a\x3d\x07\xc0\xaa\xfa\x14\x61\x50\xcd\xbc\
\xd1\x67\xf4\x8a\xb3\xae\x58\x38\x23\xb7\xe9\x44\x7c\x2d\x5d\xcc\
\xad\x4a\x3a\x80\x18\xaf\x64\x2e\x2c\x90\x9f\x2c\x3f\x1f\x88\x4a\
\x9d\xfc\x8e\x7f\xe3\x23\xa7\x39\xda\x9d\xc0\x82\x05\x4b\x1a\x62\
\x5c\xc9\x84\x02\x3c\x86\x92\x46\x3f\x65\xa5\x3c\xdf\x8c\x0d\x33\
\x45\xd9\x66\x8e\x68\x7e\xca\x08\x69\x6b\x70\xf1\x19\x08\x0c\x5e\
\xee\xe4\x5b\x7b\xd5\x6e\xb3\xa5\x03\xf2\xeb\x45\xce\x4e\xa0\x34\
\xf0\xac\xe7\x1c\x53\x92\xb8\x92\x70\x49\x12\x29\x00\xed\x07\x17\
\xd2\x13\xa6\x3d\xc7\x64\x57\xf4\xd4\x53\xf2\xbf\x14\x01\xd5\xa9\
\x4f\xdb\x79\x58\xf8\x6b\x0b\x27\x70\x4c\x3e\x0b\x1c\xe4\x31\x3a\
\xfc\xa8\x2f\xfa\x45\x3e\x06\xad\x76\x95\xd5\x7e\x5e\x20\x42\x7b\
\xb1\xe0\x1d\x84\x42\x3b\x00\xe9\xd7\x2d\x97\x06\xc8\xdf\xb6\xa7\
\x02\x7e\xc5\x1d\x8f\x8d\x66\x27\x40\xb1\x25\x29\x52\x79\x68\xc1\
\x19\x04\x0c\x74\xb2\x30\x92\xd6\x8e\x54\x72\x62\x80\xfc\x7d\xcd\
\xf9\x77\x4d\x07\xb4\x5f\xf9\x3e\x11\xae\x07\x8c\x99\x4b\xe6\x1c\
\x80\x74\xe1\x4f\x75\xde\xbf\x75\xba\x0f\xe4\xff\x67\x11\xb8\xd3\
\xbc\x45\xc8\x31\x26\xbd\xd0\x5c\x99\x12\x80\x08\xab\xff\xdc\xc0\
\xa5\x1d\x44\xfe\x21\x38\xfe\x26\xda\x2b\xb5\x34\xa7\x02\x14\x6b\
\x92\xa9\x66\x30\x17\x10\xca\x01\x48\xae\xfe\x64\xc7\x66\xca\xad\
\x3f\x15\x8f\x46\xe0\xf6\xce\x18\x19\x78\x9b\x70\x26\x9c\x0a\x04\
\xe1\x54\x16\x20\xd8\x49\xa9\x9e\x05\x07\xe6\x54\xf3\xea\x1f\xf1\
\x08\x74\x0a\x50\xfd\x02\x57\x84\xdd\x9c\x0f\xbe\xbb\x2f\x85\x70\
\x00\x17\x82\x03\xb2\x34\x62\xfd\x81\x04\xc7\x8e\x63\x4f\x72\x57\
\xc0\x3b\xb7\xbc\x0a\x00\x17\x6f\xa4\x72\x7f\x0b\x55\xff\x4b\xe4\
\xfd\x87\xd5\x03\x0c\xa4\x02\x13\x27\xd7\x44\x64\xec\xbb\x40\xea\
\xdb\x01\x10\xf9\xa5\x2a\xb8\x33\x03\x55\xff\x2b\xc5\x81\xdb\x76\
\xf9\xad\x5c\x9c\x2e\x38\x3b\xe7\xbe\x06\x76\x05\xa4\x6a\x50\x6d\
\x5b\x38\x6f\x38\x32\x6a\xff\x6b\x03\x6f\xf9\xc5\xec\x39\xff\x12\
\x6d\x07\xdf\x8a\xc7\xae\xfe\x83\x68\x15\x6e\x73\x04\x95\x72\xdb\
\x18\x9d\x86\xff\x14\xf4\x37\x9a\xdd\x1e\xc5\x62\x33\x76\x17\x4e\
\xe6\xc8\x3b\xf5\x84\xbc\xf6\xf5\x1f\xf3\x56\x04\xe4\x66\x0f\x52\
\x27\xdc\x54\x1f\x1b\x8d\x50\x08\x7d\x55\x24\xdd\xa6\x41\xe7\xde\
\x6d\xbb\xb7\xb6\x73\x35\x34\x22\xfd\xa0\xb9\xfd\xb8\x70\x41\xd0\
\x5b\xfc\xfb\x74\x00\x9f\x84\x7e\x7c\x65\xa0\xc7\xdc\x55\x64\xe2\
\xcf\x7c\xbc\x4a\xca\x84\xa3\xd5\xe6\x9a\x5f\xd8\x89\xd9\x94\xf4\
\x4a\xb9\x0b\xa8\x9a\x31\xaa\x9c\xcc\x6b\xc3\xe4\x36\xbc\x70\xc0\
\x4b\x0d\x60\xeb\x56\x1a\x91\xdc\x5f\x33\xf3\x23\x1c\x82\x7a\x39\
\x36\xc7\x21\xde\x23\xe7\xff\xe6\x71\xc4\xf1\x0f\x7a\x24\xd6\x58\
\x6c\x96\xbe\xc6\xc2\x57\x11\x50\x2a\xe0\x2b\x03\xdb\x7e\x17\x11\
\xbe\xb3\x66\xe2\x5f\x87\x2c\x8c\xd2\x7f\x9b\xbe\x83\x85\xa0\xee\
\xc9\xd8\x76\x72\x01\x4e\xee\x0a\xf2\x0b\x4d\x02\x20\x65\xff\xb5\
\xaf\xfe\x92\xdb\xa0\x2d\x56\x4e\xf8\xc0\x0c\x7f\xd7\x31\x7f\xb7\
\xb4\x0b\xd0\xfe\x2e\x85\x54\x8c\x8e\x54\x08\x00\x17\xff\x24\xac\
\x99\x85\xd5\x9f\xc6\x42\x32\x40\xa9\xc0\x17\xe5\x46\x19\x76\x03\
\x24\x02\x0b\xc1\xaf\xcd\x9d\xf2\x23\xd5\x82\x2e\xa0\xf0\xd1\x65\
\xd9\x87\x03\x38\xc3\xea\x2f\x3e\x16\xad\x20\x4e\x14\x04\xfc\x44\
\xd0\xf6\x4a\x8f\xb1\xf6\x58\x3d\xd3\x20\x00\x12\x8a\xbc\xd2\xbe\
\xfa\xb3\x35\x95\x5a\x9d\xc8\x7a\x9f\x2b\xfa\xf9\xe7\x82\xe9\xc0\
\x48\x7b\x1a\xc0\xb1\x2a\x31\x1e\x71\x1d\x00\x5b\x10\x89\xc9\xb8\
\x35\xa0\xfa\x52\xe4\xff\x71\x04\x5a\xd3\x29\xc8\xad\x9e\x79\xeb\
\xc4\xc6\x5a\x7b\xcc\xe6\x87\xa6\x01\x87\x3a\x00\x09\x3b\xb6\x36\
\x72\xed\xb4\x58\x2a\xa4\xf1\x0d\x39\x3e\x33\x30\x4b\x6c\xac\x0f\
\x19\x0f\x8a\xd9\x5a\xfb\x58\x1c\x2a\x00\x12\x4a\x6c\x61\xf5\x27\
\x94\x42\x79\xff\x5c\x71\xd0\xcf\x85\xea\x01\xa5\x91\x98\xb8\xd7\
\xee\x86\xf6\x16\x00\x3e\xfa\x28\x61\xff\xd5\xaf\xfe\xfc\xce\xbf\
\xc4\x58\x58\x28\x84\x4a\x3c\x63\x6e\xa0\x77\xa0\x54\xec\x1e\x94\
\x06\x1c\xe2\x00\x24\x6c\xd8\x52\xf3\xf9\xef\x08\xab\x7f\xa5\x7d\
\x20\x04\xb7\xc1\x4a\x03\x63\x41\xb1\x2b\x71\x6c\xfd\x24\x86\x00\
\x48\xd8\xff\x07\x23\x56\xef\x23\x56\x7f\xf1\x67\xfd\x68\x64\x2c\
\x24\x62\x58\xd6\x01\x6c\xbd\x36\x1a\x12\x56\x8a\x7f\x4e\x60\x2c\
\x6a\x0b\xab\xff\x0b\x17\x50\x1b\x1f\x73\x5f\x63\x41\x31\x1c\x7a\
\x77\xa4\xd8\xf7\xdd\x80\x7d\x1d\x80\x84\xfd\x5a\x3a\x3b\x28\x31\
\x16\xe2\xcf\x5c\x62\x2c\x0e\x77\x01\xfb\x0a\x80\x44\xfe\xff\xe0\
\x00\xcb\x63\x81\xf9\x93\x1d\x8b\x13\x49\x01\x08\xad\xbe\x6b\x03\
\xef\xfc\xb7\xe9\x50\xf0\x95\xc8\x92\xfd\x97\x7c\x66\x23\x37\x0b\
\x3b\x8e\xe5\xd0\x69\x40\x29\x22\x00\x9c\x6b\x84\xde\xf2\x32\x17\
\xf0\x01\xb1\xc2\xb3\x27\x81\xd0\x0b\xda\x5e\x5b\xa3\xfb\x38\x00\
\x09\xd5\x85\x7d\xdc\x72\x43\x86\x9f\xbd\xc6\xf4\xfd\x8d\x2f\x02\
\xdf\x21\x22\x00\x27\x02\x3f\xc4\x52\xd1\x6b\x98\x40\xe0\x84\xc2\
\x57\xe3\x63\x6f\x2d\xa6\x4f\x24\x04\x20\xf4\xa0\xaf\x34\xb7\xfb\
\x7e\xcd\x7a\x61\x71\xc3\xd8\xef\x50\x07\x58\x0b\xa4\x44\x9d\xb9\
\x79\xa4\x50\x00\xac\xe5\xff\x15\xc6\x03\xe8\x30\x97\x43\xb3\x02\
\x20\x54\x75\x35\x65\x79\x85\xfb\xc0\x01\xb6\x41\xb5\xad\xa0\x37\
\x1d\x11\x47\xbb\xec\xc0\x74\x4d\x01\x24\x72\x2e\x90\x29\x1d\xbc\
\x0b\x9d\x2e\x1a\x1b\x0f\x89\xe7\xed\xc4\xd1\xae\x02\xf0\x1e\xf9\
\x3f\xa0\x68\xc1\x30\x15\x2b\x42\x75\x80\xf7\x21\x05\x20\x78\x01\
\x10\x9c\x81\x00\xf4\xa0\x0e\x60\xd6\x01\x60\xcb\x0b\xd8\x35\x17\
\x2d\x5c\xe0\x2a\xbd\xc5\x13\x92\x4e\xd9\xd6\xe8\xa0\xc3\x84\xe6\
\x2e\xfc\xb6\x4b\x0d\xea\x24\x83\x11\x62\x25\x8a\xcb\xcd\xbb\x34\
\x4d\xed\xe2\x00\x82\xdb\x39\xa3\x8a\x0e\xbc\x8e\x4f\xc6\x89\x14\
\x2a\xc6\x55\x15\x02\xbb\x08\x40\x8e\x09\x05\x76\x74\x8b\x25\xd2\
\xc5\xa8\xb1\x5e\x58\x74\x00\xb0\xff\xe9\x40\xe2\x76\x64\xcb\x6e\
\xb1\xb6\x28\x00\xa1\xf7\x74\xbf\x82\x37\x49\xac\xfe\x94\xfb\x97\
\xa1\x09\xa4\xb1\x35\xba\xa2\x58\x7f\x17\x42\x00\xe0\x00\x80\xb7\
\xc8\x4f\x69\xe2\x9d\xc0\x57\xdd\x1b\x1f\xaa\xd0\xb1\x1e\xa4\x06\
\xe0\x20\x00\xc0\x1b\x20\xf2\xa3\x55\xbc\xa1\x58\xd7\xe4\x00\x70\
\x02\xd0\xf6\xea\x7f\xe3\x64\x3a\x45\x5b\x69\x15\x9f\x9c\x03\x08\
\x7d\xa8\x03\xbb\x00\x76\xc9\x3f\x76\x81\x5f\x72\xd9\xc2\xad\xf5\
\xf1\x12\x10\xb0\x20\xe7\x00\x00\xe0\x4f\xe4\xbf\x13\xfa\xba\x25\
\xce\x8a\x44\x48\x01\x04\xae\x63\xc6\xea\x6f\x93\xfc\xd7\x82\xe4\
\x27\x4c\x13\x1a\xbe\x55\xe0\xb9\x29\x7c\x3a\x00\xe4\xff\xc0\xcb\
\x00\x23\xe2\x5f\x09\x7e\xe5\x2c\x81\xdc\x5f\x32\xe6\x77\x12\x80\
\x23\x84\x32\xd0\x91\xf8\xe4\x06\x1f\x9d\xec\x9b\x7e\xf4\x9a\xf8\
\x35\x46\x3f\x52\x0a\x00\x00\x4c\x7e\x22\xfd\x93\x30\xf9\x69\xa5\
\x9c\x60\xf4\xd3\x16\x00\xd4\x00\xf4\x93\x7f\xcc\xe4\x2f\x84\xbf\
\x7a\x9a\xe8\x0e\x91\x8a\xdf\xa4\x25\x05\xf8\x0e\x8a\xa9\xb6\xfc\
\xb4\xc7\x3f\x8e\xf0\xf5\x73\x43\x17\xc4\x9a\x8c\x79\xd4\x00\x80\
\xb7\x2c\xff\x9d\x8b\xd3\xd9\x67\xd1\x90\x7f\x8a\x59\x40\x0d\x00\
\x88\x43\x7e\x3a\xd5\xf7\x18\x89\xfc\x64\x8f\x41\x7e\x08\x00\x10\
\x89\xfc\xd7\xcd\xc7\x67\x17\xe7\xe2\x0d\x22\xff\x29\x9a\xc3\x22\
\x05\x00\xe2\xe4\xfb\x64\xf9\x47\x91\x1e\x01\xe4\x87\x00\x00\x91\
\xc8\x5f\xf0\xaa\x3f\x04\xf9\x91\x02\x48\xe3\x3d\xa6\x22\x2a\xf9\
\x63\xec\xef\x6f\xa3\xea\x21\xf9\xdf\x69\x78\x08\x2d\x0e\xa0\x00\
\x0d\xa3\x92\xff\xd1\xc5\xbb\x68\x93\xaa\xfd\x7d\x3c\xe8\xa3\xe2\
\xce\x04\x14\x01\x41\x7e\x90\x1f\x35\x00\x00\xe4\x17\xc7\x24\xe1\
\x43\x3e\xc9\xd5\x00\xea\xc0\xcf\x91\x63\x2a\x7a\x43\xfe\x35\xc8\
\x2f\x12\xf3\xb5\x37\x01\x10\x78\x0d\x13\x77\xc8\xc9\x91\x3f\x8f\
\x4c\xfe\x53\x90\x3f\x7c\xcc\xef\xca\x59\xd4\x00\xfa\x87\x58\xe4\
\x6f\xb7\xf9\xf0\xe2\x97\xc1\x14\xa0\x55\xef\xd0\xb6\x14\x08\x3b\
\xc6\x37\x2e\xde\xd1\x5e\x90\xff\xe7\x3c\x14\x02\x4e\xcb\xbb\x00\
\x04\xbf\xd4\x10\xa1\x11\x34\xe8\xe8\x74\xdf\x65\x84\xaf\xae\x1c\
\x0e\xf8\xbc\x44\x68\x01\xd8\x99\xab\x47\x3d\x1a\x94\xbe\xe7\xfd\
\x37\x11\xbe\x1a\xdb\x7c\xca\x63\x5d\x93\x03\x80\x00\x84\xc3\x55\
\x84\xf1\x05\xf9\x0d\x38\x80\x2e\x02\x10\xba\x81\xc1\x47\xc4\x45\
\xb0\x7c\xf3\x12\xe4\x57\x85\xd0\xb1\xfe\x3d\x84\x00\xc0\x01\xd8\
\x5d\xfd\x41\xfe\x7e\x39\x80\x3a\x84\x00\x84\x2e\xe2\x60\x17\x20\
\xcc\xea\x3f\x16\xfc\xca\x15\xc8\xaf\x22\xd6\x83\x08\xc0\x4a\x20\
\x60\x4b\xc4\x86\xd9\xd5\xff\xc7\x56\x1f\x86\xfc\xcd\x18\x1f\x0a\
\xcd\x85\x5f\x01\xe0\x6d\x1c\x15\x97\x19\x00\x3b\x05\x1a\x55\xfe\
\xa5\x1a\x7b\xb4\xc7\x7b\xb1\xd5\x17\x7f\xf5\x5f\x77\x99\x87\xae\
\x27\x01\x43\xbb\x80\x13\xc4\x87\x37\x10\xf9\xa5\xce\x56\x4c\x71\
\xc8\x67\x67\x84\x2e\x00\x76\x9a\x07\x6d\x02\x80\x3a\x80\x3f\x9c\
\x09\x7d\xcf\x12\x67\xfb\x3b\xa1\xb4\x2c\x00\xdf\x42\x0b\x80\xc0\
\x45\xa4\xb0\xff\x9e\xad\x3f\x46\xbc\xd3\xbc\x84\x5e\xe4\xbe\x85\
\x14\x00\x09\x9b\x57\x22\x54\xbc\xd8\x7f\x29\xeb\x8f\xbc\x5f\x97\
\xc3\x0d\xe7\x00\x84\xee\x66\x47\x1d\xc0\xc6\x18\xd6\xb0\xfe\xfa\
\xd2\xb2\xae\x1c\xdd\xe7\x75\xe0\xd0\x2e\x00\x0e\xc0\xc6\x4a\x33\
\xc3\x30\xdb\xce\xff\xb5\x0a\x00\xea\x00\xfa\x05\x00\xab\xbf\xce\
\xfc\x5f\x44\x00\xbe\x24\x94\xc3\xa6\x18\x68\x12\x0e\x6a\x89\x91\
\x56\x19\xd3\x5f\x24\x04\x40\xa2\x10\x78\x86\x78\x51\x6d\xff\xef\
\x31\xcc\x9d\x21\x51\x97\x09\xef\x00\xf8\xc0\x47\xe8\xca\x2f\xea\
\x00\xfb\x23\x78\xb3\x49\x1c\xfa\x51\xe9\x00\xd6\xfb\xcc\xcb\xbe\
\x3d\x01\xab\xd0\x41\xcc\x1d\x6c\x80\xee\x08\x7d\xd2\xac\xc2\x10\
\x77\x4e\xcb\x24\x4e\x65\xee\x35\x2f\xfb\x0a\xc0\x03\xd2\x80\xde\
\x3a\x80\xaf\x18\x62\x95\xb1\xfc\x20\x29\x00\x12\xab\xc0\x08\xbb\
\x01\x2a\x01\xfb\xaf\xcf\xfe\xcb\x3a\x00\xee\x39\x5e\x0b\xac\x64\
\x48\x03\xba\xa3\x08\x5d\x03\xc0\x10\x77\xb2\xff\x63\x27\x53\x97\
\xa9\xc5\x04\x80\x21\xb1\x15\x84\x34\x40\x99\x00\x08\x5c\x12\x03\
\xfb\x2f\xc8\xc5\x43\x04\x40\xe4\x3c\x80\x40\x0f\x75\x00\x08\xb5\
\xfa\x17\x4e\xe9\xfe\x7f\x8b\xbd\xdb\x82\x37\x2b\xc1\xb2\xf9\x81\
\x6b\x01\x7b\x43\x16\xea\x1a\xe1\xb4\x33\x70\x44\x57\x0f\xc6\x02\
\xdf\x41\xdb\x7f\x4b\x71\x01\xd8\xb2\x1e\xa1\x7f\xe4\x05\x04\xa0\
\x93\x30\x63\xac\xf4\xe0\x93\x66\xfb\x7f\x68\x0a\x40\x90\xd8\x0e\
\xcc\xb9\x90\x02\x00\x96\xec\x3f\xc5\xac\x44\xfa\xfa\x10\x4d\x00\
\xd8\x7a\x48\xbc\x0f\x7e\x81\x90\x02\x8c\x41\x22\x66\x0f\xb2\xff\
\x3e\x1c\xc0\xc1\x16\x64\x47\x0c\xd1\x31\x18\x30\xb4\xfa\x53\xac\
\x4a\xbc\x93\x71\x30\xf7\x7c\x08\xc0\x83\xd0\xb8\x5e\x21\xb4\x00\
\x23\x90\x8a\xd5\x83\xb9\x97\x79\x52\xbc\x67\xa1\x7c\xe7\x54\xa8\
\x2b\x11\x00\x1c\xb2\xfa\x3f\x0a\x7c\x15\x1d\xfe\xf9\xa0\xc1\x01\
\x10\xa4\x5e\x0f\x85\x0b\x00\xb0\xfa\x7b\x4c\xbd\x7d\x09\xc0\x42\
\xe8\x47\x97\xa8\x05\x00\xca\x57\x7f\xa9\xf8\xbc\x55\x23\x00\x7c\
\x3c\x54\xca\x9a\xc3\x05\x00\x7d\x5f\xfd\x2b\x5f\x47\xb2\x07\x1e\
\x1f\xea\x56\xe8\xc7\xc3\x05\x00\x58\xfd\x3d\xc1\x9b\x00\xf0\x7e\
\x64\x2d\x34\x00\x77\x08\x39\x40\x19\xa4\x62\xb2\x3e\x74\xef\x7f\
\x1b\x47\x9e\x1f\x8e\x94\xe9\x46\x60\x10\x8a\x46\x71\x2f\x9b\x81\
\x98\x23\xee\x7e\x5b\x89\x82\x56\xa0\x9b\x31\xc7\x0d\xc0\xbf\x8f\
\xf9\xa5\x93\xbb\xd8\xd6\x6b\xc1\xdd\xb7\x00\x2c\x38\x0f\x92\x68\
\xe4\x71\xd5\x0c\xfc\x02\x37\xd3\xfc\x9e\x22\x61\x08\x44\xc9\x9f\
\x0b\xe6\xfe\x14\xeb\x5e\x17\x3d\x9f\x35\x80\xf6\x0a\xf1\x85\xd0\
\x60\xe4\x48\x05\x00\x05\xb8\x71\x72\xb7\x30\x2f\x7d\x2f\x78\x83\
\x00\x0f\x79\x2b\x38\xf8\x23\x14\x04\x81\x88\xab\x3f\xc5\xde\x58\
\xf0\x2b\xbd\xbf\xea\xed\x5d\x00\x78\x7b\x62\x21\x38\x28\x77\xe8\
\x1d\x08\x44\xb2\xfe\x92\x0e\x74\x11\xa2\x1b\xd3\x20\xd0\xc3\x4a\
\x36\xa5\x28\x1c\xce\x06\x00\xf2\xb8\x72\x72\x85\xbf\x60\x9c\x0a\
\x22\x00\x11\x5c\xc0\x25\x52\x01\x40\xd8\xfa\x5f\x5a\x5f\xfd\x43\
\x3a\x00\x69\x17\x40\xf8\x8c\x54\x00\x48\xd0\xfa\x07\xe5\x52\x30\
\x01\x88\xe0\x02\xb0\x2b\x00\x48\xe0\x4e\xd8\xfa\x2f\x42\x76\x62\
\x1e\x04\x7e\xf8\xa9\x93\xe9\x18\xd4\x62\xc4\x87\x32\x00\x20\xc4\
\xea\x4f\xb1\x25\x79\x57\xc5\x3a\xb4\x93\x0e\x2a\x00\xbc\x67\x79\
\x2b\x3c\x4f\x37\xcd\x44\x0d\x11\xae\x80\x67\xf2\x53\x4c\xdd\x08\
\x7f\xed\x6d\xe8\x7b\x18\x06\x02\x3f\x82\x4e\x2e\xd5\xc2\x03\x87\
\x7a\x00\xe0\x3b\xef\xff\x2c\xfc\xb5\xb5\xf3\x7c\xea\x2f\x8a\x00\
\xb0\x0b\x90\x2e\x08\x16\x11\x26\x0c\x48\x17\x9f\x85\xf3\x7e\xc2\
\x4c\xe2\x98\xbb\x84\x03\x20\x11\x58\x38\xf9\x6b\xa5\xe9\xb5\x61\
\x14\x05\x81\x43\x57\x7f\x8a\xa1\x52\xf8\x6b\x2b\xe6\x8c\x4b\x42\
\x00\x18\xd3\x08\xf3\x37\xc6\x9d\x02\xc0\x01\xe4\xa7\xd8\x89\x11\
\x3f\x62\x5c\x11\x13\x80\x46\xd1\x56\x12\x39\xcd\x2b\xb8\x83\x08\
\x00\x7b\x92\x3f\x86\x83\x9c\x33\x57\xd2\x12\x80\x36\xaf\x71\x71\
\xae\x97\xc6\xce\x00\xd0\x85\xfc\x31\x2a\xfe\x8e\xb9\x21\x5a\x2f\
\x13\x15\x00\x2e\x6a\x4c\x22\x0c\x2c\x55\x71\x1f\x21\x02\xc0\x8e\
\xe4\x7f\x74\x72\xaf\xf8\x6e\x63\x22\xdd\xdf\x42\xda\x01\x38\xee\
\xeb\x3f\x87\x08\x00\x20\xff\x2f\x58\xc6\xb8\xf3\x62\x10\x69\xac\
\x63\xa5\x02\x10\x01\x40\x23\xf9\xeb\x48\xce\x38\x8e\x00\x44\x4c\
\x05\x20\x02\x80\x36\xf2\x47\xb1\xfe\xb1\x1d\x40\xcc\x54\x00\x22\
\x00\x68\x22\xff\x3c\xe6\x75\x77\x83\x98\x83\xdf\xfc\x70\xda\xef\
\x5c\x41\x04\x80\x9e\x92\x7f\xc5\x1c\x70\xbd\x14\x80\xd6\xfe\x38\
\xd9\x37\x06\x5f\x13\x81\x31\xe8\xd0\x3b\xf2\x8f\x23\x93\x3f\x66\
\x1a\xfc\x37\x32\x45\x93\x11\xfb\xd8\xee\x44\xea\xf8\x25\x80\x78\
\xd3\x12\x6f\x99\xa2\x49\xa1\x09\x89\xbd\x12\x53\xf3\x85\x09\x28\
\x92\x34\xf9\x11\x67\x1a\x05\x80\x27\xe7\xa9\xf9\x88\x9d\x93\x57\
\xcd\xdf\x39\x2e\x1c\x49\x8e\xf8\xed\x2b\xbd\x65\xe4\x47\xa1\xbc\
\xff\x58\xcb\xb8\x0c\x94\xcd\xd3\x69\xc4\x7a\x40\x0b\x0a\x90\x27\
\x14\x07\x93\x22\x3f\xcd\xe5\x93\x02\xf2\xaf\x39\xc6\x1d\x04\xe0\
\x15\xf0\xaa\xab\x41\x04\x0a\x16\x01\xb4\x17\xb3\x4f\xfe\x4b\x26\
\x7f\xa1\x61\x81\xd3\xe6\x2c\x33\xa5\x93\x46\x39\x9a\x96\x77\xf9\
\xe9\x26\xd6\x09\x52\x02\x93\x96\x9f\x62\x68\xa4\xe4\x91\x54\x16\
\x99\x07\x1a\x27\x8f\x07\x6a\xa6\xe4\x71\x46\xec\x06\x4a\xd0\xca\
\x0c\xf9\x4b\x5e\xf5\xb5\x90\x7f\xa6\x75\x87\x29\x53\x3e\x91\x1a\
\x2a\xb6\xdb\x98\x3b\xa1\x56\x4d\xc0\xde\xab\x3e\xdd\xd8\xa3\x29\
\x75\x53\xbd\xb3\x94\x19\x98\x54\x6d\x22\x50\xb3\x9d\xab\x40\x39\
\x75\xab\xbe\x74\xcf\xfe\x37\xd3\xc7\x26\x4e\xce\x35\x8f\xdb\x91\
\x81\xb9\xa5\xa3\x92\x43\x17\x7f\x7b\xb0\x05\x05\x18\x9d\x1e\x24\
\x4b\x37\x85\x1b\x50\xb1\xea\xdf\x28\x5b\x24\x08\x74\xc4\x5d\xfd\
\x99\x92\x81\xf6\x07\xdc\xda\x19\x58\x29\x7b\x34\x0a\xb8\x67\xec\
\x14\x44\x25\x3f\x8d\xfd\xb3\x52\xf2\x9f\x5a\x58\x1c\x32\x43\x93\
\xfd\xe3\xdc\xbe\x22\x27\x80\xb4\x00\x76\xdf\x34\xf9\x4d\x09\x80\
\x01\x11\x20\x90\x00\xcc\x20\x04\x41\x89\x4f\x45\xbe\x52\xe9\x23\
\x9a\x22\xbf\x39\x01\x30\x22\x02\x10\x82\xfe\x11\xdf\x24\xf9\x4d\
\x0a\x80\x21\x11\x80\x10\xf4\x83\xf8\x66\xc9\x6f\x56\x00\x8c\x89\
\x40\x1b\x20\xb7\x78\xdd\x78\xe7\xb9\x1d\x37\x1f\x17\x86\xe6\xf6\
\xd4\xea\x6e\x50\x66\x3c\x50\xb4\x1d\xf7\x7c\x0b\x75\xf3\x77\xef\
\x02\xdf\xf9\x6e\x74\x2e\x0b\xb7\xa9\xe6\x7f\x72\x3a\x8b\x7b\xaf\
\xc1\xfc\x31\xf1\x2c\x91\xe0\xd1\x76\x58\x68\xd7\xe0\x79\xe8\xbb\
\x2b\xe0\xd5\xfe\xcc\x90\x88\xb7\x48\xa2\x77\x44\x96\x50\x20\x5d\
\x73\xbe\x68\x0d\xeb\x2d\x31\x58\xf6\x84\xf4\xa3\x2d\xd2\x5b\xbc\
\xc6\x9d\xea\x3a\xd7\x29\xcc\x45\x96\x58\x60\xd1\x6a\x72\x63\x34\
\xa8\xb6\xc5\xe0\x8b\xdb\x1c\x23\x5d\x27\x32\x2f\x39\x93\xfd\xc4\
\x30\xe9\x5b\x24\xd5\x3a\x2e\x29\x01\xe0\x60\x8b\xdd\xe9\xd5\x27\
\xa8\xc0\x54\x91\x3b\x70\x9b\x4e\x32\x6b\x43\x84\x1f\xf2\x2a\x5f\
\x3a\x1b\xc5\xbc\x5d\xc4\xf9\x54\xf2\xe2\x4e\x08\xc0\x61\x01\xf8\
\x98\x48\xe0\xbd\x26\x08\x5f\x59\x10\x56\x4a\xc6\xbb\x7d\x57\xe3\
\x63\x42\x84\x7f\x39\xee\xa7\x29\xbe\xf7\x91\xa4\x00\x6c\x05\xa6\
\xc5\xe2\xe0\x3e\xc1\x59\xb3\x28\xd4\xed\x9f\xef\x5d\x06\xae\xd2\
\x6f\xff\x7d\xe4\xcf\xd4\x5b\xa7\x25\xdd\x28\x36\x4b\x7c\xf2\x52\
\xa8\x0b\xf8\x10\x88\xf5\x8b\xff\xfd\xfd\x0f\xff\xec\xbb\x17\x84\
\xce\x7b\x40\xf0\x7f\xb2\xfc\xd3\xd4\x77\x69\xb2\x3e\xcc\x24\x5b\
\xd4\xbb\x1e\x07\x33\xd0\x5d\x34\x27\xa9\xe5\xfb\xbd\x15\x80\x2d\
\x21\x20\x27\x80\xd7\x77\x81\x7f\xc2\x3c\xf6\x75\x5d\x10\x80\xb0\
\x22\x50\x3a\xbd\xaf\x92\x02\xf1\x50\xbb\x1e\xbe\xd2\x9d\xf5\x71\
\xa6\x0d\x1e\x21\x06\xc2\xa2\xb7\x9d\x9f\xb3\x3e\xcf\x3a\xdc\x00\
\x56\x7d\xd7\xf3\x46\x2e\x83\x3e\xcf\x3e\x4f\x3c\x5d\xd3\x34\x07\
\x17\xfa\x97\xeb\xd3\xdc\xf7\xfd\x55\xed\x0c\x71\xf0\xb7\x1b\xa0\
\x1d\x02\x2a\x12\x96\x18\x8d\xa4\x41\x84\x9f\xf6\xa1\xc2\x0f\x01\
\xd8\x4f\x08\xc6\x6e\xf3\x52\x11\xd2\x82\xf4\xec\xfe\x0c\x3d\x19\
\x20\x00\xbb\x88\x00\x15\x09\x69\xbb\x90\x9a\x52\xe4\x18\x11\xd3\
\xa0\xc2\xde\xad\xdb\x6c\xef\xa1\x85\x3b\x04\xa0\xb3\x10\x68\xec\
\x39\x0f\xec\x86\x85\xc3\xdd\x0d\x10\x00\x0f\x42\x50\x70\x5a\x00\
\x21\xb0\x43\xfc\x19\xba\x2e\x41\x00\x20\x04\x20\x3e\x00\x01\x08\
\x22\x04\x17\x2c\x04\xa8\x11\xc4\xcf\xf1\x97\x20\x3e\x04\x20\x56\
\x8d\x80\x8a\x85\x96\x1a\x59\xa6\x02\x22\x3b\x35\x58\x45\x71\x0f\
\x02\xa0\x42\x0c\x46\xec\x0a\x4a\x8c\x46\x50\x54\x6e\xd3\x62\x7d\
\x89\xa1\x80\x00\x68\x4e\x0f\x46\x70\x05\x5e\x57\xfb\x25\x13\x1f\
\x36\x1f\x02\x60\xca\x15\x58\xee\x7e\xab\x21\xb7\x7f\xc0\x6a\x0f\
\x01\x80\x18\x80\xf4\x00\x04\x20\x29\x31\x38\x41\x9a\xf0\x8b\xbd\
\xff\x02\xd2\x43\x00\xfa\x5a\x33\x28\xdd\xcf\x16\xda\xa9\xbb\x03\
\x5a\xe5\x2b\xf7\xf3\xee\x03\xe4\xf4\x10\x00\x60\x4b\x10\xda\x16\
\xdb\x27\xfc\x69\xbd\x8f\xe1\x8a\xff\x88\xf0\x2b\xbc\x85\x07\x01\
\x00\xba\x8b\x42\xc9\x42\xf0\x7e\x4b\x14\xb4\x39\x85\xf5\x16\xd9\
\xbf\x31\xd9\x2b\xcc\x1e\x04\x00\x08\x23\x0a\x6d\xcb\xee\x82\xff\
\xb6\x5b\x7a\x87\x10\x88\x96\xe0\xed\xaa\xfe\xdd\xfd\xbc\x87\x60\
\x85\xc3\x38\x10\x00\x40\x6f\x8d\xa1\xd8\xf3\x5f\xaf\x91\xa3\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\xc7\xff\x05\x18\x00\
\x17\x4e\x54\xcc\xd9\xff\xaf\x07\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x00\xa6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x14\x1f\x20\xb9\
\x8d\x77\xe9\x00\x00\x00\x2a\x49\x44\x41\x54\x08\xd7\x63\x60\xc0\
\x06\xe6\x7c\x60\x60\x60\x42\x30\xa1\x1c\x08\x93\x81\x81\x09\xc1\
\x64\x60\x60\x62\x60\x48\x11\x40\xe2\x20\x73\x19\x90\x8d\x40\x02\
\x00\x23\xed\x08\xaf\x64\x9f\x0f\x15\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x12\x0f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5c\x72\xa8\x66\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x11\xb1\x49\x44\x41\x54\x78\xda\xec\
\x9d\xe1\x51\x1b\x49\x13\x86\x67\xa9\x0b\x40\x19\x78\x2f\x02\x8b\
\x5f\xf7\xeb\x8a\x25\x02\x8b\x08\x2c\x22\x00\x2e\x01\x20\x81\x33\
\x44\x80\x88\x00\x1c\x81\x97\xba\x5f\xf7\xcb\x72\x04\x5e\x67\xa0\
\x10\x4e\x0d\xa3\x3a\xe1\x02\x23\x69\x77\x7a\x7a\x66\x9e\xa7\x4a\
\xc5\xd9\xdf\x57\x66\xb5\xd3\xfd\xce\xdb\x3d\xb3\xb3\xce\x01\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\
\xf2\x54\xdc\x82\xfc\xf8\xf3\xef\x7f\x1b\xff\x9f\xa3\xe5\x67\xbc\
\xf6\x3f\xbd\x5b\x7e\xea\x2d\xff\xb9\x6e\xf9\xf9\xb1\xf6\xe7\xf9\
\xf2\xb3\x90\xcf\x3f\x7f\xfd\x31\xe7\x6e\x23\x00\xa0\x9f\xe0\xab\
\xc4\x1e\xfb\x24\x3f\x78\x21\xd9\x35\x69\xfd\xcf\x07\x2f\x0e\x22\
\x0c\xf3\xa5\x40\x2c\x18\x2d\x04\x00\xfa\x25\x7b\xbd\x96\xec\x07\
\x6b\x49\x9f\x02\x9d\xff\x3c\xac\x89\x42\xc7\xa8\x22\x00\xf0\x7a\
\xc2\x4b\x82\x37\x3e\xd9\x9b\x84\x92\x7d\x1b\x51\x98\x7b\x51\x68\
\x29\x23\x10\x00\x66\x78\xe7\x26\x19\x27\xfc\x5b\x2c\x7c\x09\xf1\
\xd9\x0b\x02\x0e\x01\x01\x28\x62\x96\xff\xe8\x13\xbf\xe6\x8e\x3c\
\x63\xee\x05\xe1\x16\x77\x80\x00\xe4\x94\xf4\xcd\x5a\xd2\x8f\xb8\
\x23\x1b\x97\x0b\xf7\x88\x01\x02\x90\xb2\xbd\x3f\x61\xa6\x1f\x4c\
\x0c\x6e\x97\x9f\x19\x65\x02\x02\x60\x39\xe9\x47\x3e\xe1\x25\xf1\
\xc7\xdc\x91\x20\xac\x4a\x84\x19\xb7\x02\x01\xb0\x34\xdb\x9f\x63\
\xf1\x55\x91\x06\xa2\x88\xc0\x35\xae\x00\x01\x88\x59\xdb\x4b\xe2\
\x37\xdc\x8d\xa8\xdc\x7b\x21\x68\xb9\x15\x08\x80\x46\xe2\x4f\x7d\
\xe2\x53\xdb\x53\x1e\x20\x00\x24\x3e\x18\x43\x4a\x82\x4b\x84\x00\
\x01\x20\xf1\x11\x02\x84\x00\x01\x20\xf1\x11\x02\x77\x4c\x8f\x00\
\x01\xd8\x34\xf1\x1b\x47\x73\x2f\xd7\x1e\xc1\x19\x1b\x8b\x10\x80\
\xd7\x12\x5f\x66\xfa\x4f\xee\x69\x39\x0f\xf2\x65\xe6\x85\xa0\xe8\
\x47\x96\xf7\x88\x83\x67\xc9\x7f\xb1\xfc\xf1\x95\xe4\x2f\x02\x29\
\xed\xbe\x2f\xc7\xfc\x14\x07\x40\xe2\x8b\xcd\xbf\xa1\xce\x2f\x96\
\xb9\xef\x0f\xcc\x11\x80\xb2\x12\x7f\xe4\xeb\xfc\x53\x72\x00\x96\
\x5c\xb9\xa7\x15\x83\x05\x02\x90\x7f\xf2\x4f\x7c\xad\xcf\xac\x0f\
\xeb\x74\xae\xa0\xd5\x82\xaa\xc0\xc4\x67\xd6\x87\x8d\xdc\xc0\x52\
\x04\xce\x10\x80\xbc\x92\x5f\x9e\xce\xbb\x63\xd6\x07\x7a\x03\x4f\
\xec\x15\x94\xfc\x17\xee\xa9\xc3\x4f\xf2\xc3\xa6\xc8\x84\xf1\x25\
\xe7\x95\x82\xaa\x80\xc4\x1f\xf9\x59\xbf\x29\x3c\x98\x57\xc7\x75\
\xff\x5c\xef\xfe\xf8\xe9\xef\x5e\x7a\x77\x40\x4a\x27\x11\x87\xe2\
\xde\xbb\x81\x05\x02\x90\x96\xe5\xff\x52\x48\xf0\xae\x5e\xd8\xb1\
\x7e\x36\xbf\x1b\xba\x99\xb5\xf6\xd2\x11\x2b\xef\x24\xd0\x44\x04\
\xf3\x28\xa7\x92\xa0\xca\x38\xf9\xa7\xee\x69\x6d\x3f\xd7\x64\x97\
\xc4\xfe\x26\x41\x69\xa5\x63\xed\xc5\x41\xdc\xc3\x7b\xef\xb8\x72\
\x14\x05\x11\xd7\xb3\x5c\x1e\x2e\xaa\x32\x4d\x7e\x49\xfc\x69\x46\
\x5f\xa9\x75\xff\x9f\xa3\xdf\x26\x36\x16\x8d\x7b\xfe\x9e\x83\x5c\
\xc8\x62\x95\xa0\xca\x2c\xf1\x73\xa9\xf7\x3b\xf7\xfc\xac\xfc\x45\
\x46\xe3\x23\x63\xf3\xc1\xff\xac\xe9\x0b\x20\x00\x43\xd6\xfb\x37\
\x09\xdb\xce\xce\x15\x76\x04\x76\x26\xef\x47\x90\xb1\x3a\x4c\x55\
\x04\xaa\x8c\x02\x29\xc5\x66\xdf\xc2\xfd\x7f\x96\x5d\xd1\x8f\xa7\
\xfa\x31\x5c\x1d\xa1\x9e\xe2\x38\x1e\xa6\x38\x86\x55\x06\x81\x33\
\xf1\x33\x7f\x4a\x41\xd3\x3a\xce\xad\xfb\x55\x99\x30\xf1\xce\xa0\
\x41\x04\x10\x80\x5f\x05\xcb\xd4\xa5\xd5\xe9\x9f\x31\xdb\xef\xe4\
\x0a\xa6\x09\x89\x40\x52\x2b\x04\x15\xc9\xaf\x12\x14\xd7\xee\xa9\
\x6b\xbc\x20\xad\x77\x76\x05\xa7\x5e\x0c\x52\x70\x7a\xc7\xa9\x88\
\x40\x45\xf2\x93\xf8\x08\x41\xb9\x22\x50\x91\xfc\x24\x3e\x42\x50\
\xae\x08\x54\x24\xff\xa0\x14\x77\xa0\x84\x01\x21\xb0\xfe\x68\xb7\
\x69\x11\xa8\x48\xfe\x41\x68\xfd\x40\x77\xa4\x65\x94\xd8\xa8\x7d\
\x6c\x34\x88\x40\x86\x02\x60\x38\xf9\x3b\xc7\x59\xf3\x96\xe2\xa4\
\x71\x76\xcf\x76\x34\x29\x02\x55\x02\x83\x2a\x6b\xc2\x77\x06\x2f\
\xed\x92\x3a\xdf\x74\x7f\xe0\xdc\xd8\xa5\x99\xdc\x27\x50\x19\x1f\
\x4c\x8b\x3b\xfc\x8a\x3d\x41\x36\x31\x21\xb0\xb8\x35\xdc\x9c\x08\
\x54\x24\xff\x76\xb3\xfe\x72\xf0\x2e\x48\xaf\xa4\x84\xe0\xc2\x98\
\x1b\x10\x11\xf8\xdd\x8a\x73\xac\x8c\x0e\xda\xc8\x27\xbf\x15\xf5\
\x96\x5a\xff\x88\x59\x3f\x69\x37\x60\xe9\x2c\x48\x33\x0f\x10\x59\
\x3d\x13\xf0\xce\x50\xf2\xcb\xc3\x3a\xfb\x24\x7f\xba\xf8\xb1\xdb\
\xf7\x63\x69\x81\x55\x79\x82\x03\x78\x41\xad\x2d\x1d\xe6\x21\xfb\
\xba\xaf\x48\xa1\xac\xdc\x80\x34\x08\x3f\x19\xb9\x9c\xe8\x87\x8a\
\x54\xc6\x06\x67\x6a\x44\x19\x93\x7d\xbc\x13\x36\x2e\x09\xac\xf4\
\x97\xa2\x2e\x0f\x56\xc6\x06\xe5\x2b\xf5\x19\x28\xc5\x9b\x95\x3e\
\x53\xd4\xc9\xa6\x32\x34\x18\x16\xce\xec\x9f\x2d\x07\xe2\x98\xf4\
\x28\x4a\x08\x2c\x94\x9c\x9d\x7b\xea\x33\xa9\x4f\x3a\x56\x9a\x80\
\x16\x3a\xb4\x97\x24\x7f\x79\xf8\x31\xbf\x8c\x7c\x19\x75\xac\xd2\
\x37\xba\x00\xf8\xa6\x4c\x63\xa0\x0e\xbb\x20\x1d\x8a\x15\x01\x19\
\xfb\xd8\xe2\x3f\x89\xf1\x06\xa2\x2a\x72\xf2\x5b\xa8\xfb\x8f\x39\
\x9a\x0b\x7c\x3c\x4e\x5d\xfc\x26\xb4\xea\x92\xf3\x5e\xc4\x9b\xbd\
\x3a\xc2\x3b\x16\xab\xe6\x0b\xc9\x0f\x2b\x27\x20\xb1\x70\xe4\x63\
\x23\x16\xaa\x02\x14\xb3\x04\x38\x8f\x58\xf7\xaf\x92\xbf\x25\xec\
\xe1\x27\x11\x90\xcd\x42\x87\x11\x45\x60\xbc\x9c\x1c\xd5\xf6\x29\
\x44\x29\x01\xfc\x63\x9b\x5f\x22\x27\x3f\x6b\xfc\xf0\x56\x79\x1a\
\x73\xaf\x80\xca\x04\x55\x45\xb8\xb1\xb1\x97\xfc\x98\xf9\x61\xd3\
\x58\x8d\xf9\x28\x7a\xe7\x14\x96\x06\x63\x94\x00\x31\xad\x3f\x87\
\x77\xc0\xb6\xe5\x40\xac\xd5\x81\xda\x29\x3c\xc5\xa8\xea\x00\x22\
\x5b\x7f\xba\xfd\xb0\x6b\xdc\x4e\x5d\xbc\xd5\x81\xa0\xab\x02\xda\
\x0e\x20\xd6\x4d\xbc\x24\xf9\xa1\x87\x13\x90\xd8\x89\xb5\x59\x28\
\x68\xce\xa8\x09\x80\x3f\x98\x21\x86\xf5\x9f\xb1\xc9\x07\x06\x10\
\x01\x89\xa1\x18\x93\xc8\x38\xe4\x06\xa1\x4a\x29\xf9\x25\xf1\xa5\
\xf1\xa7\xdd\x51\x9d\x2f\x07\x6e\x9f\xf0\x85\x01\x63\x59\xe2\x58\
\xfb\x01\xa2\x60\xa7\x08\xfd\xa6\xf4\x05\xce\x23\x24\xff\xe3\x72\
\x5f\xa1\x41\x2a\x82\x3b\x55\x70\x56\x5d\x81\xb7\x57\x62\xea\xbb\
\x72\x3c\xcb\xef\x92\xbd\x01\x83\x37\x24\x2b\x85\x60\x6c\x5c\x9c\
\xc6\x5f\xb1\xa7\xf8\x28\xdd\xf3\x62\x97\x53\x23\x6e\x61\x1f\x3c\
\xa6\x35\x7a\x00\x31\x0e\x64\x3c\x63\xa3\x0f\x04\xec\x07\x48\x6c\
\xc5\x38\xc9\x67\xf0\x1d\x82\x41\x05\xc0\x2f\x9f\x34\xca\x37\xe9\
\x9e\x63\xbc\x40\x41\x04\x24\xc6\xb4\xcf\x18\x6c\xbc\xbb\x4b\xc6\
\x01\x68\xcf\xfe\x9d\x8b\xff\x58\x27\x94\xc3\xb1\x8f\x39\x4d\x6e\
\x92\x10\x00\x3f\xfb\xd7\xca\x37\xe7\x88\xa3\xbc\x40\xd1\x05\x48\
\xac\x1d\x29\xff\xda\xda\xe7\x96\x79\x07\xa0\x3d\xfb\x5f\x52\xf7\
\x43\xa4\x7e\x80\xf6\x26\xa1\x73\xd3\x02\x10\x61\xf6\x9f\xb3\xd9\
\x07\x22\x8a\x80\xc4\x9e\xe6\xe4\x33\x98\x0b\x08\xe5\x00\xb4\x67\
\x7f\xea\x7e\xb0\xd0\x0f\x48\xce\x05\x0c\x2e\x00\x11\x66\x7f\xac\
\x3f\x94\x58\x0a\x0c\xe2\x02\x42\x38\x00\xcd\xd9\xbf\x5b\x7e\x58\
\xf2\x03\x2b\x5c\x39\xdd\x55\x81\x13\x53\x02\xe0\xd7\x28\x35\x67\
\xff\x63\xba\xfe\x60\xc8\x05\x2c\x94\x4b\x81\x71\xdf\x7d\x01\x43\
\x3b\x00\xcd\xd9\xbf\xe5\x70\x0f\x30\x28\x02\x12\x93\x9a\x71\x79\
\x62\x42\x00\xfc\x03\x28\x8d\xe2\x17\xa7\xf1\x07\x56\xd1\x8c\xcd\
\x89\xcf\xbd\xe8\x0e\x40\x73\xf6\xbf\x2a\xf4\x49\x34\x48\xc3\x05\
\x74\x4e\xb7\x37\x75\x12\x55\x00\xfc\x41\x9f\x13\xa5\x2f\x2b\x75\
\xd6\x25\x61\x06\xc6\xb9\x74\x7a\x47\x8b\x4f\x63\x3b\x00\x49\x7e\
\xad\xe7\xa3\xaf\x69\xfc\x41\x02\x2e\x40\x62\xf4\x5a\xe9\xd7\x8d\
\x76\x5d\x12\x1c\x4a\x00\x4e\x94\xbe\xe8\xc2\xb1\xec\x07\xe9\x70\
\xa5\xe8\x02\x3e\x46\x11\x00\xdf\x80\xd0\x3a\x22\x89\xd9\x1f\x70\
\x01\x2f\xd3\xec\xd2\x0c\x1c\xc2\x01\x30\xfb\x03\xd8\x70\x01\xd3\
\x18\x02\xa0\xd5\xfc\x63\xf6\x07\x5c\xc0\xc0\x65\x40\x2f\x01\xf0\
\x67\xa3\xd5\x4a\x5f\x8e\xd9\x1f\x52\x76\x01\x1a\xd4\x3e\x27\xd5\
\x1c\xc0\x47\xa5\x2f\x36\x63\xf6\x87\xc4\x5d\xc0\xcc\xa2\x0b\xe8\
\x2b\x00\x6a\xf6\x9f\x30\x82\xc4\xd1\x8a\xe1\x89\x8a\x00\x28\xda\
\xff\x96\xc7\x7d\x21\x03\x17\x20\x31\xac\x11\xc7\x5b\x95\x01\x7d\
\x1c\x80\x96\xfd\xbf\x25\x7c\x00\x17\x10\x26\x37\xfb\x08\x40\xa3\
\xf0\x45\xa4\x76\xba\x27\x6e\x20\x13\xee\x9d\xce\x92\x60\x13\x54\
\x00\x14\x37\xff\xdc\xd3\xfc\x83\x8c\xca\x00\xad\x09\x6d\xbc\xe9\
\xa6\xa0\x5d\x1d\x00\xcd\x3f\x00\xdb\x25\x6d\x13\x52\x00\x0e\x14\
\xbe\x40\x47\xf3\x0f\x32\x74\x01\xad\xd3\x39\x36\xec\x43\x48\x01\
\xd0\xa8\xff\xa9\xfd\x21\xe7\x5e\x40\x9a\x0e\xc0\x2f\x31\x68\x3c\
\xfa\x4b\xf7\x1f\x28\x03\x76\x67\xb4\xc9\x72\xe0\x2e\x0e\x40\x63\
\xf6\xc7\xfe\x43\xce\x65\xc0\x5c\xa9\x0c\x68\x42\x08\x80\x46\xfd\
\xdf\x12\x26\x90\x39\x1a\x31\x7e\x10\x42\x00\x34\x1c\xc0\x67\xe2\
\x03\x32\x47\x23\xc6\x87\x2d\x01\xfc\xda\xa2\x46\xfd\x8f\x03\x00\
\x1c\x40\x7f\xea\xb7\xf6\x03\x6c\xeb\x00\x34\x36\xff\xb4\x6c\xfe\
\x81\x02\xfa\x00\x0b\x25\x11\x18\xa7\x26\x00\x0f\x84\x07\x14\xc2\
\x43\x6a\x02\x40\x03\x10\x20\xad\x58\x3f\x48\xca\x01\xf0\xba\x2f\
\x28\xa8\x0c\x48\xa7\x04\xf0\x2f\xff\x08\xdd\x00\x64\xed\x1f\x4a\
\x23\x74\xcc\x8f\x7c\xee\xf6\x76\x00\x2a\x0d\x40\xe2\x01\x28\x03\
\xf4\x5c\x80\x35\x01\xf8\x46\x3c\x40\x61\x7c\x4b\x45\x00\x34\xd6\
\xff\x3b\xe2\x01\x0a\x43\x23\xe6\x07\x29\x01\x82\xaf\x00\xd0\x00\
\x84\xd2\x50\x8a\xf9\x83\x21\x04\x20\x34\x34\x00\xa1\x54\xa2\xc5\
\xfe\x36\x02\xd0\x04\xbe\x16\x76\xff\x41\xa9\x84\x8e\xfd\x26\x05\
\x07\xc0\x0e\x40\x28\x95\x68\xb1\xbf\x91\x00\xfc\xf9\xf7\xbf\x8d\
\xc2\xb5\xe0\x00\x00\x07\x10\x88\xd7\x72\x98\x1e\x00\x00\x3d\x80\
\x37\x19\x31\x46\x00\x49\x33\xea\x23\x00\x3c\x03\x00\x90\x76\xec\
\x8f\xad\x97\x00\x00\x60\xb4\x04\x08\x0d\x0d\x40\x28\x9d\x85\x65\
\x01\x78\x17\xf8\x3a\x68\x00\x42\xe9\x84\xce\x81\x77\x7d\x04\xa0\
\x66\x7c\x00\x92\xa6\xa6\x07\x00\x00\x08\x00\x00\xd8\x12\x80\x8e\
\xa1\x80\xc2\x59\x94\x2c\x00\x3f\x18\x7f\x28\x9c\x6f\x25\x0b\x00\
\x00\x20\x00\x00\x80\x00\x00\x40\x51\x02\xf0\x8e\xa1\x80\xc2\x79\
\x5f\xb2\x00\xd4\x8c\x3f\x14\xce\xa8\x64\x01\x00\x00\x04\x00\x00\
\x2c\x0a\x40\xc7\xad\x02\x48\x9a\xae\x8f\x00\x84\xde\xa8\x33\x66\
\x7c\xa0\x70\x42\xe7\xc0\x0f\xcb\x25\x00\x47\x8e\x41\xe9\xd0\x04\
\x04\x00\x9b\x02\x10\xfc\xc0\x0e\xa5\xa3\xc7\x01\xcc\xa1\x14\xfb\
\xf3\x3e\x02\xc0\x91\x5d\x00\x69\xf3\x62\x0e\xff\x66\xe8\x02\xa5\
\x09\xd2\x32\x4e\x83\xa9\xfd\x61\x8c\x19\x05\x76\x8e\x7d\xbb\x02\
\x20\xc7\x16\x2f\x6d\x4a\xe8\x0b\xa4\x11\x38\x10\xcb\xf1\x5a\x20\
\xa6\x49\x31\x52\x88\x89\x5e\x25\x80\x06\x07\xc4\x01\x14\x4a\xb4\
\xd8\xdf\x46\x00\x42\xcf\x28\x38\x00\xc0\x01\x84\xa1\x1d\x42\x00\
\x72\xa8\x83\x00\x4a\xed\x01\xf4\x16\x80\xe0\xaf\x30\x66\x29\x10\
\x4a\x43\x29\xe6\x1f\x86\x10\x00\x8d\xa5\xc0\x9a\x90\x80\xc2\xd0\
\x88\xf9\xc5\x10\x02\xa0\xb1\xec\xf3\x9e\x78\x80\xc2\xd0\x88\xf9\
\x79\x2a\x02\x40\x09\x00\xa5\xd1\x24\x21\x00\x7e\x6d\x39\x74\x19\
\x40\x23\x10\x4a\x23\x74\xcc\x2f\x7c\xee\xf6\x76\x00\x2a\x2e\x80\
\x46\x20\x94\x42\xcc\x67\x00\x76\x15\x80\x07\x85\x0b\x46\x00\x00\
\xfb\x3f\x1c\x0f\x43\x0a\x80\x46\x1f\x80\x1d\x81\x50\x0a\x1a\xb1\
\x3e\x4f\x4d\x00\x9a\xa5\x35\x62\x57\x20\xe4\x6e\xff\x47\x2e\x72\
\x03\x70\x6b\x01\xf8\xe7\xaf\x3f\x3a\xa7\xb3\x1f\x80\x32\x00\xb0\
\xff\xfd\xe9\x7c\xce\x0e\xe6\x00\x84\x56\xe1\xc2\x3f\x10\x1f\x90\
\x39\x1a\x31\xfe\xa6\x63\xdf\x45\x00\x68\x04\x02\xa4\x11\xe3\x0f\
\x21\x04\x40\xc3\x01\xd4\xcb\x1a\x89\x3d\x01\x90\x6b\xfd\x2f\xb1\
\x5d\x2b\xfc\xaa\x76\x70\x01\xf0\x07\x0b\x68\xf4\x01\x3e\x12\x2a\
\x90\x29\x1a\xb1\xbd\x78\xed\x10\x90\xbe\x0e\x40\xcb\x05\x4c\x88\
\x13\xc8\x14\x8d\xd8\xde\x28\x47\x77\x15\x80\xcf\x94\x01\x00\x3b\
\xd9\xff\x46\xc9\xfe\x7f\x0e\x29\x00\xad\xd2\xfd\x3a\x21\x64\x00\
\xfb\x1f\x2e\x47\xab\x1e\x4a\xf6\xd5\x29\x3c\xc8\xb0\xfc\xfc\xfe\
\xab\x87\x19\x00\x12\x9a\xfd\x65\xf3\xcf\x77\x17\xfe\x08\xb0\xf9\
\x32\x67\xf6\x43\x3a\x00\x2d\x17\x30\xa2\x17\x00\x99\xd5\xfe\x1a\
\xbb\x5c\x37\xce\xcd\x3e\xef\x05\xb8\x5d\x7e\x4e\x95\x2c\xd3\x8c\
\xd8\xd9\xba\xce\xfc\x12\xf8\xd7\x1c\xca\x71\xf1\xdc\x6d\x93\x25\
\xed\xed\xa6\xff\xc7\x9d\x1d\x80\x5f\x62\xe8\x14\xbe\x4c\x43\x33\
\x10\x32\x10\xe5\xb1\xd3\x39\xef\xa2\xdb\x64\xf9\x6f\x88\x12\x40\
\xb8\xcf\x4c\x39\x01\x52\x8f\xe1\xad\x72\xb2\xaf\x00\xdc\x2a\x7d\
\xa9\x29\x4f\x08\x42\xc2\xb3\x7f\x2d\x31\x6c\xcd\xfe\xf7\x16\x00\
\xc5\x32\x40\x38\x25\x94\x20\x51\xb4\x92\x7f\x2b\xfb\x3f\x84\x03\
\x50\x2d\x03\x70\x01\x90\xe0\xec\x3f\x72\x06\x9b\x7f\x43\x0a\xc0\
\xb5\xd2\x97\x1b\xe1\x02\x20\x41\x4e\x9d\xde\x6b\xef\x66\xea\x02\
\xe0\x0f\x1c\xd0\x7a\x55\x34\x2e\x00\x98\xfd\x5f\xa6\x7d\xeb\xf0\
\x8f\x50\x0e\x00\x17\x00\x10\x7f\xf6\xdf\xa9\x21\x3f\x94\x00\x48\
\x1f\x40\x6b\xbb\x2e\x2e\x00\x98\xfd\x9f\x23\x8f\xfe\xce\xa2\x09\
\x80\xdf\xab\xaf\xd5\x0c\x94\x1b\x7b\x4e\x88\x81\x71\xce\x2d\xd7\
\xfe\x43\x3b\x00\xe1\x52\xd3\x5a\xf9\xb5\x55\x00\x8b\xb3\x7f\xad\
\x5c\xaa\x5e\x47\x17\x00\xdf\x80\x68\x15\xbf\xf4\x0d\xa1\x06\x46\
\xd1\x8c\xcd\xfb\x5d\x9a\x7f\x21\x1c\x80\xb6\x0b\x68\x78\x8d\x18\
\x18\x9c\xfd\xe5\x89\x3f\xcd\xb8\xec\xd5\x80\x1f\x54\x00\xfc\xd3\
\x61\x9d\xe2\x97\xbf\xa1\x21\x08\x86\x92\x5f\x62\xf1\x93\xe2\xaf\
\x9c\xf7\x7d\x22\x73\x2f\xc0\x45\x69\xba\x00\xed\x5a\x0b\xe0\x57\
\x9c\x3a\x9d\xe3\xbe\x06\x99\xfd\x83\x08\x80\x5f\x8e\xd0\x74\x01\
\xe7\x3c\x2e\x0c\x06\x66\x7f\x89\x41\xcd\xd5\xa9\x6e\xd7\xa5\xbf\
\xd0\x0e\x40\xdb\x05\x3c\x96\x02\x84\x20\x44\x46\x3b\x06\x07\xc9\
\xb1\x20\x02\x10\xc1\x05\x8c\x97\x0a\x7c\x41\x0c\x42\xa4\xd9\x5f\
\x62\x4f\xd3\x85\x0e\x32\xfb\x87\x74\x00\x31\x5c\x00\xa5\x00\x94\
\x60\xfd\x07\xcd\xad\x60\x02\x10\xc1\x05\x08\x77\xac\x0a\x80\x62\
\xf2\x4b\xac\xdd\x29\xff\xda\xc1\x66\xff\xd0\x0e\x20\x86\x0b\xa8\
\xe9\x07\x80\x72\xdd\x5f\x2b\xff\xce\xe3\x21\xff\xb1\xa0\x02\xe0\
\x95\xaa\x55\xbe\x41\x93\xa5\x32\xb3\x34\x08\xa1\x67\x7f\x89\x31\
\xed\x23\xeb\xdb\xa1\x4f\x62\xde\x53\xb8\xe8\xcb\x08\xe3\xf3\x89\
\x7e\x00\x04\xae\xfb\x3f\x45\xf8\xd5\x67\x43\xff\x83\x95\xd2\x0d\
\x13\xab\x34\x55\xbe\x59\xc5\xbe\x55\x48\xe9\x10\xca\x59\x9f\x3d\
\xe8\x89\xd7\xfd\x1a\x6f\xf7\x79\xe9\x7e\x1f\x0f\xfd\x8f\xfe\xa6\
\x74\xf1\x97\x4e\xef\xad\x28\x2b\xe4\x77\xc9\xcb\x31\xf6\x4b\x0b\
\x52\x9f\x98\x17\xcc\xd5\x41\xf8\x12\x21\xf9\x17\x21\x66\x7f\xad\
\x12\x60\x15\x90\xd7\x11\x06\x6b\xec\xdd\x07\xc0\x50\x4e\x36\x46\
\x69\x79\x19\xca\xc9\x56\xca\x37\x50\xac\x53\x1d\xe9\x06\x32\x23\
\x42\x9f\xd8\x95\xf8\x89\x71\x10\xcd\xc6\x2f\xfa\x34\xeb\x00\xd6\
\x38\x8e\x34\x7e\xb2\x49\x68\x4a\x18\xc3\x8e\xc9\x3f\x75\xf1\x4e\
\xa1\x0a\x9a\x33\xaa\x02\xe0\x97\x30\xae\x22\xdd\xc8\x1b\x44\x00\
\x76\x4c\xfe\x58\x65\xe4\xd5\xb6\x2f\xfa\xb0\xee\x00\x1e\xed\xb8\
\xd3\xdf\x21\xb8\x2e\x02\xbc\x6e\x1c\x36\x4d\xfe\x49\xc4\xe4\xef\
\x9c\xc2\x12\x7a\x15\xe9\xc6\x36\x2e\xfc\xeb\xab\x5f\x43\x9a\x29\
\x87\xa1\x95\x15\x92\x4f\xfe\xb1\x8b\xd3\xf1\x5f\xa1\xf2\xfa\xf5\
\x18\x0e\x20\x76\x29\xf0\xb8\x3c\xc8\x46\x21\x30\x9c\xfc\x57\x1a\
\xc9\x1f\x4d\x00\x0c\x94\x02\x2b\x11\xa0\x1c\x80\x97\x6c\x7f\xcc\
\xe4\x97\xae\xff\x99\xd6\x2f\x8b\x26\x00\x7e\x5d\xf3\x28\xe2\x58\
\x3f\x3e\xc9\x45\x63\x10\xd6\x92\x5f\x62\xe1\x2e\x62\xf2\x0b\xaa\
\x2b\x65\x31\x1d\xc0\xea\xf5\xe2\x67\x91\xc7\x9d\xd5\x01\x88\xdd\
\xed\x5f\x71\xa6\xdd\x9b\xaa\x8c\xdc\x7c\xb1\x5c\x4d\xe4\xcb\x60\
\xb3\x50\xb9\xc9\x2f\xe3\x1e\xfb\x6d\x53\x72\xbe\xbf\xba\x23\xde\
\x33\x32\x06\x47\x11\xfb\x01\x2b\xce\xd9\x36\x5c\x64\xf2\xdf\x18\
\x48\xfe\xce\x45\xda\x24\x57\x19\x1a\x08\xe9\xbc\x7e\x35\x70\x29\
\x62\xc1\x0e\x4b\x7c\x8a\xb0\xb0\xc4\x5f\x3d\x2c\x16\x7b\x35\x28\
\xea\xb2\xb4\x15\x07\xb0\xea\x07\x1c\x1b\xb8\x14\x09\x88\xef\x2c\
\x13\x66\x9d\xfc\x8f\x63\x6c\x20\xf9\xa3\xd4\xfd\x26\x05\xc0\x8b\
\xc0\xcc\xf5\x78\xd3\xe9\x80\xc8\xec\xf0\x95\x93\x85\xb2\x4c\xfe\
\x53\xef\x34\x2d\x9c\x1d\x79\x35\xe4\xf9\x7e\x49\x97\x00\x3f\x0d\
\x92\x85\xa6\xe0\x0a\x79\xed\xf9\x31\x25\x41\x16\x96\x5f\xea\x7d\
\x2b\x7b\x3f\xa2\x34\xfd\x4c\x3b\x80\x35\x8e\x7c\x2d\x6e\x81\x89\
\x77\x03\x94\x04\x69\x5b\xfe\xaf\x86\x92\xdf\x4a\xb9\x6b\x53\x00\
\xfc\x6c\x2b\x37\xc8\xca\xac\x5b\x7b\x11\xb8\x20\x9d\x92\x4b\xfe\
\x0b\x9f\xfc\xb5\x91\x4b\x5a\x35\xfd\x4c\xc4\x76\x65\x7c\xf0\x62\
\xef\xc9\x7e\x55\xbd\x79\x98\x28\x89\x59\x3f\xd6\x09\x3e\x6f\x25\
\xbf\x99\xd8\xa9\x12\x18\x48\xb1\x6d\x77\x06\x2f\x4d\x9e\x65\xb8\
\xa2\x37\x60\xb2\xd6\x97\x46\xdf\xb9\xb1\x4b\x33\xf9\x14\x6a\x95\
\xc8\xa0\x4e\x9d\xcd\x17\x7e\x74\xee\x69\x19\xe7\x9e\xd4\x33\x11\
\x27\x8d\x8b\xf3\xb2\x8e\x4d\x38\x8e\xdd\xf1\x4f\x56\x00\x8c\x8b\
\x80\xd0\xfa\x01\xee\x48\xc3\x28\xb1\x51\xfb\xd8\x68\x8c\x5e\xa2\
\xc9\xe4\x4f\x4a\x00\x12\x10\x01\x41\xce\x38\xb8\xa4\x2c\x50\xb5\
\xfb\xe7\xde\xf2\x3b\x92\x3f\x73\x01\x48\x44\x04\x24\xf9\xaf\xe9\
\x0f\xa8\xd4\xf9\x27\xce\x56\x83\x38\xa9\xe4\x4f\x52\x00\x12\x11\
\x01\x84\xa0\xec\xc4\x4f\x22\xf9\x93\x15\x80\x84\x44\x00\x21\x18\
\xae\xc6\x9f\x26\x92\xf8\xc9\x24\x7f\xd2\x02\x90\x98\x08\xac\x90\
\xa0\xb8\x66\x0f\xc1\xc6\xe3\x3b\xf6\x49\x3f\x4d\xe4\x92\x1f\x5f\
\xe1\x95\x4a\xf2\x27\x2f\x00\x3e\x48\x56\x47\x37\x8f\x12\xba\xec\
\xb9\x77\x05\xf7\xb8\x82\x17\x6d\xfe\xc4\x27\x7e\x4a\xdb\xaf\x93\
\x3c\x6d\xba\xca\x24\x68\x2c\xee\x18\xdc\x34\x68\x64\x0f\xc1\xad\
\xd6\x29\xb0\x86\xc7\xb0\x59\xfe\xf8\xe8\xf4\x5f\x22\x5b\x6c\xf2\
\x67\x23\x00\x6b\x22\x60\x6d\xeb\xe7\x36\x74\x6b\x62\x30\x2f\x24\
\xe9\xc7\x6b\x49\x5f\x27\xfa\x35\x92\x3e\x40\xa6\xca\x2c\xa0\x1e\
\x4f\xfa\x75\x76\x37\x84\x6c\x23\x06\xe2\x08\x3e\xcb\xcf\x5c\xca\
\x04\x3f\x3e\x32\x36\x1f\xfc\xcf\x3a\xf1\xaf\x94\xfc\xa3\xe2\x59\
\x09\xc0\x5a\xa0\x89\x13\x98\x66\xf4\x95\x44\x0c\x1e\xbc\x18\xb4\
\x89\x8d\x45\xe3\x93\xfd\x20\x03\x61\x5e\xe7\x4a\xf3\xfc\x7e\x04\
\x60\xfb\xc0\x9b\xba\xb4\x56\x08\xb6\xb5\x9d\x22\x04\xdf\xc4\x2d\
\x58\x11\x05\x9f\xec\x32\xab\xbf\xf7\xc9\x9e\xe3\x19\x0a\xc9\x75\
\xfa\x8b\x14\x80\xb5\x1a\x33\xc5\xe6\xe0\xae\xa2\xb0\xf0\x4e\x61\
\xe1\xff\xec\x86\x16\x07\x9f\xe4\xce\x27\xf7\xc8\xcf\xec\xa3\x4c\
\x93\xfd\xa5\x7b\x9c\xd5\xa3\xe0\x55\xee\x23\x96\x51\x5f\x60\x88\
\x99\x6b\xfe\xc2\xdf\x7d\xfb\xe9\xef\xde\xbf\x20\x98\xe3\x42\x44\
\x34\xeb\x7a\xbf\x48\x01\x58\x13\x82\x0b\x67\xef\x19\x71\x48\x43\
\x38\xe5\x01\xaf\xab\x1c\xbf\x5c\x55\xd2\x48\xfa\x92\xe0\xce\xa5\
\xdf\x7d\x06\x2c\x3f\x02\xd0\xa3\x24\xb0\xfe\x08\x29\xc4\x27\x8b\
\x2e\x3f\x02\xf0\xba\x10\x48\x4f\xc0\xea\xe9\x31\x10\x8f\xce\xcf\
\xfa\x6d\x09\x5f\x76\xaf\xd4\x51\xf6\x03\xbc\xef\x9e\x0e\xf1\x00\
\x70\x3e\x16\xf6\x4b\xda\x96\x5d\x31\xe6\xb8\x01\x28\xf7\xa4\x67\
\x04\xe0\xb9\x10\x5c\xb8\x74\x9e\x39\x87\xfe\x64\xdd\xe1\x47\x00\
\x76\x13\x01\x71\x01\x9f\x9c\x9d\xb7\xc8\x40\x18\x66\xee\x69\x47\
\x5f\xd1\x8f\x63\x23\x00\xbf\x2e\x0b\x64\xb5\xa0\xe1\x6e\x64\x45\
\xeb\x22\xbf\x91\x17\x01\x48\x4b\x08\xa6\x5e\x08\xe8\x0f\xa4\x5f\
\xe7\x9f\x95\x7e\xee\x02\x02\x80\x10\x94\x46\xe7\xeb\xfc\x19\xb7\
\x02\x01\x40\x08\x48\x7c\x40\x00\x10\x82\xcc\x6b\xfc\x5b\x12\x1f\
\x01\xd0\x12\x82\xc6\xd1\x2c\xb4\x80\x3c\xad\x77\x4d\x8d\x8f\x00\
\xc4\x12\x82\xda\x0b\x41\x8a\x87\x5a\xa6\x8a\x2c\xe1\xcd\x7c\xe2\
\x77\xdc\x0e\x04\xc0\x82\x10\xa4\x7a\xac\x35\x36\x1f\x01\x80\x00\
\xae\xe0\xc4\xa5\x7d\xea\xad\x15\x64\x86\xbf\x95\x19\x9f\xd9\x1e\
\x01\x48\x51\x0c\x56\x6f\xb9\x69\x10\x83\xad\x92\xbe\xa8\xa3\xd2\
\x11\x80\x72\xc4\x20\xf5\xf3\xf0\x43\x21\x89\x2e\xc7\xa1\xdf\x93\
\xf4\x08\x40\x29\x65\x82\x08\xc1\xea\xd8\xec\xd2\x1a\x88\x0b\xf7\
\xfc\x1d\x08\xd8\x7b\x04\xa0\x78\x77\xd0\x64\x2c\x08\x9d\x9f\xe5\
\x57\xef\x39\x60\x96\x47\x00\xe0\x0d\x87\x30\xf6\x9f\x03\x97\xd6\
\x09\xbd\x9d\xff\x3c\xf8\xa4\x9f\x33\xc3\x23\x00\xd0\x5f\x14\x46\
\x6b\xa2\x60\xe1\x6c\xfe\xd6\xff\x7c\x58\x4b\xfa\x39\x6f\x3e\x46\
\x00\x20\x8e\x40\x34\xfe\x3f\x7f\x16\x85\x77\x6e\xfb\xa6\xa3\x24\
\xf3\x8f\xb5\x3f\xaf\x5e\x40\xb2\xc0\xbe\x03\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x18\xe7\x3f\x01\x06\x00\x49\x50\x7d\x87\x88\xd4\
\x8b\x3e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xac\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x07\x00\x00\x00\x3f\x08\x06\x00\x00\x00\x2c\x7b\xd2\x13\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xb3\x00\x79\x00\x79\xdc\xdd\
\x53\xfc\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x19\x10\x2e\x14\xfa\xd6\xc4\xae\x00\x00\x00\x39\x49\x44\
\x41\x54\x38\xcb\x63\x60\x20\x06\xc4\xc7\xc7\x33\xc4\xc7\xc7\xa3\
\x88\x31\x61\x53\x84\x53\x12\xaf\xce\x91\x28\xc9\x82\xc4\xfe\x8f\
\xc4\x66\x1c\x0d\xa1\x51\xc9\x51\xc9\x51\x49\x7c\x05\x06\xe3\x68\
\x08\x91\x2a\x49\x3e\x00\x00\x88\x4b\x04\xd3\x39\x2e\x90\x3f\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xf8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\x75\x49\x44\
\x41\x54\x58\x85\xed\x96\xcd\x4e\x13\x51\x18\x86\x9f\xaf\x15\xd2\
\x32\x78\x03\x56\x4d\x69\x58\x89\xa6\x3f\xf1\x06\x20\x26\x1a\x37\
\x94\x84\xd9\xb6\x33\xc4\x0b\x30\x46\x10\x34\x51\x16\x2e\x48\xd1\
\xb8\x72\x43\xb4\x74\xd8\x92\x98\xe2\xca\xb8\x11\x37\x2c\x8c\xda\
\x36\x12\xc0\x10\x40\x03\x86\x0b\xc0\x54\xa3\x71\x3e\x17\xb4\xd1\
\x44\xa6\x65\x0a\x3b\xfb\x6c\xbf\xf7\x9c\xf7\x49\xe6\xcc\x99\x81\
\x36\x6d\xfe\x77\xc4\x4f\xd8\x34\xcd\xce\xee\x70\x78\x48\x44\xd2\
\x40\x4a\x21\x02\x80\xea\x0e\x22\xef\x05\x8a\x7b\xd5\x6a\x71\x7e\
\x7e\xfe\xc7\xb1\x0b\xd8\x99\xcc\xb0\x8a\xe4\x04\x7a\x80\x0f\xa2\
\xba\xa8\x22\x3b\xb5\x71\x04\xe8\x07\x2e\x00\x1b\x2a\x32\x56\x28\
\x14\x9e\x1d\x8b\x80\x69\x9a\xc1\x93\x86\x91\x53\xd5\x1b\x02\x2f\
\x08\x06\xc7\xf3\xf9\x7c\xe5\xa0\xac\x65\x59\x09\x81\x29\x54\x2f\
\xab\xea\x74\x34\x16\x1b\x9f\x9c\x9c\x74\x1b\xed\x7f\xa2\x99\x40\
\xad\xfc\x3a\x30\x9a\x77\x9c\x07\x8d\xb2\x85\x42\xa1\x0c\x5c\x19\
\xb1\xac\x51\x60\xea\xd3\xe6\x26\xc0\x58\xa3\x35\xc1\x46\x43\x3b\
\x93\x19\x06\x1e\x09\x8c\xce\x3a\xce\xc3\x66\xb2\x75\x4a\xe5\xf2\
\x52\x32\x91\xf8\x2e\x22\xf7\x12\xc9\x64\xa5\x5c\x2e\xaf\x79\x65\
\x3d\x1f\x81\x69\x9a\x9d\xdd\x5d\x5d\xab\xc0\xc7\x59\xc7\xb9\x7a\
\xd8\xf2\xbf\xb1\xb3\xd9\x97\x40\xcf\xd7\x6a\xb5\xcf\xeb\x60\x06\
\xbc\x16\x77\x87\xc3\x43\x40\x4c\x82\xc1\x89\x56\xca\x01\x02\xaa\
\xb7\x80\x5e\xc3\x30\x06\x3d\x33\x5e\x03\x11\x49\xa3\x5a\xf1\x3a\
\x70\x87\xe1\xe9\xdc\x5c\x09\x58\x46\xd5\xbf\x00\x90\x42\xe4\x75\
\xab\xe5\x75\x44\xf5\x95\xa8\x5e\xf4\x2d\xa0\x70\x4a\xfe\xbc\xe7\
\x2d\xe3\xc2\x17\x44\x22\xbe\x05\x00\x54\xd5\xd7\x4d\x79\x60\x41\
\x20\x20\xfb\x1e\xfe\x05\x76\x45\xf5\xf4\x51\x05\x54\x35\x82\xea\
\x6e\x2b\x02\x6f\x55\xa4\xff\xa8\x02\xc0\x80\xc0\x1b\xdf\x02\x02\
\x45\xe0\xbc\x65\x59\x89\x56\x9b\x6d\xdb\x4e\x01\xe7\x14\x9e\xfb\
\x16\xd8\xab\x56\x8b\xc0\x86\xc0\x54\x8b\xfd\x22\xae\x9b\x03\xd6\
\x3b\x42\xa1\x05\xaf\x90\xe7\x55\xbc\xb2\xb2\xf2\x2b\x15\x8f\x6f\
\x03\x77\x52\xc9\x64\xb5\x54\x2e\x2f\xf9\x69\xb7\xb3\xd9\x09\xe0\
\x9a\xc0\xc8\x93\x7c\x7e\xd5\xb7\x00\x40\xa9\x52\x59\x4b\xc4\xe3\
\x06\x70\x37\x95\x4c\x7e\x3b\xa4\x84\xd4\xca\xef\x8b\xc8\x74\xde\
\x71\x1e\x37\x0a\x37\xfd\x1a\x46\x63\xb1\xf1\xcf\x5b\x5b\xaa\xaa\
\x39\x2b\x9b\xbd\x14\x54\x1d\xaf\xdd\x70\xff\x60\xdb\x76\x4a\x5c\
\x37\xa7\x30\x20\x22\xb9\xb3\xd1\xe8\xed\xa6\xb6\xcd\x02\x75\x2c\
\xcb\x4a\x8b\xea\x34\xd0\x0b\x2c\x03\x8b\xc0\x76\x6d\x7c\x86\xfd\
\x1f\x92\x3e\x60\x5d\xe0\x66\xde\x71\x3c\x0f\x5e\x4b\x02\xb0\xff\
\x85\x34\x0c\x63\x50\x5c\x37\x8d\x48\x0a\xa8\xdf\x13\x3b\x0a\xef\
\x44\xb5\xd8\x11\x0a\x2d\xcc\xcc\xcc\xfc\xf4\xb3\x6f\x9b\x36\xff\
\x37\xbf\x01\x4a\x37\xdd\xdd\x8c\xf1\x82\x6a\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x93\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\
\x00\x00\x00\x02\x62\x4b\x47\x44\x00\xd3\xb5\x57\xa0\x5c\x00\x00\
\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\
\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x0b\x07\x0c\
\x0c\x2b\x4a\x3c\x30\x74\x00\x00\x00\x24\x49\x44\x41\x54\x08\xd7\
\x63\x60\x40\x05\xff\xff\xc3\x58\x4c\xc8\x5c\x26\x64\x59\x26\x64\
\xc5\x70\x0e\x23\x23\x9c\xc3\xc8\x88\x61\x1a\x0a\x00\x00\x9e\x14\
\x0a\x05\x2b\xca\xe5\x75\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x12\x0f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5c\x72\xa8\x66\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x11\xb1\x49\x44\x41\x54\x78\xda\xec\
\x9d\xe1\x51\x1b\x49\x13\x86\x67\xa9\x0b\x40\x17\x81\xf7\x22\xb0\
\x88\x80\xa5\xee\xff\x19\x22\xb0\x88\x00\x88\x00\x14\x81\x21\x02\
\x44\x04\xe0\xfb\x7f\xe5\x25\x02\xcb\x11\x78\x1d\xc1\x29\x84\x53\
\xc3\xa8\x4e\xb8\xc0\x48\xda\x9d\x9e\x9e\x99\xe7\xa9\x52\x71\xf6\
\xf7\x95\x59\xed\x74\xbf\xf3\x76\xcf\xec\xac\x73\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x3c\x15\
\xb7\x20\x3f\xfe\xfd\xf3\xaf\xc6\xff\xe7\x68\xf9\x19\xaf\xfd\x4f\
\xef\x96\x9f\x7a\xcb\x7f\xae\x5b\x7e\x7e\xac\xfd\x79\xbe\xfc\x2c\
\xe4\xf3\xfb\x3f\x7f\xcf\xb9\xdb\x08\x00\xe8\x27\xf8\x2a\xb1\xc7\
\x3e\xc9\x0f\x5e\x48\x76\x4d\x5a\xff\xf3\xc1\x8b\x83\x08\xc3\x7c\
\x29\x10\x0b\x46\x0b\x01\x80\x7e\xc9\x5e\xaf\x25\xfb\xc1\x5a\xd2\
\xa7\x40\xe7\x3f\x0f\x6b\xa2\xd0\x31\xaa\x08\x00\xbc\x9e\xf0\x92\
\xe0\x8d\x4f\xf6\x26\xa1\x64\xdf\x46\x14\xe6\x5e\x14\x5a\xca\x08\
\x04\x80\x19\xde\xb9\xa3\x8c\x13\xfe\x2d\x16\xbe\x84\xf8\xec\x05\
\x01\x87\x80\x00\x14\x31\xcb\x7f\xf4\x89\x5f\x73\x47\x9e\x31\xf7\
\x62\x70\x8f\x3b\x40\x00\x72\x4a\xfa\x66\x2d\xe9\x47\xdc\x91\x8d\
\xcb\x85\xfb\xe5\xe7\x16\x31\x40\x00\x52\xb5\xf7\xa7\xcc\xf4\x83\
\x89\xc1\xed\xf2\x33\xa3\x4c\x40\x00\x2c\x27\xfd\xc8\x27\xbc\x24\
\xfe\x98\x3b\x12\x84\xd6\xbb\x82\x19\xb7\x02\x01\xb0\x34\xdb\x5f\
\x60\xf1\x55\x91\x06\xa2\x88\xc0\x35\xae\x00\x01\x88\x95\xf8\xab\
\xd9\xbe\xe1\x6e\x44\xe5\xde\x0b\x41\xcb\xad\x40\x00\x34\x12\x7f\
\xe2\x67\x7c\x6a\x7b\xca\x03\x04\x80\xc4\x07\x63\x48\x49\x30\x45\
\x08\x10\x00\x12\x1f\x21\x40\x08\x10\x00\x12\x1f\x21\x70\x27\xf4\
\x08\x10\x80\x4d\x13\xbf\xf1\x89\xdf\x70\x37\xb2\xeb\x11\x9c\xb3\
\xb1\x08\x01\x78\x2d\xf1\x65\xa6\xff\xe4\x9e\x96\xf3\x20\x5f\x66\
\x5e\x08\x8a\x7e\x64\x79\x8f\x38\x78\x96\xfc\x97\xcb\x1f\x5f\x49\
\xfe\x22\x90\xd2\xee\xfb\x72\xcc\xcf\x70\x00\x24\xbe\xd8\xfc\x1b\
\xea\xfc\x62\x99\xfb\xfe\xc0\x1c\x01\x28\x2b\xf1\x47\xbe\xce\x3f\
\x23\x07\x60\xc9\x95\x7b\x5a\x31\x58\x20\x00\xf9\x27\xff\x91\xaf\
\xf5\x99\xf5\x61\x9d\xce\x15\xb4\x5a\x50\x15\x98\xf8\xcc\xfa\xb0\
\x91\x1b\x58\x8a\xc0\x39\x02\x90\x57\xf2\xcb\xd3\x79\x77\xcc\xfa\
\x40\x6f\xe0\x89\xbd\x82\x92\xff\xd2\x3d\x75\xf8\x49\x7e\xd8\x14\
\x99\x30\xbe\xe4\xbc\x52\x50\x15\x90\xf8\x23\x3f\xeb\x37\x85\x07\
\xf3\xea\xb8\xee\x9f\xeb\xdd\x1f\x3f\xfd\xdd\x4b\xef\x0e\x48\xe9\
\x24\xe2\x50\xdc\x7b\x37\xb0\x40\x00\xd2\xb2\xfc\x5f\x0a\x09\xde\
\xd5\x0b\x3b\xd6\xcf\xe6\x77\x43\x37\xb3\xd6\x5e\x3a\x62\xe5\x9d\
\x04\x94\x04\x08\xc0\x8b\x81\x3a\x71\x4f\x6b\xfb\xb9\x06\xa2\x24\
\xf6\x37\x99\xc5\xad\x74\xac\xbd\x38\x88\x7b\x78\xef\x1d\x57\x8e\
\xa2\x20\xe2\x7a\x9e\xcb\xc3\x45\x55\xa6\xc9\x2f\x89\x3f\xc9\xe8\
\x2b\xb5\xee\xff\x73\xf4\xdb\xc4\xc6\xa2\x71\xcf\xdf\x73\x90\x0b\
\x59\xac\x12\x54\x99\x25\xfe\xc8\x5b\xfe\xd4\x67\x9e\xce\x3d\x3f\
\x2b\x7f\x91\xd1\xf8\x88\x08\x7c\xf0\x3f\x6b\xfa\x02\x08\xc0\x90\
\xf5\xfe\x4d\xc2\xc9\xdf\xb9\xc2\x8e\xc0\xce\xe4\xfd\x08\x32\x56\
\x87\xa9\x8a\x40\x95\x51\x20\xa5\xd8\xec\x5b\xb8\xff\xcf\xb2\x2b\
\xfa\xf1\x54\x3f\x86\xab\x23\xd4\x53\x1c\xc7\xc3\x14\xc7\xb0\xca\
\x20\x70\x8e\xfc\xcc\x9f\x52\xd0\xb4\x8e\x73\xeb\x7e\x55\x26\x1c\
\x79\x67\xd0\x20\x02\x08\xc0\xaf\x82\x65\xe2\xd2\xea\xf4\xcf\x98\
\xed\x77\x72\x05\x93\x84\x44\x20\xa9\x15\x82\x8a\xe4\x57\x09\x8a\
\x6b\xf7\xd4\x35\x5e\x90\xd6\x3b\xbb\x82\x33\x2f\x06\x29\x38\xbd\
\x93\x54\x44\xa0\x22\xf9\x49\x7c\x84\xa0\x5c\x11\xa8\x48\x7e\x12\
\x1f\x21\x28\x57\x04\x2a\x92\x7f\x50\x8a\x3b\x50\xc2\x80\x10\x58\
\x7f\xb4\xdb\xb4\x08\x54\x24\xff\x20\xb4\x7e\xa0\x3b\xd2\x32\x4a\
\x6c\xd4\x3e\x36\x1a\x44\x20\x43\x01\x30\x9c\xfc\x9d\xe3\xac\x79\
\x4b\x71\xd2\x38\xbb\x67\x3b\x9a\x14\x81\x2a\x81\x41\x95\x35\xe1\
\x3b\x83\x97\x36\xa5\xce\x37\xdd\x1f\xb8\x30\x76\x69\x26\xf7\x09\
\x54\xc6\x07\xd3\xe2\x0e\xbf\x62\x4f\x90\x4d\x4c\x08\x2c\x6e\x0d\
\x37\x27\x02\x15\xc9\xbf\xdd\xac\xbf\x1c\xbc\x4b\xd2\x2b\x29\x21\
\xb8\x34\xe6\x06\x44\x04\xfe\xb0\xe2\x1c\x2b\xa3\x83\x66\xed\xa9\
\x3e\xa9\xf5\x8f\x99\xf5\x93\x76\x03\x96\xce\x82\x34\xf3\x00\x91\
\xd5\x33\x01\xef\x0c\x25\xbf\x3c\xac\xb3\x4f\xf2\xa7\x8b\x1f\xbb\
\x7d\x3f\x96\x16\x58\x95\x27\x38\x80\x17\xd4\xda\xd2\x61\x1e\xb2\
\xaf\xfb\x8a\x14\xca\xca\x0d\x48\x83\xf0\x93\x91\xcb\x89\x7e\xa8\
\x48\x65\x6c\x70\x26\x46\x94\x31\xd9\xc7\x3b\x61\xe3\x92\xc0\x4a\
\x7f\x29\xea\xf2\x60\x65\x6c\x50\xbe\x52\x9f\x81\x52\xbc\x59\xe9\
\x33\x45\x9d\x6c\x2a\x43\x83\x61\xe1\xcc\xfe\xd9\x72\x20\x4e\x48\
\x8f\xa2\x84\xc0\x42\xc9\xd9\xb9\xa7\x3e\x93\xfa\xa4\x63\xa5\x09\
\x68\xa1\x43\x3b\x25\xf9\xcb\xc3\x8f\xf9\x34\xf2\x65\xd4\xb1\x4a\
\xdf\xe8\x02\xe0\xd7\x69\x1b\x03\x75\xd8\x25\xe9\x50\xac\x08\xc8\
\xd8\xc7\x16\xff\xa3\x18\x6f\x20\xaa\x22\x27\xbf\x85\xba\xff\x84\
\xa3\xb9\xc0\xc7\xe3\xc4\xc5\x6f\x42\xab\x2e\x39\xef\x45\xbc\xd9\
\xab\x57\x76\xc5\x62\xd5\x7c\x21\xf9\x61\xe5\x04\x24\x16\x8e\x7d\
\x6c\xc4\x42\x55\x80\x62\x96\x00\x17\x11\xeb\xfe\x55\xf2\xb7\x84\
\x3d\xfc\x24\x02\xb2\x59\xe8\x30\xa2\x08\x8c\x97\x93\xa3\xda\x3e\
\x85\x28\x25\x80\x7f\x6c\xf3\x4b\xe4\xe4\x67\x8d\x1f\xde\x2a\x4f\
\x63\xee\x15\x50\x99\xa0\xaa\x08\x37\x36\xf6\x92\x1f\x33\x3f\x6c\
\x1a\xab\x31\x1f\x45\xef\x9c\xc2\xd2\x60\x8c\x12\x20\xa6\xf5\xe7\
\xf0\x0e\xd8\xb6\x1c\x88\xb5\x3a\x50\x3b\x85\xa7\x18\x55\x1d\x40\
\x64\xeb\x4f\xb7\x1f\x76\x8d\xdb\x89\x8b\xb7\x3a\x10\x74\x55\x40\
\xdb\x01\xc4\xba\x89\x53\x92\x1f\x7a\x38\x01\x89\x9d\x58\x9b\x85\
\x82\xe6\x8c\x9a\x00\xf8\x0d\x3f\x31\xac\xff\x8c\x4d\x3e\x30\x80\
\x08\x48\x0c\xc5\x98\x44\xc6\x21\x37\x08\x55\x4a\xc9\x2f\x89\x2f\
\x8d\x3f\xed\x8e\xea\x7c\x39\x70\xfb\x84\x2f\x0c\x18\xcb\x12\xc7\
\xda\x0f\x10\x05\x3b\x45\xe8\x37\xa5\x2f\x70\x11\x21\xf9\x1f\x97\
\xfb\x0a\x0d\x52\x11\xdc\x89\x82\xb3\xea\x0a\xbc\xbd\x12\x53\xdf\
\x95\xe3\x59\x7e\x97\xec\x0d\x18\xbc\x21\x59\x29\x04\x63\xe3\xe2\
\x34\xfe\x8a\x3d\xc5\x47\xe9\x9e\x17\xbb\x9c\x1a\x71\x0b\xfb\xe0\
\x31\xad\xd1\x03\x88\x71\x20\xe3\x39\x1b\x7d\x20\x60\x3f\x40\x62\
\x2b\xc6\x49\x3e\x83\xef\x10\x0c\x2a\x00\x7e\xf9\xa4\x51\xbe\x49\
\xf7\x1c\xe3\x05\x0a\x22\x20\x31\xa6\x7d\xc6\x60\xe3\xdd\x5d\x32\
\x0e\x40\x7b\xf6\xef\x5c\xfc\xc7\x3a\xa1\x1c\x4e\x7c\xcc\x69\x72\
\x93\x84\x00\xf8\xd9\xbf\x56\xbe\x39\xc7\x1c\xe5\x05\x8a\x2e\x40\
\x62\xed\x58\xf9\xd7\xd6\x3e\xb7\xcc\x3b\x00\xed\xd9\x7f\x4a\xdd\
\x0f\x91\xfa\x01\xda\x9b\x84\x2e\x4c\x0b\x40\x84\xd9\x7f\xce\x66\
\x1f\x88\x28\x02\x12\x7b\x9a\x93\xcf\x60\x2e\x20\x94\x03\xd0\x9e\
\xfd\xa9\xfb\xc1\x42\x3f\x20\x39\x17\x30\xb8\x00\x44\x98\xfd\xb1\
\xfe\x50\x62\x29\x30\x88\x0b\x08\xe1\x00\x34\x67\xff\x6e\xf9\x61\
\xc9\x0f\xac\x70\xe5\x74\x57\x05\x4e\x4d\x09\x80\x5f\xa3\xd4\x9c\
\xfd\x4f\xe8\xfa\x83\x21\x17\xb0\x50\x2e\x05\xc6\x7d\xf7\x05\x0c\
\xed\x00\x34\x67\xff\x96\xc3\x3d\xc0\xa0\x08\x48\x4c\x6a\xc6\xe5\
\xa9\x09\x01\xf0\x0f\xa0\x34\x8a\x5f\x9c\xc6\x1f\x58\x45\x33\x36\
\x8f\x7c\xee\x45\x77\x00\x9a\xb3\xff\x55\xa1\x4f\xa2\x41\x1a\x2e\
\xa0\x73\xba\xbd\xa9\xd3\xa8\x02\xe0\x0f\xfa\x3c\x52\xfa\xb2\x52\
\x67\x4d\x09\x33\x30\xce\xd4\xe9\x1d\x2d\x3e\x89\xed\x00\x24\xf9\
\xb5\x9e\x8f\xbe\xa6\xf1\x07\x09\xb8\x00\x89\xd1\x6b\xa5\x5f\x37\
\xda\x75\x49\x70\x28\x01\x38\x55\xfa\xa2\x0b\xc7\xb2\x1f\xa4\xc3\
\x95\xa2\x0b\xf8\x18\x45\x00\x7c\x03\x42\xeb\x88\x24\x66\x7f\xc0\
\x05\xbc\x4c\xb3\x4b\x33\x70\x08\x07\xc0\xec\x0f\x60\xc3\x05\x4c\
\x62\x08\x80\x56\xf3\x8f\xd9\x1f\x70\x01\x03\x97\x01\xbd\x04\xc0\
\x9f\x8d\x56\x2b\x7d\x39\x66\x7f\x48\xd9\x05\x68\x50\xfb\x9c\x54\
\x73\x00\x1f\x95\xbe\xd8\x8c\xd9\x1f\x12\x77\x01\x33\x8b\x2e\xa0\
\xaf\x00\xa8\xd9\x7f\xc2\x08\x12\x47\x2b\x86\x8f\x54\x04\x40\xd1\
\xfe\xb7\x3c\xee\x0b\x19\xb8\x00\x89\x61\x8d\x38\xde\xaa\x0c\xe8\
\xe3\x00\xb4\xec\xff\x2d\xe1\x03\xb8\x80\x30\xb9\xd9\x47\x00\x1a\
\x85\x2f\x22\xb5\xd3\x3d\x71\x03\x99\x70\xef\x74\x96\x04\x9b\xa0\
\x02\xa0\xb8\xf9\xe7\x9e\xe6\x1f\x64\x54\x06\x68\x4d\x68\xe3\x4d\
\x37\x05\xed\xea\x00\x68\xfe\x01\xd8\x2e\x69\x9b\x90\x02\x70\xa0\
\xf0\x05\x3a\x9a\x7f\x90\xa1\x0b\x68\x9d\xce\xb1\x61\x1f\x42\x0a\
\x80\x46\xfd\x4f\xed\x0f\x39\xf7\x02\xd2\x74\x00\x7e\x89\x41\xe3\
\xd1\x5f\xba\xff\x40\x19\xb0\x3b\xa3\x4d\x96\x03\x77\x71\x00\x1a\
\xb3\x3f\xf6\x1f\x72\x2e\x03\xe6\x4a\x65\x40\x13\x42\x00\x34\xea\
\xff\x96\x30\x81\xcc\xd1\x88\xf1\x83\x10\x02\xa0\xe1\x00\x3e\x13\
\x1f\x90\x39\x1a\x31\x3e\x6c\x09\xe0\xd7\x16\x35\xea\x7f\x1c\x00\
\xe0\x00\xfa\x53\xbf\xb5\x1f\x60\x5b\x07\xa0\xb1\xf9\xa7\x65\xf3\
\x0f\x14\xd0\x07\x58\x28\x89\xc0\x38\x35\x01\x78\x20\x3c\xa0\x10\
\x1e\x52\x13\x00\x1a\x80\x00\x69\xc5\xfa\x41\x52\x0e\x80\xd7\x7d\
\x41\x41\x65\x40\x3a\x25\x80\x7f\xf9\x47\xe8\x06\x20\x6b\xff\x50\
\x1a\xa1\x63\x7e\xe4\x73\xb7\xb7\x03\x50\x69\x00\x12\x0f\x40\x19\
\xa0\xe7\x02\xac\x09\xc0\x37\xe2\x01\x0a\xe3\x5b\x2a\x02\xa0\xb1\
\xfe\xdf\x11\x0f\x50\x18\x1a\x31\x3f\x48\x09\x10\x7c\x05\x80\x06\
\x20\x94\x86\x52\xcc\x1f\x0c\x21\x00\xa1\xa1\x01\x08\xa5\x12\x2d\
\xf6\xb7\x11\x80\x26\xf0\xb5\xb0\xfb\x0f\x4a\x25\x74\xec\x37\x29\
\x38\x00\x76\x00\x42\xa9\x44\x8b\xfd\x8d\x04\xe0\xdf\x3f\xff\x6a\
\x14\xae\x05\x07\x00\x38\x80\x40\xbc\x96\xc3\xf4\x00\x00\xe8\x01\
\xbc\xc9\x88\x31\x02\x48\x9a\x51\x1f\x01\xe0\x19\x00\x80\xb4\x63\
\x7f\x6c\xbd\x04\x00\x00\xa3\x25\x40\x68\x68\x00\x42\xe9\x2c\x2c\
\x0b\xc0\xbb\xc0\xd7\x41\x03\x10\x4a\x27\x74\x0e\xbc\xeb\x23\x00\
\x35\xe3\x03\x90\x34\x35\x3d\x00\x00\x40\x00\x00\xc0\x96\x00\x74\
\x0c\x05\x14\xce\xa2\x64\x01\xf8\xc1\xf8\x43\xe1\x7c\x2b\x59\x00\
\x00\x00\x01\x00\x00\x04\x00\x00\x8a\x12\x80\x77\x0c\x05\x14\xce\
\xfb\x92\x05\xa0\x66\xfc\xa1\x70\x46\x25\x0b\x00\x00\x20\x00\x00\
\x60\x51\x00\x3a\x6e\x15\x40\xd2\x74\x7d\x04\x20\xf4\x46\x9d\x31\
\xe3\x03\x85\x13\x3a\x07\x7e\x58\x2e\x01\x38\x72\x0c\x4a\x87\x26\
\x20\x00\xd8\x14\x80\xe0\x07\x76\x28\x1d\x3d\x0e\x60\x0e\xa5\xd8\
\x9f\xf7\x11\x00\x8e\xec\x02\x48\x9b\x17\x73\xf8\x37\x43\x17\x28\
\x4d\x90\x96\x71\x1a\x4c\xed\x0f\x63\xcc\x28\xb0\x73\xec\xdb\x15\
\x00\x39\xb6\x78\x69\x53\x42\x5f\x20\x8d\xc0\x81\x58\x8e\xd7\x02\
\x31\x4d\x8a\x91\x42\x4c\xf4\x2a\x01\x34\x38\x20\x0e\xa0\x50\xa2\
\xc5\xfe\x36\x02\x10\x7a\x46\xc1\x01\x00\x0e\x20\x0c\xed\x10\x02\
\x90\x43\x1d\x04\x50\x6a\x0f\xa0\xb7\x00\x04\x7f\x85\x31\x4b\x81\
\x50\x1a\x4a\x31\xff\x30\x84\x00\x68\x2c\x05\xd6\x84\x04\x14\x86\
\x46\xcc\x2f\x86\x10\x00\x8d\x65\x9f\xf7\xc4\x03\x14\x86\x46\xcc\
\xcf\x53\x11\x00\x4a\x00\x28\x8d\x26\x09\x01\xf0\x6b\xcb\xa1\xcb\
\x00\x1a\x81\x50\x1a\xa1\x63\x7e\xe1\x73\xb7\xb7\x03\x50\x71\x01\
\x34\x02\xa1\x14\x62\x3e\x03\xb0\xab\x00\x3c\x28\x5c\x30\x02\x00\
\xd8\xff\xe1\x78\x18\x52\x00\x34\xfa\x00\xec\x08\x84\x52\xd0\x88\
\xf5\x79\x6a\x02\xd0\x2c\xad\x11\xbb\x02\x21\x77\xfb\x3f\x72\x91\
\x1b\x80\x5b\x0b\xc0\xef\xff\xfc\xdd\x39\x9d\xfd\x00\x94\x01\x80\
\xfd\xef\x4f\xe7\x73\x76\x30\x07\x20\xb4\x0a\x17\xfe\x81\xf8\x80\
\xcc\xd1\x88\xf1\x37\x1d\xfb\x2e\x02\x40\x23\x10\x20\x8d\x18\x7f\
\x08\x21\x00\x1a\x0e\xa0\x5e\xd6\x48\xec\x09\x80\x5c\xeb\x7f\x89\
\xed\x5a\xe1\x57\xb5\x83\x0b\x80\x3f\x58\x40\xa3\x0f\xf0\x91\x50\
\x81\x4c\xd1\x88\xed\xc5\x6b\x87\x80\xf4\x75\x00\x5a\x2e\xe0\x88\
\x38\x81\x4c\xd1\x88\xed\x8d\x72\x74\x57\x01\xf8\x4c\x19\x00\xb0\
\x93\xfd\x6f\x94\xec\xff\xe7\x90\x02\xd0\x2a\xdd\xaf\x53\x42\x06\
\xb0\xff\xe1\x72\xb4\xea\xa1\x64\x5f\x9d\xc2\x83\x0c\xcb\xcf\x1f\
\xbf\x7a\x98\x01\x20\xa1\xd9\x5f\x36\xff\x7c\x77\xe1\x8f\x00\x9b\
\x2f\x73\x66\x3f\xa4\x03\xd0\x72\x01\x23\x7a\x01\x90\x59\xed\xaf\
\xb1\xcb\x75\xe3\xdc\xec\xf3\x5e\x80\xdb\xe5\xe7\x4c\xc9\x32\xcd\
\x88\x9d\xad\xeb\xcc\x2f\x81\x7f\xcd\xa1\x1c\x17\xcf\xdd\x36\x59\
\xd2\xde\x6e\xfa\x7f\xdc\xd9\x01\xf8\x25\x86\x4e\xe1\xcb\x34\x34\
\x03\x21\x03\x51\x1e\x3b\x9d\xf3\x2e\xba\x4d\x96\xff\x86\x28\x01\
\x84\xfb\xcc\x94\x13\x20\xf5\x18\xde\x2a\x27\xfb\x0a\xc0\xad\xd2\
\x97\x9a\xf0\x84\x20\x24\x3c\xfb\xd7\x12\xc3\xd6\xec\x7f\x6f\x01\
\x50\x2c\x03\x84\x33\x42\x09\x12\x45\x2b\xf9\xb7\xb2\xff\x43\x38\
\x00\xd5\x32\x00\x17\x00\x09\xce\xfe\x23\x67\xb0\xf9\x37\xa4\x00\
\x5c\x2b\x7d\xb9\x11\x2e\x00\x12\xe4\xcc\xe9\xbd\xf6\x6e\xa6\x2e\
\x00\xfe\xc0\x01\xad\x57\x45\xe3\x02\x80\xd9\xff\x65\xda\xb7\x0e\
\xff\x08\xe5\x00\x70\x01\x00\xf1\x67\xff\x9d\x1a\xf2\x43\x09\x80\
\xf4\x01\xb4\xb6\xeb\xe2\x02\x80\xd9\xff\x39\xf2\xe8\xef\x2c\x9a\
\x00\xf8\xbd\xfa\x5a\xcd\x40\xb9\xb1\x17\x84\x18\x18\xe7\xc2\x72\
\xed\x3f\xb4\x03\x10\xa6\x9a\xd6\xca\xaf\xad\x02\x58\x9c\xfd\x6b\
\xe5\x52\xf5\x3a\xba\x00\xf8\x06\x44\xab\xf8\xa5\x6f\x08\x35\x30\
\x8a\x66\x6c\xde\xef\xd2\xfc\x0b\xe1\x00\xb4\x5d\x40\xc3\x6b\xc4\
\xc0\xe0\xec\x2f\x4f\xfc\x69\xc6\x65\xaf\x06\xfc\xa0\x02\xe0\x9f\
\x0e\xeb\x14\xbf\xfc\x0d\x0d\x41\x30\x94\xfc\x12\x8b\x9f\x14\x7f\
\xe5\xbc\xef\x13\x99\x7b\x01\x2e\x4a\xd3\x05\x68\xd7\x5a\x00\xbf\
\xe2\xcc\xe9\x1c\xf7\x35\xc8\xec\x1f\x44\x00\xfc\x72\x84\xa6\x0b\
\xb8\xe0\x71\x61\x30\x30\xfb\x4b\x0c\x6a\xae\x4e\x75\xbb\x2e\xfd\
\x85\x76\x00\xda\x2e\xe0\xb1\x14\x20\x04\x21\x32\xda\x31\x38\x48\
\x8e\x05\x11\x80\x08\x2e\x60\xbc\x54\xe0\x4b\x62\x10\x22\xcd\xfe\
\x12\x7b\x9a\x2e\x74\x90\xd9\x3f\xa4\x03\x88\xe1\x02\x28\x05\xa0\
\x04\xeb\x3f\x68\x6e\x05\x13\x80\x08\x2e\x40\xb8\x63\x55\x00\x14\
\x93\x5f\x62\xed\x4e\xf9\xd7\x0e\x36\xfb\x87\x76\x00\x31\x5c\x40\
\x4d\x3f\x00\x94\xeb\xfe\x5a\xf9\x77\x9e\x0c\xf9\x8f\x05\x15\x00\
\xaf\x54\xad\xf2\x0d\x3a\x5a\x2a\x33\x4b\x83\x10\x7a\xf6\x97\x18\
\xd3\x3e\xb2\xbe\x1d\xfa\x24\xe6\x3d\x85\x8b\x9e\x46\x18\x9f\x4f\
\xf4\x03\x20\x70\xdd\xff\x29\xc2\xaf\x3e\x1f\xfa\x1f\xac\x94\x6e\
\x98\x58\xa5\x89\xf2\xcd\x2a\xf6\xad\x42\x4a\x87\x50\xce\xfa\xec\
\x41\x4f\xbc\xee\xd7\x78\xbb\xcf\x4b\xf7\xfb\x64\xe8\x7f\xf4\x37\
\xa5\x8b\x9f\x3a\xbd\xb7\xa2\xac\x90\xdf\x25\x2f\xc7\xd8\x2f\x2d\
\x48\x7d\x62\x5e\x32\x57\x07\xe1\x4b\x84\xe4\x5f\x84\x98\xfd\xb5\
\x4a\x80\x55\x40\x5e\x47\x18\xac\xb1\x77\x1f\x00\x43\x39\xd9\x18\
\xa5\xe5\x34\x94\x93\xad\x94\x6f\xa0\x58\xa7\x3a\xd2\x0d\x64\x46\
\x84\x3e\xb1\x2b\xf1\x13\xe3\x20\x9a\x8d\x5f\xf4\x69\xd6\x01\xac\
\x71\x12\x69\xfc\x64\x93\xd0\x84\x30\x86\x1d\x93\x7f\xe2\xe2\x9d\
\x42\x15\x34\x67\x54\x05\xc0\x2f\x61\x5c\x45\xba\x91\x37\x88\x00\
\xec\x98\xfc\xb1\xca\xc8\xab\x6d\x5f\xf4\x61\xdd\x01\x3c\xda\x71\
\xa7\xbf\x43\x70\x5d\x04\x78\xdd\x38\x6c\x9a\xfc\x47\x11\x93\xbf\
\x73\x0a\x4b\xe8\x55\xa4\x1b\xdb\xb8\xf0\xaf\xaf\x7e\x0d\x69\xa6\
\x1c\x86\x56\x56\x48\x3e\xf9\xc7\x2e\x4e\xc7\x7f\x85\xca\xeb\xd7\
\x63\x38\x80\xd8\xa5\xc0\xe3\xf2\x20\x1b\x85\xc0\x70\xf2\x5f\x69\
\x24\x7f\x34\x01\x30\x50\x0a\xac\x44\x80\x72\x00\x5e\xb2\xfd\x31\
\x93\x5f\xba\xfe\xe7\x5a\xbf\x2c\x9a\x00\xf8\x75\xcd\xe3\x88\x63\
\xfd\xf8\x24\x17\x8d\x41\x58\x4b\x7e\x89\x85\xbb\x88\xc9\x2f\xa8\
\xae\x94\xc5\x74\x00\xab\xd7\x8b\x4f\x23\x8f\x3b\xab\x03\x10\xbb\
\xdb\xbf\xe2\x5c\xbb\x37\x55\x19\xb9\xf9\x62\xb9\x9a\xc8\x97\xc1\
\x66\xa1\x72\x93\x5f\xc6\x3d\xf6\xdb\xa6\xe4\x7c\x7f\x75\x47\xbc\
\x67\x64\x0c\x8e\x23\xf6\x03\x56\x5c\xb0\x6d\xb8\xc8\xe4\xbf\x31\
\x90\xfc\x9d\x8b\xb4\x49\xae\x32\x34\x10\xd2\x79\xfd\x6a\xe0\x52\
\xc4\x82\x1d\x96\xf8\x14\x61\x61\x89\xbf\x7a\x58\x2c\xf6\x6a\x50\
\xd4\x65\x69\x2b\x0e\x60\xd5\x0f\x38\x31\x70\x29\x12\x10\xdf\x59\
\x26\xcc\x3a\xf9\x1f\xc7\xd8\x40\xf2\x47\xa9\xfb\x4d\x0a\x80\x17\
\x81\x99\xeb\xf1\xa6\xd3\x01\x91\xd9\xe1\x2b\x27\x0b\x65\x99\xfc\
\x67\xde\x69\x5a\x38\x3b\xf2\x6a\xc8\xf3\xfd\x92\x2e\x01\x7e\x1a\
\x24\x0b\x4d\xc1\x15\xf2\xda\xf3\x13\x4a\x82\x2c\x2c\xbf\xd4\xfb\
\x56\xf6\x7e\x44\x69\xfa\x99\x76\x00\x6b\x1c\xfb\x5a\xdc\x02\x47\
\xde\x0d\x50\x12\xa4\x6d\xf9\xbf\x1a\x4a\x7e\x2b\xe5\xae\x4d\x01\
\xf0\xb3\xad\xdc\x20\x2b\xb3\x6e\xed\x45\xe0\x92\x74\x4a\x2e\xf9\
\x2f\x7d\xf2\xd7\x46\x2e\x69\xd5\xf4\x33\x11\xdb\x95\xf1\xc1\x8b\
\xbd\x27\xfb\x55\xf5\xe6\x61\xa2\x24\x66\xfd\x58\x27\xf8\xbc\x95\
\xfc\x66\x62\xa7\x4a\x60\x20\xc5\xb6\xdd\x19\xbc\x34\xd9\xc1\x78\
\x45\x6f\xc0\x64\xad\x2f\x8d\xbe\x0b\x63\x97\x66\xf2\x29\xd4\x2a\
\x91\x41\x9d\x38\x9b\x2f\xfc\xe8\xdc\xd3\x32\xce\x3d\xa9\x67\x22\
\x4e\x1a\x17\xe7\x65\x1d\x9b\x70\x12\xbb\xe3\x9f\xac\x00\x18\x17\
\x01\xa1\xf5\x03\xdc\x91\x86\x51\x62\xa3\xf6\xb1\xd1\x18\xbd\x44\
\x93\xc9\x9f\x94\x00\x24\x20\x02\x82\x9c\x71\x30\xa5\x2c\x50\xb5\
\xfb\x17\xde\xf2\x3b\x92\x3f\x73\x01\x48\x44\x04\x24\xf9\xaf\xe9\
\x0f\xa8\xd4\xf9\xa7\xce\x56\x83\x38\xa9\xe4\x4f\x52\x00\x12\x11\
\x01\x84\xa0\xec\xc4\x4f\x22\xf9\x93\x15\x80\x84\x44\x00\x21\x18\
\xae\xc6\x9f\x24\x92\xf8\xc9\x24\x7f\xd2\x02\x90\x98\x08\xac\x90\
\xa0\xb8\x66\x0f\xc1\xc6\xe3\x3b\xf6\x49\x3f\x49\xe4\x92\x1f\x5f\
\xe1\x95\x4a\xf2\x27\x2f\x00\x3e\x48\x56\x47\x37\x8f\x12\xba\xec\
\xb9\x77\x05\xf7\xb8\x82\x17\x6d\xfe\x91\x4f\xfc\x94\xb6\x5f\x27\
\x79\xda\x74\x95\x49\xd0\x58\xdc\x31\xb8\x69\xd0\xc8\x1e\x82\x5b\
\xad\x53\x60\x0d\x8f\x61\xb3\xfc\xf1\xd1\xe9\xbf\x44\xb6\xd8\xe4\
\xcf\x46\x00\xd6\x44\xc0\xda\xd6\xcf\x6d\xe8\xd6\xc4\x60\x5e\x48\
\xd2\x8f\xd7\x92\xbe\x4e\xf4\x6b\x24\x7d\x80\x4c\x95\x59\x40\x3d\
\x9e\xf4\xeb\xec\x6e\x08\xd9\x46\x0c\xc4\x11\x7c\x96\x9f\xb9\x94\
\x09\x7e\x7c\x64\x6c\x3e\xf8\x9f\x75\xe2\x5f\x29\xf9\x47\xc5\xb3\
\x12\x80\xb5\x40\x13\x27\x30\xc9\xe8\x2b\x89\x18\x3c\x78\x31\x68\
\x13\x1b\x8b\xc6\x27\xfb\x41\x06\xc2\xbc\xce\x95\xe6\xf9\xfd\x08\
\xc0\xf6\x81\x37\x71\x69\xad\x10\x6c\x6b\x3b\x45\x08\xbe\x89\x5b\
\xb0\x22\x0a\x3e\xd9\x65\x56\x7f\xef\x93\x3d\xc7\x33\x14\x92\xeb\
\xf4\x17\x29\x00\x6b\x35\x66\x8a\xcd\xc1\x5d\x45\x61\xe1\x9d\xc2\
\xc2\xff\xd9\x0d\x2d\x0e\x3e\xc9\x9d\x4f\xee\x91\x9f\xd9\x47\x99\
\x26\xfb\x4b\xf7\x38\xab\x47\xc1\xab\xdc\x47\x2c\xa3\xbe\xc0\x10\
\x33\xd7\xfc\x85\xbf\xfb\xf6\xd3\xdf\xbd\x7f\x41\x30\xc7\x85\x88\
\x68\xd6\xf5\x7e\x91\x02\xb0\x26\x04\x97\xce\xde\x33\xe2\x90\x86\
\x70\xca\x03\x5e\x57\x39\x7e\xb9\xaa\xa4\x91\xf4\x25\xc1\x9d\x4b\
\xbf\xfb\x0c\x58\x7e\x04\xa0\x47\x49\x60\xfd\x11\x52\x88\x4f\x16\
\x5d\x7e\x04\xe0\x75\x21\x90\x9e\x80\xd5\xd3\x63\x20\x1e\x9d\x9f\
\xf5\xdb\x12\xbe\xec\x5e\xa9\xa3\xec\x07\x78\xdf\x3d\x1d\xe2\x01\
\xe0\x7c\x2c\xec\x97\xb4\x2d\xbb\x62\xcc\x71\x03\x50\xee\x49\xcf\
\x08\xc0\x73\x21\xb8\x74\xe9\x3c\x73\x0e\xfd\xc9\xba\xc3\x8f\x00\
\xec\x26\x02\xe2\x02\x3e\x39\x3b\x6f\x91\x81\x30\xcc\xdc\xd3\x8e\
\xbe\xa2\x1f\xc7\x46\x00\x7e\x5d\x16\xc8\x6a\x41\xc3\xdd\xc8\x8a\
\xd6\x45\x7e\x23\x2f\x02\x90\x96\x10\x4c\xbc\x10\xd0\x1f\x48\xbf\
\xce\x3f\x2f\xfd\xdc\x05\x04\x00\x21\x28\x8d\xce\xd7\xf9\x33\x6e\
\x05\x02\x80\x10\x90\xf8\x80\x00\x20\x04\x99\xd7\xf8\xb7\x24\x3e\
\x02\xa0\x25\x04\x8d\xa3\x59\x68\x01\x79\x5a\xef\x9a\x1a\x1f\x01\
\x88\x25\x04\xb5\x17\x82\x14\x0f\xb5\x4c\x15\x59\xc2\x9b\xf9\xc4\
\xef\xb8\x1d\x08\x80\x05\x21\x48\xf5\x58\x6b\x6c\x3e\x02\x00\x01\
\x5c\xc1\xa9\x4b\xfb\xd4\x5b\x2b\xc8\x0c\x7f\x2b\x33\x3e\xb3\x3d\
\x02\x90\xa2\x18\xac\xde\x72\xd3\x20\x06\x5b\x25\x7d\x51\x47\xa5\
\x23\x00\xe5\x88\x41\xea\xe7\xe1\x87\x42\x12\x5d\x8e\x43\xbf\x27\
\xe9\x11\x80\x52\xca\x04\x11\x82\xd5\xb1\xd9\xa5\x35\x10\x17\xee\
\xf9\x3b\x10\xb0\xf7\x08\x40\xf1\xee\xa0\xc9\x58\x10\x3a\x3f\xcb\
\xaf\xde\x73\xc0\x2c\x8f\x00\xc0\x1b\x0e\x61\xec\x3f\x07\x2e\xad\
\x13\x7a\x3b\xff\x79\xf0\x49\x3f\x67\x86\x47\x00\xa0\xbf\x28\x8c\
\xd6\x44\xc1\xc2\xd9\xfc\xad\xff\xf9\xb0\x96\xf4\x73\xde\x7c\x8c\
\x00\x40\x1c\x81\x68\xfc\x7f\xfe\x2c\x0a\xef\xdc\xf6\x4d\x47\x49\
\xe6\x1f\x6b\x7f\x5e\xbd\x80\x64\x81\x7d\x07\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x30\xce\x7f\x02\x0c\x00\x78\x9e\x7d\x93\xa2\x6f\
\xae\x9b\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xd0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x4d\x49\x44\
\x41\x54\x58\x85\xed\xd7\x4d\x4e\xc2\x40\x18\xc6\xf1\xff\x5b\x08\
\x08\xea\x01\xd0\x2b\x88\x09\x5b\xcf\x21\xbb\xca\xd8\x1a\x49\xe0\
\x3e\x62\x42\x42\x69\x49\x97\x78\x0c\xd7\x84\x70\x07\x71\xef\x07\
\x02\x81\xd7\x85\xd4\x10\xc0\xdd\x10\x13\xed\xb3\x9b\xc9\x9b\x79\
\x7e\x93\x6e\x3a\xf0\xdf\x23\x9b\x6b\xcf\x98\x6b\xa0\x01\x94\x81\
\x03\x4b\x3d\x1f\xc0\x48\x44\x5a\x41\x18\x46\x80\xee\x02\x88\x67\
\x4c\x08\xd4\x80\x29\x30\x00\x5e\x2d\x01\x8e\x80\x0a\x90\x07\xba\
\xdd\x28\xba\x49\x10\xdf\x00\xcf\x18\x0f\x08\x04\x1e\xb3\x8b\x45\
\xb5\x1d\xc7\x63\x4b\xe5\x00\xd4\x5d\xb7\x34\x77\x9c\x3e\x22\x17\
\x02\x26\x88\xa2\x1e\x80\xb3\x36\xd3\x00\xa6\x4b\x91\x4b\xdb\xe5\
\x00\xed\x38\x1e\x4b\x36\x5b\x05\x66\x2a\xd2\x4c\xf6\xd7\x01\x67\
\xc0\x20\x0c\xc3\x67\xdb\xe5\x49\x82\x20\x78\x42\x64\x80\x6a\x79\
\x17\xa0\x80\xea\xfb\xbe\xca\xbf\xb3\x5c\xbe\x01\xc5\x5d\x80\x5f\
\x49\x0a\x48\x01\x29\x20\x05\xa4\x80\x14\x90\x02\x52\xc0\x3a\x60\
\x82\x48\xf1\xc7\x49\x6b\x8d\xce\x21\x30\xd9\x02\x28\x8c\x80\x4a\
\xdd\x75\x4b\xfb\xea\xae\xd5\x6a\xa7\xa8\x56\x80\xe1\x16\xc0\x11\
\xb9\x07\xf2\xf3\x4c\xe6\xc1\xf7\xfd\x93\x7d\x94\x67\x44\xfa\x40\
\x4e\x45\x5a\xc9\xfe\xe6\xc3\xa4\x03\x78\xc0\x6c\xf5\xf7\xfa\x62\
\xa5\x5d\xe4\x78\x75\xf3\x9c\x42\x27\x8c\xa2\x5b\x36\x1f\x26\xc9\
\xa8\x6f\xcc\x95\x8a\x34\x51\x3d\x07\x0a\x56\x00\x5f\xdf\x7c\x88\
\xea\x5d\xb7\xd7\x8b\x2d\x9d\xf9\x47\xf2\x09\x3e\x70\x64\x41\x95\
\x87\xdf\x69\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xdc\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x40\x08\x06\x00\x00\x00\x13\x7d\xf7\x96\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xb3\x00\x79\x00\x79\xdc\xdd\
\x53\xfc\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x19\x10\x2d\x19\xaf\x4a\xeb\xd0\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x00\x40\x49\x44\x41\x54\x58\xc3\xed\xce\x31\
\x0a\x00\x20\x0c\x03\x40\xf5\xa3\x7d\x5b\x5f\xaa\x53\xc1\xc9\xc5\
\x45\xe4\x32\x05\x1a\x8e\xb6\x76\x99\x5e\x25\x22\x66\xf5\xcc\xec\
\xfb\xe8\x74\x1b\xb7\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\xf0\x36\xf0\x41\x16\x0b\x42\x08\x78\x15\x57\x44\xa2\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xb6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x11\x08\x06\x00\x00\x00\xc7\x78\x6c\x30\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\
\x0b\x2c\x0d\x1f\x43\xaa\xe1\x00\x00\x00\x36\x49\x44\x41\x54\x38\
\xcb\x63\x60\x20\x01\x2c\x5a\xb4\xe8\xff\xa2\x45\x8b\xfe\x93\xa2\
\x87\x89\x81\xc6\x60\xd4\x82\x11\x60\x01\x23\xa9\xc9\x74\xd0\xf9\
\x80\x85\x1c\x4d\x71\x71\x71\x8c\xa3\xa9\x68\xd4\x82\x61\x64\x01\
\x00\x31\xb5\x09\xec\x1f\x4b\xb4\x15\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x00\xa0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x14\x1f\x0d\xfc\
\x52\x2b\x9c\x00\x00\x00\x24\x49\x44\x41\x54\x08\xd7\x63\x60\x40\
\x05\x73\x3e\xc0\x58\x4c\xc8\x5c\x26\x64\x59\x26\x64\xc5\x70\x4e\
\x8a\x00\x9c\x93\x22\x80\x61\x1a\x0a\x00\x00\x29\x95\x08\xaf\x88\
\xac\xba\x34\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xd0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x4d\x49\x44\
\x41\x54\x58\x85\xed\x97\x3b\x4e\xc3\x40\x14\x00\xe7\x45\x51\xc2\
\xf7\x00\x81\x2b\x00\x52\xee\x42\xca\x8d\xed\x58\x14\x70\x1f\x42\
\x65\x99\x8d\x29\xc3\x1d\x68\xa8\xa3\x28\x77\x20\xf4\x7c\x42\x3e\
\xf2\xa3\x70\x8c\x8c\x4c\xb9\x16\x12\x78\x2a\x5b\x5a\x79\x66\x25\
\x17\xef\xc1\x7f\x47\x8a\x2f\xaa\x2a\x36\x8e\xfd\x86\xc8\xa5\xc2\
\x29\xb0\xe3\xc8\xf3\x21\x30\x03\x86\xc6\xf7\xad\x88\x68\x29\x40\
\x55\x25\x89\xe3\x5b\x15\xe9\x03\x4b\x60\x82\xc8\xab\x13\xbd\xea\
\x01\xd0\x05\xda\x88\xc4\x7d\xcf\x0b\xf3\x88\x66\x7e\xc6\xc6\xb1\
\x2f\x99\xfc\xb1\xd1\x6c\xf6\x8c\x31\x73\x27\xf2\x2d\x49\x92\x74\
\xd2\xcd\x66\x8c\x6a\x60\xad\x7d\x00\x46\x00\x8d\xfc\x40\x43\xe4\
\x12\x58\xa6\x70\xee\x5a\x0e\x60\x8c\x99\x6f\xd2\xb4\x07\xac\x44\
\xf5\xea\xcb\x9b\x3f\x28\x9c\x00\x93\x20\x08\x9e\x5d\xcb\x73\xc2\
\x30\x7c\x02\x26\x64\xff\xd7\xf7\x00\x60\x17\x78\xaf\x4a\x5e\xe0\
\x0d\xd8\xfb\x29\xe0\x57\xa8\x03\xea\x80\x3a\xa0\x0e\xa8\x03\xea\
\x80\x3a\xa0\x0e\x28\x06\x2c\x28\x4c\x2a\x15\xb2\xbf\x75\x95\x02\
\x66\x40\x37\x49\x92\x4e\x55\x66\x6b\xed\x31\xd9\x78\x3e\x2d\x05\
\x08\xdc\x00\xed\x74\xbd\xbe\x8f\xa2\xe8\xa8\x12\x79\x9a\x8e\x81\
\x96\xc0\xb0\xe0\xcd\x50\x55\x19\x59\x1b\xa1\x1a\x00\x2b\xb2\xc5\
\xe4\xc5\x89\x5d\xf5\x90\xec\xe6\x2d\x85\xc8\xf3\xfd\x8b\x7c\x31\
\x29\xaf\x66\xd6\x9a\xed\xdc\x7e\x46\x36\x29\xbb\x60\x01\x4c\x51\
\xbd\xf6\x06\x83\x3b\x47\xdf\xfc\x23\x7c\x02\x90\xc4\x75\x30\xa3\
\x38\xd1\xd4\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x42\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xb3\x00\x79\x00\x79\xdc\xdd\
\x53\xfc\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x19\x10\x17\x3b\x5f\x83\x74\x4d\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x01\xa6\x49\x44\x41\x54\x78\xda\xed\x9b\xdb\
\x0e\xc3\x20\x0c\x43\x9b\x68\xff\xdd\xf6\xcb\xb7\xb7\x69\x9a\x76\
\x49\x4b\xec\x98\x42\x5e\x37\x51\x7c\x70\x28\x85\xb0\x2c\x33\x66\
\xcc\x18\x39\x8c\xf9\xb0\x6d\xdb\xee\xc1\xff\xd9\x25\x00\x44\x05\
\x57\x02\x31\x55\xd1\x2c\x18\xd6\x8b\x70\x14\x08\xeb\x51\x7c\x26\
\x04\xeb\x51\x78\x26\x08\xeb\x5d\x7c\x2b\x04\xeb\x5d\x78\x2b\x08\
\xbb\x92\xf8\x33\x10\xec\x6a\xe2\x8f\x42\xb8\x55\x76\x72\x5d\xd7\
\x67\x27\xf7\x7d\x2f\x01\x6c\x55\xa3\xff\x2a\x1e\x05\x21\xe2\x02\
\x53\x11\x5f\x05\xc1\x2b\x6d\x7f\xe6\x77\x6a\x0a\x64\x8f\xfe\x11\
\x71\x99\x4e\xf8\xe5\x02\x53\x14\xcf\x84\xe0\xd5\xb6\xff\x25\x92\
\x91\x0e\x86\x1e\xfd\xa8\x78\xc6\xc4\xf8\xc9\x05\xae\x32\xf2\x55\
\x4e\x70\x25\xdb\x57\x40\x30\x84\xfd\x5b\xed\x8c\x4c\x87\xf7\x34\
\x70\x85\x91\xaf\x74\x82\xab\x89\x67\x43\x70\x45\xf1\x4c\x08\x96\
\x91\xff\xe8\x57\x58\x76\xfb\xaf\xf3\x80\x2b\x8e\x3c\xd3\x09\xae\
\x2e\x1e\x0d\xc1\x7b\x10\x8f\x84\xe0\xcc\x4e\x2a\xb6\x4f\x5d\x07\
\x28\xb6\xef\x6a\x39\xc9\x4e\x3b\x57\xcb\x49\xf6\x9c\xe3\xc8\x9c\
\xcc\x82\x80\x9c\x70\x53\xe6\x00\x24\x04\xf4\xdb\x26\xf5\x6b\x30\
\xbb\xb3\x08\xf1\xd0\xaf\xc1\x4c\x27\xb0\xd6\x19\xd4\x75\x40\x14\
\x02\x73\x91\x05\xd9\x11\x6a\x81\xc0\x5e\x61\x42\x37\x45\x8f\x8a\
\x41\x8b\xa7\x6f\x8a\x1e\x71\x42\xc5\xb7\x05\x1c\x40\x14\x42\x95\
\xf8\xaf\x29\x90\x99\x06\x2d\xeb\x81\xcb\x9c\x0c\x9d\x11\xc3\xaa\
\x17\xa0\x1e\x8e\x46\x9d\xc0\x3c\x22\xa7\x1f\x8f\xff\x13\xc7\xae\
\x14\x29\x29\x90\xf8\xe6\x04\x84\xf8\x7f\x05\x12\x65\x25\x32\xef\
\x10\x2a\xc4\x87\x01\x20\x21\xa0\x22\x5a\x25\xe6\xcb\xe0\x31\x0b\
\x25\x4f\x34\x3e\x6e\xa9\xac\x32\x08\x5a\xb1\xb4\x22\x84\x92\x72\
\x79\x15\x08\xad\x97\x26\xe6\x95\x19\x40\xc7\xc6\xbc\x34\x85\x84\
\xd1\xd5\xb5\xb9\x0c\x20\xcc\x8b\x93\x33\x46\x8f\x07\x53\x21\x72\
\xe7\x17\x36\x2b\x63\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x03\xac\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x03\x29\x49\x44\
\x41\x54\x58\x85\xed\x95\x4f\x68\x5c\x55\x14\xc6\x7f\xe7\x65\x88\
\x64\xda\xc6\xbd\xa9\x94\x48\x57\xb6\x91\x3a\x28\xae\xd3\x4d\xc5\
\x0a\x4d\x40\x66\x63\xda\x37\x2f\x25\xcd\x46\x07\xd1\x24\x8e\xae\
\xb2\x50\xa8\x49\xdd\x64\x99\xc2\xbc\x19\xd3\x6e\x9e\x20\x53\xc1\
\xe2\x9f\x85\x75\x1b\xfc\xd3\xa4\x15\x91\x52\x4a\x70\x4a\xd7\x25\
\x33\x24\xcd\xe0\xfb\x5c\xbc\x37\x4d\x90\xbc\x37\x1d\xe9\xce\xf9\
\x56\xf7\xcf\x77\xce\xfd\xee\x39\xe7\x9e\x0b\x3d\xf4\xf0\x7f\x87\
\x75\x43\x0e\x82\xa0\x7f\xab\xd1\x18\x97\xd9\x98\x41\x0e\x18\x8a\
\xb7\xea\x98\xfd\x2a\xa8\x65\xb3\xd9\x5a\x3e\x9f\xdf\x79\xea\x02\
\xaa\xe5\xf2\x5b\x98\x2d\x00\xc3\x06\xb7\x04\x37\x64\x56\x07\x70\
\xc2\x70\x08\xb3\x51\xc1\x08\x70\xd7\x60\xee\x9c\xe7\x7d\xf5\x54\
\x04\x04\x41\xd0\xb7\xd5\x6c\x2e\x00\xef\x1b\x7c\x6b\x61\x58\x3a\
\x7b\xfe\xfc\xda\x7e\x5c\xdf\xf7\x4f\x38\x70\x11\x38\x05\x2c\xde\
\xdb\xd8\x28\xcd\xcf\xcf\x87\x69\xfe\x33\x9d\x04\xc4\x87\xbf\x27\
\x69\xd6\x9d\x9c\xbc\x94\xc6\xf5\x3c\xef\x26\xf0\x7a\xd5\xf7\x67\
\x81\x8b\xc3\x47\x8e\x00\xcc\xa5\xd9\xa4\x46\x20\x0e\xfb\x97\x66\
\x36\x73\xae\x50\xf8\x1c\x60\x69\x69\xe9\x99\xc1\xc1\xc1\x69\x93\
\xde\x26\x0a\x39\x26\xad\xcb\xec\xea\xc3\xcd\xcd\xe5\x62\xb1\xf8\
\x08\xa0\x52\xa9\xcc\x99\xf4\x99\x03\xe3\x67\x3d\xaf\xd6\xb5\x80\
\x20\x08\xfa\xb7\x9b\xcd\x3f\x24\xfd\xe9\x4e\x4e\xbe\x01\x70\xe5\
\xf2\xe5\xc3\x61\x26\x73\x3d\xce\x75\x08\x38\x31\x3d\x1a\x9b\xad\
\xf7\xb5\x5a\xa7\x27\xa6\xa6\xea\x00\x15\xdf\xff\xde\xcc\x86\x07\
\xb2\xd9\x63\x49\x85\xe9\xec\xb7\x08\xb0\xd5\x68\x8c\x0b\x5e\x70\
\xa4\x8f\xda\x37\x0f\x33\x99\xeb\x32\x3b\xbe\x8f\x6d\x7b\x3c\xf2\
\x77\x26\xf3\x4d\x10\x04\xfd\x00\xe6\x38\x1f\x22\x1d\xdd\x6e\x36\
\xcf\x24\x9d\x93\x28\x40\x66\x63\xc0\x5a\xbb\xe0\x9e\x3d\x74\xe8\
\x82\x60\x04\x29\x39\x6d\xd1\xde\x4b\x5b\x8d\xc6\x05\x00\xd7\x75\
\x7f\xc3\xec\x36\xd0\xbd\x00\x83\x9c\x49\x3f\xed\x59\x9a\x20\x0a\
\x75\x3a\xa4\xd0\x22\x6e\x7b\xfe\xa3\xe0\x95\xae\x05\x60\xf6\x5c\
\xfb\x9d\xc7\x38\x96\xca\xdf\xb5\x73\x14\x71\xdb\xb8\x8f\xd9\x50\
\x12\x3d\xd5\xa1\xcc\xba\xea\x94\xfb\xea\x01\x43\x4a\x8c\x5c\xb2\
\x00\xe9\x81\x49\x87\xf7\xac\xfc\xce\x13\xa6\x40\x70\xfb\xf1\x34\
\xba\xfd\x83\xee\x05\x98\xfd\x8c\xd9\xe8\x9e\x95\x2b\xa9\xfc\x5d\
\x3b\xc7\xe0\xea\xae\x1e\x9d\x04\x56\xbb\x16\x20\xa8\x21\x1d\xf7\
\x7d\xff\x04\xc0\xc3\xcd\xcd\x65\xcc\xd6\x31\x53\xca\xe1\x02\x6e\
\x0e\x1c\x3c\xb8\x0c\xb0\x52\x2e\xe7\x0c\x5e\x44\xfa\xba\x6b\x01\
\xd9\x6c\xb6\x06\xdc\x8d\x7b\x3b\xc5\x62\xf1\x51\x5f\xab\x75\x1a\
\xb8\x15\x53\x76\xd3\xd1\xce\xb1\xb4\x86\xe3\xbc\x99\xcf\xe7\x77\
\x24\x59\x18\x7d\x5e\x77\xb6\x5b\xad\x6b\x5d\x0b\xc8\xe7\xf3\x3b\
\x38\xce\x2c\x70\x2a\xee\xed\x4c\x4c\x4d\xd5\x07\xb2\xd9\x57\x91\
\xde\x95\xb4\x0a\x34\x81\xa6\x60\xd5\xcc\xde\x19\x38\x70\xe0\x35\
\xd7\x75\xef\x03\x54\x7d\xbf\x04\x9c\x94\xd9\xcc\xf4\xf4\x74\x2b\
\xe9\x9c\x8e\x55\x5e\xf5\xfd\x05\xe0\x03\xa0\xe4\x7a\xde\x62\x27\
\xbe\x24\xab\xfa\x7e\xc9\xcc\x3e\x01\x16\x5d\xcf\x2b\xa5\xf1\x3b\
\x16\xd5\xbd\x8d\x8d\x92\xa4\x4b\xc0\x42\xd5\xf7\xbf\xab\x56\xab\
\x2f\x27\x71\x57\xca\xe5\xdc\x17\x95\xca\x0f\x66\xf6\x29\xd1\x77\
\xfc\x71\x27\xff\x4f\xfc\xce\x57\x7c\x7f\x2c\x34\x5b\x44\x3a\x1a\
\xb7\xd7\x1b\x82\xbf\x62\x27\xcf\x23\x8d\x12\x35\xa0\x3b\x32\x9b\
\x29\x14\x0a\x89\x85\xf7\x9f\x04\xc0\xe3\x1f\xf2\x8c\x60\x0c\xc8\
\x61\x16\xf5\x09\xa9\x6e\xf0\x8b\xa4\xda\x76\xab\x75\x2d\x2d\xe7\
\x3d\xf4\xd0\xc3\xbf\xf1\x0f\x78\xe5\x4e\xf2\x11\xe4\x69\x42\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x4a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x19\x10\x14\x1a\x38\xc7\x37\xd0\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x01\xae\x49\x44\x41\x54\x78\xda\xed\x9b\x49\
\x92\xc3\x20\x0c\x45\x23\x5d\xdc\xf6\xc9\xd3\xbb\xae\x54\x06\x26\
\xe9\x7f\x09\x8c\xd6\x5d\x32\xef\x21\x68\x20\xf0\x78\xec\xd8\xb1\
\xe3\xce\x21\xcc\x8f\x9d\xe7\xf9\x6c\xfc\x3b\x59\x42\x40\x2b\x70\
\xa4\x10\xc9\x0a\xcd\x92\x21\xb3\x80\xa3\x44\xc8\x8c\xf0\x9e\x12\
\x64\x46\x70\x4f\x11\x32\x3b\xbc\x55\x82\xcc\x0e\x6e\x15\x21\x2b\
\xc1\x8f\x48\x90\xd5\xe0\x7b\x25\xe8\x5e\x0a\x2f\xd8\xfb\x3d\x55\
\x20\x56\xf8\xe3\x38\xfe\x73\x5c\xd7\x45\x11\xf5\xfa\xcd\xda\x77\
\x6b\x12\xd4\xbb\x61\xef\x8d\x43\xc3\x5b\x43\x11\xa5\x8f\x92\x30\
\x92\xb7\xc6\xa0\xa8\x71\xef\x2d\xc1\x92\xaf\xc4\x62\x1e\x02\xa5\
\xf1\xe7\x25\xa1\x94\xc7\x3a\xef\x88\x57\xef\xa3\x1a\xe9\x99\xf7\
\xdb\x84\xe8\x36\x09\x22\x2a\x01\xd9\xf3\x90\xff\x02\x9e\x12\x18\
\xf0\x5f\x87\x80\xc7\xa2\xc7\xda\x78\x24\xfc\xfb\x30\x80\x2c\x85\
\x2d\x95\xc0\xea\x79\xf8\x5e\x60\x44\x02\x1b\x1e\xbe\x19\xea\x91\
\x10\x01\xff\x31\x07\xa0\x36\x3d\x35\x38\x36\xfc\xeb\x3c\x40\xd9\
\x0e\x8f\xce\x09\x8c\xcd\x15\xed\x3c\xa0\x17\x86\xb5\xb3\xa4\x1e\
\x88\xb4\x42\xb1\xe0\xe9\x02\x5a\xe0\x98\xf0\x21\x02\x2c\xeb\x80\
\xe9\x05\xb4\xc2\x31\x25\x68\x36\x78\xb6\x04\x8d\x86\x67\x9c\x27\
\x84\x0a\x68\x81\x8f\x94\x00\xd9\x0d\x8e\xf6\x3c\x63\x51\x44\xd9\
\x0d\x8e\xc2\x44\x54\x82\x66\x1a\xf3\x11\x12\x34\x13\x7c\x84\x04\
\xb7\x43\x51\xc4\x18\xf6\xce\x07\x3d\x14\x45\x4c\x60\x8c\x4a\xd0\
\xac\xf0\x2c\x09\x52\x28\x97\x67\x34\xbc\xe7\x77\x7e\xfd\x48\x1a\
\x72\x26\x98\x21\x5f\x55\x80\xe5\xe6\x15\xaa\xb1\xa3\x79\x4b\x2c\
\x9a\xbd\xe7\xd1\xf9\xcd\x17\x24\xb2\x47\xad\x92\xf7\x15\x99\x8e\
\x64\xfb\x96\xd8\x8a\xb1\x2f\x4a\x0e\x24\xbf\xef\x55\xd9\xcc\x22\
\x68\x97\xa5\x33\x4a\x08\xb9\x2e\x9f\x45\x82\xf5\xd1\xc4\x7e\x32\
\x03\x68\xd8\x3d\x1f\x4d\x21\x65\x4c\xf5\x6c\xce\x43\x08\xf3\xe1\
\xe4\x8e\xbb\xc7\x1f\xfe\x88\x5a\xe2\xcd\xef\x1c\x49\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x01\xed\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x6a\x49\x44\
\x41\x54\x58\x85\xed\x97\xcb\x4e\xc2\x40\x14\x86\xbf\x43\x08\x78\
\x7d\x00\xf4\x15\xd4\x84\x77\x91\x65\x69\x0b\x71\xa1\xef\x23\xae\
\x9a\x71\xa8\x4b\x7c\x07\x37\xae\x09\xe1\x1d\xc4\xbd\x17\xe4\x92\
\x1e\x17\xa5\xa6\x06\xd8\x98\x21\x18\xed\xbf\x9a\x76\x26\xfd\xbe\
\x4e\xa6\xcd\x39\xf0\xdf\x23\xf9\x0b\x55\x15\x6b\x4c\x50\x12\xb9\
\x54\x38\x05\x76\x1c\x71\x3e\x04\x86\x40\xc7\x0b\x02\x2b\x22\xba\
\x24\xa0\xaa\x12\x1b\x73\xab\x22\x4d\x60\x02\xf4\x11\x79\x75\x82\
\x57\x3d\x00\xea\x40\x15\x11\xd3\xf4\xfd\x76\x26\x51\xce\xd6\x58\
\x63\x02\x49\xe1\x8f\xa5\x72\xb9\xe1\x79\xde\xc8\x09\x7c\x91\x38\
\x8e\x6b\xc9\x7c\xde\x43\x35\xb4\xd6\x3e\x00\x5d\x80\x52\xb6\xa0\
\x24\x72\x09\x4c\x12\x38\x77\x0d\x07\xf0\x3c\x6f\x34\x4f\x92\x06\
\x30\x15\xd5\xab\x2f\x6e\x36\x50\x38\x01\xfa\x61\x18\x3e\xbb\x86\
\x67\x69\xb7\xdb\x4f\x40\x9f\xf4\x7c\x7d\x17\x00\x76\x81\xf7\x4d\
\xc1\x73\x79\x03\xf6\x56\x09\x6c\x25\x85\xc0\xd6\x05\xca\xeb\x26\
\xac\x31\xba\x6e\xee\x27\xf1\xc3\x50\x56\xdd\xdf\xfa\x0e\x14\x02\
\x85\x40\x21\xb0\xf6\x3f\xb0\xee\xbb\x75\x9d\xad\xef\x40\x21\xf0\
\xab\x04\xc6\xe4\x2a\x95\x0d\x66\x7f\xc1\x5a\x12\x18\x02\xf5\x38\
\x8e\x6b\x9b\x22\x5b\x6b\x8f\x49\xcb\xf3\xc1\x92\x80\xc0\x0d\x50\
\x4d\x66\xb3\xfb\x28\x8a\x8e\x36\x02\x4f\x92\x1e\x50\x11\xe8\xe4\
\xb8\x69\x54\x55\xba\xd6\x46\xa8\x86\xc0\x94\xb4\x31\x79\x71\x42\
\x57\x3d\x24\x7d\xf3\x8a\x42\xe4\x07\xc1\x45\xd6\x98\x2c\xb7\x66\
\xd6\x7a\x8b\xba\xfd\x8c\xb4\x52\x76\x91\x31\x30\x40\xf5\xda\x6f\
\xb5\xee\x1c\x3d\xf3\x8f\xe4\x13\xfb\x36\x7a\x56\x11\xde\xcf\xd8\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xa0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\x9c\x53\x34\xfc\x5d\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x0b\x1b\x29\xb3\
\x47\xee\x04\x00\x00\x00\x24\x49\x44\x41\x54\x08\xd7\x63\x60\x40\
\x05\x73\x3e\xc0\x58\x4c\xc8\x5c\x26\x64\x59\x26\x64\xc5\x70\x4e\
\x8a\x00\x9c\x93\x22\x80\x61\x1a\x0a\x00\x00\x29\x95\x08\xaf\x88\
\xac\xba\x34\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xa6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x06\x00\x00\x00\x09\x08\x04\x00\x00\x00\xbb\x93\x95\x16\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x14\x1d\x00\xb0\
\xd5\x35\xa3\x00\x00\x00\x2a\x49\x44\x41\x54\x08\xd7\x63\x60\xc0\
\x06\xfe\x9f\x67\x60\x60\x42\x30\xa1\x1c\x08\x93\x81\x81\x09\xc1\
\x64\x60\x60\x62\x60\x60\x34\x44\xe2\x20\x73\x19\x90\x8d\x40\x02\
\x00\x64\x40\x09\x75\x86\xb3\xad\x9c\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x0c\xfa\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x01\x00\x00\x00\x01\x00\x08\x06\x00\x00\x00\x5c\x72\xa8\x66\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x0c\x9c\x49\x44\x41\x54\x78\xda\xec\
\xdd\xdd\x71\x1b\x47\x16\x86\xe1\x16\x4a\x01\x30\x04\x30\x03\xf3\
\x6a\xaf\xb6\x0a\xca\x40\x21\xd0\x11\x58\xda\x04\x48\x26\xb0\x94\
\x22\x10\x43\x50\x06\x62\x95\xaf\xf6\x4a\xce\x40\xda\x0c\x36\x84\
\x45\xab\xc6\x32\x25\x13\x24\x7e\x4e\xcf\xf4\xcf\xf3\x56\xb1\x28\
\x9b\xe0\x00\x18\x74\x9f\xf3\xcd\xf4\xcc\xcb\x94\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\xd0\x21\xff\xfc\xf7\x7f\xde\xe4\x2f\x7b\x62\
\x4c\x5e\xd8\x05\x43\x4f\xfe\xb3\xed\xb7\x2f\xd3\x7f\x9e\xff\xfe\
\xaf\x7f\xfc\xcf\x5e\x19\x8b\x95\x5d\x30\x34\x57\xdb\xaf\xb3\xe9\
\xeb\xca\xee\x90\x00\x30\x4e\xf7\x5f\x3f\xe8\xfe\xe9\x41\x0a\xf8\
\x6a\xef\x48\x00\xe8\x9f\x0f\x7b\xfe\x3f\x28\x00\xe8\xac\xfb\xbf\
\xde\x7e\xdb\x3c\xf2\xa3\xcd\xf4\x33\x28\x00\xe8\x98\xdb\x23\x7f\
\x06\x05\x00\x8d\x77\xff\xeb\xed\xb7\xf5\x13\x0f\x59\x4f\x8f\xc1\
\x00\x38\x09\x38\xd6\xe4\xff\x73\xd9\xef\xec\x99\x87\xe6\xe5\x40\
\xcb\x82\x12\x00\x3a\x8c\xfe\x67\x7b\x3c\xee\xcc\xa1\x80\x04\x80\
\xbe\xba\xff\x66\xfb\xed\xd3\x81\xbf\xf6\x6a\x9b\x02\xee\xed\x3d\
\x09\x00\xed\x73\x35\xd3\xef\x40\x01\x40\x65\xdd\xff\x32\x3d\xbe\
\xec\xf7\x1c\x9b\xe9\x77\xe1\x10\x00\x8d\x4e\xfe\x7c\x3c\xff\x39\
\x3d\x7d\xe6\xff\x29\xbe\x6e\xbf\x2e\x9c\x10\x94\x00\xd0\x26\x6f\
\x4e\x98\xfc\x69\xfa\x5d\x77\x0b\x4a\x00\x68\xb0\xfb\xe7\xc9\xfb\
\x25\x68\x73\xee\x13\x90\x00\xd0\x18\xb7\x95\x6e\x0b\x12\x00\x0a\
\x77\xff\x4d\x3a\x7c\xd9\xef\x39\x2c\x0b\x4a\x00\x18\xb0\xfb\x4b\
\x01\x0a\x00\x1a\xea\xfe\xf9\xa4\xdd\x2f\x05\x36\xfd\x0b\x7d\x98\
\x43\x00\xd4\x3d\xf9\xf7\xbd\xde\xff\x58\xdc\x27\x20\x01\xa0\x62\
\xae\x0a\x4e\xfe\x94\xe8\xc3\x24\x00\x54\xdb\xfd\xd7\x29\x6e\xd9\
\xef\x39\x2c\x0b\x4a\x00\xa8\x8c\x0f\x9d\x3e\x17\x14\x00\x3c\xd3\
\xfd\x77\x69\xbe\x4a\x41\x1f\xa6\x00\xa0\x22\x6e\x07\x79\x4e\x28\
\x00\xf8\xa9\xfb\x5f\xa7\xd3\xae\xf7\x3f\x16\xfa\xb0\xc6\x71\x12\
\xb0\xfd\xc9\x5f\x7a\xd9\xef\x39\x2c\x0b\x4a\x00\x58\x38\xfa\x9f\
\x2d\xf8\xfc\xf4\x61\x12\x00\x16\xea\xfe\x9b\x14\x7f\xbd\xff\xb1\
\xb8\x4f\x40\x02\xc0\xcc\x5c\x79\x2d\x50\x00\xc6\xec\xfe\x97\x29\
\x66\xd9\xef\x6e\xfa\x3a\x15\xfa\x30\x87\x00\x98\x69\xf2\x9f\xaa\
\xf9\xfa\x93\x7c\xe2\xee\x62\xfa\xf7\xe7\x80\x73\x09\x5f\x13\x7d\
\x98\x04\x80\xe2\xbc\x49\x31\xcb\x7e\xef\xf3\xe5\xbc\xd3\x25\xbd\
\xef\x03\xb6\xb7\x4e\xf4\x61\x0a\x00\x8a\x76\xff\x75\xd0\xf1\x76\
\x9e\xf4\xef\x1e\xfc\xf7\xbb\xe9\xff\x9d\x7c\x2e\x60\x7a\x8d\x50\
\x00\x50\x80\xa8\x25\xb7\xb7\x0f\xa3\xfa\xf4\xef\xb7\x95\xbd\x46\
\x38\x07\x80\x07\xdd\x7f\x93\x62\x96\xfd\xee\xb7\x13\xfe\xd5\x8e\
\xe7\xc8\xdb\xdf\x04\x3c\x87\x65\x41\x09\x00\xb5\x76\xff\x23\x7f\
\x26\x05\x28\x00\x58\xa8\xfb\x47\x69\xbe\xee\xb6\x9d\xf9\x8f\x5d\
\x3f\x9c\x7e\x76\x17\xf0\x3c\xf4\x61\x0a\x00\x82\x26\x7f\x94\x85\
\x67\xdf\xe3\xfc\xb7\xd3\x63\x4f\xe5\x6a\x7a\xed\x50\x00\x70\xca\
\x44\x4a\x31\xd7\xfb\xdf\xec\xb3\x46\x3f\x3d\xe6\x26\xe0\xf9\xe8\
\xc3\x2a\xc7\x49\xc0\xfa\xbb\xff\x3a\xc5\x68\xbe\xf2\x7a\xff\xf9\
\x81\xcf\x9d\x9f\x77\x1d\xf0\xdc\xf4\x61\x12\x00\x8e\x24\x4a\xbd\
\xf5\xeb\x4c\xbf\x53\xf2\x3d\x40\x01\x18\xaa\xfb\x47\x69\xbe\xee\
\x8f\x59\x92\x9b\x7e\xe7\x3e\xe0\xf9\xe9\xc3\x14\x00\x1c\x41\xd4\
\x52\xda\xaf\x0b\xfd\x6e\x89\xf7\x02\x05\x60\x88\xee\x7f\x1d\x74\
\xfc\x7d\x73\xca\xf1\xf7\xf4\xbb\x11\x27\x04\xe9\xc3\x14\x00\xec\
\x39\xf9\xf3\xd9\xf3\xdf\x02\x36\x95\xcf\xe6\xbf\x0b\xd8\xce\xbb\
\x14\xb3\x2c\xf8\x9b\x65\x41\x05\x00\xfb\xc5\xe5\x88\x89\xf2\x36\
\xe2\xd6\xdc\xc0\xfb\x04\xe8\xc3\x2a\xc3\x32\x60\x7d\xdd\x7f\x93\
\x62\xae\xf7\xff\x63\x3b\x71\x2f\x82\x5f\x5b\x76\x06\x44\x5c\x8d\
\xe8\x3e\x01\x09\x00\x3b\x88\xba\x70\xe6\x6d\x81\xd7\xf6\xb6\xb2\
\xf7\x08\x05\xa0\xab\xee\x7f\x99\x82\x34\x5f\x25\x3a\xec\xb4\xcd\
\xbb\x80\x4d\xd1\x87\x29\x00\xf8\x69\xf2\x47\x5e\xef\x7f\x53\xf0\
\xa5\xde\x24\xf7\x09\x28\x00\x08\x27\x54\xf3\x55\xea\x45\xd2\x87\
\x29\x00\x88\xef\xfe\xeb\x54\x46\xf3\x55\x0a\xfa\x30\x05\x00\x81\
\x14\xd1\x7c\x15\x4c\x01\xf4\x61\x9d\x60\x19\x70\xf9\xee\xbf\x49\
\x85\x35\x5f\x05\x5f\x3b\x7d\x98\x04\x80\x5a\xba\xff\x02\xaf\x5d\
\x0a\x50\x00\x70\x42\x07\x9d\x45\xf3\x55\xf0\x50\x80\x3e\x4c\x01\
\xc0\x91\x93\x7f\x6e\xcd\x57\xc9\x14\x60\x59\x50\x01\xc0\xa1\x03\
\x3e\xcd\xa8\xf9\x2a\x98\x02\xe8\xc3\x1a\xc6\x49\xc0\x65\xba\xff\
\x3a\x2d\xa4\xf9\x2a\xf8\x9e\xe8\xc3\x24\x00\xec\xc9\x92\x9a\xaf\
\x52\xd0\x87\x29\x00\xd8\xa3\x53\x2e\xaa\xf9\x2a\x78\x28\x90\x5f\
\x4b\xc4\xeb\xa1\x0f\x53\x00\xba\xa6\x06\xcd\x57\xed\x29\xc0\xb2\
\xa0\x02\xd0\x65\xf7\xbf\x4e\x15\x68\xbe\x0a\xa6\x80\xfc\x9a\xe8\
\xc3\x14\x00\x3c\x32\xf9\x6b\xd3\x7c\x95\x82\x3e\x4c\x01\xc0\x8e\
\x58\x5b\x8d\xe6\xab\x60\x0a\xa0\x0f\x6b\x08\xcb\x80\xf3\x74\xff\
\x4d\xaa\x54\xf3\x55\xf0\x3d\xd3\x87\x49\x00\x98\xa8\x59\xf3\x55\
\x0a\xfa\x30\x05\x00\xb5\x6b\xbe\x0a\x1e\x0a\xe4\xd7\x7a\x17\xb0\
\x29\xfa\x30\x05\xa0\xd9\xc9\xdf\x8a\xe6\xab\x14\xf4\x61\x0a\xc0\
\xd0\x34\xa1\xf9\x2a\x98\x02\xf2\x6b\xa6\x0f\x53\x00\x86\xec\xfe\
\xeb\xd4\x96\xe6\xab\x14\xf4\x61\x0a\xc0\x90\x34\xa5\xf9\x2a\x98\
\x02\xe8\xc3\x2a\xc6\x32\x60\x99\xee\xbf\x49\x33\x6b\xbe\xa6\xe7\
\x9c\x73\x62\xdf\x1f\xb8\x4f\xe8\xc3\x2a\xe4\xa5\x5d\x50\x77\xf7\
\x3f\xe0\xb1\x9f\x66\x7e\x8f\x2f\x8e\x78\x2f\x9f\x83\xf6\xed\x85\
\x21\xe6\x10\xa0\xd6\xee\xdf\xb4\xe6\xab\x60\x62\xa0\x0f\x53\x00\
\xba\x9f\xfc\xbd\x68\xbe\x4a\x41\x1f\xa6\x00\x74\x4d\x17\x9a\xaf\
\x82\x29\x80\x3e\x4c\x01\xe8\xb6\xfb\xaf\x53\xcc\x5a\x75\xd6\x7c\
\xbd\xeb\x75\x3f\x4d\xef\xed\x6b\xc0\xa6\xde\x58\x16\x54\x00\x6a\
\xa2\x47\xcd\x57\x29\xe8\xc3\x14\x80\xae\xba\x7f\x97\x9a\xaf\x82\
\x29\x20\xbf\xc7\x88\xf7\x49\x1f\xa6\x00\x54\x41\xcf\x9a\xaf\xda\
\x53\x80\x8b\x83\x14\x80\x45\xbb\xff\x75\xea\x58\xf3\x55\x30\x05\
\xe4\xf7\x4a\x1f\xa6\x00\x34\x3d\xf9\x47\xd1\x7c\x95\x82\x3e\x4c\
\x01\x68\x3e\xfa\x77\xaf\xf9\x2a\x98\x02\xe8\xc3\x14\x80\x66\xbb\
\xff\x66\xfb\xed\x32\x60\x53\x59\xf3\x75\x37\xea\x7e\x9c\xde\x7b\
\xc4\x15\x8f\x97\x73\xdf\x0f\xa1\x00\x8c\xcd\x88\x9a\xaf\x52\xd0\
\x87\x29\x00\x4d\x75\xff\xdc\xf9\x23\xba\xcd\x9d\x3b\xdb\xe8\xc3\
\x14\x80\xb6\x26\xff\xe8\x9a\xaf\x52\xd0\x87\x29\x00\x4d\x30\xb4\
\xe6\xab\x60\x0a\xc8\xfb\x82\x3e\x4c\x01\xa8\xba\xfb\xaf\x13\xcd\
\x57\x49\xe8\xc3\x14\x80\xaa\xa1\xf9\x2a\x9b\x02\xe8\xc3\x14\x80\
\x6a\xbb\xff\x66\xfb\x2d\xe2\xba\xf3\x7c\xbd\xff\x47\x7b\x74\x67\
\x11\xc8\xfb\xe6\x3e\x60\x53\xaf\x2d\x0b\x2a\x00\x55\x76\x7f\xbb\
\x72\xb6\x7d\x24\x05\x28\x00\x21\xdd\x9f\xe6\x6b\xde\x14\x40\x1f\
\xa6\x00\x54\x33\xf9\x69\xbe\x96\x4b\x01\x96\x05\x15\x80\xc5\xa1\
\xf9\x5a\x26\x05\xd0\x87\x29\x00\x8b\x77\xff\x75\xa2\xf9\x5a\xb2\
\x08\xd0\x87\x29\x00\x8b\x42\xf3\xb5\x3c\xf4\x61\x0a\xc0\x22\xdd\
\x9f\xe6\xab\x8e\x14\x90\xf7\x5d\xc4\xfe\xa3\x0f\x53\x00\x0e\x82\
\xe6\xab\xbf\x14\x60\x59\x50\x01\xd8\xab\xfb\x5f\x27\x9a\xaf\x9a\
\x52\x40\xde\x87\xf4\x61\x0a\xc0\x2c\x93\x9f\xe6\xab\x4e\xe8\xc3\
\x14\x80\xd9\xa2\x3f\xcd\x57\x7d\x29\x80\x3e\x4c\x01\x28\xde\xfd\
\x37\x89\xe6\xab\xe6\x22\x90\xf7\x29\x7d\x98\x02\x50\x0c\x9a\xaf\
\xfa\xa1\x0f\x53\x00\x8a\x74\xff\xdc\xf9\x23\xba\x02\xcd\x57\xd9\
\x14\x90\xf7\x6d\x44\xba\xa2\x0f\x53\x00\xbe\x4f\x7e\x9a\xaf\xb6\
\xa0\x0f\x53\x00\x42\xa1\xf9\x6a\x2b\x05\xe4\x7d\x4c\x1f\xa6\x00\
\x84\x74\xff\x75\xa2\xf9\x6a\x11\xfa\x30\x05\x20\x04\x9a\xaf\x36\
\x53\x00\x7d\x98\x02\x70\x72\xf7\xdf\x24\x9a\xaf\x96\x8b\x00\x7d\
\x98\x02\x50\x47\xf7\x37\x1d\x17\x43\x0a\x50\x00\x8e\xea\xfe\x34\
\x5f\x7d\xa4\x00\xfa\x30\x05\xe0\xe0\xc9\x4f\xf3\xd5\x5f\x0a\xb0\
\x2c\xa8\x00\xec\xff\x41\x27\x9a\xaf\x9e\x52\x00\x7d\x98\x02\xb0\
\x77\xf7\x5f\x27\x9a\xaf\x1e\x8b\x00\x7d\x98\x02\xb0\x17\x34\x5f\
\xfd\x42\x1f\xa6\x00\x3c\xd9\xfd\x69\xbe\xfa\x4e\x01\xf9\x33\x89\
\xf8\x5c\x86\xd1\x87\x8d\x96\x00\x68\xbe\xa4\x80\xb9\xc7\x8a\x02\
\x50\x49\xf7\xbf\x4e\x34\x5f\x23\xa4\x80\xfc\xd9\xd0\x87\x29\x00\
\x3f\x4c\x7e\x9a\xaf\xb1\xa0\x0f\x53\x00\xfe\x16\xe7\x68\xbe\xc6\
\x49\x01\xf4\x61\x0a\xc0\xf7\xee\xbf\x49\x34\x5f\x23\x16\x81\xfc\
\x59\xd1\x87\x49\x00\x34\x5f\x03\x43\x1f\x36\x72\x01\xa0\xf9\x1a\
\x3e\x05\xe4\xcf\x2c\x22\xb5\x75\xab\x0f\x5b\x75\x3c\xf9\x69\xbe\
\x90\x12\x7d\xd8\xb0\x09\x80\xe6\x0b\xf4\x61\x23\x16\x00\x9a\x2f\
\xfc\x04\x7d\xd8\x60\x09\x80\xe6\x0b\x0f\x53\x00\x7d\xd8\x28\x05\
\x80\xe6\x0b\x3b\x8a\x00\x7d\xd8\x20\x09\x80\xe6\x0b\xa5\x3f\xd3\
\x5b\x05\xa0\xce\xee\x4f\xf3\x85\xa7\x52\x00\x7d\x58\xaf\x05\x80\
\xe6\x0b\x07\xa4\x00\xcb\x82\x1d\x26\x00\x9a\x2f\xec\x93\x02\xe8\
\xc3\x7a\x2b\x00\x34\x5f\x38\xb0\x08\xd0\x87\x75\x96\x00\x68\xbe\
\xb0\xd4\x67\xfd\x41\x01\x58\xb6\xfb\xd3\x7c\xe1\x98\x14\x90\x3f\
\xeb\x88\xcf\xbb\x69\x7d\x58\x0f\x09\x80\xe6\x0b\x4b\x7f\xe6\xb7\
\x0a\xc0\x32\xdd\xff\x3a\xd1\x7c\xe1\xf8\x14\x90\x3f\xf3\xa1\xf5\
\x61\xab\x86\x27\x3f\xcd\x17\x22\x18\x5a\x1f\xd6\x72\x02\xa0\xf9\
\x42\x44\x0a\x18\x5a\x1f\xd6\x64\x01\xa0\xf9\x42\x70\x11\xc8\x63\
\x60\x48\x7d\x58\xab\x09\x80\xe6\x0b\xd1\x0c\xa9\x0f\x6b\xae\x00\
\xd0\x7c\xa1\x50\x0a\xc8\x63\x21\x22\x0d\x36\xa5\x0f\x7b\xd9\xd8\
\xe4\xa7\xf9\xda\x0d\x6d\x59\xcc\x3e\x7c\x9d\x4e\x3f\xb7\x94\xef\
\x13\xf8\xd8\xc2\xb9\xa5\x97\x8d\x7d\x40\x34\x5f\xbb\x3b\xd8\xb5\
\xf9\x7b\xf2\x3e\xfc\xba\x9d\xb8\xef\x03\x9a\xcc\x7a\x1a\xab\xd5\
\x7f\x26\xcd\x1c\x02\xd0\x7c\x61\x26\x86\xd2\x87\xb5\x74\x0e\x80\
\xe6\x0b\x73\xa4\x80\xa1\xf4\x61\x2f\x1a\xe9\xfe\x9b\xed\xb7\x4f\
\x01\x9b\xca\xd7\xfb\xbf\x32\xcc\xb1\xc7\x98\xcb\xe3\x6d\x13\xb0\
\xa9\x57\x35\x9f\x6c\x6e\x25\x01\xd0\x7c\x61\x6e\x86\x48\x01\xd5\
\x17\x00\x9a\x2f\x2c\x74\x28\x30\x84\x3e\x6c\x55\xf9\xe4\xa7\xf9\
\xc2\xd2\x29\xa0\x6b\x7d\x58\xed\x09\x80\xe6\x0b\x4b\xa6\x80\xee\
\xf5\x61\xd5\x9e\x04\x9c\x96\x50\xbe\x04\x6c\x2a\x6b\xbe\xce\x0d\
\x67\x9c\x30\x16\xf3\x38\x5c\x07\x6c\xea\xbc\xb6\xeb\x4f\x6a\x4e\
\x00\x34\x5f\xa8\x85\x6e\xf5\x61\x55\x16\x00\x9a\x2f\x54\x76\x28\
\x90\xc7\x50\xc4\x38\xaa\x4e\x1f\x56\x6b\x02\xa0\xf9\x42\xaf\x29\
\xe0\x56\x01\x78\xba\xfb\x5f\x27\x9a\x2f\xd4\x97\x02\xf2\x58\xea\
\x4e\x1f\xb6\xaa\x6c\xf2\xd3\x7c\xa1\x66\xba\xd3\x87\xd5\x96\x00\
\x68\xbe\x50\x73\x0a\xe8\x4e\x1f\x56\xcd\x32\x60\xe0\xf5\xfe\x59\
\xf3\x75\x31\xda\xe0\x0c\xdc\x7f\xfb\xf0\x6a\xe4\x93\xab\xdb\x7d\
\xfd\x39\xc5\x5c\x9d\xba\xf8\x7e\xac\x29\x01\xd0\x7c\xa1\x15\xba\
\xd1\x87\x55\x51\x00\x68\xbe\xd0\xd8\xa1\x40\x1e\x63\x77\x01\x9b\
\x5a\x5c\x1f\xb6\xaa\x60\xf2\xd3\x7c\xa1\x45\x6e\x52\x07\xf7\x09\
\xd4\x90\x00\x68\xbe\xd0\x62\x0a\xc8\x63\xed\x7d\xc0\xa6\xd6\x29\
\xe6\x2f\x5b\xb7\x57\x00\x68\xbe\xd0\x38\xcd\xeb\xc3\x96\x4e\x00\
\x34\x5f\x68\x39\x05\x34\xaf\x0f\x5b\xac\x00\x4c\xcb\x56\x11\xd7\
\x45\xe7\xeb\xfd\x3f\x1a\x8e\x58\xa8\x08\xe4\xb1\x77\x1f\xb0\xa9\
\xd7\x4b\xfc\x55\xa1\x25\x13\x00\xcd\x17\x7a\xa1\xd9\x14\xb0\x48\
\x01\xa0\xf9\x42\x67\x29\xa0\x59\x7d\xd8\x6a\x81\xc9\x4f\xf3\x85\
\x5e\x53\x40\x73\xcb\x82\x4b\x24\x00\x9a\x2f\xf4\x98\x02\x9a\xd4\
\x87\xcd\x5a\x00\xa6\xa5\x8e\x88\x88\x93\x35\x5f\x96\xfd\x50\x5b\
\x11\x88\x5a\x16\x7c\x33\xd7\xb2\xe0\xdc\x09\x80\xe6\x0b\xbd\xd3\
\x94\x3e\x6c\xb6\x02\x40\xf3\x85\x41\x52\x40\x1e\x9b\x11\xe3\x73\
\x16\x7d\xd8\x9c\x09\x80\xe6\x0b\x52\xc0\x32\x73\x66\xd9\x02\x40\
\xf3\x85\xc1\x52\x40\x1e\xa3\x4d\xe8\xc3\x56\x33\x4c\x7e\x9a\x2f\
\x8c\x48\x13\xfa\xb0\x39\x12\x00\xcd\x17\x46\x4c\x01\x4d\xe8\xc3\
\x8a\x16\x80\xe9\xda\xe6\xcb\x80\x4d\x65\xcd\xd7\x9d\x61\x85\xc6\
\x8a\x40\x1e\xb3\x11\x57\xaa\x5e\x96\xba\x4f\xa0\x74\x02\xa0\xf9\
\xc2\xe8\x54\xad\x0f\x2b\x56\x00\x68\xbe\x80\xfa\xf5\x61\xab\x42\
\x93\x9f\xe6\x0b\xf8\x8b\x6a\xf5\x61\xa5\x12\x00\xcd\x17\xf0\x57\
\x0a\xc8\x63\xb8\x4a\x7d\x58\x78\x01\xa0\xf9\x02\x1e\xa5\x4a\x7d\
\x58\x89\x04\x40\xf3\x05\xfc\x3d\x05\x54\xa9\x0f\x0b\xfd\xcb\x40\
\x81\x7f\x9d\x26\x5f\xef\xff\xca\xb0\x39\x38\x79\x5d\xce\xf4\x74\
\x77\x0e\xcd\x8e\xfe\x9c\xf2\xfc\xd8\x04\x6c\x2a\xe4\xaf\x0a\xbd\
\xac\xb5\xfb\x1b\x2a\x47\x1d\x67\x5e\xdb\x13\xd5\x93\xc7\xf6\xe7\
\xa0\xb9\x76\xf2\x9f\xc0\x0b\x3b\x04\xa0\xf9\x02\xf6\x2a\xd4\x55\
\xe9\xc3\x56\x41\x93\x9f\xe6\x0b\x38\x2c\x05\x54\xb1\x2c\x18\x95\
\x00\x68\xbe\x80\xfd\x53\x40\x35\xfa\xb0\x93\x4f\x02\x4e\x27\x9f\
\xbe\x04\xbc\x99\xac\xf9\x3a\x37\x3c\x30\x0a\xdb\xb9\x93\xe7\xcd\
\x3a\x60\x53\xe7\xc7\x9e\x94\x8d\x48\x00\x34\x5f\xc0\xb2\x63\xfe\
\xe8\x39\x78\x52\x01\xa0\xf9\x02\x4e\x3a\x14\xc8\x63\x3e\x62\xdc\
\x1f\xad\x0f\x3b\x35\x01\xd0\x7c\x01\x75\x8c\xfd\xdb\x59\x0b\x00\
\xcd\x17\x10\x92\x02\xf2\xd8\x5f\x4c\x1f\xb6\x3a\x72\xf2\xd3\x7c\
\x01\x71\x2c\xa6\x0f\x3b\x36\x01\xd0\x7c\x01\x71\x29\x60\x31\x7d\
\xd8\xc1\xcb\x80\x81\xd7\xfb\x67\xcd\xd7\x85\x8f\x1f\xf8\x3e\xb7\
\xf2\x25\xc2\x11\x57\xd3\xee\x7d\x9f\xc0\x31\x09\x80\xe6\x0b\x28\
\xc3\xec\xfa\xb0\x83\x0a\x00\xcd\x17\x50\xf4\x50\x20\xcf\x89\xbb\
\x80\x4d\xed\xad\x0f\x5b\x1d\x30\xf9\x69\xbe\x80\xf2\xcc\xaa\x0f\
\x3b\xe4\x76\xe0\x28\xcd\x57\x7e\x51\x5f\xb6\x2f\xce\x47\x0d\x94\
\x63\x3d\xcd\xd9\xeb\xa7\x1e\xb4\xd7\x49\xc0\xc0\xeb\xfd\x01\xcc\
\xcb\x93\xf7\x09\xec\x7b\x08\x70\x6b\x3f\x02\x4d\x72\x7b\x52\x02\
\x08\x5c\xf6\x03\xb0\x0c\x3b\x97\x05\xf7\x49\x00\xba\x3f\xd0\x69\
\x0a\x58\x3d\xd3\xfd\xa3\x34\x5f\x00\x96\x63\xa7\x3e\xec\xc5\x13\
\x93\xff\xdb\xd9\xfa\x14\x73\xc9\x2f\x80\x65\xc9\x4b\x8b\xe7\x3f\
\x5f\x7a\xff\x54\x02\xb8\x32\xf9\x81\x6e\x78\xf4\x3a\x9e\x17\x3b\
\xba\xff\x3a\x59\xf6\x03\x7a\xe4\x87\x65\xc1\x5d\x09\xe0\x83\xfd\
\x04\x74\xc9\x87\x27\x0f\x01\x02\x35\x5f\x00\xea\xe3\x07\x7d\xd8\
\x63\x09\xc0\xb2\x1f\xd0\x37\xb7\x8f\x16\x80\x40\xcd\x17\x80\x7a\
\xf9\xae\x0f\x7b\xf1\x60\xf2\x5b\xf6\x03\xc6\xe1\xdb\xb2\xe0\xea\
\xa7\x58\x60\xf2\x03\x63\xf0\x4d\x1f\xf6\xf0\x76\xe0\xff\x26\xf7\
\xe9\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x4f\
\xf0\x7f\x01\x06\x00\x32\x01\x7a\x45\x9d\x58\x20\xba\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x81\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x01\x03\x00\x00\x00\x25\x3d\x6d\x22\
\x00\x00\x00\x06\x50\x4c\x54\x45\x00\x00\x00\xae\xae\xae\x77\x6b\
\xd6\x2d\x00\x00\x00\x01\x74\x52\x4e\x53\x00\x40\xe6\xd8\x66\x00\
\x00\x00\x29\x49\x44\x41\x54\x78\x5e\x05\xc0\xb1\x0d\x00\x20\x08\
\x04\xc0\xc3\x58\xd8\xfe\x0a\xcc\xc2\x70\x8c\x6d\x28\x0e\x97\x47\
\x68\x86\x55\x71\xda\x1d\x6f\x25\xba\xcd\xd8\xfd\x35\x0a\x04\x1b\
\xd6\xd9\x1a\x92\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x00\xe4\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x36\x00\x00\x00\x0a\x08\x06\x00\x00\x00\xff\xfd\xad\x0b\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\x7f\x00\x87\x00\x95\xe6\xde\xa6\xaf\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\
\x09\x2a\x2b\x98\x90\x5c\xf4\x00\x00\x00\x64\x49\x44\x41\x54\x48\
\xc7\x63\xfc\xcf\x30\x3c\x01\x0b\xa5\x06\x34\xb4\x4f\x85\x87\xcd\
\xaa\xa5\x73\x18\xae\x5d\x39\xcf\x48\x2b\x35\x14\x79\xcc\xd8\xc8\
\x88\x24\x03\x7c\x89\xd0\x4f\x2d\x35\x84\xc0\xd9\x73\xe7\xe0\x6c\
\x26\x86\x91\x92\x14\x91\x7d\x4d\x54\x52\x0c\x4d\x26\xa8\x9f\x5a\
\x6a\x46\x93\xe2\x68\x52\x1c\x82\x49\x91\x91\xd2\x7a\x4c\x4b\xc7\
\x10\xc5\x08\x6c\xc5\x34\xb5\xd4\xd0\xd5\x63\x83\x15\x00\x00\x7a\
\x30\x4a\x09\x71\xea\x2d\x6e\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x02\x02\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x7f\x49\x44\
\x41\x54\x58\x85\xed\x97\xcb\x4a\x42\x51\x14\x86\xbf\x65\xa5\xd9\
\xe5\x01\xac\x57\xc8\x40\x28\xa3\xd2\x9e\x22\x87\xdd\x88\x06\x36\
\x33\xa1\x9e\xa1\x89\x36\xa9\x46\x5d\x69\x58\xef\x10\x1c\x8d\xb4\
\x40\xa2\x77\xc8\xe6\x5d\xac\x2c\x57\x83\x3a\xa2\x1c\xcf\x24\xb6\
\x18\x75\xfe\xd9\x5e\x1b\xf6\xf7\xb1\x60\x6f\xf6\x82\xff\x1e\x69\
\x5a\xa9\x4a\x2c\x5b\x58\x14\x95\x24\x42\x18\xe8\x35\xc4\x79\x41\
\xb9\x05\xd9\xb1\xd6\xc6\x8f\x10\x51\xa7\x80\xaa\xcc\x6c\x15\x0f\
\x55\x99\x07\x5e\x05\x4a\x8a\x3e\x9a\xa0\x0b\x32\xa0\x10\x01\x02\
\x20\x07\x56\x6a\x7c\xd9\x96\xa8\x0b\xc4\x32\x97\x4b\x82\xec\x83\
\xe6\x91\xee\x84\x95\x1a\x2b\x9b\x80\xdb\x89\x67\xaf\x43\xe8\xc7\
\x29\x30\xa5\xca\x42\x2e\x3d\x71\x0c\xe0\xab\x5b\xaa\x24\x81\xd7\
\xae\x77\xdf\xac\x69\x38\x80\x95\x1a\x2b\xd7\xaa\xd5\x04\xf0\x26\
\xc2\xaa\x5d\xaf\x0b\x20\x8c\x08\x94\xce\xd7\xa3\xf7\xa6\xe1\x76\
\xf2\x1b\xb1\x3b\xa0\x04\x84\x9d\x02\x10\x54\x78\x6e\x17\xbc\x21\
\x4f\x40\x5f\x2b\x81\x8e\xc4\x13\xe8\xb8\x40\xb7\xdb\x46\x3c\x53\
\x50\xb7\xbd\x9f\xc4\x5a\x9b\x90\x56\xf5\x8e\x77\xc0\x13\xf0\x04\
\x3c\x01\xd7\x77\xc0\xed\xde\x9a\x4e\xc7\x3b\xe0\x09\xfc\x2a\x81\
\x8a\x34\xfc\x54\xda\x98\x7e\xa0\xd2\x42\x40\x6f\x15\x22\xf1\xec\
\x75\xa8\x5d\xe4\xc9\xcc\xc5\x30\x10\x11\xb8\x71\x0a\xa8\x6f\x17\
\x08\xa0\x1f\x67\xd3\x9b\xb9\xa1\x76\xc0\x7b\xe8\x3a\x05\xfc\x35\
\xd1\x1d\xbb\xde\x34\x98\xc4\xb3\x57\x7b\xa0\x4b\xc0\x1b\x50\x02\
\x7d\x30\x83\x97\x41\xbe\x06\x13\xbf\xc2\x5e\x2e\x15\x5d\x71\x0c\
\x26\xb6\x44\x2c\x53\x9c\xfb\xfe\xb7\x8f\x02\x41\x33\x02\x54\x04\
\x6e\x54\x65\xdb\x4a\x47\x4f\x0c\x9d\xf9\x47\xf2\x09\xb5\xbd\x75\
\x94\xee\x91\xe8\xbe\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x00\xc3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdc\x0b\x07\x09\x2e\x37\xff\x44\xe8\xf0\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x00\x27\x49\x44\x41\x54\x78\xda\xed\xc1\x01\
\x0d\x00\x00\x00\xc2\xa0\xf7\x4f\x6d\x0e\x37\xa0\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x77\x03\x40\x40\
\x00\x01\xaf\x7a\x0e\xe8\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x00\x96\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce\x7c\x4e\
\x00\x00\x00\x02\x62\x4b\x47\x44\x00\xd3\xb5\x57\xa0\x5c\x00\x00\
\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\
\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x0b\x07\x0c\
\x0d\x1b\x75\xfe\x31\x99\x00\x00\x00\x27\x49\x44\x41\x54\x08\xd7\
\x65\x8c\xb1\x0d\x00\x00\x08\x83\xe0\xff\xa3\x75\x70\xb1\xca\xd4\
\x90\x50\x78\x08\x55\x21\x14\xb6\x54\x70\xe6\x48\x8d\x87\xcc\x0f\
\x0d\xe0\xf0\x08\x02\x34\xe2\x2b\xa7\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x02\x56\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x40\x00\x00\x00\x40\x08\x06\x00\x00\x00\xaa\x69\x71\xde\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdf\x04\x19\x10\x15\x00\xdc\xbe\xff\xeb\x00\x00\x00\x1d\x69\x54\
\x58\x74\x43\x6f\x6d\x6d\x65\x6e\x74\x00\x00\x00\x00\x00\x43\x72\
\x65\x61\x74\x65\x64\x20\x77\x69\x74\x68\x20\x47\x49\x4d\x50\x64\
\x2e\x65\x07\x00\x00\x01\xba\x49\x44\x41\x54\x78\xda\xed\x9b\x5b\
\x92\x02\x21\x0c\x45\x4d\xd6\x37\x2e\x48\x17\xa0\x0b\xd2\xfd\xe9\
\x9f\x65\x39\xda\x3c\x92\x7b\x13\x68\xf2\x3d\x95\xe6\x1c\x1e\x43\
\x10\x0e\x87\x15\x2b\x56\xec\x39\x84\xf9\xb1\xdb\xe9\xf4\xa8\xf9\
\xbb\xe3\xf5\x2a\x53\x08\xa8\x05\x8e\x14\x22\x59\xa1\x59\x32\x64\
\x14\x70\x94\x08\x19\x11\xde\x53\x82\x8c\x08\xee\x29\x42\x46\x87\
\xb7\x4a\x90\xd1\xc1\xad\x22\x64\x26\xf8\x1e\x09\x32\x1b\x7c\xab\
\x04\x5d\x5b\xe1\x09\x7b\xbf\x65\x14\x88\x15\xfe\xef\x72\x79\xe5\
\xb8\x9f\xcf\x14\x51\xef\xdf\x2c\x7d\xb7\x24\x41\xbd\x1b\xf6\xd9\
\x38\x34\xbc\x35\x14\x31\xf4\x51\x12\x7a\xf2\x96\x18\x14\x35\xef\
\xbd\x25\x58\xf2\x6d\xb1\x98\xa7\xc0\xd6\xfc\xf3\x92\xb0\x95\xc7\
\xba\xee\x88\x57\xef\xa3\x1a\xe9\x99\xf7\xdb\x82\xe8\xb6\x08\x22\
\x46\x02\xb2\xe7\x21\xff\x05\x3c\x25\x30\xe0\xbf\x4e\x01\x8f\x4d\
\x8f\xb5\xf1\x48\xf8\xcf\x69\x00\xd9\x0a\x5b\x46\x02\xab\xe7\xe1\
\xb5\x40\x8f\x04\x36\x3c\xbc\x18\x6a\x91\x10\x01\xff\x6f\x0d\x40\
\x15\x3d\x25\x38\x36\xfc\xfb\x3a\x40\x29\x87\x7b\xd7\x04\x46\x71\
\x45\x3b\x0f\x68\x85\x61\x55\x96\xd4\x03\x91\x5a\x28\x16\x3c\x5d\
\x40\x0d\x1c\x13\x3e\x44\x80\x65\x1f\x30\xbc\x80\x5a\x38\xa6\x04\
\xcd\x06\xcf\x96\xa0\xd1\xf0\x8c\xf3\x84\x50\x01\x35\xf0\x91\x12\
\x20\xd5\x60\x6f\xcf\x33\x36\x45\x94\x6a\xb0\x17\x26\x62\x24\x68\
\xa6\x39\x1f\x21\x41\x33\xc1\x47\x48\x70\x3b\x14\x45\xcc\x61\xef\
\x7c\xd0\x43\x51\xc4\x02\xc6\x18\x09\x9a\x15\x9e\x25\xe1\x67\x82\
\xda\x69\xc0\xaa\xe7\xad\xdf\xf9\xf5\x23\x69\xc8\x99\x60\x86\x7c\
\x45\x01\x96\x9b\x57\xa8\xc6\xf6\xe6\xdd\x62\xd1\xec\x3d\x8f\xce\
\x6f\xbe\x20\x91\x3d\x4a\x23\x79\x5d\x91\xa9\x4d\xb6\x6e\x89\x4d\
\x1a\xeb\xa2\x64\x6b\xf2\x5d\x5f\x95\xcd\x2c\x82\x76\x59\x3a\xa3\
\x84\x90\xeb\xf2\x59\x24\x58\x1f\x4d\xac\x27\x33\xde\x0d\xdb\xed\
\xa3\x29\xa4\x8c\xa1\x9e\xcd\x79\x08\x61\x3e\x9c\x5c\xb1\xf7\x78\
\x02\x47\xb0\x5b\x07\x3a\x44\x3e\x01\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x02\xd8\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x02\x55\x49\x44\
\x41\x54\x58\x85\xed\x95\x4d\x4f\x53\x51\x10\x86\x9f\xb9\x1a\x12\
\xef\x4f\x10\x0d\xc1\xb0\x12\x4d\xb0\xf1\x0f\xc0\x06\xe3\x06\x48\
\x4c\x77\xd0\x0f\x16\x6c\x8d\x01\x2c\xae\x58\x68\x82\x05\xff\xc2\
\x3d\xad\xec\xae\x89\x16\x57\x7e\x2c\xc4\xad\xf1\x8b\x68\x62\x0c\
\x21\xa4\xb1\x86\x3f\xd0\x86\x86\x26\x7d\x5d\xb4\x21\xc6\x70\x5b\
\x2e\xb0\xb3\xef\x76\xe6\xcc\x3c\x67\xce\x99\x19\xe8\xa9\xa7\xff\
\x5d\x16\xc7\x39\x0c\xc3\xbe\xfd\x6a\x75\x4a\x66\x93\x06\x09\xa0\
\xbf\x6d\xaa\x60\xf6\x59\x50\xf2\x7d\xbf\x94\x4c\x26\x0f\xce\x1c\
\xa0\x18\x04\x77\x30\xcb\x03\x83\x06\xdf\x04\x9b\x32\xab\x00\x78\
\xcd\x66\x3f\x66\xa3\x82\xeb\xc0\x8e\xc1\xe2\x4c\x26\xf3\xfc\x4c\
\x00\xc2\x30\x3c\xb7\x5f\xab\xe5\x81\x7b\x06\xaf\xac\xd9\xcc\x4d\
\xcf\xce\x6e\x1d\xe5\xeb\x9c\x1b\xf1\x60\x05\x18\x07\x56\x77\xcb\
\xe5\xdc\xf2\xf2\x72\xb3\x53\xfc\xf3\xdd\x00\xda\xc9\xef\x4a\x5a\
\x48\x65\xb3\x6b\x9d\x7c\x33\x99\xcc\x57\xe0\x56\xd1\xb9\x05\x60\
\x65\x70\x60\x00\x60\xb1\xd3\x99\x8e\x15\x68\x97\xfd\x99\x99\xcd\
\xcf\xa4\xd3\x4f\xba\xc1\xfe\xad\x42\xa1\xb0\x68\xd2\x63\x0f\xa6\
\xa6\x33\x99\x52\x6c\x80\x30\x0c\xfb\xea\xb5\xda\x0f\x49\x3f\x53\
\xd9\xec\xed\x38\xc9\x0f\x21\x9c\x7b\x63\x66\x83\x17\x7c\x7f\x38\
\xea\x63\x7a\x51\x87\xf7\xab\xd5\x29\xc1\x15\x4f\x5a\x3a\x49\x72\
\x00\xf3\xbc\xfb\x48\x43\xf5\x5a\x6d\x22\xca\x27\x12\x40\x66\x93\
\xc0\x56\xd4\x87\x3b\x8e\x52\xa9\xd4\x17\xcc\xbe\x03\xf1\x01\x0c\
\x12\x26\xbd\x3f\x69\xf2\x43\x49\xef\x04\x37\xa3\xcc\xd1\x5d\x60\
\x76\x51\x50\x39\x35\x00\xfc\xc6\xac\x3f\xca\x18\x59\x01\x00\x99\
\xc5\x9a\x94\x47\xc9\xc0\x90\x22\x67\x41\x34\x80\xb4\x67\xd2\xa5\
\xd3\x02\xa8\x75\xfb\xbd\x28\x7b\xa7\x27\xf8\x08\x8c\x9e\x1a\x40\
\x1a\x33\xf8\x10\x65\x8f\xee\x02\x28\x21\x5d\x73\xce\x8d\x9c\x34\
\xf9\x7a\x10\x24\x0c\xae\x22\xbd\x8c\x0d\xe0\xfb\x7e\x09\xd8\x69\
\xcf\xf6\xd8\x92\x64\xcd\xd6\xf2\xda\xae\x37\x1a\x1b\xb1\x01\x92\
\xc9\xe4\x01\x9e\xb7\x00\x8c\xb7\x67\x7b\x2c\x15\x9d\xcb\x01\x63\
\x32\x9b\x9f\x9b\x9b\x6b\xc4\x06\x00\x48\xa5\x52\x2f\x80\x55\x60\
\xe5\xb8\x10\x92\xac\x10\x04\x4b\x66\xf6\x10\xc8\xa7\xd3\xe9\xc8\
\xf2\x77\x05\x00\xd8\x2d\x97\x73\x92\xd6\x80\x7c\xd1\xb9\xd7\xc5\
\x62\xf1\x46\x94\xef\x7a\x10\x24\x9e\x16\x0a\x6f\xcd\xec\x11\xad\
\x75\xfc\xa0\x5b\xfc\x63\xf7\xf9\xba\x73\x93\x4d\xb3\x55\xa4\xa1\
\xf6\x78\xdd\x14\xfc\x6a\x07\xb9\x8c\x34\x0a\x0c\x03\xdb\x32\x9b\
\xef\x76\xf3\xd8\x00\x70\xb8\x21\x27\x04\x93\x40\x02\xb3\xd6\x9c\
\x90\x2a\x06\x9f\x24\x95\xea\x8d\xc6\x46\xa7\x37\xef\xa9\xa7\x9e\
\xfe\xd5\x1f\x3e\xd4\xef\x44\x0d\xbc\xff\x65\x00\x00\x00\x00\x49\
\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x00\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0d\xd7\x00\x00\x0d\xd7\
\x01\x42\x28\x9b\x78\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x01\x7d\x49\x44\
\x41\x54\x58\x85\xed\x97\x3b\x4e\x02\x51\x14\x86\xbf\x83\x28\x3e\
\x17\xa0\x6e\x41\x4d\x48\x78\x44\x9d\x71\x15\x5a\xfa\x8a\xb1\xd0\
\x0e\x48\x74\x0d\x36\x60\xa3\x56\x3e\x63\xa9\x7b\x30\x19\x34\x82\
\x24\x84\xb8\x07\xb5\xf7\x81\x82\x70\x2c\x74\x08\x04\xc3\x14\xce\
\x58\xe8\x7c\xdd\xbd\xe7\xe6\xfe\x5f\x6e\x73\xcf\x81\xff\x8e\xb4\
\xac\x54\xc5\xc8\xe4\x96\x44\x65\x0d\x61\x1c\xe8\x75\x29\xe7\x15\
\xe5\x16\x64\xd7\x4a\x46\x8f\x11\xd1\x76\x01\x55\x99\xd9\xce\x1f\
\xa9\xb2\x00\xbc\x09\x14\x15\x7d\x72\x23\x5d\x90\x41\x85\x30\x10\
\x02\x39\xb4\x12\xd1\x15\x5b\xa2\x21\x60\xa4\xaf\x97\x05\x39\x00\
\xbd\x44\x82\x73\x56\x22\x72\xef\x46\xb8\x8d\x99\x29\x0c\xa3\xb5\
\x33\x60\x4a\x95\xc5\x6c\x2a\x7e\x02\x10\x68\x58\xaa\xac\x01\x6f\
\x5d\xef\x81\x59\xb7\xc3\x01\xac\x44\xe4\xbe\x5e\xad\xce\x01\x15\
\x11\xd6\xed\xfd\x86\x00\xc2\x98\x40\xf1\x62\x23\xf6\xe0\x76\xb8\
\xcd\xe5\xa6\x71\x07\x14\x81\xf1\x76\x01\xe8\x53\x78\xf1\x2a\xbc\
\x89\x67\xa0\xdf\x5e\x04\x9d\x4e\x9b\xe9\x9c\x3a\x9d\xe9\x84\x95\
\x8c\x4b\xa7\x7a\xa0\x53\xf1\x37\xf0\x05\x7c\x01\x5f\xc0\x17\xf0\
\x05\x7c\x01\x5f\xc0\xb1\x1f\x70\xfa\xcf\x7f\x4a\xf3\x0b\x94\xa5\
\xa9\x53\xf1\x90\x01\xa0\xfc\x8d\x80\xde\x2a\x84\xcd\x4c\x61\xd8\
\xab\xe4\xc9\xf4\xd5\x28\x10\x16\x28\xb5\x0b\x68\x60\x0f\x08\xa1\
\xb5\xf3\xe9\xad\xec\x88\x17\xe1\xdd\x74\x9d\x01\x3d\x75\xd1\x5d\
\x7b\xbf\x65\x30\x31\x33\x37\xfb\xa0\xcb\x40\x05\x28\x82\x3e\xba\
\x13\x2f\x43\x7c\x0e\x26\x3d\x0a\xfb\xd9\x44\x6c\xb5\x6d\x30\xb1\
\x25\x8c\x74\x7e\xfe\xab\x6f\x9f\x00\xfa\xdc\x11\xa0\x2c\x50\x52\
\x95\x1d\x2b\x15\x3b\x75\xe9\xce\x3f\xc2\x07\xd1\xbc\x75\x94\xcf\
\xbc\x8d\xf9\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x9e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce\x7c\x4e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x08\x15\x0f\xfd\
\x8f\xf8\x2e\x00\x00\x00\x22\x49\x44\x41\x54\x08\xd7\x63\x60\xc0\
\x0d\xfe\x9f\x87\xb1\x18\x91\x05\x18\x0d\xe1\x42\x48\x2a\x0c\x19\
\x18\x18\x91\x05\x10\x2a\xd1\x00\x00\xca\xb5\x07\xd2\x76\xbb\xb2\
\xc5\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\x9f\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce\x7c\x4e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x08\x14\x1f\xf9\
\x23\xd9\x0b\x00\x00\x00\x23\x49\x44\x41\x54\x08\xd7\x63\x60\xc0\
\x0d\xe6\x7c\x80\xb1\x18\x91\x05\x52\x04\xe0\x42\x08\x15\x29\x02\
\x0c\x0c\x8c\xc8\x02\x08\x95\x68\x00\x00\xac\xac\x07\x90\x4e\x65\
\x34\xac\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xa6\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x09\x00\x00\x00\x06\x08\x04\x00\x00\x00\xbb\xce\x7c\x4e\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x02\x62\x4b\x47\x44\x00\x9c\x53\x34\xfc\x5d\x00\x00\x00\x09\x70\
\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\
\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xdc\x08\x17\x0b\x1b\x0e\x16\
\x4d\x5b\x6f\x00\x00\x00\x2a\x49\x44\x41\x54\x08\xd7\x63\x60\xc0\
\x00\x8c\x0c\x0c\x73\x3e\x20\x0b\xa4\x08\x30\x32\x30\x20\x0b\xa6\
\x08\x30\x30\x30\x42\x98\x10\xc1\x14\x01\x14\x13\x50\xb5\xa3\x01\
\x00\xc6\xb9\x07\x90\x5d\x66\x1f\x83\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x09\
\x09\x15\x46\xb5\
\x00\x64\
\x00\x61\x00\x72\x00\x6b\x00\x5f\x00\x62\x00\x6c\x00\x75\x00\x65\
\x00\x03\
\x00\x00\x70\x37\
\x00\x69\
\x00\x6d\x00\x67\
\x00\x09\
\x00\x28\xad\x23\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x2e\x00\x71\x00\x73\x00\x73\
\x00\x1c\
\x08\x3f\xda\x67\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\
\x00\x64\x00\x5f\x00\x66\x00\x6f\x00\x63\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0e\
\x04\xa2\xfc\xa7\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1d\
\x09\x07\x81\x07\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\
\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x07\xec\xd1\xc7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x02\x9f\x05\x87\
\x00\x72\
\x00\x69\x00\x67\x00\x68\x00\x74\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x0c\xab\x51\x07\
\x00\x64\
\x00\x6f\x00\x77\x00\x6e\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\
\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x08\xc4\x6a\xa7\
\x00\x56\
\x00\x73\x00\x65\x00\x70\x00\x61\x00\x72\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x19\
\x0b\x59\x6e\x87\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x66\
\x00\x6f\x00\x63\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x19\
\x08\x3e\xcc\x07\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\x00\x68\x00\x65\x00\x65\x00\x74\x00\x2d\x00\x62\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\
\x00\x2d\x00\x65\x00\x6e\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x0e\xbc\xc3\x67\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x64\x00\x69\x00\x73\
\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x0b\xc5\xd7\xc7\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\x00\x68\x00\x65\x00\x65\x00\x74\x00\x2d\x00\x76\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x08\x90\x94\x67\
\x00\x63\
\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x2d\x00\x70\x00\x72\x00\x65\x00\x73\x00\x73\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x17\
\x0f\x1e\x9b\x47\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x66\x00\x6f\x00\x63\
\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x03\x76\xc2\x07\
\x00\x71\
\x00\x75\x00\x65\x00\x73\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x0c\x65\xce\x07\
\x00\x6c\
\x00\x65\x00\x66\x00\x74\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\
\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x09\x65\x8e\x67\
\x00\x65\
\x00\x72\x00\x72\x00\x6f\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x08\x8c\x6a\xa7\
\x00\x48\
\x00\x73\x00\x65\x00\x70\x00\x61\x00\x72\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x1c\
\x01\xe0\x4a\x07\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\x00\x64\
\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x14\
\x06\x5e\x2c\x07\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x5f\x00\x63\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x64\x00\x2d\x00\x6f\x00\x6e\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x04\x2d\x42\xa7\
\x00\x63\
\x00\x72\x00\x69\x00\x74\x00\x69\x00\x63\x00\x61\x00\x6c\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1f\
\x0a\xae\x27\x47\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\
\x00\x64\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x01\x00\xca\xa7\
\x00\x48\
\x00\x6d\x00\x6f\x00\x76\x00\x65\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x01\x21\xeb\x47\
\x00\x73\
\x00\x74\x00\x79\x00\x6c\x00\x65\x00\x73\x00\x68\x00\x65\x00\x65\x00\x74\x00\x2d\x00\x62\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\
\x00\x2d\x00\x6d\x00\x6f\x00\x72\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x18\
\x03\x8e\xde\x67\
\x00\x72\
\x00\x69\x00\x67\x00\x68\x00\x74\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\
\x00\x6c\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x16\
\x01\x75\xcc\x87\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\
\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0a\
\x05\x95\xde\x27\
\x00\x75\
\x00\x6e\x00\x64\x00\x6f\x00\x63\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x0a\xe5\x6c\x07\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x09\
\x06\x98\x83\x27\
\x00\x63\
\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x1a\
\x01\x87\xae\x67\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x69\x00\x6e\x00\x64\x00\x65\x00\x74\x00\x65\x00\x72\x00\x6d\
\x00\x69\x00\x6e\x00\x61\x00\x74\x00\x65\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x0b\xda\x30\xa7\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x5f\x00\x63\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x0e\
\x0e\xde\xfa\xc7\
\x00\x6c\
\x00\x65\x00\x66\x00\x74\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x00\xb5\x45\xe7\
\x00\x77\
\x00\x61\x00\x72\x00\x6e\x00\x69\x00\x6e\x00\x67\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x06\x41\x40\x87\
\x00\x73\
\x00\x69\x00\x7a\x00\x65\x00\x67\x00\x72\x00\x69\x00\x70\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x01\x07\x4a\xa7\
\x00\x56\
\x00\x6d\x00\x6f\x00\x76\x00\x65\x00\x74\x00\x6f\x00\x6f\x00\x6c\x00\x62\x00\x61\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x20\
\x09\xd7\x1f\xa7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x69\x00\x6e\x00\x64\x00\x65\x00\x74\x00\x65\x00\x72\x00\x6d\
\x00\x69\x00\x6e\x00\x61\x00\x74\x00\x65\x00\x5f\x00\x66\x00\x6f\x00\x63\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x0c\xe2\x68\x67\
\x00\x74\
\x00\x72\x00\x61\x00\x6e\x00\x73\x00\x70\x00\x61\x00\x72\x00\x65\x00\x6e\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x07\x8f\x9d\x27\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x5f\x00\x6f\x00\x70\x00\x65\x00\x6e\x00\x2d\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x0f\
\x01\xf4\x81\x47\
\x00\x63\
\x00\x6c\x00\x6f\x00\x73\x00\x65\x00\x2d\x00\x68\x00\x6f\x00\x76\x00\x65\x00\x72\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x13\
\x08\xc8\x96\xe7\
\x00\x72\
\x00\x61\x00\x64\x00\x69\x00\x6f\x00\x5f\x00\x75\x00\x6e\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x2e\x00\x70\
\x00\x6e\x00\x67\
\x00\x1a\
\x05\x11\xe0\xe7\
\x00\x63\
\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x62\x00\x6f\x00\x78\x00\x5f\x00\x63\x00\x68\x00\x65\x00\x63\x00\x6b\x00\x65\x00\x64\x00\x5f\
\x00\x66\x00\x6f\x00\x63\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0c\
\x06\xe6\xe6\x67\
\x00\x75\
\x00\x70\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x15\
\x0f\xf3\xc0\x07\
\x00\x75\
\x00\x70\x00\x5f\x00\x61\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x5f\x00\x64\x00\x69\x00\x73\x00\x61\x00\x62\x00\x6c\x00\x65\x00\x64\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x06\x53\x25\xa7\
\x00\x62\
\x00\x72\x00\x61\x00\x6e\x00\x63\x00\x68\x00\x5f\x00\x6f\x00\x70\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x18\x00\x02\x00\x00\x00\x2b\x00\x00\x00\x04\
\x00\x00\x00\x24\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x05\xc4\x00\x00\x00\x00\x00\x01\x00\x00\x79\xdb\
\x00\x00\x04\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x6a\x46\
\x00\x00\x05\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x87\x5e\
\x00\x00\x04\x44\x00\x00\x00\x00\x00\x01\x00\x00\x6b\x26\
\x00\x00\x04\xb4\x00\x00\x00\x00\x00\x01\x00\x00\x6c\x84\
\x00\x00\x05\x40\x00\x00\x00\x00\x00\x01\x00\x00\x76\x9c\
\x00\x00\x03\x50\x00\x00\x00\x00\x00\x01\x00\x00\x52\xcc\
\x00\x00\x06\xb8\x00\x00\x00\x00\x00\x01\x00\x00\x8b\xad\
\x00\x00\x01\x0a\x00\x00\x00\x00\x00\x01\x00\x00\x19\x57\
\x00\x00\x02\xbe\x00\x00\x00\x00\x00\x01\x00\x00\x29\xe6\
\x00\x00\x04\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x6b\xe0\
\x00\x00\x03\xbc\x00\x00\x00\x00\x00\x01\x00\x00\x56\x5f\
\x00\x00\x00\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x14\xcf\
\x00\x00\x07\x08\x00\x00\x00\x00\x00\x01\x00\x00\x90\xe3\
\x00\x00\x04\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x6e\x58\
\x00\x00\x05\xe0\x00\x00\x00\x00\x00\x01\x00\x00\x86\xd9\
\x00\x00\x07\x90\x00\x00\x00\x00\x00\x01\x00\x00\x94\x2c\
\x00\x00\x03\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x55\xc8\
\x00\x00\x05\x28\x00\x00\x00\x00\x00\x01\x00\x00\x74\x4e\
\x00\x00\x07\x42\x00\x00\x00\x00\x00\x01\x00\x00\x92\xe7\
\x00\x00\x06\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x8b\x13\
\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x17\x67\
\x00\x00\x01\xc2\x00\x00\x00\x00\x00\x01\x00\x00\x1e\x3c\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x12\xe8\
\x00\x00\x03\x28\x00\x00\x00\x00\x00\x01\x00\x00\x52\x1c\
\x00\x00\x02\x62\x00\x00\x00\x00\x00\x01\x00\x00\x23\xe3\
\x00\x00\x01\x62\x00\x00\x00\x00\x00\x01\x00\x00\x1a\xa5\
\x00\x00\x06\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x8e\x07\
\x00\x00\x00\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x15\x78\
\x00\x00\x03\x10\x00\x00\x00\x00\x00\x01\x00\x00\x40\x09\
\x00\x00\x06\x24\x00\x00\x00\x00\x00\x01\x00\x00\x88\x46\
\x00\x00\x03\xda\x00\x00\x00\x00\x00\x01\x00\x00\x68\x72\
\x00\x00\x05\x00\x00\x00\x00\x00\x00\x01\x00\x00\x70\x9e\
\x00\x00\x01\x8a\x00\x00\x00\x00\x00\x01\x00\x00\x1b\x64\
\x00\x00\x02\x34\x00\x00\x00\x00\x00\x01\x00\x00\x22\xf0\
\x00\x00\x05\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x78\x8d\
\x00\x00\x02\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x3f\x5f\
\x00\x00\x01\x2e\x00\x00\x00\x00\x00\x01\x00\x00\x19\xfb\
\x00\x00\x06\x6a\x00\x00\x00\x00\x00\x01\x00\x00\x8a\x4c\
\x00\x00\x01\xfa\x00\x00\x00\x00\x00\x01\x00\x00\x1f\x20\
\x00\x00\x05\xa2\x00\x00\x00\x00\x00\x01\x00\x00\x79\x31\
\x00\x00\x02\x8a\x00\x00\x00\x00\x00\x01\x00\x00\x26\x3d\
\x00\x00\x07\x60\x00\x00\x00\x00\x00\x01\x00\x00\x93\x89\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x18\x00\x02\x00\x00\x00\x2b\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x24\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x05\xc4\x00\x00\x00\x00\x00\x01\x00\x00\x79\xdb\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x04\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x6a\x46\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x05\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x87\x5e\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x04\x44\x00\x00\x00\x00\x00\x01\x00\x00\x6b\x26\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x04\xb4\x00\x00\x00\x00\x00\x01\x00\x00\x6c\x84\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x05\x40\x00\x00\x00\x00\x00\x01\x00\x00\x76\x9c\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x03\x50\x00\x00\x00\x00\x00\x01\x00\x00\x52\xcc\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x06\xb8\x00\x00\x00\x00\x00\x01\x00\x00\x8b\xad\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x01\x0a\x00\x00\x00\x00\x00\x01\x00\x00\x19\x57\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x02\xbe\x00\x00\x00\x00\x00\x01\x00\x00\x29\xe6\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x04\x7e\x00\x00\x00\x00\x00\x01\x00\x00\x6b\xe0\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x03\xbc\x00\x00\x00\x00\x00\x01\x00\x00\x56\x5f\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x00\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x14\xcf\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x07\x08\x00\x00\x00\x00\x00\x01\x00\x00\x90\xe3\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x04\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x6e\x58\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x05\xe0\x00\x00\x00\x00\x00\x01\x00\x00\x86\xd9\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x07\x90\x00\x00\x00\x00\x00\x01\x00\x00\x94\x2c\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x03\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x55\xc8\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x05\x28\x00\x00\x00\x00\x00\x01\x00\x00\x74\x4e\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x07\x42\x00\x00\x00\x00\x00\x01\x00\x00\x92\xe7\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x06\x8e\x00\x00\x00\x00\x00\x01\x00\x00\x8b\x13\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x17\x67\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x01\xc2\x00\x00\x00\x00\x00\x01\x00\x00\x1e\x3c\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x12\xe8\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x03\x28\x00\x00\x00\x00\x00\x01\x00\x00\x52\x1c\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x02\x62\x00\x00\x00\x00\x00\x01\x00\x00\x23\xe3\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x01\x62\x00\x00\x00\x00\x00\x01\x00\x00\x1a\xa5\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x06\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x8e\x07\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x00\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x15\x78\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x03\x10\x00\x00\x00\x00\x00\x01\x00\x00\x40\x09\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x06\x24\x00\x00\x00\x00\x00\x01\x00\x00\x88\x46\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x03\xda\x00\x00\x00\x00\x00\x01\x00\x00\x68\x72\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x05\x00\x00\x00\x00\x00\x00\x01\x00\x00\x70\x9e\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x01\x8a\x00\x00\x00\x00\x00\x01\x00\x00\x1b\x64\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x02\x34\x00\x00\x00\x00\x00\x01\x00\x00\x22\xf0\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x05\x7a\x00\x00\x00\x00\x00\x01\x00\x00\x78\x8d\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x02\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x3f\x5f\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x01\x2e\x00\x00\x00\x00\x00\x01\x00\x00\x19\xfb\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x06\x6a\x00\x00\x00\x00\x00\x01\x00\x00\x8a\x4c\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x01\xfa\x00\x00\x00\x00\x00\x01\x00\x00\x1f\x20\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x05\xa2\x00\x00\x00\x00\x00\x01\x00\x00\x79\x31\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x02\x8a\x00\x00\x00\x00\x00\x01\x00\x00\x26\x3d\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
\x00\x00\x07\x60\x00\x00\x00\x00\x00\x01\x00\x00\x93\x89\
\x00\x00\x01\x52\x5b\x1d\xae\xd0\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name="tv_report",
version="1.1.7",
author="Justin Dray",
author_email="[email protected]",
url="https://github.com/justin8/tv_report",
description="A parser for tv show episode files of various formats",
packages=find_packages(),
license="MIT",
install_requires=[
"colorama",
"pymediainfo",
"tqdm",
"video_utils",
],
tests_require=["nose",
"coverage",
],
test_suite="nose.collector",
entry_points={
"console_scripts": [
"tv-report=tv_report:main",
"tv_report=tv_report:main",
],
},
classifiers=[
"Operating System :: OS Independent",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
],
)
|
# python cloudmesh/compute/awsboto/test_boto.py
import sys
import traceback
from pprint import pprint
import boto3
from cloudmesh.configuration.Config import Config
config = Config()
# pprint(config.data)
credentials = config['cloudmesh.cloud.awsboto.credentials']
pprint(credentials)
new_yaml_data_dict = {
'User_5': {
'Access Key Id': 2,
'Secret Access Key Id': 'user2',
'region name': 'us-west',
}
}
# update_credential(new_yaml_data_dict,filepath)
'''
credentials:
region: 'us-west-2'
EC2_SECURITY_GROUP: 'group1'
EC2_ACCESS_ID: TBD
EC2_SECRET_KEY: TBD
EC2_PRIVATE_KEY_FILE_PATH: '~/.cloudmesh/aws_cert.pem'
EC2_PRIVATE_KEY_FILE_NAME: 'aws_cert'
'''
try:
session = boto3.Session(
aws_access_key_id=credentials['EC2_ACCESS_ID'],
aws_secret_access_key=credentials['EC2_SECRET_KEY'],
region_name=credentials['region'])
except:
print("Access Key is not correct...Please try again..")
sys.exit()
if session:
pass
# ec2_instance, ec2_ids = list_ec2_instance_id(session)
# print(ec2_ids)
sys.exit()
# List all EC2 instance ID's in a particular region for a user
def list_ec2_instance_id(session):
ec2_instance = session.resource('ec2')
instance_ids = []
for each_instance in ec2_instance.instances.all():
instance_ids.append(each_instance.id)
return ec2_instance, instance_ids
# Returns status of an EC2 instance
def get_ec2_instance_status(ec2_instance):
state = []
for each in ec2_instance.instances.all():
print(each)
state.append(each.id.state)
return state
return instance.state
# Starts an EC2 instance
def start_ec2_instance(session, instance_id):
ec2 = session.client('ec2')
print(instance_id)
try:
ec2.start_instances(InstanceIds=[instance_id])
print("Instance starting..")
ec2.wait_until_running()
print("Instance started")
except Exception as ex:
traceback.print_exc()
# Stops an EC2 instance
def stop_ec2_instance(session, instance_id):
ec2 = session.client('ec2')
try:
ec2.stop_instances(InstanceIds=[instance_id])
print("Instance stopping..")
except:
print("EC2 Instance stopping failed...Try again..")
# instance_state = get_ec2_instance_status(ec2_instance)
print("currently instance state is {instance_state")
# start_ec2_instance(session,ec2_ids[0])
# stop_ec2_instance(session,ec2_ids[0])
|
#encoding:utf-8
from system.models import *
import hashlib
Customer.objects.create(
CustomerAccount = 'customer1',
CustomerName = 'customer1',
CustomerPassword = hashlib.md5('123456').hexdigest(),
CustomerTelephone = '11111111',
CustomerEmail = '[email protected]',
CustomerAddress = 'far away')
Customer.objects.create(
CustomerAccount = 'cccc',
CustomerName = 'ccc',
CustomerPassword = hashlib.md5('123456').hexdigest(),
CustomerTelephone = '11111111',
CustomerEmail = '[email protected]',
CustomerAddress = 'far away')
Seller.objects.create(
SellerAccount = 'Seller1',
SellerName = 'Seller1',
SellerPassword = hashlib.md5('123456').hexdigest(),
SellerTelephone = '11111111',
SellerEmail = '[email protected]',
SellerAddress = 'far away')
HelpCenter.objects.create(
HelpCenterName = 'hcname1',
HelpCenterContent = 'hccotent1')
Shop.objects.create(
ShopDescription = 'ShopDescription1',
SellerID = Seller.objects.get(id=1),
ShopName = 'shopname1',
ShopState = 1)
#images/1.png
Commodity.objects.create(
CommodityName = 'Commodity1',
CommodityDescription = 'This is Commodity1',
CommodityAmount = 100,
SoldAmount = 0,
PurchasePrice = 10,
SellPrice = 15,
CommodityType = 'C',
CommodityImage = 'images/1.png',
CommodityDiscount = 0.9,
ShopID = Shop.objects.get(id=1))
Administrator.objects.create(
AdministratorAccount = 'aaaddd',
AdministratorName = 'eric',
AdministratorPassword = hashlib.md5('123456').hexdigest(),
AdministratorTelephone = '12121212',
AdministratorEmail = '[email protected]')
ShopAdv.objects.create(
ShopID = Shop.objects.get(id=1),
OwnerID = Seller.objects.get(id=1),
AdvertisementContent = 'This is shopadv')
CommodityAdv.objects.create(
CommodityID = Commodity.objects.get(id=1),
OwnerID = Seller.objects.get(id=1),
AdvertisementContent = 'This is adv')
HomeShopAdv.objects.create(
ShopID = Shop.objects.get(id=1),
OwnerID = Administrator.objects.get(id=1),
AdvertisementContent = 'This is adv')
HomeCommodityAdv.objects.create(
CommodityID = Commodity.objects.get(id=1),
OwnerID = Administrator.objects.get(id=1),
AdvertisementContent = 'This is adv')
System.objects.create(
BulletinBoardContent = 'This is BulletinBoardContent',
BulletinBoardDescription = 'This is BulletinBoardDescription',
BulletinBoardDate = '2014-10-17',
ComissionRate = 0.01)
BlacklistSeller.objects.create(
BlacklistSellerReason = 'yanchou',
SellerID = Seller.objects.get(id=1),
AdministratorID = Administrator.objects.get(id=1))
Discount.objects.create(
DiscountRate = 0.9,
SellerID = Seller.objects.get(id=1),
ShopID = Shop.objects.get(id=1))
ShopOrder.objects.create(
ShopOrderState = 0,
ShopOrderDate = '2014-10-17',
ShopID = Shop.objects.get(id=1))
CustomerOrder.objects.create(
CustomerOrderState = '0',
CustomerOrderDate = '2014-10-17',
CustomerID = Customer.objects.get(id=1))
#订单状态:0-待付款,1-已付款待发货,2-待签收,
# 3-待评价,4-待退款,5-退款成功,6-卖家拒绝退款
OrderList.objects.create(
OrderListState = 0,
OrderListDate = '2014-10-17',
#SellerID = Seller.objects.get(id=2),
ShopOrderID = ShopOrder.objects.get(id=1),
CustomerOrderID = CustomerOrder.objects.get(id=1),
CommodityID = Commodity.objects.get(id=1))
Comment.objects.create(
CommentContent = 'This is comment',
CustomerID = Customer.objects.get(id=1),
CommodityID = Commodity.objects.get(id=1))
Cart.objects.create(
CartDate = '2014-10-17',
CustomerID = Customer.objects.get(id=1),
CommodityID = Commodity.objects.get(id=1),
CartCommodityAmount = 1)
Favorite.objects.create(
FavoriteDate = '2014-10-17',
CustomerID = Customer.objects.get(id=1),
CommodityID = Commodity.objects.get(id=1))
NotificationCustomer.objects.create(
NotificationContent = 'well this is..')
BlacklistCustomer.objects.create(
BlacklistCustomerReason = 'you dare',
AdministratorID = Administrator.objects.get(id=1),
CustomerID = Customer.objects.get(id=1)) |
class FTLSerializer():
def serialize(self, ast):
body = ast['body']
comment = ast['comment']
string = u''
if comment is not None:
string += self.dumpComment(comment) + u'\n\n'
for entry in body:
string += self.dumpEntry(entry)
return string
def dumpEntry(self, entry):
if entry['type'] == 'Entity':
return self.dumpEntity(entry) + u'\n'
elif entry['type'] == 'Comment':
return self.dumpComment(entry) + u'\n\n'
elif entry['type'] == 'Section':
return self.dumpSection(entry) + u'\n'
elif entry['type'] == 'JunkEntry':
return u''
else:
print(entry)
raise Exception('Unknown entry type.')
return u''
def dumpEntity(self, entity):
str = u''
if entity['comment']:
str += u'\n' + self.dumpComment(entity['comment']) + u'\n'
id = self.dumpIdentifier(entity['id'])
str += u'{} ='.format(id)
if (entity['value']):
value = self.dumpPattern(entity['value'])
str += u' {}'.format(value)
if len(entity['traits']):
traits = self.dumpMembers(entity['traits'], 2)
str += u'\n{}'.format(traits)
return str
def dumpComment(self, comment):
return u'# {}'.format(comment['content'].replace('\n', u'\n# '))
def dumpSection(self, section):
comment = u'{}\n'.format(self.dumpComment(
section['comment'])) if section['comment'] else u''
sec = self.dumpKeyword(section['key'])
str = u'\n\n{}[[ {} ]]\n\n'.format(comment, sec)
for entry in section['body']:
str += self.dumpEntry(entry)
return str
def dumpIdentifier(self, id):
return id['name']
def dumpKeyword(self, kw):
if kw['namespace']:
return u'{}/{}'.format(kw['namespace'], kw['name'])
return kw['name']
def dumpPattern(self, pattern):
if pattern is None:
return u''
str = u''
for elem in pattern['elements']:
if elem['type'] == 'TextElement':
if '\n' in elem['value']:
str += u'\n | {}'.format(
elem['value'].replace('\n', '\n | '))
else:
str += elem['value']
elif elem['type'] == 'Placeable':
str += self.dumpPlaceable(elem)
if pattern['quoted']:
return u'"{}"'.format(str)
return str
def dumpPlaceable(self, placeable):
source = u', '.join(map(self.dumpExpression, placeable['expressions']))
if source.endswith('\n'):
return u'{{ {}}}'.format(source)
return u'{{ {} }}'.format(source)
def dumpExpression(self, exp):
if exp['type'] == 'Identifier' or \
exp['type'] == 'FunctionReference' or \
exp['type'] == 'EntityReference':
return self.dumpIdentifier(exp)
if exp['type'] == 'ExternalArgument':
return u'${}'.format(self.dumpIdentifier(exp))
elif exp['type'] == 'SelectExpression':
sel = self.dumpExpression(exp['expression'])
variants = self.dumpMembers(exp['variants'], 2)
return u'{} ->\n{}\n'.format(sel, variants)
elif exp['type'] == 'CallExpression':
id = self.dumpExpression(exp['callee'])
args = self.dumpCallArgs(exp['args'])
return u'{}({})'.format(id, args)
elif exp['type'] == 'Pattern':
return self.dumpPattern(exp)
elif exp['type'] == 'Number':
return exp['value']
elif exp['type'] == 'Keyword':
return self.dumpKeyword(exp)
elif exp['type'] == 'MemberExpression':
obj = self.dumpExpression(exp['object'])
key = self.dumpExpression(exp['keyword'])
return u'{}[{}]'.format(obj, key)
def dumpCallArgs(self, args):
return u', '.join(map(
lambda arg:
u'{}: {}'.format(arg['name'],
self.dumpExpression(arg['value']))
if arg['type'] == 'KeyValueArg' else self.dumpExpression(arg),
args))
def dumpMembers(self, members, indent):
return u'\n'.join(map(lambda member: u'{}[{}] {}'.format(
u' ' * (indent - 1) + u'*' if member['default'] else u' ' * indent,
self.dumpExpression(member['key']),
self.dumpPattern(member['value'])
), members))
|
import math
class sample:
def __init__(self,a=[],r=1):
self.dat = a
self.rate = r
def __getitem__(self,i):#no interpolation
return self.dat[int(i*self.rate)]
def __len__(self):
return len(self.dat)/self.rate
class linterpSample(sample):
def __getitem__(self,i):
if math.ceil(i*self.rate) < len(self.dat):
a = i*self.rate-math.floor(i*self.rate)
return self.dat[math.floor(i*self.rate)]*(1-a)+self.dat[math.ceil(i*self.rate)]*a
return self.dat[-1]
table_rate = 256
table_size = table_rate**2
sampled_step = sample([0 for i in range(table_size)],table_rate)
for i in range(1,len(sampled_step.dat)):
sampled_step.dat[i] = sampled_step.dat[i-1]+math.sin(i*math.pi/table_rate)/(i*math.pi/table_rate)/table_rate
def getSampledStep(s):
if s < 0:
return -getSampledStep(-s)
if s >= sampled_step.__len__():
return 1
else:
return sampled_step[s]
sinTable = sample([math.sin(i*math.pi/table_size/2) for i in range(table_size+1)],table_size*4)
def sin2π(x):
x = x%1
if x > .5:
return -sin2π(x-.5)
if x > .25:
return sinTable[.5-x]
return sinTable[x]
class square:
def __init__(self,f,w=.5,p=0,sr=48000):
self.phase = p
self.sample_frequency = f/sr
self.sample_rate = sr
self.delta = 1
self.width = w
self.method_switch = 1/2
def freq(self,f=None):
if f == None:
return self.sample_frequency*self.sample_rate*self.delta
else:
self.sample_frequency = f/self.sample_rate/self.delta
def hi(self,d=1):
self.sample_frequency *= self.delta/d
self.delta = d
def __next__(self):
p = self.phase
self.phase = (self.phase + self.sample_frequency*self.delta)%1
if self.sample_frequency > self.method_switch:
#forier form (for high frequency waves):
f = self.sample_frequency
i = 1
tot = 0
while f <= .5:
tot += 2/i/math.pi*sin2π(i*self.width/2)*sin2π(p)
i += 1
f += self.sample_frequency
return tot
else:
#sinc-filter table form
#phase in samples
sp = p/self.sample_frequency
return getSampledStep(sp)\
+getSampledStep(self.width/self.sample_frequency-sp)
#return -getSampledStep(sp-self.width/self.sample_frequency)
# -getSampledStep(1/self.sample_frequency-sp)
def __iter__(self):
while 1:
yield next(self)
|
def menu(parent_id=0, menutree=None):
menutree = menutree or []
cur = g.db.execute('select id, parent, alias, title, ord from static where parent="'+ str(parent_id) +'" and ord>0 order by ord')
fetch = cur.fetchall()
if not fetch:
return None
return [{'id':raw[0], 'parent':raw[1], 'alias':raw[2], 'title':raw[3], 'sub':menu(raw[0])} for raw in fetch]
create table static (
id integer primary key autoincrement,
parent integer,
alias string not null,
title string not null,
text string not null,
ord integer
);
@app.route('/')
def index():
menu_list = menu()
[...]
return render_template('index.tpl', **locals())
<nav role="navigation">
{% for menu in menu_list %}
<li>
<a{% if page_id == menu.id %} class="active"{% endif %} href="/{{ menu.alias }}">{{ menu.title }}</a>
{% if menu.sub %}
<ul>
{% for sub in menu.sub %}
<li><a href="/{{ menu.alias }}/{{ sub.alias }}">{{ sub.title }}</a>
{% if sub.sub %}
<ul>
{% for subsub in sub.sub %}
<li><a href="/{{ menu.alias }}/{{ sub.alias }}/{{ subsub.alias }}">{{ subsub.title }}</a>
{% endfor %}
</ul>
{% endif %}
</li>
{% endfor %}
</ul>
{% endif %}
</li>
{% endfor %}
</nav>
{% block navbar %}
<ul>
{% for item in nav.top %}
<li class="{{ 'active' if item.is_active else '' }}">
{{ item }}
{% if item.items %}
<ul>
{% for child in item.items %}
<li class="{{ 'active' if child.is_active else '' }}">
{{ child }}
</li>
{% endfor %}
</ul>
{% endif %}
</li>
{% endfor %}
</ul>
{% endblock %}
nav.Bar('top', [
nav.Item('Home', 'index'),
nav.Item('Latest News', 'news', {'page': 1}),
nav.Item('Nestable', 'nestable', items=[
nav.Item('Nested 1', 'nested-1'),
nav.Item('Nested 2', 'nested-2'),
]),
])
# taken from https://github.com/tonyseek/flask-navigation
# Copyright (c) 2014 Jiangge Zhang
# MIT License
import collections
class Item(object):
"""The navigation item object.
:param label: the display label of this navigation item.
:param endpoint: the unique name of this navigation item.
"""
def __init__(self, label, endpoint, args=None, url=None, html_attrs=None, items=None):
self.label = label
self.endpoint = endpoint
self._args = args
self._url = url
self.html_attrs = {} if html_attrs is None else html_attrs
self.items = ItemCollection(items or None)
@property
def args(self):
"""The arguments which will be passed to ``url_for``.
:type: :class:`dict`
"""
if self._args is None:
return {}
if callable(self._args):
return dict(self._args())
return dict(self._args)
@property
def ident(self):
"""The identity of this item.
:type: :class:`~flask.ext.navigation.Navigation.ItemReference`
"""
return ItemReference(self.endpoint, self.args)
class ItemCollection(collections.MutableSequence, collections.Iterable):
"""The collection of navigation items.
"""
def __init__(self, iterable=None):
#: the item collection
self._items = []
#: the mapping collection of endpoint -> item
self._items_mapping = {}
#: initial extending
self.extend(iterable or [])
def __repr__(self):
return 'ItemCollection(%r)' % self._items
def __getitem__(self, index):
if isinstance(index, int):
return self._items[index]
if isinstance(index, tuple):
endpoint, args = index
else:
endpoint, args = index, {}
ident = ItemReference(endpoint, args)
return self._items_mapping[ident]
def __setitem__(self, index, item):
# remove the old reference
old_item = self._items[index]
del self._items_mapping[old_item.ident]
self._items[index] = item
self._items_mapping[item.ident] = item
def __delitem__(self, index):
item = self[index]
del self._items[index]
del self._items_mapping[item.ident]
def __len__(self):
return len(self._items)
def __iter__(self):
return iter(self._items)
def insert(self, index, item):
self._items.insert(index, item)
self._items_mapping[item.ident] = item
class ItemReference(collections.namedtuple('ItemReference', 'endpoint args')):
"""The identity tuple of navigation item.
:param endpoint: the endpoint of view function.
:type endpoint: ``str``
:param args: the arguments of view function.
:type args: ``dict``
"""
def __new__(cls, endpoint, args=()):
if isinstance(args, dict):
args = freeze_dict(args)
return super(cls, ItemReference).__new__(cls, endpoint, args) |
# coding:utf-8
"""
[email protected]
package A : pack num, pack length
package B : pack lenght, pack payload
Client Send pA, get ack, then send pB, Server give ack
python ttcp.py -m server
python ttcp.py -m client
"""
import socket
import time
import struct
import sys
import argparse
INT_LEN = struct.calcsize('i')
def read_in(sock, n):
data = ''
while len(data) < n:
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
return data
def upack_ack(ack):
num = None
try:
num = int(struct.unpack('i', ack)[0])
except Exception, e:
print 'parse ack error', ack, e
return num
def transmit(host, port, num, length):
payload = '0' * length
total_mb = 1.0 * length * num / 1024 / 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
print 'connected to {0}:{1} ...'.format(host, port)
start_time = time.time()
#可以直接做成一个msg, 也可以分2次发送
hello = struct.pack('>ii', num, length)
s.send(hello)
print 'message num ==>', num
print 'message length ==>', length
print 'all message {0} MB'.format(total_mb)
msg = struct.pack('>i', length) + payload
for i in xrange(num):
s.sendall(msg)
ack = read_in(s, INT_LEN)
if upack_ack(ack) != length:
print 'ack error'
sys.exit(1)
s.close()
total_time = time.time()-start_time
print '%.3f seconds \n%.3f MiB/s' % (total_time, total_mb/total_time)
def receive(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(5)
print 'listen on {0}:{1}'.format(host, port)
while 1:
conn, client_address = sock.accept()
print 'client from {0}'.format(client_address)
hello = read_in(conn, 8)
num, length = struct.unpack('>ii', hello)
print 'message num ==>', num
print 'message length ==>', length
start_time = time.time()
recv_length = 0
for i in xrange(num):
msg_len = struct.unpack('>i', read_in(conn, INT_LEN))[0]
# print 'payload length', msg_len
data = read_in(conn, msg_len)
recv_length += len(data)
conn.sendall(struct.pack('i', msg_len))
conn.close()
total_mb = 1.0 * recv_length / 1024 / 1024
total_time = time.time()-start_time
print 'all messages is %s MB' % total_mb
print '%.3f seconds \n%.3f MiB/s \n' % (total_time, total_mb/total_time)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Simple TTCP tools')
parser.add_argument('--host', default='127.0.0.1', help="hostname")
parser.add_argument('--port', type=int, default=5001, help="port")
parser.add_argument('-n', type=int, default=1000, help="message's number")
parser.add_argument('-l', type=int, default=1024, help="message's length")
parser.add_argument('-m', choices=['server', 'client'], required=True,
help="server or client")
args = parser.parse_args()
if args.m == 'server':
receive(host=args.host, port=args.port)
else:
transmit(args.host, args.port, args.n, args.l)
|
import platform
from setuptools import setup, find_packages
from sys import argv
import setuptools
TORCH_MAJOR = 1
TORCH_MINOR = 3
import pprint
extra_compile_args = []
if platform.system() != 'Windows':
extra_compile_args += ['-Wno-unused-variable']
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
extra_compile_args += ['-DVERSION_GE_1_3']
ext_modules = [
setuptools.Extension('torch_scatter.scatter_cpu', ['cpu/scatter.cpp'],
extra_compile_args=extra_compile_args)
]
def my_build_ext(pars):
# import delayed:
from setuptools.command.build_ext import build_ext as _build_ext#
# include_dirs adjusted:
class build_ext(_build_ext):
def __init__(self, *args, **kwargs):
print(args, kwargs)
self.ARGS = args
self.KWARGS = kwargs
super().__init__(*args, **kwargs)
def finalize_options(self):
_build_ext.finalize_options(self)
pprint.pprint(self.__dict__)
# Prevent numpy from thinking it is still in its setup process:
# print(__builtins__.__dict__)
__builtins__.__TORCH_SETUP__ = False
import torch.utils.cpp_extension
import importlib
importlib.reload(torch)
# print(torch.utils.cpp_extension)
# print(dir(torch.utils))
extensions = self.extensions
a = torch.utils.cpp_extension.BuildExtension(*self.ARGS, **self.KWARGS)
# self.__dict__.update(a.__dict__)
# self.extensions = extensions
pprint.pprint(a.__dict__)
from torch.utils.cpp_extension import CppExtension
b = CppExtension('torch_scatter.scatter_cpu', ['cpu/scatter.cpp'],
extra_compile_args=extra_compile_args)
self.include_dirs += b.include_dirs
self.language = b.language
pprint.pprint(self.__dict__)
return build_ext(pars)
cmdclass = {'build_ext': my_build_ext}
GPU = False
for arg in argv:
if arg == '--cpu':
GPU = False
argv.remove(arg)
# if CUDA_HOME is not None and GPU:
# ext_modules += [
# CUDAExtension('torch_scatter.scatter_cuda',
# ['cuda/scatter.cpp', 'cuda/scatter_kernel.cu'])
# ]
__version__ = '1.4.0'
url = 'https://github.com/rusty1s/pytorch_scatter'
install_requires = ['torch']
setup_requires = ['torch==1.3.1', 'pytest-runner']
tests_require = ['pytest', 'pytest-cov']
print(ext_modules)
setup(
name='torch_scatter',
version=__version__,
description='PyTorch Extension Library of Optimized Scatter Operations',
author='Matthias Fey',
author_email='[email protected]',
url=url,
download_url='{}/archive/{}.tar.gz'.format(url, __version__),
keywords=[
'pytorch',
'scatter',
],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=find_packages(),
)
|
import numpy as np
import matplotlib.pyplot as plt
def plot2D(array, cmap='viridis'):
plt.figure(0)
plt.imshow(np.repeat(array, repeats=5, axis=0), cmap=cmap)
plt.show()
def plot3D(array, n_cols=3, cmap='viridis'):
plt.figure(0)
n, h, w = array.shape
n_cols = min(n, n_cols)
n_rows = int(np.ceil(n/n_cols))
for i in range(n_rows):
for j in range(n_cols):
if i*n_cols+j < n:
plt.subplot2grid((n_rows, n_cols), (i, j))
plt.imshow(array[i*n_cols+j].reshape((h, w)), cmap=cmap)
plt.show()
def plot(array, n_cols=3, cmap='viridis'):
if len(array.shape) == 2:
plot2D(array, cmap=cmap)
elif len(array.shape) == 3:
plot3D(array, n_cols, cmap=cmap) |
# -*- coding: utf-8 -*-
"""
s3://sanhe-aws-athena-practice/data/db_learn_athena/
"""
import boto3
from .config import aws_profile, bucket_name, dbname
ses = boto3.Session(profile_name=aws_profile)
s3 = ses.resource("s3")
bucket = s3.Bucket(bucket_name)
if bucket.creation_date is None:
s3.create_bucket(Bucket=bucket_name)
bucket = s3.Bucket(bucket_name)
root_uri = f"s3://{bucket_name}/data/{dbname}"
result_uri = f"s3://{bucket_name}/result"
athena = ses.client("athena")
def get_data_key(dataset_name):
return f"data/{dbname}/tb_{dataset_name}"
def get_data_uri(dataset_name):
return f"s3://{bucket_name}/data/{dbname}/tb_{dataset_name}/"
def run_query(sql):
from pyathena import connect
cursor = connect(
profile_name=aws_profile,
s3_staging_dir=result_uri,
schema_name="learn_athena",
).cursor()
cursor.execute(sql)
for row in cursor.fetchall():
print(row)
|
def KMPSearch(pat, txt):
pat_len = len(pat)
txt_len = len(txt)
lps = [0] * pat_len
pat_index = 0
computeLPSArray(pat, pat_len, lps)
txt_index = 0
while txt_index < txt_len:
if pat[pat_index] == txt[txt_index]:
txt_index += 1
pat_index += 1
if pat_index == pat_len:
print ("Found pattern at index " + str(txt_index - pat_index))
pat_index = lps[pat_index - 1]
# mismatch after pat_index matches
elif txt_index < txt_len and pat[pat_index] != txt[txt_index]:
# Do not match lps[0..lps[pat_index - 1]] characters,
# they will match anyway
if pat_index != 0:
pat_index = lps[pat_index - 1]
else:
txt_index += 1
def computeLPSArray(pat, pat_len, lps):
len = 0 # length of the previous longest prefix suffix
lps[0]
now = 1
while now < pat_len:
# If letters match, now and len plus 1, otherwise only now plus 1.
if pat[now] == pat[len]:
len += 1
lps[now] = len
now += 1
else:
#
if len != 0:
len = lps[len - 1]
# From the back to the first, all of letters are different.
else:
lps[now] = 0
now += 1
txt = "ABABDABACDABABCABAB"
pat = "ABABCABAB"
KMPSearch(pat, txt)
txt = "THIS IS A TEST TEXT"
pat = "TEST"
KMPSearch(pat, txt)
txt = "AABAACAADAABAABA"
pat = "AABA"
KMPSearch(pat, txt) |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 7 14:56:22 2020
@author: sj
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import rscls
colors = ['#000000','#CACACA', '#02FF00', '#00FFFF', '#088505', '#FF00FE', '#AA562E', '#8C0085', '#FD0000', '#FFFF00']
cmap = ListedColormap(colors)
#0000FF, #228B22, #7BFC00, #FF0000, #724A12, #C0C0C0, #00FFFF, #FF8000, #FFFF00
def save_cmap_hk(img, cmap, fname):
colors = ['#000000','#008000','#808080','#FFF700','#0290DE','#EDC9Af','#F3F2E7']
cmap = ListedColormap(colors)
sizes = np.shape(img)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, cmap=cmap, vmin=0, vmax=6)
plt.savefig(fname, dpi = height)
plt.close()
def save_cmap_pc(img, cmap, fname):
colors = ['#000000','#0000FF','#228B22','#7BFC00', '#FF0000', '#724A12', '#C0C0C0',
'#00FFFF', '#FF8000', '#FFFF00']
cmap = ListedColormap(colors)
sizes = np.shape(img)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, cmap=cmap, vmin=0, vmax=9)
plt.savefig(fname, dpi = height)
plt.close()
def save_cmap_salinas16(img,cmap,fname):
colors = ['#000000','#DCB809','#03009A','#FE0000','#FF349B','#FF66FF',
'#0000FD','#EC8101','#00FF00','#838300','#990099','#00F7F1',
'#009999','#009900','#8A5E2D','#67FECB','#F6EF00']
cmap = ListedColormap(colors)
sizes = np.shape(img)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, cmap=cmap, vmin=0, vmax=16)
plt.savefig(fname, dpi = height)
plt.close()
def save_cmap_indian16(img,cmap,fname):
colors = ['#000000','#FFFC86','#0037F3','#FF5D00','#00FB84','#FF3AFC',
'#4A32FF','#00ADFF','#00FA00','#AEAD51','#A2549E','#54B0FF',
'#375B70','#65BD3C','#8F462C','#6CFCAB','#FFFC00']
cmap = ListedColormap(colors)
sizes = np.shape(img)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, cmap=cmap, vmin=0, vmax=16)
plt.savefig(fname, dpi = height)
plt.close()
def save_cmap_pu9(img, cmap, fname):
colors = ['#000000','#CACACA','#02FF00','#00FFFF','#088505','#FF00FE','#AA562E','#8C0085','#FD0000', '#FFFF00']
cmap = ListedColormap(colors)
sizes = np.shape(img)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img, cmap=cmap, vmin=0, vmax=9)
plt.savefig(fname, dpi = height)
plt.close()
def save_im(img,fname):
sizes = np.shape(img)
height = float(sizes[0])
width = float(sizes[1])
fig = plt.figure()
fig.set_size_inches(width/height, 1, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(img)
plt.savefig(fname, dpi = height)
plt.close()
#save_im(rscls.strimg255(im[:,:,[50,34,20]],5),'indian_im')
#plt.imshow(rscls.strimg255(im[:,:,[50,34,20]],5))
#save_cmap(pre1,cmap,'a')
|
# pip install pygame
import pygame
# Import random for random numbers
import random
# pip install pyserial
import serial
import json
# change the port as necessary by your OS
ser = serial.Serial('/dev/cu.usbserial-02301AC2', 9600)
from pygame.locals import (
K_UP,
K_DOWN,
K_LEFT,
K_RIGHT,
K_ESCAPE,
KEYDOWN,
QUIT,
)
# initialization
pygame.init()
pygame.mixer.init()
# play music on loop
pygame.mixer.music.load('alexander-nakarada-space-ambience.mp3')
pygame.mixer.music.play(-1)
# -- PLAYER CLASS -- #
class Player(pygame.sprite.Sprite):
def __init__(self):
super(Player, self).__init__()
self.surf = pygame.Surface((75, 25))
self.surf.fill((255, 255, 255))
self.rect = self.surf.get_rect()
# Move the sprite based on hardware movement
def update(self, movement):
paused = 0
vY = int(movement['vY']) * (-1)
vX = int(movement['vX'])
button = int(movement['button'])
# pause if button pressed
if (button == 0):
paused = 1
return paused
# move player up/down
if vY:
self.rect.move_ip(0, vY)
# move player right/left
if vX:
self.rect.move_ip(vX, 0)
# Keep player on the screen
if self.rect.left < 0:
self.rect.left = 0
if self.rect.right > SCREEN_WIDTH:
self.rect.right = SCREEN_WIDTH
if self.rect.top <= 0:
self.rect.top = 0
if self.rect.bottom >= SCREEN_HEIGHT:
self.rect.bottom = SCREEN_HEIGHT
# returns status of whether or not game is paused
return paused
# -- ENEMY CLASS -- #
class Enemy(pygame.sprite.Sprite):
def __init__(self):
super(Enemy, self).__init__()
self.surf = pygame.Surface((20, 10))
self.surf.fill((255, 255, 255))
self.rect = self.surf.get_rect(
center=(
random.randint(SCREEN_WIDTH + 20, SCREEN_WIDTH + 100),
random.randint(0, SCREEN_HEIGHT),
)
)
self.speed = random.randint(5, 20)
# Move the sprite based on speed
# Remove the sprite when it passes the left edge of the screen
def update(self, movement):
# change direction and speed of enemies
dial = int(movement['dial'])
if dial:
self.rect.move_ip(-(self.speed+(dial)), 0)
else:
self.rect.move_ip(-self.speed, 0)
if self.rect.right < 0:
self.kill()
# Define constants for the screen width and height
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
# Set screen size
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
# Custom event for adding a new enemy
ADDENEMY = pygame.USEREVENT + 1
pygame.time.set_timer(ADDENEMY, 250)
# Instantiate player
player = Player()
# Create groups to hold enemy sprites and all sprites
# - enemies is used for collision detection and position updates
# - all_sprites is used for rendering
enemies = pygame.sprite.Group()
all_sprites = pygame.sprite.Group()
all_sprites.add(player)
# Run until the user asks to quit
running = True
while running:
# Get current movement of hardware
string = str(ser.readline().strip(), 'ascii')
# Prints hardware input to console
print()
print(string)
print()
# fix bad inputs
if (len(string) == 0):
continue
while (string[0] != '{' and string[0] != '}'):
string = string[1:]
continue
if (string[0] == '}'):
continue
input = json.loads(string)
button = input['button']
dial = input['dial']
vY = input['vY']
vX = input['vX']
switch = input['switch']
# Check if user clicked the window close button
for event in pygame.event.get():
# check if user hit a key
if event.type == KEYDOWN:
# Was it the Escape key? If so, stop the loop.
if event.key == K_ESCAPE:
running = False
elif event.type == pygame.QUIT:
running = False
elif event.type == ADDENEMY:
# Create the new enemy and add it to sprite groups
new_enemy = Enemy()
enemies.add(new_enemy)
all_sprites.add(new_enemy)
# Update player position based on hardware input
paused = player.update(input)
# Pause game if button is pressed
if (paused == 1):
continue
# Update enemy position and speed based on hardware input
enemies.update(input)
# Fill the screen with black
screen.fill((0, 0, 0))
# Draw all sprites
for entity in all_sprites:
screen.blit(entity.surf, entity.rect)
# Used for testing purposes
# - Set invincible to 1 to turn make player unable to die
invincible = 0
# Check if any enemies have collided with the player
if (pygame.sprite.spritecollideany(player, enemies) and invincible == 0):
# If so, then remove the player and stop the loop
running = False
# kill all sprites
for entity in all_sprites:
entity.kill()
# List of end quotes
quotes = ["So much universe and so little time.",
"The Earth is the cradle of humanity, but mankind cannot stay in the cradle forever.",
"Space exploration is a force of nature unto itself that no other force in society can rival.",
"Exploration is not a choice really; it's an imperative.",
"Remember to look up at the stars and not down at your feet.",
"When I first looked back at the Earth, standing on the Moon, I cried.",
"To confine our attention to terrestial matters would be to limit the human spirit.",
"I didn't feel like a giant. I felt very, very small."]
# List of corresponding end quote citations
citations = ["-Terry Pratchett",
"-Konstantin Tsiolkovsky",
"-Neil deGrasse Tyson",
"-Michael Collins, Apollo 11 Astronaut",
"-Stephen Hawking",
"-Alan Shepard",
"-Stephen Hawking",
"-Neil Armstrong"]
# random selection of end quote and corresponding citation
selection = random.randint(0, len(quotes)-1)
# size based on selection
size = 22
if selection == 1 or selection == 2 or selection == 6:
size = 18
# draw end quote onto screen
font = pygame.font.Font('freesansbold.ttf', size)
text_surface1 = font.render(quotes[selection], True, (0, 255, 255))
text_surface2 = font.render(citations[selection], True, (0, 255, 255))
text_rect1 = text_surface1.get_rect()
text_rect2 = text_surface2.get_rect()
text_rect1.midtop = (SCREEN_WIDTH/2, SCREEN_HEIGHT/2-20)
text_rect2.midtop = (SCREEN_WIDTH/2, SCREEN_HEIGHT/2+15)
screen.blit(text_surface1, text_rect1)
screen.blit(text_surface2, text_rect2)
pygame.display.flip()
# wait for 6 seconds before quitting
pygame.time.wait(6000)
# Update the display
pygame.display.flip()
pygame.quit()
|
"""Corpus Loaders"""
from typing import Optional
from speechcorpusy.interface import AbstractCorpus, ConfCorpus
from speechcorpusy import presets
def load_preset(
name: str,
root: Optional[str] = None,
download: Optional[bool] = None,
conf: Optional[ConfCorpus] = None,
) -> AbstractCorpus:
"""Load preset corpus.
Args:
name: Preset corpus name
root: Adress of the directory under which the corpus archive is found or downloaded
download: Whether to download original corpus if it is not found in `root`
conf: (Advanced) Corpus configuration containing both `root` and `download`
"""
# Design Notes:
# ConfCorpus is verbose wrapper, but useful when used with config manager.
# For both purpose, we provide both way.
# As a result, loader become dirty, but it is good for user.
# Check config inconsistency
# Both `root` and `conf` are provided, but different value
if root and conf and (root is not conf.root):
raise Exception(f"'root' and 'conf.root' is inconsistent: {root} vs {conf.root}")
# Both `download` and `conf` are provided, but different value
if (download is not None) and conf and (download is not conf.download):
msg = f"'download' and 'conf.download' is inconsistent: {download} vs {conf.download}"
raise Exception(msg)
checked_conf = conf or ConfCorpus(root, download or False)
# Load corpus safely
if name in presets.corpus_list:
corpus_cls: AbstractCorpus = getattr(presets, name)
corpus = corpus_cls(checked_conf)
else:
msg1 = f"Corpus '{name}' is not supported by 'speechcurpusy'. "
msg2 = f"Supported presets: {presets.corpus_list}"
raise Exception(msg1+msg2)
return corpus
|
# RAFT's main model class
import os
import os.path as osp
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import moorpy as mp
import pyhams.pyhams as ph
import raft.member2pnl as pnl
import raft.raft_fowt as fowt
from raft.helpers import *
#import F6T1RNA as structural # import turbine structural model functions
# reload the libraries each time in case we make any changes
from importlib import reload
mp = reload(mp)
ph = reload(ph)
pnl = reload(pnl)
FOWT = reload(fowt).FOWT
class Model():
def __init__(self, design, BEM=None, nTurbines=1, w=[], depth=300):
'''
Empty frequency domain model initialization function
design : dict
Dictionary of all the design info from turbine to platform to moorings
nTurbines
could in future be used to set up any number of identical turbines
'''
self.fowtList = []
self.coords = []
self.nDOF = 0 # number of DOFs in system
# ----- process turbine information -----------------------------------------
# No processing actually needed yet - we pass the dictionary directly to RAFT.
# ----- process platform information ----------------------------------------
# No processing actually needed yet - we pass the dictionary directly to RAFT.
# ----- process mooring information ----------------------------------------------
self.ms = mp.System()
self.ms.parseYAML(design['mooring'])
self.potModMaster = getFromDict(design, 'potModMaster', dtype=int, default=0)
self.dlsMax = getFromDict(design, 'dlsMax', default=5.0)
for mi in design['platform']['members']:
mi['dlsMax'] = self.dlsMax
if self.potModMaster==1:
mi['potMod'] = False
elif self.potModMaster==2:
mi['potMod'] = True
design['turbine']['tower']['dlsMax'] = self.dlsMax
self.XiStart = getFromDict(design, 'XiStart', default=0.1)
self.nIter = getFromDict(design, 'nIter', default=15)
self.depth = depth
# If you're modeling OC3 spar, for example, import the manual yaw stiffness needed by the bridle config
if 'yaw stiffness' in design['turbine']:
self.yawstiff = design['turbine']['yaw stiffness']
else:
self.yawstiff = 0
# analysis frequency array
if len(w)==0:
w = np.arange(.05, 3, 0.05) # angular frequencies tp analyze (rad/s)
self.w = np.array(w)
self.nw = len(w) # number of frequencies
self.k = np.zeros(self.nw) # wave number
for i in range(self.nw):
self.k[i] = waveNumber(self.w[i], self.depth)
# set up the FOWT here <<< only set for 1 FOWT for now <<<
self.fowtList.append(FOWT(design, w=self.w, mpb=self.ms.bodyList[0], depth=depth))
self.coords.append([0.0,0.0])
self.nDOF += 6
self.ms.bodyList[0].type = -1 # need to make sure it's set to a coupled type
self.ms.initialize() # reinitialize the mooring system to ensure all things are tallied properly etc.
self.results = {} # dictionary to hold all results from the model
def addFOWT(self, fowt, xy0=[0,0]):
'''adds an already set up FOWT to the frequency domain model solver.'''
self.fowtList.append(fowt)
self.coords.append(xy0)
self.nDOF += 6
# would potentially need to add a mooring system body for it too <<<
def setEnv(self, Hs=8, Tp=12, spectrum='unit', V=10, beta=0, Fthrust=0):
self.env = Env()
self.env.Hs = Hs
self.env.Tp = Tp
self.env.spectrum = spectrum
self.env.V = V
self.env.beta = beta
self.Fthrust = Fthrust
for fowt in self.fowtList:
fowt.setEnv(Hs=Hs, Tp=Tp, V=V, spectrum=spectrum, beta=beta, Fthrust=Fthrust)
def calcSystemProps(self):
'''This gets the various static/constant calculations of each FOWT done.'''
for fowt in self.fowtList:
fowt.calcBEM()
fowt.calcStatics()
fowt.calcHydroConstants()
#fowt.calcDynamicConstants()
## First get mooring system characteristics about undisplaced platform position (useful for baseline and verification)
self.C_moor0 = self.ms.getCoupledStiffness(lines_only=True) # this method accounts for eqiuilibrium of free objects in the system
self.F_moor0 = self.ms.getForces(DOFtype="coupled", lines_only=True)
self.results['properties'] = {} # signal this data is available by adding a section to the results dictionary
def calcMooringAndOffsets(self):
'''Calculates mean offsets and linearized mooring properties for the current load case.
setEnv and calcSystemProps must be called first. This will ultimately become a method for solving mean operating point.
'''
# Now find static equilibrium offsets of platform and get mooring properties about that point
# (This assumes some loads have been applied)
#self.ms.display=2
self.ms.solveEquilibrium3(DOFtype="both", tol=-0.01) #, rmsTol=1.0E-5) # get the system to its equilibrium
# ::: a loop could be added here for an array :::
fowt = self.fowtList[0]
# range of DOFs for the current turbine
i1 = 0
i2 = 6
print("Equilibrium'3' platform positions/rotations:")
printVec(self.ms.bodyList[0].r6)
r6eq = self.ms.bodyList[0].r6
#self.ms.plot()
print("Surge: {:.2f}".format(r6eq[0]))
print("Pitch: {:.2f}".format(r6eq[4]*180/np.pi))
C_moor = self.ms.getCoupledStiffness(lines_only=True)
F_moor = self.ms.getForces(DOFtype="coupled", lines_only=True) # get net forces and moments from mooring lines on Body
# manually add yaw spring stiffness as compensation until bridle (crow foot) configuration is added
C_moor[5,5] += self.yawstiff
self.C_moor = C_moor
self.F_moor = F_moor
# store results
self.results['means'] = {} # signal this data is available by adding a section to the results dictionary
self.results['means']['platform offset' ] = r6eq
self.results['means']['mooring force' ] = F_moor
#self.results['means']['fairlead tensions'] = ... # <<<
def solveEigen(self):
'''finds natural frequencies of system'''
# total system coefficient arrays
M_tot = np.zeros([self.nDOF,self.nDOF]) # total mass and added mass matrix [kg, kg-m, kg-m^2]
C_tot = np.zeros([self.nDOF,self.nDOF]) # total stiffness matrix [N/m, N, N-m]
# add in mooring stiffness from MoorPy system
C_tot = np.array(self.C_moor0)
# ::: a loop could be added here for an array :::
fowt = self.fowtList[0]
# range of DOFs for the current turbine
i1 = 0
i2 = 6
# add fowt's terms to system matrices (BEM arrays are not yet included here)
M_tot[i1:i2] += fowt.M_struc + fowt.A_hydro_morison # mass
C_tot[i1:i2] += fowt.C_struc + fowt.C_hydro # stiffness
# calculate natural frequencies (using eigen analysis to get proper values for pitch and roll - otherwise would need to base about CG if using diagonal entries only)
eigenvals, eigenvectors = np.linalg.eig(np.matmul(np.linalg.inv(M_tot), C_tot)) # <<< need to sort this out so it gives desired modes, some are currently a bit messy
# sort to normal DOF order based on which DOF is largest in each eigenvector
ind_list = []
for i in range(5,-1, -1):
vec = np.abs(eigenvectors[i,:]) # look at each row (DOF) at a time (use reverse order to pick out rotational DOFs first)
for j in range(6): # now do another loop in case the index was claimed previously
ind = np.argmax(vec) # find the index of the vector with the largest value of the current DOF
if ind in ind_list: # if a previous vector claimed this DOF, set it to zero in this vector so that we look at the other vectors
vec[ind] = 0.0
else:
ind_list.append(ind) # if it hasn't been claimed before, assign this vector to the DOF
break
ind_list.reverse() # reverse the index list since we made it in reverse order
fns = np.sqrt(eigenvals[ind_list])/2.0/np.pi # apply sorting to eigenvalues and convert to natural frequency in Hz
modes = eigenvectors[:,ind_list] # apply sorting to eigenvectors
print("natural frequencies from eigen values")
printVec(fns)
print("mode shapes from eigen values")
printMat(modes)
# alternative attempt to calculate natural frequencies based on diagonal entries (and taking pitch and roll about CG)
if C_tot[0,0] == 0.0:
zMoorx = 0.0
else:
zMoorx = C_tot[0,4]/C_tot[0,0] # effective z elevation of mooring system reaction forces in x and y directions
if C_tot[1,1] == 0.0:
zMoory = 0.0
else:
zMoory = C_tot[1,3]/C_tot[1,1]
zCG = fowt.rCG_TOT[2] # center of mass in z
zCMx = M_tot[0,4]/M_tot[0,0] # effective z elevation of center of mass and added mass in x and y directions
zCMy = M_tot[1,3]/M_tot[1,1]
print("natural frequencies with added mass")
fn = np.zeros(6)
fn[0] = np.sqrt( C_tot[0,0] / M_tot[0,0] )/ 2.0/np.pi
fn[1] = np.sqrt( C_tot[1,1] / M_tot[1,1] )/ 2.0/np.pi
fn[2] = np.sqrt( C_tot[2,2] / M_tot[2,2] )/ 2.0/np.pi
fn[5] = np.sqrt( C_tot[5,5] / M_tot[5,5] )/ 2.0/np.pi
fn[3] = np.sqrt( (C_tot[3,3] + C_tot[1,1]*((zCMy-zMoory)**2 - zMoory**2) ) / (M_tot[3,3] - M_tot[1,1]*zCMy**2 ))/ 2.0/np.pi # this contains adjustments to reflect rotation about the CG rather than PRP
fn[4] = np.sqrt( (C_tot[4,4] + C_tot[0,0]*((zCMx-zMoorx)**2 - zMoorx**2) ) / (M_tot[4,4] - M_tot[0,0]*zCMx**2 ))/ 2.0/np.pi # this contains adjustments to reflect rotation about the CG rather than PRP
# note that the above lines use off-diagonal term rather than parallel axis theorem since rotation will not be exactly at CG due to effect of added mass
printVec(fn)
# store results
self.results['eigen'] = {} # signal this data is available by adding a section to the results dictionary
self.results['eigen']['frequencies'] = fns
self.results['eigen']['modes' ] = modes
def solveDynamics(self, tol=0.01, conv_plot=1, RAO_plot=1):
'''After all constant parts have been computed, call this to iterate through remaining terms
until convergence on dynamic response. Note that steady/mean quantities are excluded here.
nIter = 2 # maximum number of iterations to allow
'''
nIter = int(self.nIter) + 1 # maybe think of a better name for the first nIter
XiStart = self.XiStart
# total system complex response amplitudes (this gets updated each iteration)
XiLast = np.zeros([self.nDOF,self.nw], dtype=complex) + XiStart # displacement and rotation complex amplitudes [m, rad]
if conv_plot:
fig, ax = plt.subplots(3,1,sharex=True)
c = np.arange(nIter+1) # adding 1 again here so that there are no RuntimeErrors
c = cm.jet((c-np.min(c))/(np.max(c)-np.min(c))) # set up colormap to use to plot successive iteration results
# ::: a loop could be added here for an array :::
fowt = self.fowtList[0]
i1 = 0 # range of DOFs for the current turbine
i2 = 6
# sum up all linear (non-varying) matrices up front
M_lin = fowt.M_struc[:,:,None] + fowt.A_BEM + fowt.A_hydro_morison[:,:,None] # mass
B_lin = fowt.B_struc[:,:,None] + fowt.B_BEM # damping
C_lin = fowt.C_struc + self.C_moor + fowt.C_hydro # stiffness
F_lin = fowt.F_BEM + fowt.F_hydro_iner # excitation
# start fixed point iteration loop for dynamics <<< would a secant method solve be possible/better? <<<
for iiter in range(nIter):
# ::: re-zero some things that will be added to :::
# total system coefficient arrays
M_tot = np.zeros([self.nDOF,self.nDOF,self.nw]) # total mass and added mass matrix [kg, kg-m, kg-m^2]
B_tot = np.zeros([self.nDOF,self.nDOF,self.nw]) # total damping matrix [N-s/m, N-s, N-s-m]
C_tot = np.zeros([self.nDOF,self.nDOF,self.nw]) # total stiffness matrix [N/m, N, N-m]
F_tot = np.zeros([self.nDOF,self.nw], dtype=complex) # total excitation force/moment complex amplitudes vector [N, N-m]
Z = np.zeros([self.nDOF,self.nDOF,self.nw], dtype=complex) # total system impedance matrix
# ::: a loop could be added here for an array :::
fowt = self.fowtList[0]
i1 = 0 # range of DOFs for the current turbine
i2 = 6
# get linearized terms for the current turbine given latest amplitudes
B_linearized, F_linearized = fowt.calcLinearizedTerms(XiLast)
# calculate the response based on the latest linearized terms
Xi = np.zeros([self.nDOF,self.nw], dtype=complex) # displacement and rotation complex amplitudes [m, rad]
# add fowt's terms to system matrices (BEM arrays are not yet included here)
M_tot[:,:,:] = M_lin
B_tot[:,:,:] = B_lin + B_linearized[:,:,None]
C_tot[:,:,:] = C_lin[:,:,None]
F_tot[: ,:] = F_lin + F_linearized
for ii in range(self.nw):
# form impedance matrix
Z[:,:,ii] = -self.w[ii]**2 * M_tot[:,:,ii] + 1j*self.w[ii]*B_tot[:,:,ii] + C_tot[:,:,ii]
# solve response (complex amplitude)
Xi[:,ii] = np.matmul(np.linalg.inv(Z[:,:,ii]), F_tot[:,ii] )
if conv_plot:
# Convergence Plotting
# plots of surge response at each iteration for observing convergence
ax[0].plot(self.w, np.abs(Xi[0,:]) , color=c[iiter], label=f"iteration {iiter}")
ax[1].plot(self.w, np.real(Xi[0,:]), color=c[iiter], label=f"iteration {iiter}")
ax[2].plot(self.w, np.imag(Xi[0,:]), color=c[iiter], label=f"iteration {iiter}")
# check for convergence
tolCheck = np.abs(Xi - XiLast) / ((np.abs(Xi)+tol))
if (tolCheck < tol).all():
print(f" Iteration {iiter}, converged, with largest tolCheck of {np.max(tolCheck)} < {tol}")
break
else:
XiLast = 0.2*XiLast + 0.8*Xi # use a mix of the old and new response amplitudes to use for the next iteration
# (uses hard-coded successive under relaxation for now)
print(f" Iteration {iiter}, still going since largest tolCheck is {np.max(tolCheck)} >= {tol}")
if iiter == nIter-1:
print("WARNING - solveDynamics iteration did not converge to the tolerance.")
if conv_plot:
# labels for convergence plots
ax[1].legend()
ax[0].set_ylabel("response magnitude")
ax[1].set_ylabel("response, real")
ax[2].set_ylabel("response, imag")
ax[2].set_xlabel("frequency (rad/s)")
fig.suptitle("Response convergence")
# ------------------------------ preliminary plotting of response ---------------------------------
if RAO_plot:
# RAO plotting
fig, ax = plt.subplots(3,1, sharex=True)
fowt = self.fowtList[0]
ax[0].plot(self.w, np.abs(Xi[0,:]) , 'b', label="surge")
ax[0].plot(self.w, np.abs(Xi[1,:]) , 'g', label="sway")
ax[0].plot(self.w, np.abs(Xi[2,:]) , 'r', label="heave")
ax[1].plot(self.w, np.abs(Xi[3,:])*180/np.pi, 'b', label="roll")
ax[1].plot(self.w, np.abs(Xi[4,:])*180/np.pi, 'g', label="pitch")
ax[1].plot(self.w, np.abs(Xi[5,:])*180/np.pi, 'r', label="yaw")
ax[2].plot(self.w, fowt.zeta, 'k', label="wave amplitude (m)")
ax[0].legend()
ax[1].legend()
ax[2].legend()
#ax[0].set_ylim([0, 1e6])
#ax[1].set_ylim([0, 1e9])
ax[0].set_ylabel("response magnitude (m)")
ax[1].set_ylabel("response magnitude (deg)")
ax[2].set_ylabel("wave amplitude (m)")
ax[2].set_xlabel("frequency (rad/s)")
self.Xi = Xi
self.results['response'] = {} # signal this data is available by adding a section to the results dictionary
return Xi # currently returning the response rather than saving in the model object
def calcOutputs(self):
'''This is where various output quantities of interest are calculated based on the already-solved system response.'''
fowt = self.fowtList[0] # just using a single turbine for now
# ----- system properties outputs -----------------------------
# all values about platform reference point (z=0) unless otherwise noted
if 'properties' in self.results:
self.results['properties']['tower mass'] = fowt.mtower
self.results['properties']['tower CG'] = fowt.rCG_tow
self.results['properties']['substructure mass'] = fowt.msubstruc
self.results['properties']['substructure CG'] = fowt.rCG_sub
self.results['properties']['shell mass'] = fowt.mshell
self.results['properties']['ballast mass'] = fowt.mballast
self.results['properties']['ballast densities'] = fowt.pb
self.results['properties']['total mass'] = fowt.M_struc[0,0]
self.results['properties']['total CG'] = fowt.rCG_TOT
#self.results['properties']['roll inertia at subCG'] = fowt.I44
#self.results['properties']['pitch inertia at subCG'] = fowt.I55
#self.results['properties']['yaw inertia at subCG'] = fowt.I66
self.results['properties']['roll inertia at subCG'] = fowt.M_struc_subCM[3,3]
self.results['properties']['pitch inertia at subCG'] = fowt.M_struc_subCM[4,4]
self.results['properties']['yaw inertia at subCG'] = fowt.M_struc_subCM[5,5]
self.results['properties']['Buoyancy (pgV)'] = fowt.env.rho*fowt.env.g*fowt.V
self.results['properties']['Center of Buoyancy'] = fowt.rCB
self.results['properties']['C stiffness matrix'] = fowt.C_hydro
self.results['properties']['F_lines0'] = self.F_moor0
self.results['properties']['C_lines0'] = self.C_moor0
# 6DOF matrices for the support structure (everything but turbine) including mass, hydrostatics, and mooring reactions
self.results['properties']['M support structure'] = fowt.M_struc_subCM # mass matrix
self.results['properties']['A support structure'] = fowt.A_hydro_morison + fowt.A_BEM[:,:,-1] # hydrodynamic added mass (currently using highest frequency of BEM added mass)
self.results['properties']['C support structure'] = fowt.C_struc_sub + fowt.C_hydro + self.C_moor0 # stiffness
# ----- response outputs (always in standard units) ---------------------------------------
if 'response' in self.results:
RAOmag = abs(self.Xi /fowt.zeta) # magnitudes of motion RAO
self.results['response']['frequencies'] = self.w/2/np.pi # Hz
self.results['response']['wave elevation'] = fowt.zeta
self.results['response']['Xi' ] = self.Xi
self.results['response']['surge RAO' ] = RAOmag[0,:]
self.results['response'][ 'sway RAO' ] = RAOmag[1,:]
self.results['response']['heave RAO' ] = RAOmag[2,:]
self.results['response']['pitch RAO' ] = RAOmag[3,:]
self.results['response'][ 'roll RAO' ] = RAOmag[4,:]
self.results['response'][ 'yaw RAO' ] = RAOmag[5,:]
# save dynamic derived quantities
#self.results['response']['mooring tensions'] = ...
self.results['response']['nacelle acceleration'] = self.w**2 * (self.Xi[0] + self.Xi[4]*fowt.hHub)
'''
# ---------- mooring line fairlead tension RAOs and constraint implementation ----------
for il=1:Platf.Nlines
#aNacRAO{imeto} = -(w').^2 .* (X{imeto}(:,1) + hNac*X{imeto}(:,5)); # Nacelle Accel RAO
#aNac2(imeto) = sum( abs(aNacRAO{imeto}).^2.*S(:,imeto) ) *(w(2)-w(1)); # RMS Nacelle Accel
TfairRAO{imeto}(il,:) = C_lf(il,:,imeto)*rao{imeto}(:,:)'; # get fairlead tension RAO for each line (multiply by dofs)
#RMSTfair{imeto}(il) = sqrt( sum( (abs(TfairRAO{imeto}(il,:))).^2) / length(w) );
#figure
#plot(w,abs(TfairRAO{imeto}(il,:)))
#d=TfairRAO{imeto}(il,:)
RMSTfair{imeto}(il) = sqrt( sum( (abs(TfairRAO{imeto}(il,:)).^2).*S(:,imeto)') *(w(2)-w(1)) );
#RMSTfair
#sumpart = sum( (abs(TfairRAO{imeto}(il,:)).^2).*S(:,imeto)')
#dw=(w(2)-w(1))
end
[Tfair, il] = min( T_lf(:,imeto) );
if Tfair - 3*RMSTfair{imeto}(il) < 0 && Xm < 1 # taut lines only
disp([' REJECTING (mooring line goes slack)'])
fitness = -1;
return; # constraint for slack line!!!
end
if grads
disp(['mooring slackness: ' num2str(Tfair - 3*RMSTfair{imeto}(il))])
end
# ----------- dynamic pitch constraint ----------------------
#disp('checking dynamic pitch');
RMSpitch(imeto) = sqrt( sum( ((abs(rao{imeto}(:,5))).^2).*S(:,imeto) ) *(w(2)-w(1)) ); # fixed April 9th :(
RMSpitchdeg = RMSpitch(imeto)*60/pi;
if (Platf.spitch + RMSpitch(imeto))*180/pi > 10
disp([' REJECTING (static + RMS dynamic pitch > 10)'])
fitness = -1;
return;
end
if grads
disp(['dynamic pitch: ' num2str((Platf.spitch + RMSpitch(imeto))*180/pi)])
end
#figure(1)
#plot(w,S(:,imeto))
#hold on
#figure()
#plot(2*pi./w,abs(Xi{imeto}(:,5)))
#ylabel('pitch response'); xlabel('T (s)')
RMSsurge(imeto) = sqrt( sum( ((abs(rao{imeto}(:,1))).^2).*S(:,imeto) ) *(w(2)-w(1)) );
RMSheave(imeto) = sqrt( sum( ((abs(rao{imeto}(:,3))).^2).*S(:,imeto) ) *(w(2)-w(1)) );
'''
return self.results
def plot(self, hideGrid=False):
'''plots the whole model, including FOWTs and mooring system...'''
# for now, start the plot via the mooring system, since MoorPy doesn't yet know how to draw on other codes' plots
self.ms.bodyList[0].setPosition(np.zeros(6))
self.ms.initialize()
fig, ax = self.ms.plot()
#fig = plt.figure(figsize=(20/2.54,12/2.54))
#ax = Axes3D(fig)
# plot each FOWT
for fowt in self.fowtList:
fowt.plot(ax)
if hideGrid:
ax.set_xticks([]) # Hide axes ticks
ax.set_yticks([])
ax.set_zticks([])
ax.grid(False) # Hide grid lines
plt.grid(b=None)
ax.axis('off')
plt.box(False)
|
#--------------------------------------------------------------------------
# File and Version Information:
# $Id: SegGeometryStore.py 11400 2016-02-23 00:07:50Z [email protected] $
#
# Description:
# Module SegGeometryStore...
#
#------------------------------------------------------------------------
"""
:py:class:`algos.geometry.SegGeometryStore` - is a factory class/method to switch between different device-dependent
segments/sensors to access their pixel geometry uling :py:class:`algos.geometry.SegGeometry` interface.
Usage::
from algos.geometry.SegGeometryStore import sgs
sg = sgs.Create('SENS2X1:V1', pbits=0377)
sg2= sgs.Create('EPIX100:V1', pbits=0377)
sg3= sgs.Create('PNCCD:V1', pbits=0377)
sg4= sgs.Create('ANDOR3D:V1', pbits=0377)
sg.print_seg_info(pbits=0377)
size_arr = sg.size()
rows = sg.rows()
cols = sg.cols()
shape = sg.shape()
pix_size = sg.pixel_scale_size()
area = sg.pixel_area_array()
mask = sg.pixel_mask(mbits=0377)
sizeX = sg.pixel_size_array('X')
sizeX, sizeY, sizeZ = sg.pixel_size_array()
X = sg.pixel_coord_array('X')
X,Y,Z = sg.pixel_coord_array()
xmin = sg.pixel_coord_min('X')
ymax = sg.pixel_coord_max('Y')
xmin, ymin, zmin = sg.pixel_coord_min()
xmax, ymax, zmax = sg.pixel_coord_mas()
...
@see other interface methods in :py:class:`algos.geometry.SegGeometry`, :py:class:`algos.geometry.SegGeometryCspad2x1V1`
This software was developed for the SIT project. If you use all or
part of it, please give an appropriate acknowledgment.
@version $Id: 2013-03-08$
@author Mikhail S. Dubrovin
"""
#--------------------------------
__version__ = "$Revision: 11400 $"
#--------------------------------
from algos.geometry.SegGeometryCspad2x1V1 import cspad2x1_one
from algos.geometry.SegGeometryEpix100V1 import epix2x2_one
from algos.geometry.SegGeometryMatrixV1 import SegGeometryMatrixV1, segment_one, matrix_pars
#------------------------------
class SegGeometryStore() :
"""Factory class for SegGeometry-base objects of different detectors"""
#------------------------------
def __init__(sp) :
pass
#------------------------------
def Create(sp, segname='SENS2X1:V1', pbits=0 ) :
""" Factory method returns device dependent SINGLETON object with interface implementation
"""
if segname=='SENS2X1:V1' : return cspad2x1_one # SegGeometryCspad2x1V1(use_wide_pix_center=False)
if segname=='EPIX100:V1' : return epix2x2_one # SegGeometryEpix100V1(use_wide_pix_center=False)
if segname=='PNCCD:V1' : return segment_one # SegGeometryMatrixV1()
#if segname=='ANDOR3D:V1' : return seg_andor3d # SegGeometryMatrixV1(rows=2048, cols=2048, ...)
if segname[:4]=='MTRX' :
rows, cols, psize_row, psize_col = matrix_pars(segname)
return SegGeometryMatrixV1(rows, cols, psize_row, psize_col,\
pix_size_depth=100,\
pix_scale_size=min(psize_row, psize_col))
return None
#------------------------------
sgs = SegGeometryStore()
#------------------------------
#----------- TEST -------------
#------------------------------
def test_seggeom() :
import sys
from time import time
t0_sec = time()
if len(sys.argv)==1 : print 'For test(s) use command: python', sys.argv[0], '<test-number=1-3>'
elif(sys.argv[1]=='1') :
sg = sgs.Create('SENS2X1:V1', pbits=0377)
sg.print_seg_info(pbits=0377)
elif(sys.argv[1]=='2') :
sg = sgs.Create('EPIX100:V1', pbits=0377)
sg.print_seg_info(pbits=0377)
elif(sys.argv[1]=='3') :
sg = sgs.Create('PNCCD:V1', pbits=0377)
sg.print_seg_info(pbits=0377)
elif(sys.argv[1]=='4') :
sg = sgs.Create('MTRX:512:512:54:54', pbits=0377)
print 'Consumed time for MTRX:512:512:54:54 (sec) =', time()-t0_sec
sg.print_seg_info(pbits=0377)
else : print 'Non-expected arguments: sys.argv=', sys.argv, ' use 0,1,2,...'
#------------------------------
if __name__ == "__main__" :
test_seggeom()
print 'End of test.'
#------------------------------
|
import re
from dataclasses import dataclass
from typing import Iterator, Type
@dataclass
class Token:
value: str
def __repr__(self) -> str:
return f'{self.__class__.__name__}("{self.value}")'
class StringToken(Token):
pass
class BraceToken(Token):
pass
class TokenizeError(Exception):
pass
def tokenize_braces(s: str) -> Iterator[Token]:
"""
>>> list(tokenize_braces(""))
[]
>>> list(tokenize_braces("before {braced} after"))
[StringToken("before "), BraceToken("braced"), StringToken(" after")]
>>> list(tokenize_braces("ab{cd}{ef}"))
[StringToken("ab"), BraceToken("cd"), BraceToken("ef")]
"""
for value in re.split("({[^}]*})", s):
if value == "":
continue
if value.startswith("{") and value.endswith("}"):
value = value[1:-1]
token_class: Type[Token] = BraceToken
else:
token_class = StringToken
if "{" in value:
raise TokenizeError("Unexpected '{' encountered")
if "}" in value:
raise TokenizeError("Unexpected '}' encountered")
yield token_class(value)
|
from django.conf.urls import patterns, url
from . import views
from ideahub.apps.accounts.forms import LoginForm
urlpatterns = patterns('',
# TODO: Fix these temporary mappings
url(r'^login/?$', views.login, name='login'),
url(r'^signup/?$', views.signup, name='signup'),
url(r'^get_started/?$', views.get_started, name='get_started'),
url(r'^user/(?P<pk>\d+)/?$', views.user, name='user'),
url(r'^profile/?$', views.profile, name='profile'),
url(r'^logout/?$', 'django.contrib.auth.views.logout', name='logout', kwargs={
'next_page': 'login',
}),
)
|
# Generated with love
import typing
from vkbottle.types import responses
from .access import APIAccessibility
from .method import BaseMethod
class AccountBan(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(self, owner_id: int = None) -> responses.ok_response.OkResponse:
""" account.ban
From Vk Docs:
Access from user token(s)
:param owner_id:
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.ban", params, response_model=responses.ok_response.OkResponseModel
)
class AccountChangePassword(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self,
new_password: str,
restore_sid: str = None,
change_password_hash: str = None,
old_password: str = None,
) -> responses.account.ChangePassword:
""" account.changePassword
From Vk Docs: Changes a user password after access is successfully restored with the [vk.com/dev/auth.restore|auth.restore] method.
Access from user token(s)
:param restore_sid: Session id received after the [vk.com/dev/auth.restore|auth.restore] method is executed. (If the password is changed right after the access was restored)
:param change_password_hash: Hash received after a successful OAuth authorization with a code got by SMS. (If the password is changed right after the access was restored)
:param old_password: Current user password.
:param new_password: New password that will be set as a current
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.changePassword",
params,
response_model=responses.account.ChangePasswordModel,
)
class AccountGetActiveOffers(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, offset: int = None, count: int = None
) -> responses.account.GetActiveOffers:
""" account.getActiveOffers
From Vk Docs: Returns a list of active ads (offers) which executed by the user will bring him/her respective number of votes to his balance in the application.
Access from user token(s)
:param offset:
:param count: Number of results to return.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.getActiveOffers",
params,
response_model=responses.account.GetActiveOffersModel,
)
class AccountGetAppPermissions(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(self, user_id: int) -> responses.account.GetAppPermissions:
""" account.getAppPermissions
From Vk Docs: Gets settings of the user in this application.
Access from user token(s)
:param user_id: User ID whose settings information shall be got. By default: current user.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.getAppPermissions",
params,
response_model=responses.account.GetAppPermissionsModel,
)
class AccountGetBanned(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, offset: int = None, count: int = None
) -> responses.account.GetBanned:
""" account.getBanned
From Vk Docs: Returns a user's blacklist.
Access from user token(s)
:param offset: Offset needed to return a specific subset of results.
:param count: Number of results to return.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.getBanned", params, response_model=responses.account.GetBannedModel
)
class AccountGetCounters(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, filter: typing.List = None
) -> responses.account.GetCounters:
""" account.getCounters
From Vk Docs: Returns non-null values of user counters.
Access from user token(s)
:param filter: Counters to be returned.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.getCounters",
params,
response_model=responses.account.GetCountersModel,
)
class AccountGetInfo(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(self, fields: typing.List = None) -> responses.account.GetInfo:
""" account.getInfo
From Vk Docs: Returns current account info.
Access from user token(s)
:param fields: Fields to return. Possible values: *'country' — user country,, *'https_required' — is "HTTPS only" option enabled,, *'own_posts_default' — is "Show my posts only" option is enabled,, *'no_wall_replies' — are wall replies disabled or not,, *'intro' — is intro passed by user or not,, *'lang' — user language. By default: all.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.getInfo", params, response_model=responses.account.GetInfoModel
)
class AccountGetProfileInfo(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(self,) -> responses.account.GetProfileInfo:
""" account.getProfileInfo
From Vk Docs: Returns the current account info.
Access from user token(s)
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.getProfileInfo",
params,
response_model=responses.account.GetProfileInfoModel,
)
class AccountGetPushSettings(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, device_id: str = None
) -> responses.account.GetPushSettings:
""" account.getPushSettings
From Vk Docs: Gets settings of push notifications.
Access from user token(s)
:param device_id: Unique device ID.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.getPushSettings",
params,
response_model=responses.account.GetPushSettingsModel,
)
class AccountRegisterDevice(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self,
token: str,
device_id: str,
device_model: str = None,
device_year: int = None,
system_version: str = None,
settings: str = None,
sandbox: bool = None,
) -> responses.ok_response.OkResponse:
""" account.registerDevice
From Vk Docs: Subscribes an iOS/Android/Windows Phone-based device to receive push notifications
Access from user token(s)
:param token: Device token used to send notifications. (for mpns, the token shall be URL for sending of notifications)
:param device_model: String name of device model.
:param device_year: Device year.
:param device_id: Unique device ID.
:param system_version: String version of device operating system.
:param settings: Push settings in a [vk.com/dev/push_settings|special format].
:param sandbox:
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.registerDevice",
params,
response_model=responses.ok_response.OkResponseModel,
)
class AccountSaveProfileInfo(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self,
first_name: str = None,
last_name: str = None,
maiden_name: str = None,
screen_name: str = None,
cancel_request_id: int = None,
sex: int = None,
relation: int = None,
relation_partner_id: int = None,
bdate: str = None,
bdate_visibility: int = None,
home_town: str = None,
country_id: int = None,
city_id: int = None,
status: str = None,
) -> responses.account.SaveProfileInfo:
""" account.saveProfileInfo
From Vk Docs: Edits current profile info.
Access from user token(s)
:param first_name: User first name.
:param last_name: User last name.
:param maiden_name: User maiden name (female only)
:param screen_name: User screen name.
:param cancel_request_id: ID of the name change request to be canceled. If this parameter is sent, all the others are ignored.
:param sex: User sex. Possible values: , * '1' – female,, * '2' – male.
:param relation: User relationship status. Possible values: , * '1' – single,, * '2' – in a relationship,, * '3' – engaged,, * '4' – married,, * '5' – it's complicated,, * '6' – actively searching,, * '7' – in love,, * '0' – not specified.
:param relation_partner_id: ID of the relationship partner.
:param bdate: User birth date, format: DD.MM.YYYY.
:param bdate_visibility: Birth date visibility. Returned values: , * '1' – show birth date,, * '2' – show only month and day,, * '0' – hide birth date.
:param home_town: User home town.
:param country_id: User country.
:param city_id: User city.
:param status: Status text.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.saveProfileInfo",
params,
response_model=responses.account.SaveProfileInfoModel,
)
class AccountSetInfo(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, name: str = None, value: str = None
) -> responses.ok_response.OkResponse:
""" account.setInfo
From Vk Docs: Allows to edit the current account info.
Access from user token(s)
:param name: Setting name.
:param value: Setting value.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.setInfo",
params,
response_model=responses.ok_response.OkResponseModel,
)
class AccountSetNameInMenu(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, user_id: int, name: str = None
) -> responses.ok_response.OkResponse:
""" account.setNameInMenu
From Vk Docs: Sets an application screen name (up to 17 characters), that is shown to the user in the left menu.
Access from user token(s)
:param user_id: User ID.
:param name: Application screen name.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.setNameInMenu",
params,
response_model=responses.ok_response.OkResponseModel,
)
class AccountSetOffline(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(self,) -> responses.ok_response.OkResponse:
""" account.setOffline
From Vk Docs: Marks a current user as offline.
Access from user token(s)
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.setOffline",
params,
response_model=responses.ok_response.OkResponseModel,
)
class AccountSetOnline(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(self, voip: bool = None) -> responses.ok_response.OkResponse:
""" account.setOnline
From Vk Docs: Marks the current user as online for 15 minutes.
Access from user token(s)
:param voip: '1' if videocalls are available for current device.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.setOnline",
params,
response_model=responses.ok_response.OkResponseModel,
)
class AccountSetPushSettings(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self,
device_id: str,
settings: str = None,
key: str = None,
value: typing.List = None,
) -> responses.ok_response.OkResponse:
""" account.setPushSettings
From Vk Docs: Change push settings.
Access from user token(s)
:param device_id: Unique device ID.
:param settings: Push settings in a [vk.com/dev/push_settings|special format].
:param key: Notification key.
:param value: New value for the key in a [vk.com/dev/push_settings|special format].
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.setPushSettings",
params,
response_model=responses.ok_response.OkResponseModel,
)
class AccountSetSilenceMode(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self,
device_id: str = None,
time: int = None,
peer_id: int = None,
sound: int = None,
) -> responses.ok_response.OkResponse:
""" account.setSilenceMode
From Vk Docs: Mutes push notifications for the set period of time.
Access from user token(s)
:param device_id: Unique device ID.
:param time: Time in seconds for what notifications should be disabled. '-1' to disable forever.
:param peer_id: Destination ID. "For user: 'User ID', e.g. '12345'. For chat: '2000000000' + 'Chat ID', e.g. '2000000001'. For community: '- Community ID', e.g. '-12345'. "
:param sound: '1' — to enable sound in this dialog, '0' — to disable sound. Only if 'peer_id' contains user or community ID.
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.setSilenceMode",
params,
response_model=responses.ok_response.OkResponseModel,
)
class AccountUnban(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(self, owner_id: int = None) -> responses.ok_response.OkResponse:
""" account.unban
From Vk Docs:
Access from user token(s)
:param owner_id:
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.unban",
params,
response_model=responses.ok_response.OkResponseModel,
)
class AccountUnregisterDevice(BaseMethod):
access_token_type: APIAccessibility = [APIAccessibility.USER]
async def __call__(
self, device_id: str = None, sandbox: bool = None
) -> responses.ok_response.OkResponse:
""" account.unregisterDevice
From Vk Docs: Unsubscribes a device from push notifications.
Access from user token(s)
:param device_id: Unique device ID.
:param sandbox:
"""
params = {
k if not k.endswith("_") else k[:-1]: v
for k, v in locals().items()
if k not in ["self"] and v is not None
}
return await self.request(
"account.unregisterDevice",
params,
response_model=responses.ok_response.OkResponseModel,
)
class Account:
def __init__(self, request):
self.ban = AccountBan(request)
self.change_password = AccountChangePassword(request)
self.get_active_offers = AccountGetActiveOffers(request)
self.get_app_permissions = AccountGetAppPermissions(request)
self.get_banned = AccountGetBanned(request)
self.get_counters = AccountGetCounters(request)
self.get_info = AccountGetInfo(request)
self.get_profile_info = AccountGetProfileInfo(request)
self.get_push_settings = AccountGetPushSettings(request)
self.register_device = AccountRegisterDevice(request)
self.save_profile_info = AccountSaveProfileInfo(request)
self.set_info = AccountSetInfo(request)
self.set_name_in_menu = AccountSetNameInMenu(request)
self.set_offline = AccountSetOffline(request)
self.set_online = AccountSetOnline(request)
self.set_push_settings = AccountSetPushSettings(request)
self.set_silence_mode = AccountSetSilenceMode(request)
self.unban = AccountUnban(request)
self.unregister_device = AccountUnregisterDevice(request)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-03-30 00:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mooringlicensing', '0044_feeseasonapplicationtype'),
]
operations = [
migrations.RemoveField(
model_name='feeseasonapplicationtype',
name='application_type',
),
migrations.RemoveField(
model_name='feeseasonapplicationtype',
name='fee_season',
),
migrations.DeleteModel(
name='FeeSeasonApplicationType',
),
]
|
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
"""
Merge kapture objects for reconstructions.
"""
from kapture.io.binary import array_to_file
from kapture.io.tar import TarCollection
import numpy as np
import os
import shutil
from typing import Dict, List, Union, Optional, Tuple, Type
import kapture
import kapture.io.features
from kapture.utils.logging import getLogger
def _merge_image_features(feature_class_type: Type[Union[kapture.Keypoints,
kapture.Descriptors,
kapture.GlobalFeatures]],
feature_type: str,
features_list: Union[List[Optional[kapture.Keypoints]],
List[Optional[kapture.Descriptors]],
List[Optional[kapture.GlobalFeatures]]],
features_paths: List[str],
output_path: str,
tar_handlers: List[TarCollection]
) -> Union[kapture.Keypoints, kapture.Descriptors, kapture.GlobalFeatures]:
"""
Merge several features_list (keypoints, descriptors or global features_list) (of same type) in one.
:param feature_class_type: the type of features_list
:param feature_type: the type (name of the folder) of the features
:param features_list: the list of values
:param features_paths: the paths
:param output_path: root path of the features to construct
:param tar_handlers: collection of preloaded tar archives
:return: merged features object of the corresponding type
"""
assert len(features_list) > 0
assert len(features_paths) == len(features_list)
# find no none value
val = [(i, d) for i, d in enumerate(features_list) if d is not None]
assert len(val) > 0
merged_features = val[0][1]
for j, (i, features) in enumerate(val):
assert isinstance(features, feature_class_type)
assert features.type_name == merged_features.type_name
assert features.dtype == merged_features.dtype
assert features.dsize == merged_features.dsize
if feature_class_type == kapture.Descriptors or feature_class_type == kapture.GlobalFeatures:
assert not isinstance(features, kapture.Keypoints) # IDE type check help
assert not isinstance(merged_features, kapture.Keypoints) # IDE type check help
assert features.metric_type == merged_features.metric_type
if feature_class_type == kapture.Descriptors:
assert isinstance(features, kapture.Descriptors) # IDE type check help
assert isinstance(merged_features, kapture.Descriptors) # IDE type check help
assert features.keypoints_type == merged_features.keypoints_type
for name in features:
if j > 0 and name in merged_features:
getLogger().warning(f'{name} was found multiple times.')
else:
merged_features.add(name)
if output_path:
# TODO: uses kapture.io.features_list.get_image_features_dirpath()
in_path = kapture.io.features.get_features_fullpath(feature_class_type,
feature_type,
features_paths[i],
name,
tar_handlers[i])
out_path = kapture.io.features.get_features_fullpath(feature_class_type,
feature_type,
output_path,
name)
if in_path != out_path:
# skip actual copy if file does not actually move.
os.makedirs(os.path.dirname(out_path), exist_ok=True)
if isinstance(in_path, str):
shutil.copy(in_path, out_path)
else:
# in_path is a tuple [str, TarHandler]
# keypoints are not stored in a file, have to read them to be able to copy them
array = in_path[1].get_array_from_tar(in_path[0], features.dtype, features.dsize)
array_to_file(out_path, array)
return merged_features
def _merge_image_features_collection(feature_class_type: Type[Union[kapture.Keypoints,
kapture.Descriptors,
kapture.GlobalFeatures]],
features_list: Union[List[Optional[Dict[str, kapture.Keypoints]]],
List[Optional[Dict[str, kapture.Descriptors]]],
List[Optional[Dict[str, kapture.GlobalFeatures]]]],
features_paths: List[str],
output_path: str,
tar_handlers: List[TarCollection]
) -> Union[Dict[str, kapture.Keypoints],
Dict[str, kapture.Descriptors],
Dict[str, kapture.GlobalFeatures]]:
assert len(features_list) > 0
assert len(features_paths) == len(features_list)
# get the union
features_types = set().union(*[features.keys() for features in features_list if features is not None])
if len(features_types) == 0:
return {}
out_collection = {}
for features_type in features_types:
image_features_list = [features[features_type] if features is not None and features_type in features else None
for features in features_list]
image_features = _merge_image_features(feature_class_type, features_type,
image_features_list,
features_paths, output_path,
tar_handlers)
assert isinstance(image_features, feature_class_type)
out_collection[features_type] = image_features
return out_collection
def merge_keypoints(feature_type: str,
keypoints_list: List[Optional[kapture.Keypoints]],
keypoints_paths: List[str],
output_path: str,
tar_handlers: List[TarCollection]) -> kapture.Keypoints:
"""
Merge several keypoints in one.
:param keypoints_list: list of keypoints to merge
:param keypoints_paths: keypoints files paths
:param output_path: root path of the merged features files
:param tar_handlers: collection of preloaded tar archives
:return: merged keypoints
"""
keypoints = _merge_image_features(kapture.Keypoints, feature_type, keypoints_list, keypoints_paths,
output_path, tar_handlers)
assert isinstance(keypoints, kapture.Keypoints)
return keypoints
def merge_keypoints_collections(keypoints_collections_list: List[Optional[Dict[str, kapture.Keypoints]]],
keypoints_paths: List[str],
output_path: str,
tar_handlers: List[TarCollection]) -> Dict[str, kapture.Keypoints]:
"""
Merge several keypoints collections in one.
:param keypoints_collections_list: list of keypoints collections to merge
:param keypoints_paths: keypoints files paths
:param output_path: root path of the merged features files
:param tar_handlers: collection of preloaded tar archives
:return: merged keypoints collection
"""
return _merge_image_features_collection(kapture.Keypoints, keypoints_collections_list,
keypoints_paths, output_path, tar_handlers)
def merge_descriptors(feature_type: str,
descriptors_list: List[Optional[kapture.Descriptors]],
descriptors_paths: List[str], output_path: str,
tar_handlers: List[TarCollection]) -> kapture.Descriptors:
"""
Merge several descriptors in one.
:param descriptors_list: list of descriptors to merge
:param descriptors_paths: descriptors files paths
:param output_path: root path of the merged features files
:param tar_handlers: collection of preloaded tar archives
:return: merged descriptors
"""
descriptors = _merge_image_features(kapture.Descriptors, feature_type,
descriptors_list, descriptors_paths, output_path, tar_handlers)
assert isinstance(descriptors, kapture.Descriptors)
return descriptors
def merge_descriptors_collections(descriptors_collections_list: List[Optional[Dict[str, kapture.Descriptors]]],
descriptors_paths: List[str],
output_path: str,
tar_handlers: List[TarCollection]) -> Dict[str, kapture.Descriptors]:
"""
Merge several descriptors collections in one.
:param descriptors_collections_list: list of descriptors collections to merge
:param descriptors_paths: descriptors files paths
:param output_path: root path of the merged features files
:param tar_handlers: collection of preloaded tar archives
:return: merged descriptors collections
"""
return _merge_image_features_collection(kapture.Descriptors, descriptors_collections_list,
descriptors_paths, output_path, tar_handlers)
def merge_global_features(global_features_list: List[Optional[kapture.GlobalFeatures]],
global_features_paths: List[str], output_path: str,
tar_handlers: List[TarCollection]) -> kapture.GlobalFeatures:
"""
Merge several global features in one.
:param global_features_list: list of global features to merge
:param global_features_paths: global features files paths
:param output_path: root path of the merged features files
:param tar_handlers: collection of preloaded tar archives
:return: merged global features
"""
features = _merge_image_features(kapture.GlobalFeatures, global_features_list, global_features_paths,
output_path, tar_handlers)
assert isinstance(features, kapture.GlobalFeatures)
return features
def merge_global_features_collections(global_features_collections_list: List[Optional[Dict[str,
kapture.GlobalFeatures]]],
global_features_paths: List[str],
output_path: str,
tar_handlers: List[TarCollection]) -> Dict[str, kapture.GlobalFeatures]:
"""
Merge several global features collections in one.
:param global_features_collections_list: list of global features collections to merge
:param global_features_paths: global features files paths
:param output_path: root path of the merged features files
:param tar_handlers: collection of preloaded tar archives
:return: merged global features collection
"""
return _merge_image_features_collection(kapture.GlobalFeatures, global_features_collections_list,
global_features_paths, output_path, tar_handlers)
def merge_matches(keypoints_type: str,
matches_list: List[Optional[kapture.Matches]],
matches_paths: List[str],
output_path: str,
tar_handlers: List[TarCollection]) -> kapture.Matches:
"""
Merge several matches lists in one.
:param keypoints_type: type of keypoints, name of the keypoints subfolder
:param matches_list: list of matches to merge
:param matches_paths: matches files paths
:param output_path: root path of the merged matches files
:param tar_handlers: collection of preloaded tar archives
:return: merged matches
"""
assert len(matches_list) > 0
assert len(matches_paths) == len(matches_list)
merged_matches = kapture.Matches()
for matches, matches_path, tar_handler in zip(matches_list, matches_paths, tar_handlers):
if matches is None:
continue
for pair in matches:
if pair in merged_matches:
getLogger().warning(f'{pair} was found multiple times.')
else:
merged_matches.add(pair[0], pair[1])
if output_path:
in_path = kapture.io.features.get_matches_fullpath(pair, keypoints_type, matches_path, tar_handler)
out_path = kapture.io.features.get_matches_fullpath(pair, keypoints_type, output_path)
if in_path != out_path:
# skip actual copy if file does not actually move.
os.makedirs(os.path.dirname(out_path), exist_ok=True)
if isinstance(in_path, str):
shutil.copy(in_path, out_path)
else:
# in_path is a tuple [str, TarHandler]
# keypoints are not stored in a file, have to read them to be able to copy them
array = kapture.io.features.image_matches_from_file(in_path)
kapture.io.features.image_matches_to_file(out_path, array)
return merged_matches
def merge_matches_collections(matches_list: List[Optional[Dict[str, kapture.Matches]]],
matches_paths: List[str],
output_path: str,
tar_handlers: List[TarCollection]) -> Dict[str, kapture.Matches]:
"""
Merge several matches collections in one.
:param matches_list: list of matches collections to merge
:param matches_paths: matches files paths
:param output_path: root path of the merged matches files
:param tar_handlers: collection of preloaded tar archives
:return: merged matches collection
"""
assert len(matches_list) > 0
assert len(matches_paths) == len(matches_list)
# get the union
keypoints_types = set().union(*[matches.keys() for matches in matches_list if matches is not None])
if len(keypoints_types) == 0:
return {}
out_collection = {}
for keypoints_type in keypoints_types:
kmatches_list = [matches[keypoints_type] if matches is not None and keypoints_type in matches else None
for matches in matches_list]
merged_matches = merge_matches(keypoints_type,
kmatches_list,
matches_paths,
output_path,
tar_handlers)
assert isinstance(merged_matches, kapture.Matches)
out_collection[keypoints_type] = merged_matches
return out_collection
def merge_points3d_and_observations(pts3d_obs: List[Tuple[Optional[kapture.Points3d], Optional[kapture.Observations]]]
) -> Tuple[kapture.Points3d, kapture.Observations]:
"""
Merge a list of points3d with their observations.
:param pts3d_obs: list of points3d with observations to merge
:return: merged points3d associated to observations
"""
assert len(pts3d_obs) > 0
merged_points3d = kapture.Points3d()
merged_observations = kapture.Observations()
point3d_offset = 0
for points3d, observations in pts3d_obs:
if points3d is None:
continue
merged_points3d = kapture.Points3d(np.vstack([merged_points3d, points3d]))
if observations is not None:
for point3d_idx, keypoints_type, (image_path, keypoint_idx) in kapture.flatten(observations):
merged_observations.add(point3d_idx + point3d_offset, keypoints_type, image_path, keypoint_idx)
point3d_offset += merged_points3d.shape[0]
return merged_points3d, merged_observations
def merge_points3d(points3d_list: List[Optional[kapture.Points3d]]) -> kapture.Points3d:
"""
Merge several points3d lists in one.
:param points3d_list: list of points3d to merge
:return: merged points3d
"""
assert len(points3d_list) > 0
merged_points3d = kapture.Points3d()
for points3d in points3d_list:
if points3d is None:
continue
merged_points3d = kapture.Points3d(np.vstack([merged_points3d, points3d]))
return merged_points3d
|
# https://www.pyimagesearch.com/2021/03/29/multi-template-matching-with-opencv/
import numpy as np
import argparse
import cv2
# Source: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
# Malisiewicz et al.
def non_max_suppression_fast(boxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have
idxs = np.delete(
idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0]))
)
# return only the bounding boxes that were picked using the
# integer data type
return boxes[pick].astype("int")
# idxs = np.delete(
# idxs, np.concatenate(([last], np.logical_or(overlap > overlapThresh, overlap < 0.7)))
# )
ap = argparse.ArgumentParser()
ap.add_argument(
"-i",
"--image",
type=str,
required=True,
help="path to input image to apply template matching",
)
ap.add_argument(
"-t", "--template", type=str, required=True, help="path to template image"
)
ap.add_argument("-b", "--threshold", type=float, default=0.8, help="matching threshold")
args = vars(ap.parse_args())
print("Loading images")
image = cv2.imread(args["image"])
template = cv2.imread(args["template"])
# Template image spatial dimensions
(tH, tW) = template.shape[:2]
cv2.imshow("Image", image)
cv2.imshow("Template", template)
# Convert to grayscale
imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
templateGray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
print("Perform template matching")
result = cv2.matchTemplate(imageGray, templateGray, cv2.TM_CCOEFF_NORMED)
(yCoords, xCoords) = np.where(result >= args["threshold"])
# Image to draw results
clone = image.copy()
print(f"{len(yCoords)} matched locations before NMS")
# Draw matched locations before non-maxima suppression
for (x, y) in zip(xCoords, yCoords):
# Draw bounding box
cv2.rectangle(clone, (x, y), (x + tW, y + tH), (255, 0, 0), 1)
cv2.imshow("Before NMS", clone)
# Initial list of rectangles
rects = []
# Loop over starting coordinates
for (x, y) in zip(xCoords, yCoords):
# np.append(rects, np.array([x,y, x+tW, y+tH]), axis=0)
rects.append((x, y, x + tW, y + tH))
rects = np.array(rects)
# Apply non-maxima suppression
pick = non_max_suppression_fast(rects, 0.2)
print(f"{len(pick)} matched locations after NMS")
# Loop over the final bounding boxes
for (startX, startY, endX, endY) in pick:
cv2.rectangle(image, (startX, startY), (endX, endY), (255, 0, 0), 1)
cv2.imshow("After NMS", image)
cv2.waitKey(0)
|
configfile: 'config_spocd1_pi.yaml'
bedtools = "/usr/local/Cellar/bedtools/2.27.1/bin/bedtools"
rule all:
input:
expand("Processed/v3/mapped/{sample}_unique_sorted_edited_TE.bed", sample = config["samples"]),
expand("Processed/v3/mapped/{sample}_unique_sorted_edited_nTE.bed", sample = config["samples"]),
expand("Processed/v3/mapped/{sample}_unique_sorted_edited_genes.bed", sample = config["samples"]),
expand("Processed/v3/mapped/{sample}_unique_sorted_edited_others.bed", sample = config["samples"])
rule annotate_TEs:
input:
"Processed/v3/mapped/{sample}_unique_sorted_edited.bed"
output:
"Processed/v3/mapped/{sample}_unique_sorted_edited_TE.bed"
params:
bedTE = config["bedTE"]
shell:
"""
bedtools intersect -a {input} -b {params.bedTE} > {output}
"""
rule annotate_nTEs:
input:
"Processed/v3/mapped/{sample}_unique_sorted_edited.bed"
output:
"Processed/v3/mapped/{sample}_unique_sorted_edited_nTE.bed"
params:
bedTE = config["bedTE"]
shell:
"""
bedtools intersect -a {input} -b {params.bedTE} -v > {output}
"""
rule annotate_genes:
input:
"Processed/v3/mapped/{sample}_unique_sorted_edited_nTE.bed"
output:
"Processed/v3/mapped/{sample}_unique_sorted_edited_genes.bed"
params:
bedGenes = config["bedGenes"]
shell:
"""
bedtools intersect -a {input} -b {params.bedGenes} > {output}
"""
rule annotate_others:
input:
"Processed/v3/mapped/{sample}_unique_sorted_edited_nTE.bed"
output:
"Processed/v3/mapped/{sample}_unique_sorted_edited_others.bed"
params:
bedGenes = config["bedGenes"]
shell:
"bedtools intersect -a {input} -b {params.bedGenes} -v > {output}" |
import torch
import argparse
device_ids = [0, 1]
device = torch.device('cuda:{}'.format(min(device_ids)) if torch.cuda.is_available() else 'cpu')
def parse(args):
# 1. arg parser
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=13) # 13
parser.add_argument('--port', type=str, default='2015')
parser.add_argument('--lr', type=float, default=1e-2) # 1e-2
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=1e-4) # 0.0001
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--vis_step', type=int, default=100)
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--rank', type=int, default=0)
parser.add_argument('--save_path', type=str, default='./saves')
parser.add_argument('--save_file_name', type=str, default='retina_res_50_coco') # FIXME
parser.add_argument('--conf_thres', type=float, default=0.05)
parser.add_argument('--start_epoch', type=int, default=0)
# FIXME choose your dataset root
# parser.add_argument('--data_root', type=str, default='D:\data\\voc')
# parser.add_argument('--data_root', type=str, default='D:\data\coco')
parser.add_argument('--data_root', type=str, default='/home/cvmlserver5/Sungmin/data/coco')
parser.add_argument('--img_path', type=str, default='/home/cvmlserver5/Sungmin/data/coco/images/val2017')
parser.add_argument('--data_type', type=str, default='coco', help='choose voc or coco') # FIXME
parser.add_argument('--num_classes', type=int, default=80)
parser.add_argument('--resize', type=int, default=600) # FIXME
parser.set_defaults(visualization=False)
parser.add_argument('--vis', dest='visualization', action='store_true')
opts = parser.parse_args(args)
return opts |
import csv
import os
import discord
from discord.ext import commands
# ----------------------------------------------------------------------------------------------
# Returns the ping of the bot, useful for testing bot lag and as a simple functionality command
# ----------------------------------------------------------------------------------------------
from src import cal, db
class Create(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name='take', help='Create a create events from csv file.')
# @commands.dm_only()
@commands.has_role('Instructor')
async def take(self, ctx):
try:
''' run event creation interface '''
temp = 'data/events/' + str(ctx.message.guild.id)
if not os.path.exists(temp):
os.makedirs(temp)
await ctx.message.attachments[0].save(
temp + '/' + ctx.message.attachments[0].filename)
while True:
if os.path.exists(temp + '/' + ctx.message.attachments[0].filename):
break
if ctx.message.attachments[0].filename.endswith('.csv'):
if ctx.message.attachments[0].filename.startswith('exams'):
await self.read_exams(ctx)
if ctx.message.attachments[0].filename.startswith('assignments'):
await self.read_assignments(ctx)
except Exception as e:
print(e)
# if ctx.message.attachments[0].filename.startswith('ta_office_hours'):
# await event_creation.read_assignments(ctx)
@commands.command(name='eventcsv', help='Create a create events from csv file.')
@commands.has_role('Instructor')
async def get_event_sample_csv(self, ctx):
''' run event creation interface '''
await ctx.send(file=discord.File(r'data\sample_event_csv_files\exams.csv'))
await ctx.send(file=discord.File(r'data\sample_event_csv_files\assignments.csv'))
await ctx.send(file=discord.File(r'data\sample_event_csv_files\ta_office_hours.csv'))
async def read_exams(self, ctx):
temp = 'data/events/' + str(ctx.message.guild.id) + '/'
with open(temp + 'exams.csv', mode='r') as f:
reader = csv.reader(f, delimiter=',')
line_count = 0
for row in reader:
if line_count > 1:
print(f'Testing {", ".join(row)}')
db.mutation_query(
'INSERT INTO exams VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
[ctx.guild.id, row[0], row[1], row[2], row[3], row[4], row[5], row[6]]
)
line_count += 1
await ctx.send('File Submitted and Exams successfully created!')
for guild in self.bot.guilds:
if guild.id == ctx.guild.id:
for channel in guild.text_channels:
if channel.name == 'course-calendar':
await channel.delete()
channel = await guild.create_text_channel('course-calendar')
await cal.display_events(channel)
async def read_assignments(self, ctx):
temp = 'data/events/' + str(ctx.message.guild.id) + '/'
with open(temp + 'assignments.csv', mode='r') as f:
reader = csv.reader(f, delimiter=',')
line_count = 0
for row in reader:
if line_count > 1:
print(f'Testing {", ".join(row)}')
db.mutation_query(
'INSERT INTO assignments VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
[ctx.guild.id, row[0], row[1], row[2], row[3], row[4], row[5]]
)
line_count += 1
await ctx.send('File Submitted and Assignments successfully created!')
for guild in self.bot.guilds:
if guild.id == ctx.guild.id:
for channel in guild.text_channels:
if channel.name == 'course-calendar':
await channel.delete()
channel = await guild.create_text_channel('course-calendar')
await cal.display_events(channel)
# -------------------------------------
# add the file to the bot's cog system
# -------------------------------------
def setup(bot):
bot.add_cog(Create(bot))
# Copyright (c) 2021 War-Keeper
|
import numpy as np
from scipysound import Sound
import phasevocoder
#TODO ease in playback adjustments
class SoundPlus(Sound):
def __init__(self, y, sr, chunk_size=2048):
self._init_offset = 0
super().__init__(y, sr, chunk_size)
def _init_stretching(self):
super()._init_stretching()
self._i1, self._i2 = self._init_offset, self._init_offset
# variables for smooth volume changes
self._volume = 1.0
self._volume_cur = 0.0
self._volume_orig = 0.0
self._volume_step = 0
self._volume_steps = 25
def _next_chunk(self):
# calculate adjustment factors
shift_factor = 2.0 ** (1.0*self.pitch_shift / 12.0)
adj_stretch_factor = self.stretch_factor / shift_factor
# apply playback modification
chunk = super()._time_stretcher(adj_stretch_factor)
if np.round(self.pitch_shift, 1) != 0:
chunk = phasevocoder.speedx(chunk, shift_factor)
# apply volume multiplier
chunk *= self._volume_cur
# exponentially adjust volume
# self.cur_volume = self.cur_volume + (self._volume - self._volume_cur) * 0.2
# sinusoidally adjust volume
if self._volume_step <= self._volume_steps:
self._volume_cur = self._ease_sinusoidal(orig = self._volume_orig,
target = self._volume,
step = self._volume_step,
max_steps = self._volume_steps)
self._volume_step += 1
return chunk
@property
def chunks(self):
""" Returns a chunk iterator over the sound. """
if not hasattr(self, '_it'):
class ChunkIterator(object):
def __iter__(iter):
return iter
def __next__(iter):
chunk = self._next_chunk()
return chunk
next = __next__
self._it = ChunkIterator()
return self._it
def _ease_sinusoidal(self, orig, target, step, max_steps):
adj = target - orig
progress = step / max_steps
cur_adj = adj * (1 - np.cos(progress * np.pi)) / 2.0
return orig + cur_adj
def navigate(self, offset):
self._init_offset = offset
self._init_stretching()
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
self._volume_orig = self._volume_cur
self._volume_step = 0
if __name__ == '__main__':
from aupyom import Sampler
import sys
sampler = Sampler(sr=44100)
sound = SoundPlus.from_file(sys.argv[1])
print("done loading sound")
sound.navigate(100000)
sampler.play(sound)
|
#import stuff
import sqlite3, string, random
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
from contextlib import closing
#configuration
DATABASE = '/tmp/loonus.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
#create app
app = Flask(__name__)
app.config.from_object(__name__)
#db functions
def connect_db():
return sqlite3.connect(app.config["DATABASE"])
def init_db():
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/')
def show_entries():
cur = g.db.execute('select original, shorturl from entries order by id desc')
entries = [dict(original=row[0], shorturl='loon.us/'+row[1]) for row in cur.fetchall()]
return render_template('show_entries.html', entries=entries)
def shorturlcalc(originalurl):
letterarray = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(letterarray) for _ in range(6))
@app.route('/add', methods=['POST'])
def add_entry():
su = shorturlcalc(request.form['original'])
g.db.execute('insert into entries (original, shorturl) values (?, ?)',[request.form['original'], su])
g.db.commit()
flash('New tinyurl created')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run(debug=True) |
"""Defines the configuration to be loaded before running any experiment"""
from configobj import ConfigObj
import string
class Config(object):
def __init__(self, filename: string):
"""
Read from a config file
:param filename: name of the file to read from
"""
self.filename = filename
config = ConfigObj(self.filename)
self.config = config
# Comments on the experiments running
self.comment = config["comment"]
# Model name and location to store
self.model_path = config["train"]["model_path"]
# path to the model
self.pretrain_model_path = config["train"]["pretrain_model_path"]
# Normals
self.normals = config["train"].as_bool("normals")
# number of training examples
self.num_train = config["train"].as_int("num_train")
self.num_val = config["train"].as_int("num_val")
self.num_test = config["train"].as_int("num_test")
self.num_points = config["train"].as_int("num_points")
self.grid_size = config["train"].as_int("grid_size")
# Weight to the loss function for stretching
self.loss_weight = config["train"].as_float("loss_weight")
# dataset
self.dataset_path = config["train"]["dataset"]
# Proportion of train dataset to use
self.proportion = config["train"].as_float("proportion")
# Number of epochs to run during training
self.epochs = config["train"].as_int("num_epochs")
# batch size, based on the GPU memory
self.batch_size = config["train"].as_int("batch_size")
# Mode of training, 1: supervised, 2: RL
self.mode = config["train"].as_int("mode")
# Learning rate
self.lr = config["train"].as_float("lr")
# Number of epochs to wait before decaying the learning rate.
self.patience = config["train"].as_int("patience")
# Optimizer: RL training -> "sgd" or supervised training -> "adam"
self.optim = config["train"]["optim"]
# Epsilon for the RL training, not applicable in Supervised training
self.accum = config["train"].as_int("accum")
# Whether to schedule the learning rate or not
self.lr_sch = config["train"].as_bool("lr_sch")
def write_config(self, filename):
"""
Write the details of the experiment in the form of a config file.
This will be used to keep track of what experiments are running and
what parameters have been used.
:return:
"""
self.config.filename = filename
self.config.write()
def get_all_attribute(self):
"""
This function prints all the values of the attributes, just to cross
check whether all the data types are correct.
:return: Nothing, just printing
"""
for attr, value in self.__dict__.items():
print(attr, value)
if __name__ == "__main__":
file = Config("config_synthetic.yml")
print(file.write_config()) |
#!/usr/bin/env python3
from os import environ
from github import Github
gh_ref = environ['GITHUB_REPOSITORY']
gh_sha = environ['INPUT_SHA']
exit(1)
print('Getting status of %s @ %s...' % (gh_ref, gh_sha))
status = Github(environ['INPUT_TOKEN']
).get_repo(gh_ref).get_commit(sha=gh_sha).get_combined_status()
for item in status.statuses:
print('· %s: %s' % (item.context, item.state))
if status.state != 'success':
print('Status not successful. Skipping...')
|
from __future__ import annotations # define out of order or recursive
from typing import Any, Dict, List, Optional
from errors import LoxRuntimeError
from loxcallable import LoxCallable
from loxfunction import LoxFunction
from token_class import Token
class LoxClass(LoxCallable):
def __init__(
self, name: str, superclass: Optional[LoxClass], methods: Dict[str, LoxFunction]
):
self.name = name
self.methods = methods
self.superclass = superclass
def __repr__(self):
return self.name
def findMethod(self, name: str) -> Optional[LoxFunction]:
# python dict.get
if name in self.methods:
return self.methods.get(name)
if self.superclass is not None:
return self.superclass.findMethod(name)
return None
def call(self, interpreter, arguments: List[Any]) -> Any:
instance: LoxInstance = LoxInstance(self)
initializer: Optional[LoxFunction] = self.findMethod("init")
if initializer is not None:
initializer.bind(instance).call(interpreter, arguments)
return instance
def arity(self) -> int:
initializer: Optional[LoxFunction] = self.findMethod("init")
if initializer is None:
return 0
else:
return initializer.arity()
class LoxInstance:
def __init__(self, klass: LoxClass):
self.klass: LoxClass = klass
self.fields: Dict[str, Any] = dict()
def get(self, name: Token) -> Any:
if name.lexeme in self.fields:
return self.fields.get(name.lexeme)
method: Optional[LoxFunction] = self.klass.findMethod(name.lexeme)
if method is not None:
return method.bind(self)
raise LoxRuntimeError(name, "Undefined property '" + name.lexeme + "'.")
def set(self, name: Token, value: Any):
self.fields.update({name.lexeme: value})
def __repr__(self):
return self.klass.name + " instance"
|
include("$(MPY_DIR)/extmod/uasyncio/manifest.py")
freeze("$(MPY_DIR)/drivers/dht", "dht.py")
freeze("$(MPY_DIR)/drivers/lm75", "lm75.py")
freeze("$(MPY_DIR)/drivers/display", ("lcd160cr.py", "lcd160cr_test.py"))
freeze("$(MPY_DIR)/drivers/onewire", "onewire.py")
|
import pytest
import collections
from os import path
from shconfparser.reader import Reader
from shconfparser.shsplit import ShowSplit
from shconfparser.parser import Parser
class TestParser:
@pytest.fixture
def setup(self):
file_path = path.abspath('data/shcommands.txt')
p = Parser()
file_data = p.read(file_path)
p.split(file_data)
yield p
def test_data_parser(self, setup):
data = setup.s.shcmd_dict
assert 'version' in data
result = setup.parse_data(data['version'])
assert result != {}
assert 'R1 uptime is 10 minutes' in result
def test_tree_parser(self, setup):
data = setup.s.shcmd_dict
assert 'running' in data
result = setup.parse_tree(data['running'])
assert result != {}
assert 'line vty 0 4' in result
def test_table_parser(self, setup):
data = setup.s.shcmd_dict
assert 'cdp_neighbors' in data
header = ['Device ID', 'Local Intrfce', 'Holdtme', 'Capability', 'Platform', 'Port ID']
result = setup.parse_table(data['cdp_neighbors'], header)
assert result != []
assert type(result[0]) is dict
assert 'Device ID' in result[0]
assert 'R2' == result[0]['Device ID']
def test_table_parser_multiple_line(self, setup):
data = {'cdp_neighbors': ['R1#show cdp neighbors',
'Capability Codes: R - Router, T - Trans Bridge, B - Source Route Bridge',
'S - Switch, H - Host, I - IGMP, r - Repeater', '',
'Device ID Local Intrfce Holdtme Capability Platform Port ID',
'ajskdjfajfajlsfjabcdefgh',
' Fas 0/0 164 R S I 3725 Fas 0/0',
'R1#']}
assert 'cdp_neighbors' in data
header = ['Device ID', 'Local Intrfce', 'Holdtme', 'Capability', 'Platform', 'Port ID']
result = setup.parse_table(data['cdp_neighbors'], header)
assert result != []
assert type(result[0]) is dict
assert 'Device ID' in result[0]
assert '3725' == result[0]['Platform']
def test_table_parser_header_mismatch(self, setup):
data = setup.s.shcmd_dict
assert 'cdp_neighbors' in data
header = [' Device ID', 'Local Intrfce', 'Holdtme', 'Capability', 'Platform', 'Port ID']
result = setup.parse_table(data['cdp_neighbors'], header)
assert result == None
# TODO: need to check log message
def test_dump(self, setup):
data = setup.s.shcmd_dict
assert type(setup.dump(data)) is str
|
import numpy as np
import multiprocessing
import functools
import itertools
from count_table import NoisedCountTable, BlockResult, calculate_mean_and_aggregation_error, implicit_ae
"""
Python hdpview
"""
def run(block, epsilon, ratio, prng, alpha=2, beta=1.2, gamma=1.0, theta=None, verbose=False):
"""Run HDPView
1st phase, divide blocks.
2nd phase, perturbation.
Prepare parameters and execute HDPView
Args:
block (CountTable): block
epsilon (float): privacy budget
ratio (float): ubdget ratio of block division and perturbation, 0 to 1 value
prng (np.random.RandomState): random state
alpha (float), beta(float), gamma(float)
verbose (bool)
"""
seed = prng.randint(0, 2949672950)
block.set_random(seed)
if verbose:
print("seed: ", seed)
n_dash = block.size()
kappa = np.ceil(np.log2(n_dash)*beta)
epsilon_r = epsilon * ratio
epsilon_p = epsilon * (1 - ratio)
if theta is None:
theta = 1/epsilon_p
epsilon_cut = (1 - gamma) * epsilon_r / kappa
lamb = ((2 * alpha - 1)/(alpha - 1) + 1) * (2 / (gamma * epsilon_r))
delta = lamb*np.log(alpha)
# prepare shared memories for parallelization
manager = multiprocessing.Manager()
block_queue = manager.Queue()
block_queue.put(block)
block_result_list = []
MAX_PROCESS = multiprocessing.cpu_count()-1
pool = multiprocessing.Pool(MAX_PROCESS)
while True:
async_results = []
while not block_queue.empty():
result = pool.apply_async(
recursive_bisection, (block_queue.get(), block_queue, epsilon_cut, kappa, theta, lamb, delta, verbose)
)
async_results.append(result)
results = list(itertools.chain.from_iterable([ r.get() for r in async_results ]))
block_result_list.extend(results)
if block_queue.empty():
break
block_result_list.sort(key=functools.cmp_to_key(range__gt__))
for block_result in block_result_list:
mean, ae = calculate_mean_and_aggregation_error(block, block_result.domain_dict)
block_result.mean = mean
block_result.aggregation_error = ae
pe = prng.laplace(0.0, 1.0 / epsilon_p)
block_result.perturbation_error = pe
return NoisedCountTable.from_count_table(block, block_result_list), block_result_list
def recursive_bisection(block, block_queue, epsilon_cut, depth_max, theta, lamb, delta, verbose=False):
"""Random cut and random converge
Args:
block_queue (multiprocessing.Queue): Shared queue to store blocks to be executed
Returns:
[{"range": {int: (int,int)}, "mondrian_budget": float, "depth": int}]
"""
# Random cut
if verbose:
print('Before cut', block.domain_dict)
if block.depth > depth_max:
axis, index = cut_random(block)
else:
axis, index = cut_exp_mech(block, epsilon_cut)
if verbose:
print(axis, index)
left_block, right_block = block.split(axis, index)
# Random converge
converged_block_results = []
if left_block.size() == 1:
converged_block_results.append(BlockResult(left_block.domain_dict, left_block.depth))
elif random_converge(left_block, left_block.depth, theta, lamb, delta):
converged_block_results.append(BlockResult(left_block.domain_dict, left_block.depth))
else:
block_queue.put(left_block)
if right_block.size() == 1:
converged_block_results.append(BlockResult(right_block.domain_dict, right_block.depth))
elif random_converge(right_block, right_block.depth, theta, lamb, delta):
converged_block_results.append(BlockResult(right_block.domain_dict, right_block.depth))
else:
block_queue.put(right_block)
return converged_block_results
def range__gt__(block_result, other):
for dim, dom_range in block_result.domain_dict.items():
other_range = other.domain_dict[dim]
if dom_range > other_range:
return 1
elif dom_range < other_range:
return -1
return 0
def exp_mech(prng, eps , scores, targets, sensitivity):
"""Exponential mechanism
"""
if sensitivity == 0:
index = prng.choice(len(targets))
return targets[index]
np_scores = np.array(scores)
score_max = np.max(np_scores)
weights = np.exp((eps*(np_scores - score_max)) / (2*sensitivity))
total_weight = np.sum(weights)
cum_weights = np.cumsum(weights) / total_weight
index = prng.rand()
return targets[cum_weights.searchsorted(index)]
def cut_random(block):
dim_list = list(block.cardinality_dict.keys())
while True:
d = block.prng.choice(dim_list)
cardinality = block.cardinality_dict[d]
if cardinality > 1:
break
i = block.prng.choice(cardinality-1)
return (d, i)
# left AE + right AE
def cut_exp_mech(block, epsilon_cut_per_depth):
scores = []
targets = []
for d in block.index:
cardinality = block.cardinality_dict[d]
for i in np.arange(cardinality-1):
scores.append(- np.sum(block.split_values(d, i)))
targets.append((d, i))
return exp_mech(
block.prng,
epsilon_cut_per_depth,
scores,
targets,
2*(2 - 2/block.size())
)
def random_converge(block, depth, theta, lamb, delta):
ae = implicit_ae(block.values(), block.zero_num())
b = ae - (delta*depth)
b = max(b, (theta + 2 - delta))
noise = block.prng.laplace(0.0, scale=lamb)
noisy_b = b + noise
return noisy_b <= theta
|
def two_point_crossover(parent_1, parent_2, crossover_point_1, crossover_point_2):
mask = ['0' for _ in range(len(parent_1))]
for i in range(crossover_point_1, crossover_point_2 + 1):
mask[i] = '1'
crossover_mask = ''.join(mask)
offspring_1 = []
offspring_2 = []
for parent_1_dna, parent_2_dna, mask_value in zip(parent_1, parent_2, crossover_mask):
if mask_value == '1':
offspring_1.append(parent_1_dna)
offspring_2.append(parent_2_dna)
else:
offspring_1.append(parent_2_dna)
offspring_2.append(parent_1_dna)
return [''.join(offspring_1), ''.join(offspring_2)]
parent1 = '11111111111'
parent2 = '00000000000'
offsprings = two_point_crossover(parent1, parent2, 2, 5)
print("\n Two point crossover")
print("Parent 1 : " + parent1)
print("Parent 2 : " + parent2)
print("Offspring 1 : " + offsprings[0])
print("Offspring 2 : " + offsprings[1])
|
"""
From mitbbs for Facebook
"""
def count_zero_for_factorial(n):
i = 1
count = 0
while i <= n:
num = i
while num % 5 == 0:
count += 1
num /= 5
i += 1
return count
def fact(n):
if n == 1:
return n
return n * fact(n-1)
N = 51
print count_zero_for_factorial(N)
print fact(N)
|
# -*- coding: utf-8 -*-
from odoo import http
# class CowinSettings(http.Controller):
# @http.route('/cowin_settings/cowin_settings/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/cowin_settings/cowin_settings/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('cowin_settings.listing', {
# 'root': '/cowin_settings/cowin_settings',
# 'objects': http.request.env['cowin_settings.cowin_settings'].search([]),
# })
# @http.route('/cowin_settings/cowin_settings/objects/<model("cowin_settings.cowin_settings"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('cowin_settings.object', {
# 'object': obj
# }) |
import os
from conans import ConanFile, tools
class ExprtkConan(ConanFile):
name = "exprtk"
version = "20181202"
description = "ExprTk is a simple to use, easy to integrate and extremely efficient run-time mathematical expression parser and evaluation engine"
topics = ("conan", "exprtk", "math-expressions", "parser")
url = "https://github.com/kylemacfarlan/conan-exprtk"
homepage = "https://github.com/ArashPartow/exprtk"
author = "Kyle Macfarlan <[email protected]>"
license = "MIT"
no_copy_source = True
_source_subfolder = "source_subfolder"
def source(self):
download_url = "https://github.com/ArashPartow/exprtk"
commit_id = "88acc921e28d9e80b0c61663257bd8ff7997bcb8"
sha256 = "430829a20b469cb584d75815cee2c693dda2feac6e63c407d17029d5cf5e26e9"
tools.get("{}/archive/{}.zip".format(download_url, commit_id), sha256=sha256)
os.rename("{}-{}".format(self.name, commit_id), self._source_subfolder)
def _extract_license(self, file):
file_content = tools.load(file)
expect = "MIT *"
license_contents = file_content[2:file_content.find(expect) + len(expect)]
tools.save(os.path.join(self.package_folder, "licenses", "LICENSE"), license_contents)
def package(self):
header_file = "exprtk.hpp"
self._extract_license(os.path.join(self.source_folder, self._source_subfolder, header_file))
self.copy(pattern=header_file, dst="include", src=self._source_subfolder)
def package_id(self):
self.info.header_only()
|
from datetime import datetime
import pytest
from loguru import logger
import app.internal.google_connect as google_connect
from app.routers.event import create_event
from app.database.models import OAuthCredentials
from app.routers.user import create_user
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from googleapiclient.http import HttpMock
@pytest.fixture
def google_events_mock():
return [
{
"kind": "calendar#event",
"etag": "somecode",
"id": "somecode",
"status": "confirmed",
"htmlLink": "https://www.google.com/calendar/event?eid=somecode",
"created": "2021-01-13T09:10:02.000Z",
"updated": "2021-01-13T09:10:02.388Z",
"summary": "some title",
"creator": {
"email": "someemail",
"self": True
},
"organizer": {
"email": "someemail",
"self": True
},
"start": {
"dateTime": "2021-02-25T13:00:00+02:00"
},
"end": {
"dateTime": "2021-02-25T14:00:00+02:00"
},
"iCalUID": "somecode",
"sequence": 0,
"reminders": {
"useDefault": True
}
},
{
"kind": "calendar#event",
"etag": "somecode",
"id": "somecode",
"status": "confirmed",
"htmlLink": "https://www.google.com/calendar/event?eid=somecode",
"created": "2021-01-13T09:10:02.000Z",
"updated": "2021-01-13T09:10:02.388Z",
"summary": "some title to all day event",
"creator": {
"email": "someemail",
"self": True
},
"organizer": {
"email": "someemail",
"self": True
},
"start": {
"date": "2021-02-25"
},
"end": {
"date": "2021-02-25"
},
"iCalUID": "somecode",
"sequence": 0,
"location": 'somelocation',
"reminders": {
"useDefault": True
}
}
]
@pytest.fixture
def credentials():
cred = Credentials(
token="somecode",
refresh_token="somecode",
token_uri="some_uri",
client_id="somecode",
client_secret="some_secret",
expiry=datetime(2021, 1, 28)
)
return cred
@pytest.mark.usefixtures("user", "session", "google_events_mock")
def test_push_events_to_db(google_events_mock, user, session):
assert google_connect.push_events_to_db(google_events_mock, user, session)
@pytest.mark.usefixtures("user", "session", "google_events_mock")
def test_db_cleanup(google_events_mock, user, session):
for event in google_events_mock:
location = None
title = event['summary']
# support for all day events
if 'dateTime' in event['start'].keys():
# part time event
start = datetime.fromisoformat(event['start']['dateTime'])
end = datetime.fromisoformat(event['end']['dateTime'])
else:
# all day event
start = event['start']['date'].split('-')
start = datetime(
year=int(start[0]),
month=int(start[1]),
day=int(start[2])
)
end = event['end']['date'].split('-')
end = datetime(
year=int(end[0]),
month=int(end[1]),
day=int(end[2])
)
if 'location' in event.keys():
location = event['location']
create_event(
db=session,
title=title,
start=start,
end=end,
owner_id=user.id,
location=location,
is_google_event=True
)
assert google_connect.cleanup_user_google_calendar_events(
user, session)
@pytest.mark.usefixtures("session")
def test_get_credentials_from_db(session):
user = create_user(session=session,
username='new_test_username',
password='new_test_password',
email='[email protected]',
language_id=1)
credentials = OAuthCredentials(
owner=user,
token="somecode",
refresh_token="somecode",
token_uri="some_uri",
client_id="somecode",
client_secret="some_secret",
expiry=datetime(2021, 2, 22)
)
session.add(credentials)
session.commit()
assert user.oauth_credentials is not None
session.close()
return_val = google_connect.get_credentials_from_db(user)
assert return_val
@pytest.mark.usefixtures("session", "user", "credentials")
def test_refresh_token(mocker, session, user, credentials):
mocker.patch(
'google.oauth2.credentials.Credentials.refresh',
return_value=logger.debug('refreshed')
)
assert google_connect.refresh_token(credentials, user, session)
mocker.patch(
'google.oauth2.credentials.Credentials.expired',
return_value=False
)
assert google_connect.refresh_token(credentials, user, session)
@pytest.mark.usefixtures("session", "user", "credentials")
def test_get_current_year_events(mocker, user, session, credentials):
class mock_events:
def __init__(self, service):
self.service = service
def list(self, *args):
request = self.service.events().list(calendarId='primary',
timeMin=datetime(
2021, 1, 1).isoformat(),
timeMax=datetime(
2022, 1, 1).isoformat(),
singleEvents=True,
orderBy='startTime'
)
http = HttpMock(
'calendar-linux.json',
{'status': '200'}
)
response = request.execute(http=http)
return response
http = HttpMock(
'./tests/calendar-discovery.json',
{'status': '200'}
)
service = build('calendar', 'v3', http=http)
mocker.patch(
'googleapiclient.discovery.build',
return_value=service,
events=service
)
mocker.patch(
'googleapiclient.discovery.Resource',
events=mock_events(service)
)
assert google_connect.get_current_year_events(credentials, user, session)
@pytest.mark.usefixtures("user", "session",
"google_connect_test_client", "credentials")
def test_google_sync(mocker, google_connect_test_client, session, credentials):
create_user(session=session,
username='new_test_username',
password='new_test_password',
email='[email protected]',
language_id=1)
mocker.patch(
'app.routers.google_connect.get_credentials',
return_value=credentials
)
mocker.patch(
'app.routers.google_connect.fetch_save_events',
return_value=None
)
connect = google_connect_test_client.get(
'google/sync',
headers={
"referer": 'http://testserver/'
})
assert connect.ok
# second case
mocker.patch(
'app.routers.google_connect.get_credentials',
return_value=None
)
connect = google_connect_test_client.get(
'google/sync',
headers={
"referer": 'http://testserver/'
})
assert connect.ok
def test_is_client_secret_none():
answer = google_connect.is_client_secret_none()
assert answer is not None
@pytest.mark.usefixtures("session")
def test_clean_up_old_credentials_from_db(session):
google_connect.clean_up_old_credentials_from_db(session)
assert len(session.query(OAuthCredentials)
.filter_by(user_id=None).all()) == 0
@pytest.mark.usefixtures("session", 'user', 'credentials')
def test_get_credentials_from_consent_screen(mocker, session,
user, credentials):
mocker.patch(
'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file',
return_value=mocker.Mock(name='flow', **{
"credentials": credentials,
"run_local_server": mocker.Mock(name='run_local_server',
return_value=logger.debug(
'running server'))
})
)
mocker.patch(
'app.internal.google_connect.is_client_secret_none',
return_value=False
)
assert google_connect.get_credentials_from_consent_screen(
user, session) == credentials
@pytest.mark.usefixtures("session")
def test_create_google_event(session):
user = create_user(session=session,
username='new_test_username',
password='new_test_password',
email='[email protected]',
language_id=1)
event = google_connect.create_google_event(
'title',
datetime(2021, 1, 1, 15, 15),
datetime(2021, 1, 1, 15, 30),
user,
'location',
session
)
assert event.title == 'title'
@pytest.mark.usefixtures("session", "user", 'credentials')
def test_get_credentials(mocker, session, user, credentials):
user = create_user(
session=session,
username='new_test_username',
password='new_test_password',
email='[email protected]',
language_id=1
)
mocker.patch(
'app.internal.google_connect.get_credentials_from_consent_screen',
return_value=credentials
)
assert google_connect.get_credentials(user=user,
session=session) == credentials
mocker.patch(
'app.internal.google_connect.get_credentials',
return_value=credentials
)
mocker.patch(
'app.internal.google_connect.refresh_token',
return_value=credentials
)
assert google_connect.get_credentials(user=user,
session=session) == credentials
@pytest.mark.usefixtures("session", "user",
'credentials', 'google_events_mock')
def test_fetch_save_events(mocker, session, user, credentials,
google_events_mock):
mocker.patch(
'app.internal.google_connect.get_current_year_events',
return_value=google_events_mock
)
assert google_connect.fetch_save_events(credentials,
user, session) is None
@pytest.mark.usefixtures("session", "user", 'credentials')
def test_push_credentials_to_db(session, user, credentials):
assert google_connect.push_credentials_to_db(credentials, user, session)
|
import threading
from threading import Thread, Semaphore, Lock
import random
from time import sleep
#configurable variables
STASH = 25
BUCKET_SIZE = 5
NUM_FROLFERS = 5
#Locking Structures
stashLock = Lock()
fieldLock = Lock()
stashEmpty = Semaphore(0)
stashFull = Semaphore(0)
#other global vars
discs_on_field = 0
rng = random.Random()
rng.seed(50)
#aux functions
def delimiter():
print("#################################################################")
def frolfer(thread_id):
global STASH, BUCKET_SIZE, NUM_FROLFERS
global discs_on_field
global rng
global stashLock, fieldLock, stashEmpty, stashFull
bucket = 0
while True:
while bucket == 0:
stashLock.acquire()
print ("Frolfer", thread_id, "calling for a bucket")
if STASH < BUCKET_SIZE:
stashEmpty.release() # stash is empty. Signal cart
stashFull.acquire() # wait for stash to be full
if STASH < BUCKET_SIZE: # if cart STILL didn't bring enough discs
stashLock.release()
continue # go back to top of while bucket == 0 loop
if STASH >= BUCKET_SIZE:
STASH -= BUCKET_SIZE # acquire a bucket
bucket += BUCKET_SIZE
print ("Frolfer", thread_id, "got", bucket, "discs; Stash =", STASH)
stashLock.release()
for i in range(0, bucket):
fieldLock.acquire()
discs_on_field += 1
print ("Frolfer", thread_id, "threw disc", i)
fieldLock.release()
sleep(rng.random() * 5)
bucket = 0
def cart():
global STASH, BUCKET_SIZE, NUM_FROLFERS
global discs_on_field
global rng
global stashLock, fieldLock, stashEmpty, stashFull
while True:
stashEmpty.acquire() # block until stash is empty
fieldLock.acquire()
sleep(rng.random() * 2)
delimiter()
initial_stash = STASH
discs_collected = discs_on_field
print("Stash =", initial_stash,"; Cart entering field")
STASH += discs_on_field
discs_on_field = 0
print("Cart done, gathered", discs_collected, "dics; Stash = ", STASH)
delimiter()
fieldLock.release()
stashFull.release() # signal frolfers that are waiting on the stash to release
sleep(rng.random() * 5)
def main():
cart_t = Thread(target = cart)
cart_t.start()
for i in range(NUM_FROLFERS):
frolfer_t = Thread(target=frolfer, args=[i])
frolfer_t.start()
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-28 20:27
from __future__ import unicode_literals
from django.db import migrations
def forward(apps, schema_editor):
Area = apps.get_model('guides','Area')
for area in [
'ART: Applications and Real-Time',
'INT: Internet',
'OPS: Operations and Management',
'RTG: Routing',
'SEC: Security',
'TSG: Transport',
"UNKNOWN: I don't know yet",
]:
Area.objects.create(area=area)
def reverse(apps, schema_editor):
Area = apps.get_model('guides','Area')
Area.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('guides', '0006_make_area_a_class'),
]
operations = [
migrations.RunPython(forward, reverse)
]
|
#!/usr/bin/python
# Classification (U)
"""Program: initate_dump.py
Description: Integration testing of initate_dump in elastic_db_dump.py.
Usage:
test/integration/elastic_db_dump/initate_dump.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
import shutil
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import elastic_db_dump
import lib.gen_libs as gen_libs
import elastic_lib.elastic_class as elastic_class
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Unit testing initilization.
test_i_option_multi_db -> Test database with multiple database names.
test_i_option_one_db -> Test database with one database name.
test_i_option_missing_db -> Test database with incorrect database name.
test_no_i_option -> Test database dump with no -i option.
test_initate_dump -> Test datbase dump is created.
tearDown -> Clean up of integration testing.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.base_dir = "test/integration/elastic_db_dump"
self.test_path = os.path.join(os.getcwd(), self.base_dir)
self.config_path = os.path.join(self.test_path, "config")
self.cfg = gen_libs.load_module("elastic", self.config_path)
self.args_array = {}
self.phy_repo_dir = os.path.join(self.cfg.phy_repo_dir,
self.cfg.repo_name)
self.elr = elastic_class.ElasticSearchRepo(self.cfg.host,
self.cfg.port)
if self.elr.repo_dict:
print("ERROR: Test environment not clean - repositories exist.")
self.skipTest("Pre-conditions not met.")
else:
_, _ = self.elr.create_repo(
self.cfg.repo_name, os.path.join(self.cfg.repo_dir,
self.cfg.repo_name))
self.els = elastic_class.ElasticSearchDump(
self.cfg.host, self.cfg.port, repo=self.cfg.repo_name)
def test_i_option_multi_db(self):
"""Function: test_i_option_multi_db
Description: Test database with multiple database names.
Arguments:
"""
# Capture 2 databases/indices name in Elasticsearch.
dbs = [str(y[2]) for y in [
x.split() for x in self.els.els.cat.indices().splitlines()]][0:2]
self.args_array = {"-i": dbs}
elastic_db_dump.initate_dump(self.els, args_array=self.args_array)
dir_path = os.path.join(self.cfg.phy_repo_dir, self.cfg.repo_name,
"indices")
# Count number of databases/indices dumped to repository.
cnt = len([name for name in os.listdir(dir_path)
if os.path.isdir(os.path.join(dir_path, name))])
self.assertEqual(cnt, 2)
def test_i_option_one_db(self):
"""Function: test_i_option_one_db
Description: Test database with one database name.
Arguments:
"""
# Capture the first database/indice name in Elasticsearch.
dbs = [str([x.split()
for x in self.els.els.cat.indices().splitlines()][0][2])]
self.args_array = {"-i": dbs}
elastic_db_dump.initate_dump(self.els, args_array=self.args_array)
dir_path = os.path.join(self.cfg.phy_repo_dir, self.cfg.repo_name,
"indices")
# Count number of databases/indices dumped to repository.
cnt = len([name for name in os.listdir(dir_path)
if os.path.isdir(os.path.join(dir_path, name))])
self.assertEqual(cnt, 1)
def test_i_option_missing_db(self):
"""Function: test_i_option_missing_db
Description: Test database with incorrect database name.
Arguments:
"""
self.args_array = {"-i": ["Incorrect_Database_Name"]}
elastic_db_dump.initate_dump(self.els, args_array=self.args_array)
# If index dump directory exists, then test is a failure.
self.assertFalse(
os.path.isdir(os.path.join(self.cfg.repo_dir, "indices")))
def test_no_i_option(self):
"""Function: test_no_i_option
Description: Test database dump with no -i option.
Arguments:
"""
elastic_db_dump.initate_dump(self.els, args_array=self.args_array)
self.assertTrue(self.els.dump_list)
def test_initate_dump(self):
"""Function: test_initate_dump
Description: Test datbase dump is created.
Arguments:
"""
elastic_db_dump.initate_dump(self.els, args_array=self.args_array)
self.assertTrue(self.els.dump_list)
def tearDown(self):
"""Function: tearDown
Description: Clean up of integration testing.
Arguments:
"""
err_flag, status_msg = self.elr.delete_repo(self.cfg.repo_name)
if err_flag:
print("Error: Failed to remove repository '%s'"
% self.cfg.repo_name)
print("Reason: '%s'" % (status_msg))
if os.path.isdir(self.phy_repo_dir):
shutil.rmtree(self.phy_repo_dir)
if __name__ == "__main__":
unittest.main()
|
class Solution:
def findLatestStep(self, arr: List[int], m: int) -> int:
n = len(arr)
parent, size, result, bitsArray, count = [x for x in range(n)], [1] * n, -1, [0] * n, Counter()
def find(x):
if parent[x] == x:
return x
else:
parent[x] = find(parent[x])
return parent[x]
def union(x, y):
xParent, yParent = find(x), find(y)
if xParent == yParent:
return
if size[xParent] < size[yParent]:
xParent, yParent = yParent, xParent
parent[yParent] = xParent
size[xParent] += size[yParent]
size[yParent] = size[xParent]
def getSize(x):
return size[find(x)]
for step, index in enumerate(arr, start = 1):
index -= 1
bitsArray[index], currentSize = 1, 1
if index - 1 >= 0 and bitsArray[index - 1] == 1:
leftSize = getSize(index - 1)
union(index, index - 1)
currentSize += leftSize
count[leftSize] -= 1
if index + 1 < n and bitsArray[index + 1] == 1:
rightSize = getSize(index + 1)
union(index, index + 1)
currentSize += rightSize
count[rightSize] -= 1
count[currentSize] += 1
if count[m] > 0:
result = step
return result |
# -*- coding: utf-8 -*-
"""
Solace
======
*a multilingual support system*
Solace is a multilingual support system developed at Plurk
for end user support. The application design is heavily
influenced by bulletin boards like phpBB and the new
stackoverflow programming community site.
For more information consult the `README` file or have a
look at the `website <http://opensource.plurk.com/solace/>`_.
"""
# we require setuptools because of dependencies and testing.
# we may provide a distutils fallback later.
from setuptools import setup
extra = {}
try:
import babel
except ImportError:
pass
else:
extra['message_extractors'] = {
'solace': [
('**.py', 'python', None),
('**/templates/**', 'jinja2', None),
('**.js', 'javascript', None)
]
}
try:
from solace import scripts
except ImportError:
pass
else:
extra['cmdclass'] = {
'runserver': scripts.RunserverCommand,
'initdb': scripts.InitDatabaseCommand,
'reset': scripts.ResetDatabase,
'make_testdata': scripts.MakeTestData,
'compile_catalog': scripts.CompileCatalogEx
}
try:
import webdepcompress
except ImportError:
pass
else:
extra['webdepcompress_manager'] = 'solace.packs.pack_mgr'
setup(
name='Plurk_Solace',
version='0.1',
url='http://opensource.plurk.com/solace/',
license='BSD',
author='Plurk Inc.',
author_email='[email protected]',
description='Multilangual User Support Platform',
long_description=__doc__,
packages=['solace', 'solace.views', 'solace.i18n', 'solace.utils'],
zip_safe=False,
platforms='any',
test_suite='solace.tests.suite',
install_requires=[
'Werkzeug>=0.5.1',
'Jinja2',
'Babel',
'SQLAlchemy>=0.5.5',
'creoleparser',
'simplejson',
'webdepcompress',
'translitcodec'
],
tests_require=[
'lxml',
'html5lib'
], **extra
)
|
from spaceone.core.service import *
from spaceone.identity.manager.endpoint_manager import EndpointManager
@authentication_handler
@authorization_handler
@mutation_handler
@event_handler
class EndpointService(BaseService):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.endpoint_mgr: EndpointManager = self.locator.get_manager('EndpointManager')
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@append_query_filter(['service'])
@append_keyword_filter(['service'])
def list(self, params):
"""
Args:
params (dict): {
'query': 'dict (spaceone.api.core.v1.Query)',
'service': 'str'
}
Returns:
results (list): list of endpoint_vo
total_count (int)
"""
return self.endpoint_mgr.list_endpoints(params.get('query', {}))
|
#!/usr/bin/env python3
import os
import sys
import textwrap
self_path = os.path.dirname(os.path.realpath(__file__));
f = open(self_path + "/unicode/CaseFolding.txt", "r")
status_list = [ "C", "F" ]
folding_list = [ dict(), dict(), dict() ]
# Filter the foldings for "full" folding.
for line in f:
comment_off = line.find("#")
if comment_off >= 0:
line = line[:comment_off]
line = line.strip()
if not line:
continue
raw_codepoint, status, raw_mapping, ignored_tail = line.split(";", 3)
if not status.strip() in status_list:
continue
codepoint = int(raw_codepoint.strip(), 16)
mapping = [int(it, 16) for it in raw_mapping.strip().split(" ")]
mapping_len = len(mapping)
if mapping_len in range(1, 4):
folding_list[mapping_len-1][codepoint] = mapping
else:
assert(False)
f.close()
# If we assume that (index0 ... index-1) makes a range (as defined below),
# check that the newly provided index is compatible with the range too; i.e.
# verify that the range can be extended without breaking its properties.
#
# Currently, we can handle ranges which:
#
# (1) either form consecutive sequence of codepoints and which map that range
# to other consecutive range of codepoints (of the same length);
#
# (2) or a consecutive sequence of codepoints with step 2 where each codepoint
# CP is mapped to the codepoint CP+1
# (e.g. 0x1234 -> 0x1235; 0x1236 -> 0x1237; 0x1238 -> 0x1239; ...).
#
# Note: When the codepoints in the range are mapped to multiple codepoints,
# only the 1st mapped codepoint is considered. All the other ones have to be
# shared by all the mappings covered by the range.
def is_range_compatible(folding, codepoint_list, index0, index):
N = index - index0
codepoint0 = codepoint_list[index0]
codepoint1 = codepoint_list[index0+1]
codepointN = codepoint_list[index]
mapping0 = folding[codepoint0]
mapping1 = folding[codepoint1]
mappingN = folding[codepointN]
# Check the range type (1):
if codepoint1 - codepoint0 == 1 and codepointN - codepoint0 == N \
and mapping1[0] - mapping0[0] == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - mapping0[0] == N and mappingN[1:] == mapping0[1:]:
return True
# Check the range type (2):
if codepoint1 - codepoint0 == 2 and codepointN - codepoint0 == 2 * N \
and mapping0[0] - codepoint0 == 1 \
and mapping1[0] - codepoint1 == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - codepointN == 1 and mappingN[1:] == mapping0[1:]:
return True
return False
def mapping_str(list, mapping):
return ",".join("0x{:04x}".format(x) for x in mapping)
for mapping_len in range(1, 4):
folding = folding_list[mapping_len-1]
codepoint_list = list(folding)
index0 = 0
count = len(folding)
records = list()
data_records = list()
while index0 < count:
index1 = index0 + 1
while index1 < count and is_range_compatible(folding, codepoint_list, index0, index1):
index1 += 1
if index1 - index0 > 2:
# Range of codepoints
records.append("R(0x{:04x},0x{:04x})".format(codepoint_list[index0], codepoint_list[index1-1]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index1-1]]))
index0 = index1
else:
# Single codepoint
records.append("S(0x{:04x})".format(codepoint_list[index0]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
index0 += 1
sys.stdout.write("static const unsigned FOLD_MAP_{}[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
sys.stdout.write("static const unsigned FOLD_MAP_{}_DATA[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(data_records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
|
#TODO add offline mode
from asciimatics.event import KeyboardEvent
from asciimatics.widgets import *
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.exceptions import ResizeScreenError, StopApplication, NextScene
import sys
import os
try:
import magic
except ImportError:
pass
from gui.bar import *
from gui.mainplaylist import *
from gui.browser import *
from gui.clock import *
from gui.equalizer import *
from gui.playlists import *
from gui.visualization import *
from gui.medialib import *
from gui.artistInfo import *
from gui.lyrics import *
from player import Player
from gui.presenter import *
from gui.search import *
from lastfm_client import *
from lyricsWiki import *
from soundcloud_client import SoundcloudClient
from db import *
SCR = 1
f = open('config', 'rb')
data = f.read().decode('utf-8')
config = json.loads(data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
import pathlib
pathlib.Path(config.cash_folder).mkdir(parents=True, exist_ok=True)
pathlib.Path(config.playlist_folder).mkdir(parents=True, exist_ok=True)
if config.useInternet:
canConnectToSC = True
try:
sc = SoundcloudClient(
config.sound_cloud.client_id,
config.sound_cloud.client_secret,
config.sound_cloud.username,
config.sound_cloud.password,
config.sound_cloud.bpm,
config.sound_cloud.search_pages)
except:
canConnectToSC = False
if config.useInternet:
lastfm = Lastfm(config.lastfm.apikey, config.lastfm.lang)
lyricsWiki = LyricsWiki()
upBar = Bar()
upBar.parse(config, UP_BAR)
downBar = Bar()
downBar.parse(config, DOWN_BAR)
player = Player(config)
presenter = Presenter(config)
presenter.setPlayer(player)
if config.useInternet:
presenter.setSoundCloud(sc)
presenter.setLastfm(lastfm)
presenter.setLyricsWiki(lyricsWiki)
lastfm.setPresenter(presenter)
db = Database()
db.PATH = config.root_dir
presenter.setDb(db)
def init(screen, old_scene):
if config.useInternet:
sc.setPresenter(presenter)
browser = BrowserFrame(screen, upBar, downBar, config)
browser.setPresenter(presenter)
medialib = MedialibFrame(screen, upBar, downBar, config)
medialib.setPresenter(presenter)
playlists = PlaylistsFrame(screen, upBar, downBar, config)
playlists.setPresenter(presenter)
equalizer = EqualizerFrame(screen, upBar, downBar, config)
equalizer.setPresenter(presenter)
viz = VisualizationFrame(screen, upBar, downBar, config)
viz.setPresenter(presenter)
clock = ClockFrame(screen, upBar, downBar, config)
clock.setPresenter(presenter)
if config.useInternet:
artistinfo = ArtistInfoFrame(screen, upBar, downBar, config)
artistinfo.setPresenter(presenter)
if config.useInternet:
lyrics = LyricsFrame(screen, upBar, downBar, config)
lyrics.setPresenter(presenter)
search = SearchFrame(screen, upBar, downBar, config)
search.setPresenter(presenter)
mainplaylist = MainPlaylistFrame(screen, upBar, downBar, config)
mainplaylist.setPresenter(presenter)
presenter.setBrowser(browser)
presenter.setMainPlaylist(mainplaylist)
presenter.setPlaylists(playlists)
presenter.setEqualizer(equalizer)
presenter.setClock(clock)
presenter.setUpBar(upBar)
presenter.setDownBar(downBar)
presenter.setVisualization(viz)
presenter.setMedialib(medialib)
if config.useInternet:
presenter.setArtistInfo(artistinfo)
presenter.setLyrics(lyrics)
presenter.setSearch(search)
player.setPresenter(presenter)
presenter.run()
screens = [Scene([mainplaylist], -1, name="MainPlaylist"),
Scene([browser], -1, name="Browser"),
Scene([medialib], -1, name="Medialib"),
Scene([playlists], -1, name="Playlists"),
Scene([equalizer], -1, name="Equalizer"),
Scene([viz], -1, name="Visualizer")]
if config.useInternet:
screens.append(Scene([artistinfo], -1, name="ArtistInfo"))
screens.append(Scene([lyrics], -1, name="Lyrics"))
screens.append(Scene([clock], -1, name="Clock"))
screens.append(Scene([search], -1, name="Search"))
screen.play(screens,
stop_on_resize=True, start_scene=old_scene)
def openFile(fname):
path = config.cash_folder + "/cash.json" if config.cash_folder[len(config.cash_folder)-1] != "/" else "cash.json"
playlist = loadPlaylist(path)
tag = getTagFromPath(fname)
tag.id = 0
for t in playlist:
t.id += 1
playlist = [tag] + playlist
savePlaylist(playlist, path)
player.playlist = playlist
#player.play()
def printHelp():
from gui.dialog_info import (CONTROL_INFO, CLOCK_INFO, PLAYER_CONTROL_INFO,
MAINPLAYLIST_INFO, PLAYLISTS_INFO, BROWSER_INFO, EQUALIZER_INFO,
MEDIALIB_INFO, SEARCH_INFO, VIZUALIZER_INFO)
text = "-db - create db (need delete old db)\n"+\
"-h --help - print help\n" + CONTROL_INFO + "\n"+ CLOCK_INFO + "\n"+ PLAYER_CONTROL_INFO + "\n"+\
MAINPLAYLIST_INFO + "\n"+ PLAYLISTS_INFO + "\n"+ BROWSER_INFO + "\n"+ EQUALIZER_INFO + "\n"+\
MEDIALIB_INFO + "\n"+ SEARCH_INFO + "\n"+ VIZUALIZER_INFO + "\n"
print(text)
def createDb():
#TODO delete old db if exist
db.walk()
def argParse():
lenargs = len(sys.argv)
if lenargs == 2 and (sys.argv[1] != "-h" and sys.argv[1] != "--help" and
sys.argv[1] != "-db"):
#TODO format test
openFile(sys.argv[1])
elif lenargs == 2 and (sys.argv[1] == "-h" or sys.argv[1] == "--help"):
printHelp()
sys.exit()
elif lenargs == 2 and sys.argv[1] == "-db":
createDb()
sys.exit()
argParse()
last_scene = None
while True:
try:
Screen.wrapper(init, catch_interrupt=False, arguments=[last_scene])
path = config.cash_folder + "/cash.json" if config.cash_folder[len(config.cash_folder)-1] != "/" else "cash.json"
savePlaylist(player.playlist, path)
player.destructor()
sys.exit(0)
except ResizeScreenError as e:
last_scene = e.scene
|
"""
A simple library of functions that provide feature importances
for scikit-learn random forest regressors and classifiers.
MIT License
Terence Parr, http://parrt.cs.usfca.edu
Kerem Turgutlu, https://www.linkedin.com/in/kerem-turgutlu-12906b65
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.ensemble import forest
from sklearn.model_selection import cross_val_score
from sklearn.base import clone
from sklearn.metrics import r2_score
from scipy.stats import spearmanr
from pandas.api.types import is_numeric_dtype
from matplotlib.colors import ListedColormap
from copy import copy
import warnings
def importances(model, X_valid, y_valid, features=None, n_samples=5000, sort=True, metric=None, sample_weights = None):
"""
Compute permutation feature importances for scikit-learn models using
a validation set.
Given a Classifier or Regressor in model
and validation X and y data, return a data frame with columns
Feature and Importance sorted in reverse order by importance.
The validation data is needed to compute model performance
measures (accuracy or R^2). The model is not retrained.
You can pass in a list with a subset of features interesting to you.
All unmentioned features will be grouped together into a single meta-feature
on the graph. You can also pass in a list that has sublists like:
[['latitude', 'longitude'], 'price', 'bedrooms']. Each string or sublist
will be permuted together as a feature or meta-feature; the drop in
overall accuracy of the model is the relative importance.
The model.score() method is called to measure accuracy drops.
This version that computes accuracy drops with the validation set
is much faster than the OOB, cross validation, or drop column
versions. The OOB version is a less vectorized because it needs to dig
into the trees to get out of examples. The cross validation and drop column
versions need to do retraining and are necessarily much slower.
This function used OOB not validation sets in 1.0.5; switched to faster
test set version for 1.0.6. (breaking API change)
:param model: The scikit model fit to training data
:param X_valid: Data frame with feature vectors of the validation set
:param y_valid: Series with target variable of validation set
:param features: The list of features to show in importance graph.
These can be strings (column names) or lists of column
names. E.g., features = ['bathrooms', ['latitude', 'longitude']].
Feature groups can overlap, with features appearing in multiple.
:param n_samples: How many records of the validation set to use
to compute permutation importance. The default is
5000, which we arrived at by experiment over a few data sets.
As we cannot be sure how all data sets will react,
you can pass in whatever sample size you want. Pass in -1
to mean entire validation set. Our experiments show that
not too many records are needed to get an accurate picture of
feature importance.
:param sort: Whether to sort the resulting importances
:param metric: Metric in the form of callable(model, X_valid, y_valid, sample_weights) to evaluate for,
if not set default's to model.score()
:param sample_weights: set if a different weighting is required for the validation samples
return: A data frame with Feature, Importance columns
SAMPLE CODE
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1)
X_train, y_train = ..., ...
X_valid, y_valid = ..., ...
rf.fit(X_train, y_train)
imp = importances(rf, X_valid, y_valid)
"""
def flatten(features):
all_features = set()
for sublist in features:
if isinstance(sublist, str):
all_features.add(sublist)
else:
for item in sublist:
all_features.add(item)
return all_features
if not features:
# each feature in its own group
features = X_valid.columns.values
else:
req_feature_set = flatten(features)
model_feature_set = set(X_valid.columns.values)
# any features left over?
other_feature_set = model_feature_set.difference(req_feature_set)
if len(other_feature_set) > 0:
# if leftovers, we need group together as single new feature
features.append(list(other_feature_set))
X_valid, y_valid = sample(X_valid, y_valid, n_samples)
X_valid = X_valid.copy(deep=False) # we're modifying columns
baseline = None
if callable(metric):
baseline = metric(model, X_valid, y_valid, sample_weights)
else:
baseline = model.score(X_valid, y_valid, sample_weights)
imp = []
m = None
for group in features:
if isinstance(group, str):
save = X_valid[group].copy()
X_valid[group] = np.random.permutation(X_valid[group])
if callable(metric):
m = metric(model, X_valid, y_valid, sample_weights)
else:
m = model.score(X_valid, y_valid, sample_weights)
X_valid[group] = save
else:
save = {}
for col in group:
save[col] = X_valid[col].copy()
for col in group:
X_valid[col] = np.random.permutation(X_valid[col])
if callable(metric):
m = metric(model, X_valid, y_valid, sample_weights)
else:
m = model.score(X_valid, y_valid, sample_weights)
for col in group:
X_valid[col] = save[col]
imp.append(baseline - m)
# Convert and groups/lists into string column names
labels = []
for col in features:
if isinstance(col, list):
labels.append('\n'.join(col))
else:
labels.append(col)
I = pd.DataFrame(data={'Feature': labels, 'Importance': np.array(imp)})
I = I.set_index('Feature')
if sort:
I = I.sort_values('Importance', ascending=False)
return I
def sample(X_valid, y_valid, n_samples):
if n_samples < 0: n_samples = len(X_valid)
n_samples = min(n_samples, len(X_valid))
if n_samples < len(X_valid):
ix = np.random.choice(len(X_valid), n_samples)
X_valid = X_valid.iloc[ix].copy(deep=False) # shallow copy
y_valid = y_valid.iloc[ix].copy(deep=False)
return X_valid, y_valid
def sample_rows(X, n_samples):
if n_samples < 0: n_samples = len(X)
n_samples = min(n_samples, len(X))
if n_samples < len(X):
ix = np.random.choice(len(X), n_samples)
X = X.iloc[ix].copy(deep=False) # shallow copy
return X
def oob_importances(rf, X_train, y_train, n_samples=5000):
"""
Compute permutation feature importances for scikit-learn
RandomForestClassifier or RandomForestRegressor in arg rf.
Given training X and y data, return a data frame with columns
Feature and Importance sorted in reverse order by importance.
The training data is needed to compute out of bag (OOB)
model performance measures (accuracy or R^2). The model
is not retrained.
By default, sample up to 5000 observations to compute feature importances.
return: A data frame with Feature, Importance columns
SAMPLE CODE
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1, oob_score=True)
X_train, y_train = ..., ...
rf.fit(X_train, y_train)
imp = oob_importances(rf, X_train, y_train)
"""
if isinstance(rf, RandomForestClassifier):
return permutation_importances(rf, X_train, y_train, oob_classifier_accuracy, n_samples)
elif isinstance(rf, RandomForestRegressor):
return permutation_importances(rf, X_train, y_train, oob_regression_r2_score, n_samples)
return None
def cv_importances(model, X_train, y_train, k=3):
"""
Compute permutation feature importances for scikit-learn models using
k-fold cross-validation (default k=3).
Given a Classifier or Regressor in model
and training X and y data, return a data frame with columns
Feature and Importance sorted in reverse order by importance.
Cross-validation observations are taken from X_train, y_train.
The model.score() method is called to measure accuracy drops.
return: A data frame with Feature, Importance columns
SAMPLE CODE
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1)
X_train, y_train = ..., ...
rf.fit(X_train, y_train)
imp = cv_importances(rf, X_train, y_train)
"""
def score(model):
cvscore = cross_val_score(
model, # which model to use
X_train, y_train, # what training data to split up
cv=k) # number of folds/chunks
return np.mean(cvscore)
X_train = X_train.copy(deep=False) # shallow copy
baseline = score(model)
imp = []
for col in X_train.columns:
save = X_train[col].copy()
X_train[col] = np.random.permutation(X_train[col])
m = score(model)
X_train[col] = save
imp.append(baseline - m)
I = pd.DataFrame(data={'Feature': X_train.columns, 'Importance': np.array(imp)})
I = I.set_index('Feature')
I = I.sort_values('Importance', ascending=False)
return I
def permutation_importances(rf, X_train, y_train, metric, n_samples=5000):
imp = permutation_importances_raw(rf, X_train, y_train, metric, n_samples)
I = pd.DataFrame(data={'Feature':X_train.columns, 'Importance':imp})
I = I.set_index('Feature')
I = I.sort_values('Importance', ascending=False)
return I
def dropcol_importances(rf, X_train, y_train, metric=None, X_valid = None, y_valid = None, sample_weights = None):
"""
Compute drop-column feature importances for scikit-learn.
Given a RandomForestClassifier or RandomForestRegressor in rf
and training X and y data, return a data frame with columns
Feature and Importance sorted in reverse order by importance.
A clone of rf is trained once to get the baseline score and then
again, once per feature to compute the drop in either the model's .score() output
or a custom metric callable in the form of metric(model, X_valid, y_valid). In case of a custom metric
the X_valid and y_valid parameters should be set.
return: A data frame with Feature, Importance columns
SAMPLE CODE
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1, oob_score=True)
X_train, y_train = ..., ...
rf.fit(X_train, y_train)
imp = dropcol_importances(rf, X_train, y_train)
"""
rf_ = clone(rf)
rf_.random_state = 999
rf_.fit(X_train, y_train)
baseline = rf_.oob_score_
imp = []
for col in X_train.columns:
X = X_train.drop(col, axis=1)
rf_ = clone(rf)
rf_.random_state = 999
rf_.fit(X, y_train)
if callable(metric):
o = metric(rf_, X_valid, y_valid, sample_weights)
else:
o = rf_.score(X_valid, y_valid, sample_weights)
imp.append(baseline - o)
imp = np.array(imp)
I = pd.DataFrame(data={'Feature':X_train.columns, 'Importance':imp})
I = I.set_index('Feature')
I = I.sort_values('Importance', ascending=False)
return I
def oob_dropcol_importances(rf, X_train, y_train):
"""
Compute drop-column feature importances for scikit-learn.
Given a RandomForestClassifier or RandomForestRegressor in rf
and training X and y data, return a data frame with columns
Feature and Importance sorted in reverse order by importance.
A clone of rf is trained once to get the baseline score and then
again, once per feature to compute the drop in out of bag (OOB)
score.
return: A data frame with Feature, Importance columns
SAMPLE CODE
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1, oob_score=True)
X_train, y_train = ..., ...
rf.fit(X_train, y_train)
imp = oob_dropcol_importances(rf, X_train, y_train)
"""
rf_ = clone(rf)
rf_.random_state = 999
rf_.fit(X_train, y_train)
baseline = rf_.oob_score_
imp = []
for col in X_train.columns:
X = X_train.drop(col, axis=1)
rf_ = clone(rf)
rf_.random_state = 999
rf_.fit(X, y_train)
o = rf_.oob_score_
imp.append(baseline - o)
imp = np.array(imp)
I = pd.DataFrame(data={'Feature':X_train.columns, 'Importance':imp})
I = I.set_index('Feature')
I = I.sort_values('Importance', ascending=False)
return I
def importances_raw(rf, X_train, y_train, n_samples=5000):
if isinstance(rf, RandomForestClassifier):
return permutation_importances_raw(rf, X_train, y_train, oob_classifier_accuracy, n_samples)
elif isinstance(rf, RandomForestRegressor):
return permutation_importances_raw(rf, X_train, y_train, oob_regression_r2_score, n_samples)
return None
def permutation_importances_raw(rf, X_train, y_train, metric, n_samples=5000):
"""
Return array of importances from pre-fit rf; metric is function
that measures accuracy or R^2 or similar. This function
works for regressors and classifiers.
"""
X_sample, y_sample = sample(X_train, y_train, n_samples)
if not hasattr(rf, 'estimators_'):
rf.fit(X_sample, y_sample)
baseline = metric(rf, X_sample, y_sample)
X_train = X_sample.copy(deep=False) # shallow copy
y_train = y_sample
imp = []
for col in X_train.columns:
save = X_train[col].copy()
X_train[col] = np.random.permutation(X_train[col])
m = metric(rf, X_train, y_train)
X_train[col] = save
imp.append(baseline - m)
return np.array(imp)
def oob_classifier_accuracy(rf, X_train, y_train):
"""
Compute out-of-bag (OOB) accuracy for a scikit-learn random forest
classifier. We learned the guts of scikit's RF from the BSD licensed
code:
https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/ensemble/forest.py#L425
"""
X = X_train.values
y = y_train.values
n_samples = len(X)
n_classes = len(np.unique(y))
predictions = np.zeros((n_samples, n_classes))
for tree in rf.estimators_:
unsampled_indices = _generate_unsampled_indices(tree.random_state, n_samples)
tree_preds = tree.predict_proba(X[unsampled_indices, :])
predictions[unsampled_indices] += tree_preds
predicted_class_indexes = np.argmax(predictions, axis=1)
predicted_classes = [rf.classes_[i] for i in predicted_class_indexes]
oob_score = np.mean(y == predicted_classes)
return oob_score
def oob_regression_r2_score(rf, X_train, y_train):
"""
Compute out-of-bag (OOB) R^2 for a scikit-learn random forest
regressor. We learned the guts of scikit's RF from the BSD licensed
code:
https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/ensemble/forest.py#L702
"""
X = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
y = y_train.values if isinstance(y_train, pd.Series) else y_train
n_samples = len(X)
predictions = np.zeros(n_samples)
n_predictions = np.zeros(n_samples)
for tree in rf.estimators_:
unsampled_indices = _generate_unsampled_indices(tree.random_state, n_samples)
tree_preds = tree.predict(X[unsampled_indices, :])
predictions[unsampled_indices] += tree_preds
n_predictions[unsampled_indices] += 1
if (n_predictions == 0).any():
warnings.warn("Too few trees; some variables do not have OOB scores.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
oob_score = r2_score(y, predictions)
return oob_score
def plot_importances(df_importances, save=None, xrot=0, tickstep=3,
label_fontsize=12,
figsize=None, scalefig=(1.0, 1.0), show=True):
"""
Given an array or data frame of importances, plot a horizontal bar chart
showing the importance values.
:param df_importances: A data frame with Feature, Importance columns
:type df_importances: pd.DataFrame
:param save: A filename identifying where to save the image.
:param xrot: Degrees to rotate importance (X axis) labels
:type xrot: int
:param tickstep: How many ticks to skip in X axis
:type tickstep: int
:param label_fontsize: The font size for the column names and x ticks
:type label_fontsize: int
:param figsize: Specify width and height of image (width,height)
:type figsize: 2-tuple of floats
:param scalefig: Scale width and height of image (widthscale,heightscale)
:type scalefig: 2-tuple of floats
:param show: Execute plt.show() if true (default is True). Sometimes
we want to draw multiple things before calling plt.show()
:type show: bool
:return: None
SAMPLE CODE
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1, oob_score=True)
X_train, y_train = ..., ...
rf.fit(X_train, y_train)
imp = importances(rf, X_test, y_test)
plot_importances(imp)
"""
I = df_importances
# this is backwards but seems to undo weird reversed order in barh()
I = I.sort_values('Importance', ascending=True)
if figsize:
fig = plt.figure(figsize=figsize)
elif scalefig:
fig = plt.figure()
w, h = fig.get_size_inches()
fig.set_size_inches(w * scalefig[0], h * scalefig[1], forward=True)
else:
fig = plt.figure()
ax = plt.gca()
labels = []
for col in I.index:
if isinstance(col, list):
labels.append('\n'.join(col))
else:
labels.append(col)
for tick in ax.get_xticklabels():
tick.set_size(label_fontsize)
for tick in ax.get_yticklabels():
tick.set_size(label_fontsize)
ax.barh(np.arange(len(I.index)), I.Importance, height=0.6, tick_label=labels)
# rotate x-ticks
if xrot is not None:
plt.xticks(rotation=xrot)
# xticks freq
xticks = ax.get_xticks()
nticks = len(xticks)
new_ticks = xticks[np.arange(0, nticks, step=tickstep)]
ax.set_xticks(new_ticks)
if save:
plt.savefig(save, bbox_inches="tight", pad_inches=0.03)
if show:
plt.show()
def oob_dependences(rf, X_train, n_samples=5000):
"""
Given a random forest model, rf, and training observation independent
variables in X_train (a dataframe), compute the OOB R^2 score using each var
as a dependent variable. We retrain rf for each var. Only numeric columns are considered.
By default, sample up to 5000 observations to compute feature dependencies.
:return: Return a DataFrame with Feature/Dependence values for each variable. Feature is the dataframe index.
"""
numcols = [col for col in X_train if is_numeric_dtype(X_train[col])]
X_train = sample_rows(X_train, n_samples)
df_dep = pd.DataFrame(columns=['Feature','Dependence'])
df_dep = df_dep.set_index('Feature')
for col in numcols:
X, y = X_train.drop(col, axis=1), X_train[col]
rf.fit(X, y)
df_dep.loc[col] = rf.oob_score_
df_dep = df_dep.sort_values('Dependence', ascending=False)
return df_dep
def feature_dependence_matrix(rf, X_train, n_samples=5000):
"""
Given training observation independent variables in X_train (a dataframe),
compute the feature importance using each var as a dependent variable.
We retrain a random forest for each var as target using the others as
independent vars. Only numeric columns are considered.
By default, sample up to 5000 observations to compute feature dependencies.
:return: a non-symmetric data frame with the dependence matrix where each row is the importance of each var to the row's var used as a model target.
"""
numcols = [col for col in X_train if is_numeric_dtype(X_train[col])]
X_train = sample_rows(X_train, n_samples)
df_dep = pd.DataFrame(index=X_train.columns, columns=['Dependence']+X_train.columns.tolist())
for i in range(len(numcols)):
col = numcols[i]
X, y = X_train.drop(col, axis=1), X_train[col]
rf.fit(X,y)
#imp = rf.feature_importances_
imp = permutation_importances_raw(rf, X, y, oob_regression_r2_score, n_samples)
imp = np.insert(imp, i, 1.0)
df_dep.iloc[i] = np.insert(imp, 0, rf.oob_score_) # add overall dependence
return df_dep
def feature_corr_matrix(df):
"""
Return the Spearman's rank-order correlation between all pairs
of features as a matrix with feature names as index and column names.
The diagonal will be all 1.0 as features are self correlated.
Spearman's correlation is the same thing as converting two variables
to rank values and then running a standard Pearson's correlation
on those ranked variables. Spearman's is nonparametric and does not
assume a linear relationship between the variables; it looks for
monotonic relationships.
:param df_train: dataframe containing features as columns, and
without the target variable.
:return: a data frame with the correlation matrix
"""
corr = np.round(spearmanr(df).correlation, 4)
df_corr = pd.DataFrame(data=corr, index=df.columns, columns=df.columns)
return df_corr
def plot_corr_heatmap(df,
threshold=0.6,
cmap=None,
figsize=None,
value_fontsize=12, label_fontsize=14,
xrot=80,
save=None,
show=True):
"""
Display the feature spearman's correlation matrix as a heatmap with
any abs(value)>threshold appearing with background color.
Spearman's correlation is the same thing as converting two variables
to rank values and then running a standard Pearson's correlation
on those ranked variables. Spearman's is nonparametric and does not
assume a linear relationship between the variables; it looks for
monotonic relationships.
SAMPLE CODE
from rfpimp import plot_corr_heatmap
plot_corr_heatmap(df_train, save='/tmp/corrheatmap.svg',
figsize=(7,5), label_fontsize=13, value_fontsize=11)
"""
corr = np.round(spearmanr(df).correlation, 4)
filtered = copy(corr)
filtered = np.abs(filtered) # work with abs but display negatives later
mask = np.ones_like(corr)
filtered[np.tril_indices_from(mask)] = -9999
if not cmap:
cw = plt.get_cmap('coolwarm')
cmap = ListedColormap([cw(x) for x in np.arange(.6, .85, 0.01)])
elif isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
cm = copy(cmap)
cm.set_under(color='white')
if figsize:
plt.figure(figsize=figsize)
plt.imshow(filtered, cmap=cm, vmin=threshold, vmax=1, aspect='equal')
width, height = filtered.shape
for x in range(width):
for y in range(height):
if x < y:
plt.annotate(str(np.round(corr[x, y], 2)), xy=(y, x),
horizontalalignment='center',
verticalalignment='center',
fontsize=value_fontsize)
plt.colorbar()
plt.xticks(range(width), df.columns, rotation=xrot, horizontalalignment='right',
fontsize=label_fontsize)
plt.yticks(range(width), df.columns, verticalalignment='center',
fontsize=label_fontsize)
if save:
plt.savefig(save, bbox_inches="tight", pad_inches=0.03)
if show:
plt.show()
def jeremy_trick_RF_sample_size(n):
# Jeremy's trick; hmm.. this won't work as a separate function?
# def batch_size_for_node_splitting(rs, n_samples):
# forest.check_random_state(rs).randint(0, n_samples, 20000)
# forest._generate_sample_indices = batch_size_for_node_splitting
forest._generate_sample_indices = \
(lambda rs, n_samples: forest.check_random_state(rs).randint(0, n_samples, n))
def jeremy_trick_reset_RF_sample_size():
forest._generate_sample_indices = (lambda rs, n_samples:
forest.check_random_state(rs).randint(0, n_samples, n_samples))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from frappe import _
import frappe.utils
import frappe.async
import frappe.sessions
import frappe.utils.file_manager
import frappe.desk.form.run_method
from frappe.utils.response import build_response
import datetime
from datetime import date,datetime
import requests
import pytz
@frappe.whitelist()
def ruta(login_manager):
ruta = frappe.db.get_value("User", login_manager.user,"ruta_login")
frappe.errprint(ruta)
frappe.local.response["home_page"] = ruta
|
# codiing=utf-8
import logging
from flask import Blueprint, request, abort, g
bp = Blueprint("ws", __name__, url_prefix="/ws")
log = logging.getLogger(__name__)
@bp.before_request
def ensure_ws():
ws = request.environ.get('wsgi.websocket')
if not ws:
return abort(400)
g.ws = ws
@bp.route("")
def index():
ws = g.ws
while True:
message = ws.receive()
if message is None:
break
log.debug("message: %s", message)
ws.send(message)
ws.close()
return "", 203
|
# my_app/tests.py
from django.test import TestCase
from .models import School
class SchoolTest(TestCase):
fixtures = ["fake.json", ]
def test_average_school(self):
school = School.objects.get(id=1)
average = school.average()
# مقداری که من انتظار دارم دریافت کنم
ext_avg = "19.24"
self.assertEqual(ext_avg, average)
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
import pytest
from case import Mock
from kombu import Exchange, Queue
from celery import uuid
from celery.app.amqp import Queues, utf8dict
from celery.five import keys
from celery.utils.time import to_utc
class test_TaskConsumer:
def test_accept_content(self, app):
with app.pool.acquire(block=True) as con:
app.conf.accept_content = ['application/json']
assert app.amqp.TaskConsumer(con).accept == {
'application/json',
}
assert app.amqp.TaskConsumer(con, accept=['json']).accept == {
'application/json',
}
class test_ProducerPool:
def test_setup_nolimit(self, app):
app.conf.broker_pool_limit = None
try:
delattr(app, '_pool')
except AttributeError:
pass
app.amqp._producer_pool = None
pool = app.amqp.producer_pool
assert pool.limit == app.pool.limit
assert not pool._resource.queue
r1 = pool.acquire()
r2 = pool.acquire()
r1.release()
r2.release()
r1 = pool.acquire()
r2 = pool.acquire()
def test_setup(self, app):
app.conf.broker_pool_limit = 2
try:
delattr(app, '_pool')
except AttributeError:
pass
app.amqp._producer_pool = None
pool = app.amqp.producer_pool
assert pool.limit == app.pool.limit
assert pool._resource.queue
p1 = r1 = pool.acquire()
p2 = r2 = pool.acquire()
r1.release()
r2.release()
r1 = pool.acquire()
r2 = pool.acquire()
assert p2 is r1
assert p1 is r2
r1.release()
r2.release()
class test_Queues:
def test_queues_format(self):
self.app.amqp.queues._consume_from = {}
assert self.app.amqp.queues.format() == ''
def test_with_defaults(self):
assert Queues(None) == {}
def test_add(self):
q = Queues()
q.add('foo', exchange='ex', routing_key='rk')
assert 'foo' in q
assert isinstance(q['foo'], Queue)
assert q['foo'].routing_key == 'rk'
def test_setitem_adds_default_exchange(self):
q = Queues(default_exchange=Exchange('bar'))
assert q.default_exchange
queue = Queue('foo', exchange=None)
queue.exchange = None
q['foo'] = queue
assert q['foo'].exchange == q.default_exchange
@pytest.mark.parametrize('ha_policy,qname,q,qargs,expected', [
(None, 'xyz', 'xyz', None, None),
(None, 'xyz', 'xyz', {'x-foo': 'bar'}, {'x-foo': 'bar'}),
('all', 'foo', Queue('foo'), None, {'x-ha-policy': 'all'}),
('all', 'xyx2',
Queue('xyx2', queue_arguments={'x-foo': 'bari'}),
None,
{'x-ha-policy': 'all', 'x-foo': 'bari'}),
(['A', 'B', 'C'], 'foo', Queue('foo'), None, {
'x-ha-policy': 'nodes',
'x-ha-policy-params': ['A', 'B', 'C']}),
])
def test_with_ha_policy(self, ha_policy, qname, q, qargs, expected):
queues = Queues(ha_policy=ha_policy, create_missing=False)
queues.add(q, queue_arguments=qargs)
assert queues[qname].queue_arguments == expected
def test_select_add(self):
q = Queues()
q.select(['foo', 'bar'])
q.select_add('baz')
assert sorted(keys(q._consume_from)) == ['bar', 'baz', 'foo']
def test_deselect(self):
q = Queues()
q.select(['foo', 'bar'])
q.deselect('bar')
assert sorted(keys(q._consume_from)) == ['foo']
def test_with_ha_policy_compat(self):
q = Queues(ha_policy='all')
q.add('bar')
assert q['bar'].queue_arguments == {'x-ha-policy': 'all'}
def test_add_default_exchange(self):
ex = Exchange('fff', 'fanout')
q = Queues(default_exchange=ex)
q.add(Queue('foo'))
assert q['foo'].exchange.name == 'fff'
def test_alias(self):
q = Queues()
q.add(Queue('foo', alias='barfoo'))
assert q['barfoo'] is q['foo']
@pytest.mark.parametrize('queues_kwargs,qname,q,expected', [
({'max_priority': 10},
'foo', 'foo', {'x-max-priority': 10}),
({'max_priority': 10},
'xyz', Queue('xyz', queue_arguments={'x-max-priority': 3}),
{'x-max-priority': 3}),
({'max_priority': 10},
'moo', Queue('moo', queue_arguments=None),
{'x-max-priority': 10}),
({'ha_policy': 'all', 'max_priority': 5},
'bar', 'bar',
{'x-ha-policy': 'all', 'x-max-priority': 5}),
({'ha_policy': 'all', 'max_priority': 5},
'xyx2', Queue('xyx2', queue_arguments={'x-max-priority': 2}),
{'x-ha-policy': 'all', 'x-max-priority': 2}),
({'max_priority': None},
'foo2', 'foo2',
None),
({'max_priority': None},
'xyx3', Queue('xyx3', queue_arguments={'x-max-priority': 7}),
{'x-max-priority': 7}),
])
def test_with_max_priority(self, queues_kwargs, qname, q, expected):
queues = Queues(**queues_kwargs)
queues.add(q)
assert queues[qname].queue_arguments == expected
class test_default_queues:
@pytest.mark.parametrize('name,exchange,rkey', [
('default', None, None),
('default', 'exchange', None),
('default', 'exchange', 'routing_key'),
('default', None, 'routing_key'),
])
def test_setting_default_queue(self, name, exchange, rkey):
self.app.conf.task_queues = {}
self.app.conf.task_default_exchange = exchange
self.app.conf.task_default_routing_key = rkey
self.app.conf.task_default_queue = name
assert self.app.amqp.queues.default_exchange.name == exchange or name
queues = dict(self.app.amqp.queues)
assert len(queues) == 1
queue = queues[name]
assert queue.exchange.name == exchange or name
assert queue.exchange.type == 'direct'
assert queue.routing_key == rkey or name
class test_AMQP_proto1:
def test_kwargs_must_be_mapping(self):
with pytest.raises(TypeError):
self.app.amqp.as_task_v1(uuid(), 'foo', kwargs=[1, 2])
def test_args_must_be_list(self):
with pytest.raises(TypeError):
self.app.amqp.as_task_v1(uuid(), 'foo', args='abc')
def test_countdown_negative(self):
with pytest.raises(ValueError):
self.app.amqp.as_task_v1(uuid(), 'foo', countdown=-1232132323123)
def test_as_task_message_without_utc(self):
self.app.amqp.utc = False
self.app.amqp.as_task_v1(uuid(), 'foo', countdown=30, expires=40)
class test_AMQP:
def setup(self):
self.simple_message = self.app.amqp.as_task_v2(
uuid(), 'foo', create_sent_event=True,
)
self.simple_message_no_sent_event = self.app.amqp.as_task_v2(
uuid(), 'foo', create_sent_event=False,
)
def test_kwargs_must_be_mapping(self):
with pytest.raises(TypeError):
self.app.amqp.as_task_v2(uuid(), 'foo', kwargs=[1, 2])
def test_args_must_be_list(self):
with pytest.raises(TypeError):
self.app.amqp.as_task_v2(uuid(), 'foo', args='abc')
def test_countdown_negative(self):
with pytest.raises(ValueError):
self.app.amqp.as_task_v2(uuid(), 'foo', countdown=-1232132323123)
def test_Queues__with_ha_policy(self):
x = self.app.amqp.Queues({}, ha_policy='all')
assert x.ha_policy == 'all'
def test_Queues__with_max_priority(self):
x = self.app.amqp.Queues({}, max_priority=23)
assert x.max_priority == 23
def test_send_task_message__no_kwargs(self):
self.app.amqp.send_task_message(Mock(), 'foo', self.simple_message)
def test_send_task_message__properties(self):
prod = Mock(name='producer')
self.app.amqp.send_task_message(
prod, 'foo', self.simple_message_no_sent_event,
foo=1, retry=False,
)
assert prod.publish.call_args[1]['foo'] == 1
def test_send_task_message__headers(self):
prod = Mock(name='producer')
self.app.amqp.send_task_message(
prod, 'foo', self.simple_message_no_sent_event,
headers={'x1x': 'y2x'},
retry=False,
)
assert prod.publish.call_args[1]['headers']['x1x'] == 'y2x'
def test_send_task_message__queue_string(self):
prod = Mock(name='producer')
self.app.amqp.send_task_message(
prod, 'foo', self.simple_message_no_sent_event,
queue='foo', retry=False,
)
kwargs = prod.publish.call_args[1]
assert kwargs['routing_key'] == 'foo'
assert kwargs['exchange'] == ''
def test_send_task_message__broadcast_without_exchange(self):
from kombu.common import Broadcast
evd = Mock(name='evd')
self.app.amqp.send_task_message(
Mock(), 'foo', self.simple_message, retry=False,
routing_key='xyz', queue=Broadcast('abc'),
event_dispatcher=evd,
)
evd.publish.assert_called()
event = evd.publish.call_args[0][1]
assert event['routing_key'] == 'xyz'
assert event['exchange'] == 'abc'
def test_send_event_exchange_direct_with_exchange(self):
prod = Mock(name='prod')
self.app.amqp.send_task_message(
prod, 'foo', self.simple_message_no_sent_event, queue='bar',
retry=False, exchange_type='direct', exchange='xyz',
)
prod.publish.assert_called()
pub = prod.publish.call_args[1]
assert pub['routing_key'] == 'bar'
assert pub['exchange'] == ''
def test_send_event_exchange_direct_with_routing_key(self):
prod = Mock(name='prod')
self.app.amqp.send_task_message(
prod, 'foo', self.simple_message_no_sent_event, queue='bar',
retry=False, exchange_type='direct', routing_key='xyb',
)
prod.publish.assert_called()
pub = prod.publish.call_args[1]
assert pub['routing_key'] == 'bar'
assert pub['exchange'] == ''
def test_send_event_exchange_string(self):
evd = Mock(name='evd')
self.app.amqp.send_task_message(
Mock(), 'foo', self.simple_message, retry=False,
exchange='xyz', routing_key='xyb',
event_dispatcher=evd,
)
evd.publish.assert_called()
event = evd.publish.call_args[0][1]
assert event['routing_key'] == 'xyb'
assert event['exchange'] == 'xyz'
def test_send_task_message__with_delivery_mode(self):
prod = Mock(name='producer')
self.app.amqp.send_task_message(
prod, 'foo', self.simple_message_no_sent_event,
delivery_mode=33, retry=False,
)
assert prod.publish.call_args[1]['delivery_mode'] == 33
def test_send_task_message__with_receivers(self):
from case import patch
mocked_receiver = ((Mock(), Mock()), Mock())
with patch('celery.signals.task_sent.receivers', [mocked_receiver]):
self.app.amqp.send_task_message(Mock(), 'foo', self.simple_message)
def test_routes(self):
r1 = self.app.amqp.routes
r2 = self.app.amqp.routes
assert r1 is r2
class test_as_task_v2:
def test_raises_if_args_is_not_tuple(self):
with pytest.raises(TypeError):
self.app.amqp.as_task_v2(uuid(), 'foo', args='123')
def test_raises_if_kwargs_is_not_mapping(self):
with pytest.raises(TypeError):
self.app.amqp.as_task_v2(uuid(), 'foo', kwargs=(1, 2, 3))
def test_countdown_to_eta(self):
now = to_utc(datetime.utcnow()).astimezone(self.app.timezone)
m = self.app.amqp.as_task_v2(
uuid(), 'foo', countdown=10, now=now,
)
assert m.headers['eta'] == (now + timedelta(seconds=10)).isoformat()
def test_expires_to_datetime(self):
now = to_utc(datetime.utcnow()).astimezone(self.app.timezone)
m = self.app.amqp.as_task_v2(
uuid(), 'foo', expires=30, now=now,
)
assert m.headers['expires'] == (
now + timedelta(seconds=30)).isoformat()
def test_eta_to_datetime(self):
eta = datetime.utcnow()
m = self.app.amqp.as_task_v2(
uuid(), 'foo', eta=eta,
)
assert m.headers['eta'] == eta.isoformat()
def test_callbacks_errbacks_chord(self):
@self.app.task
def t(i):
pass
m = self.app.amqp.as_task_v2(
uuid(), 'foo',
callbacks=[t.s(1), t.s(2)],
errbacks=[t.s(3), t.s(4)],
chord=t.s(5),
)
_, _, embed = m.body
assert embed['callbacks'] == [utf8dict(t.s(1)), utf8dict(t.s(2))]
assert embed['errbacks'] == [utf8dict(t.s(3)), utf8dict(t.s(4))]
assert embed['chord'] == utf8dict(t.s(5))
|
from django.contrib.sessions.models import Session
from django.core import management
from django.core.management import BaseCommand
from django.utils import timezone
from shop.models.orders import OrderDetail
from shop.models.accounts import Company, Contact, Address
class Command(BaseCommand):
help = "Cleans session table as well as any shopping carts associated with these expired sessions"
def handle(self, *args, **kwargs):
inactive_sessions = Session.objects.filter(expire_date__lte=timezone.now())
inactive_orders = OrderDetail.objects.filter(session__in=inactive_sessions, state__isnull=True)
inactive_contacts = Contact.objects.filter(session__in=inactive_sessions)
inactive_addresses = Address.objects.filter(contact__in=inactive_contacts)
company_list = inactive_contacts.values_list('company', flat=True)
inactive_companies = Company.objects.filter(id__in=company_list)
print(str(inactive_sessions.count()) + ", " + str(inactive_orders.count()) + ", " + str(
inactive_addresses.count()))
inactive_companies.delete()
inactive_orders.delete()
management.call_command('clearsessions', verbosity=0)
|
import numpy as np
def make_it(x):
"""Ensures that x is an iterator.
If x is not iterable, wrap it as a one-elemened tuple.
"""
try:
return iter(x)
except TypeError:
x = (x,)
return iter(x)
@np.vectorize
def replace_nan(x, rep=np.infty):
"""Replace any nan in x with rep."""
return rep if np.isnan(x) else x
|
#!/usr/bin/env python3
# Copyright (C) 2019-2020 All rights reserved.
# FILENAME: examples/cookies.py
# VERSION: 0.2.1
# CREATED: 2021-08-19 14:02
# AUTHOR: Aekasitt Guruvanich <[email protected]>
# DESCRIPTION:
#
# HISTORY:
#*************************************************************
from fastapi import FastAPI, Request, Depends
from fastapi.responses import JSONResponse
from fastapi.templating import Jinja2Templates
from fastapi_csrf_protect import CsrfProtect
from fastapi_csrf_protect.exceptions import CsrfProtectError
from pydantic import BaseModel
app = FastAPI()
templates = Jinja2Templates(directory='templates')
class CsrfSettings(BaseModel):
secret_key:str = 'asecrettoeverybody'
@CsrfProtect.load_config
def get_csrf_config():
return CsrfSettings()
@app.get('/form')
def form(request: Request, csrf_protect:CsrfProtect = Depends()):
'''
Returns form template.
'''
response = templates.TemplateResponse('form.html', { 'request': request })
csrf_protect.set_csrf_cookie(response)
return response
@app.post('/posts', response_class=JSONResponse)
def create_post(request: Request, csrf_protect:CsrfProtect = Depends()):
'''
Creates a new Post
'''
csrf_protect.validate_csrf_in_cookies(request)
# Do stuff
@app.exception_handler(CsrfProtectError)
def csrf_protect_exception_handler(request: Request, exc: CsrfProtectError):
return JSONResponse(status_code=exc.status_code, content={ 'detail': exc.message }) |
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class User(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'active': 'bool',
'avatar_url': 'str',
'created': 'datetime',
'description': 'str',
'email': 'str',
'followers_count': 'int',
'following_count': 'int',
'full_name': 'str',
'id': 'int',
'is_admin': 'bool',
'language': 'str',
'last_login': 'datetime',
'location': 'str',
'login': 'str',
'prohibit_login': 'bool',
'restricted': 'bool',
'starred_repos_count': 'int',
'visibility': 'str',
'website': 'str'
}
attribute_map = {
'active': 'active',
'avatar_url': 'avatar_url',
'created': 'created',
'description': 'description',
'email': 'email',
'followers_count': 'followers_count',
'following_count': 'following_count',
'full_name': 'full_name',
'id': 'id',
'is_admin': 'is_admin',
'language': 'language',
'last_login': 'last_login',
'location': 'location',
'login': 'login',
'prohibit_login': 'prohibit_login',
'restricted': 'restricted',
'starred_repos_count': 'starred_repos_count',
'visibility': 'visibility',
'website': 'website'
}
def __init__(self, active=None, avatar_url=None, created=None, description=None, email=None, followers_count=None, following_count=None, full_name=None, id=None, is_admin=None, language=None, last_login=None, location=None, login=None, prohibit_login=None, restricted=None, starred_repos_count=None, visibility=None, website=None): # noqa: E501
"""User - a model defined in Swagger""" # noqa: E501
self._active = None
self._avatar_url = None
self._created = None
self._description = None
self._email = None
self._followers_count = None
self._following_count = None
self._full_name = None
self._id = None
self._is_admin = None
self._language = None
self._last_login = None
self._location = None
self._login = None
self._prohibit_login = None
self._restricted = None
self._starred_repos_count = None
self._visibility = None
self._website = None
self.discriminator = None
if active is not None:
self.active = active
if avatar_url is not None:
self.avatar_url = avatar_url
if created is not None:
self.created = created
if description is not None:
self.description = description
if email is not None:
self.email = email
if followers_count is not None:
self.followers_count = followers_count
if following_count is not None:
self.following_count = following_count
if full_name is not None:
self.full_name = full_name
if id is not None:
self.id = id
if is_admin is not None:
self.is_admin = is_admin
if language is not None:
self.language = language
if last_login is not None:
self.last_login = last_login
if location is not None:
self.location = location
if login is not None:
self.login = login
if prohibit_login is not None:
self.prohibit_login = prohibit_login
if restricted is not None:
self.restricted = restricted
if starred_repos_count is not None:
self.starred_repos_count = starred_repos_count
if visibility is not None:
self.visibility = visibility
if website is not None:
self.website = website
@property
def active(self):
"""Gets the active of this User. # noqa: E501
Is user active # noqa: E501
:return: The active of this User. # noqa: E501
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this User.
Is user active # noqa: E501
:param active: The active of this User. # noqa: E501
:type: bool
"""
self._active = active
@property
def avatar_url(self):
"""Gets the avatar_url of this User. # noqa: E501
URL to the user's avatar # noqa: E501
:return: The avatar_url of this User. # noqa: E501
:rtype: str
"""
return self._avatar_url
@avatar_url.setter
def avatar_url(self, avatar_url):
"""Sets the avatar_url of this User.
URL to the user's avatar # noqa: E501
:param avatar_url: The avatar_url of this User. # noqa: E501
:type: str
"""
self._avatar_url = avatar_url
@property
def created(self):
"""Gets the created of this User. # noqa: E501
:return: The created of this User. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this User.
:param created: The created of this User. # noqa: E501
:type: datetime
"""
self._created = created
@property
def description(self):
"""Gets the description of this User. # noqa: E501
the user's description # noqa: E501
:return: The description of this User. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this User.
the user's description # noqa: E501
:param description: The description of this User. # noqa: E501
:type: str
"""
self._description = description
@property
def email(self):
"""Gets the email of this User. # noqa: E501
:return: The email of this User. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this User.
:param email: The email of this User. # noqa: E501
:type: str
"""
self._email = email
@property
def followers_count(self):
"""Gets the followers_count of this User. # noqa: E501
user counts # noqa: E501
:return: The followers_count of this User. # noqa: E501
:rtype: int
"""
return self._followers_count
@followers_count.setter
def followers_count(self, followers_count):
"""Sets the followers_count of this User.
user counts # noqa: E501
:param followers_count: The followers_count of this User. # noqa: E501
:type: int
"""
self._followers_count = followers_count
@property
def following_count(self):
"""Gets the following_count of this User. # noqa: E501
:return: The following_count of this User. # noqa: E501
:rtype: int
"""
return self._following_count
@following_count.setter
def following_count(self, following_count):
"""Sets the following_count of this User.
:param following_count: The following_count of this User. # noqa: E501
:type: int
"""
self._following_count = following_count
@property
def full_name(self):
"""Gets the full_name of this User. # noqa: E501
the user's full name # noqa: E501
:return: The full_name of this User. # noqa: E501
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""Sets the full_name of this User.
the user's full name # noqa: E501
:param full_name: The full_name of this User. # noqa: E501
:type: str
"""
self._full_name = full_name
@property
def id(self):
"""Gets the id of this User. # noqa: E501
the user's id # noqa: E501
:return: The id of this User. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this User.
the user's id # noqa: E501
:param id: The id of this User. # noqa: E501
:type: int
"""
self._id = id
@property
def is_admin(self):
"""Gets the is_admin of this User. # noqa: E501
Is the user an administrator # noqa: E501
:return: The is_admin of this User. # noqa: E501
:rtype: bool
"""
return self._is_admin
@is_admin.setter
def is_admin(self, is_admin):
"""Sets the is_admin of this User.
Is the user an administrator # noqa: E501
:param is_admin: The is_admin of this User. # noqa: E501
:type: bool
"""
self._is_admin = is_admin
@property
def language(self):
"""Gets the language of this User. # noqa: E501
User locale # noqa: E501
:return: The language of this User. # noqa: E501
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this User.
User locale # noqa: E501
:param language: The language of this User. # noqa: E501
:type: str
"""
self._language = language
@property
def last_login(self):
"""Gets the last_login of this User. # noqa: E501
:return: The last_login of this User. # noqa: E501
:rtype: datetime
"""
return self._last_login
@last_login.setter
def last_login(self, last_login):
"""Sets the last_login of this User.
:param last_login: The last_login of this User. # noqa: E501
:type: datetime
"""
self._last_login = last_login
@property
def location(self):
"""Gets the location of this User. # noqa: E501
the user's location # noqa: E501
:return: The location of this User. # noqa: E501
:rtype: str
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this User.
the user's location # noqa: E501
:param location: The location of this User. # noqa: E501
:type: str
"""
self._location = location
@property
def login(self):
"""Gets the login of this User. # noqa: E501
the user's username # noqa: E501
:return: The login of this User. # noqa: E501
:rtype: str
"""
return self._login
@login.setter
def login(self, login):
"""Sets the login of this User.
the user's username # noqa: E501
:param login: The login of this User. # noqa: E501
:type: str
"""
self._login = login
@property
def prohibit_login(self):
"""Gets the prohibit_login of this User. # noqa: E501
Is user login prohibited # noqa: E501
:return: The prohibit_login of this User. # noqa: E501
:rtype: bool
"""
return self._prohibit_login
@prohibit_login.setter
def prohibit_login(self, prohibit_login):
"""Sets the prohibit_login of this User.
Is user login prohibited # noqa: E501
:param prohibit_login: The prohibit_login of this User. # noqa: E501
:type: bool
"""
self._prohibit_login = prohibit_login
@property
def restricted(self):
"""Gets the restricted of this User. # noqa: E501
Is user restricted # noqa: E501
:return: The restricted of this User. # noqa: E501
:rtype: bool
"""
return self._restricted
@restricted.setter
def restricted(self, restricted):
"""Sets the restricted of this User.
Is user restricted # noqa: E501
:param restricted: The restricted of this User. # noqa: E501
:type: bool
"""
self._restricted = restricted
@property
def starred_repos_count(self):
"""Gets the starred_repos_count of this User. # noqa: E501
:return: The starred_repos_count of this User. # noqa: E501
:rtype: int
"""
return self._starred_repos_count
@starred_repos_count.setter
def starred_repos_count(self, starred_repos_count):
"""Sets the starred_repos_count of this User.
:param starred_repos_count: The starred_repos_count of this User. # noqa: E501
:type: int
"""
self._starred_repos_count = starred_repos_count
@property
def visibility(self):
"""Gets the visibility of this User. # noqa: E501
User visibility level option: public, limited, private # noqa: E501
:return: The visibility of this User. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this User.
User visibility level option: public, limited, private # noqa: E501
:param visibility: The visibility of this User. # noqa: E501
:type: str
"""
self._visibility = visibility
@property
def website(self):
"""Gets the website of this User. # noqa: E501
the user's website # noqa: E501
:return: The website of this User. # noqa: E501
:rtype: str
"""
return self._website
@website.setter
def website(self, website):
"""Sets the website of this User.
the user's website # noqa: E501
:param website: The website of this User. # noqa: E501
:type: str
"""
self._website = website
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(User, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, User):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import rulr.Utils
from rulr.Utils.Event import Event
import numpy as np
import cv2
import base64
class Base(rulr.Utils.Viewable):
def __init__(self, value_or_property):
super().__init__()
self.on_set = Event()
# TODO : Needs testing if this works (so far always false which is as expected)
if not isinstance(value_or_property, Property):
# Can't perform assignment in a lambda, so we split these into actual functions
self.get = self._value_getter
self.set = self._value_setter
self.set(value_or_property)
else:
self.wrapped_property = value_or_property
self.get = value_or_property.get
# self.set will not be defined for read-only variables
if value_or_property.set is not None:
self.set = self._set_parameter
def formatter(self, value):
return value
def _value_getter(self):
return self.value
def _value_setter(self, value):
self.value = self.formatter(value)
self.on_set()
def _set_parameter(self, value):
formatted_value = self.formatter(value)
self.wrapped_property.set(formatted_value)
self.on_set()
def get_client_formatted(self):
## Use default getter
return self.get()
class Float(Base):
def formatter(self, value):
return float(value)
class Vector(Base):
def formatter(self, value):
return np.array(value, dtype=float)
class BoundVector(Vector):
def __init__(self, value, lowerLimit, upperLimit, step = 0.0):
super().__init__(value)
self.lowerLimit = lowerLimit
self.upperLimit = upperLimit
self.step = step
class Matrix(Base):
def formatter(self, value):
return np.array(value, dtype=float)
class Image(Base):
def __init__(self, value = [[]]):
super().__init__(value)
def formatter(self, value):
return np.array(value, dtype=float)
def get_client_formatted(self):
value = self.get()
if value.size == 0:
return ""
else:
success, encoded_result = cv2.imencode('.png', value)
del success
image_buffer_b64 = base64.b64encode(encoded_result.tostring())
return image_buffer_b64.decode("utf-8")
class Bool(Base):
def formatter(self, value):
return bool(value)
class Property():
def __init__(self, get, set = None):
self.get = get
self.set = set |
from ._binarystream import _BinaryStream
from collections import OrderedDict
from struct import *
class WORLDGRAPH:
def __init__(self, stream):
self._stream = stream
self._vertices = OrderedDict()
self._edges = OrderedDict()
self._outgouingEdges = {}
self._vertexUid = 0
def read(self):
raw = _BinaryStream(self._stream)
edgeCount = raw.read_int32()
for i in range(edgeCount):
_from = self.addVertex(raw.read_int64(), raw.read_int32())
_dest = self.addVertex(raw.read_int64(), raw.read_int32())
_edge = self.addEdge(_from, _dest)
transitionCount = raw.read_int32()
for i in range(transitionCount):
dir = raw.read_char()
type = raw.read_char()
skill = raw.read_int32()
lenght = raw.read_int32()
criterion = ""
if lenght > 0:
bytes = raw.read_bytes(lenght)
criterion = unpack(">" + str(lenght) + 's', bytes)[0].decode('utf8')
transitionMapId = raw.read_int64()
cell = raw.read_int32()
id = raw.read_int64()
_edge.addTransition(dir, type, skill, criterion, transitionMapId, cell, id)
return self
def readString(self, raw):
lenght = raw.read_int32()
return raw._unpack(str(lenght) + 's', lenght)
def addVertex(self, mapId, zone):
vertex = None
if not mapId in self._vertices:
self._vertices[mapId] = OrderedDict()
else:
vertex = self._vertices[mapId][zone]
if vertex is None:
self._vertexUid = self._vertexUid + 1
vertex = Vertex(mapId, zone, self._vertexUid)
self._vertices[mapId][zone] = vertex.newVertex()
return vertex
def addEdge(self, _from, _dest):
edge = self.getEdge(_from, _dest)
if edge is not None:
return edge
if not self.doesVertexExist(_from) or not self.doesVertexExist(_dest):
return None
edge = Edge(_from, _dest).newEdge()
if not _from._uid in self._edges:
self._edges[_from._uid] = OrderedDict()
self._edges[_from._uid][_dest._uid] = edge
if not _from._uid in self._outgouingEdges:
self._outgouingEdges[_from._uid] = edge
self._outgouingEdges[_from._uid] = edge
return edge
def getEdge(self, _from, _dest):
if _from._uid in self._edges:
return self._edges[str(_from._uid)][str(_dest._uid)]
else:
return None
def doesVertexExist(self, vertex):
if vertex._mapId in self._vertices:
if vertex._zoneId in self._vertices[vertex._mapId]:
return True
return False
class Vertex:
def __init__(self, mapId, zoneId, vertexUid):
self._mapId = mapId
self._zoneId = zoneId
self._uid = vertexUid
def newVertex(self):
return self
class Edge:
def __init__(self, _from, _dest):
self._from = _from
self._to = _dest
self._transitions = []
def newEdge(self):
return self
def addTransition(self, dir, type, skill, criterion, transitionMapId, cell, id):
self._transitions.append(Transition(dir, type, skill, criterion, transitionMapId, cell, id).newTransition())
class Transition:
def __init__(self, dir, type, skill, criterion, transitionMapId, cell, id):
self._dir = dir
self._type = type
self._skill = skill
self._criterion = criterion
self._transitionMapId = transitionMapId
self._cell = cell
self._id = id
def newTransition(self):
return self |
from serpent.window_controller import WindowController
import subprocess
import shlex
import re
class LinuxWindowController(WindowController):
def __init__(self):
pass
def locate_window(self, name):
return subprocess.check_output(shlex.split(f"xdotool search --onlyvisible --name \"^{name}$\"")).decode("utf-8").strip()
def move_window(self, window_id, x, y):
subprocess.call(shlex.split(f"xdotool windowmove {window_id} {x} {y}"))
def resize_window(self, window_id, width, height):
subprocess.call(shlex.split(f"xdotool windowsize {window_id} {width} {height}"))
def focus_window(self, window_id):
subprocess.call(shlex.split(f"xdotool windowactivate {window_id}"))
def is_window_focused(self, window_id):
focused_window_id = subprocess.check_output(shlex.split("xdotool getwindowfocus")).decode("utf-8").strip()
return focused_window_id == window_id
def get_focused_window_name(self):
focused_window_id = subprocess.check_output(shlex.split("xdotool getwindowfocus")).decode("utf-8").strip()
return subprocess.check_output(shlex.split(f"xdotool getwindowname {focused_window_id}")).decode("utf-8").strip()
def get_window_geometry(self, window_id):
geometry = dict()
window_geometry = subprocess.check_output(shlex.split(f"xdotool getwindowgeometry {window_id}")).decode("utf-8").strip()
size = re.match(r"\s+Geometry: ([0-9]+x[0-9]+)", window_geometry.split("\n")[2]).group(1).split("x")
geometry["width"] = int(size[0])
geometry["height"] = int(size[1])
window_information = subprocess.check_output(shlex.split(f"xwininfo -id {window_id}")).decode("utf-8").strip()
geometry["x_offset"] = int(re.match(r"\s+Absolute upper-left X:\s+([0-9]+)", window_information.split("\n")[2]).group(1))
geometry["y_offset"] = int(re.match(r"\s+Absolute upper-left Y:\s+([0-9]+)", window_information.split("\n")[3]).group(1))
return geometry
|
from PySide2 import QtWidgets
import sys
from Python.pyside2.main import Ui_Form
class MainWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.setWindowTitle("Learning")
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
start = MainWidget()
start.show()
sys.exit(app.exec_())
|
import numpy as np
import math
import scipy.sparse.linalg
from scipy.sparse import csr_matrix as csr
from scipy.sparse import bmat
# Class containing all the data that should be distributed per proc
class fineProc():
def __init__(self):
# Local dofs and data
self.dim = 2
self.n = 0 # Local number of dofs
self.nI = 0 # Number of interior nodes
self.nB = 0 # Number of interface nodes
self.A = None # Local problem matrix
self.b = None # Local problem vector
# Mesh info
self.nE = 0 # Number of elements
self.nN = 0 # Number of nodes
self.nNbors = 0 # Number of edges per element
self.elemEdge = None # Element-edge connectivity, size(nE,nNbors)
self.signs = None # Sign map for each edge, size(nE,nNbors)
self.eemap_size = None # CSR Indexes for the edge-edge map, size(nE+1)
self.eemap = None # CSR data for the edge-edge map, size(eemap_size[nE])
self.edgeNodes = None # Nodes for each edge, size(n,2)
self.nemap_size = None # CSR Indexes for the node-edge map
self.nemap = None # CSR data for the node-edge map
# Communication
self.rank = 0 # Rank of this processor
self.nP = 0 # Number of neighboring procs
self.procs = None # List of neighboring procs
self.com_size = None # Communication vector sizes for dofs
self.com_loc = None # Communication vector local indexes for dofs
self.com_glob = None # Communication vector global indexes for dofs
self.globalTag = None # Global ordering of each edge
# BDDC-specific variables
self.nO = 0 # Number of local objects
self.obj_size = None # Number of edges for each object
self.obj_dofs = None # Edges for each object, written in obj_size[iO]:obj_size[iO+1]
self.obj_node = None # Interior nodes for each object, written in obj_size[iO]:obj_size[iO+1]-1
self.obj_id1 = None # Object ID1: Minimal Global ID of object edges
self.obj_id2 = None # Object ID2: Minimal global ID of object neighboring partitions
self.obj_sign = None # Orientation of edges inside the object
self.obj_cts = None # Number of constraints for each object
self.nC = 0 # Number of local constraints
self.C_size = None # Sizes for C
self.C = None # Local Constraint matrix
self.W = None # Weights for all edges
self.Phi = None # Local coarse eigenvectors
self.Lambda = None # Local coarse lagrange multipliers
self.Aib = None
self.invAii = None
self.invFine = None
def initAll(self):
self.getObjects()
self.getConstraints()
self.initFine()
def getObjects(self):
# Get neighboring processors for every boundary edge
numNbors = np.zeros(self.nB,dtype=int)
nbors = -np.ones((self.nB*self.nP),dtype=int)
for iP in range(self.nP):
for iE in range(self.com_size[iP],self.com_size[iP+1]):
e = self.com_loc[iE]-self.nI
nbors[e*self.nP + numNbors[e]] = self.procs[iP]
numNbors[e] += 1
# Select objects:
# > In 2D, we only have edges and all edges border 2 processors (i.e. numNbors[e] == 1)
# > In 3D, Faces border 2 processors (i.e. numNbors[e] == 1)
# and Edges border > 2 processors (i.e. numNbors[e] > 1 )
maxNumObjects = int(self.nP*(self.nP+1)/2) # nP choose 2
self.obj_size = np.zeros(maxNumObjects+1,dtype=int)
self.obj_dofs = np.zeros(self.nB,dtype=int)
self.obj_sign = np.zeros(self.nB,dtype=int)
self.obj_node = -np.ones((self.nB,2),dtype=int)
self.obj_id1 = np.zeros(maxNumObjects,dtype=int)
self.obj_id2 = np.zeros(maxNumObjects,dtype=int)
iO = 0
visited = np.zeros(self.nB, dtype=bool)
if (self.dim == 3) : visited = numNbors+1 < 3 # Eliminate face edges (set them as True)
for iB in range(self.nB):
if (not visited[iB]): # Start a new object
self.obj_dofs[self.obj_size[iO+1]] = iB + self.nI
self.obj_sign[self.obj_size[iO+1]] = 1
i_com = np.where(self.com_loc == iB + self.nI)[0][0]
self.obj_id1[iO] = self.com_glob[i_com]
self.obj_id2[iO] = int(min(self.rank,np.amin(nbors[iB*self.nP:iB*self.nP+numNbors[iB]])))
index_id1 = self.obj_size[iO+1]
self.obj_size[iO+1] += 1
iNode = 0 # Restart node numbering
# Follow the edge chain until exhausted,
# only a 2-position queue is needed (2 outmost edges of the chain)
q = [iB + self.nI,0]
q_sign = [1,0]
iq = 1
while(iq > 0):
iq = iq - 1
e = q[iq] # Current edge (local numbering)
ie = e-self.nI # Current edge (boundary numbering)
e_sign = q_sign[iq]
visited[ie] = True
e_nbors = nbors[ie*self.nP:ie*self.nP+numNbors[ie]] # Neighboring partitions
# Loop through the neighbors of e, looking for next possible edge
for e2 in self.eemap[self.eemap_size[e]:self.eemap_size[e+1]]:
ie2 = e2-self.nI
if (e2 >= self.nI and not visited[ie2]):
e2_nbors = nbors[ie2*self.nP:ie2*self.nP+numNbors[ie2]]
sameObject = (numNbors[ie2] == numNbors[ie]) and np.all(e_nbors == e2_nbors)
if (sameObject): # If in the same object
q[iq] = e2
self.obj_dofs[self.obj_size[iO+1]] = e2 # Put new edge in the object
# Select edge sign + get bridge node between e and e2
if(self.edgeNodes[e,0] == self.edgeNodes[e2,1]):
q_sign[iq] = e_sign
self.obj_sign[self.obj_size[iO+1]] = e_sign
self.obj_node[ie ,0] = iNode
self.obj_node[ie2,1] = iNode
bridgeNode = self.edgeNodes[e,0]
elif (self.edgeNodes[e,1] == self.edgeNodes[e2,0]):
q_sign[iq] = e_sign
self.obj_sign[self.obj_size[iO+1]] = e_sign
self.obj_node[ie ,1] = iNode
self.obj_node[ie2,0] = iNode
bridgeNode = self.edgeNodes[e,1]
elif (self.edgeNodes[e,0] == self.edgeNodes[e2,0]):
q_sign[iq] = -e_sign
self.obj_sign[self.obj_size[iO+1]] = -e_sign
self.obj_node[ie ,0] = iNode
self.obj_node[ie2,0] = iNode
bridgeNode = self.edgeNodes[e,0]
else: # (self.edgeNodes[e,1] == self.edgeNodes[e2,1])
q_sign[iq] = -e_sign
self.obj_sign[self.obj_size[iO+1]] = -e_sign
self.obj_node[ie ,1] = iNode
self.obj_node[ie2,1] = iNode
bridgeNode = self.edgeNodes[e,1]
i_com = np.where(self.com_loc == e2)[0][0]
self.obj_id1[iO] = min(self.obj_id1[iO],self.com_glob[i_com])
if (self.obj_id1[iO] == self.com_glob[i_com]): index_id1 = self.obj_size[iO+1]
self.obj_size[iO+1] += 1
iNode += 1
iq = iq + 1
if (self.obj_sign[index_id1] == -1): self.obj_sign[self.obj_size[iO]:self.obj_size[iO+1]] = - self.obj_sign[self.obj_size[iO]:self.obj_size[iO+1]]
iO += 1
self.obj_size[iO+1] = self.obj_size[iO]
self.nO = iO
def getConstraints(self):
# Get neighboring processors for every boundary edge
numNbors = np.zeros(self.nB,dtype=int)
for iP in range(self.nP):
for iE in range(self.com_size[iP],self.com_size[iP+1]):
e = self.com_loc[iE]-self.nI
numNbors[e] += 1
# Number of constraints
self.nC = 0
self.obj_cts = np.zeros(self.nO, dtype=int)
for iO in range(self.nO):
# Pathological case: The object is a single edge
if (self.obj_size[iO+1]-self.obj_size[iO] == 1):
self.obj_cts[iO] = 1
self.nC += 1
# Regular case: Chain length > 1
else:
self.obj_cts[iO] = 2
self.nC += 2
# Create and fill C
nC_max = 2*(self.obj_size[self.nO] + 2 * (self.nB - self.obj_size[self.nO]))
iC = 0
self.C_size = np.zeros(self.nC+1,dtype=int)
self.C_idx = np.zeros(nC_max,dtype=int)
self.C = np.zeros(nC_max)
for iO in range(self.nO):
# First Constraint
nE = self.obj_size[iO+1]-self.obj_size[iO]
dofs_E = self.obj_dofs[self.obj_size[iO]:self.obj_size[iO+1]]
sign_E = self.obj_sign[self.obj_size[iO]:self.obj_size[iO+1]]
self.C_size[iC+1] = self.C_size[iC] + nE
self.C_idx[self.C_size[iC]:self.C_size[iC+1]] = dofs_E
self.C[self.C_size[iC]:self.C_size[iC+1]] = sign_E
iC += 1
# Second Constraint
if (self.obj_cts[iO] == 2):
nF = 0
dofs_F = None
if (self.dim == 3): # Only faces in 3D case
nF_max = 0
for e in dofs_E : nF_max += self.eemap_size[e+1] - self.eemap_size[e]
dofs_F = np.zeros(nF_max,dtype=int)
for e in dofs_E: # For each edge in E
ie = e - self.nI
for e2 in self.eemap[self.eemap_size[e]:self.eemap_size[e+1]]: # Loop through neighbors
ie2 = e2 - self.nI
if (ie2 >= 0 and numNbors[ie2]+1 == 2): # If neighbor == face edge
self.obj_node[ie2 ,:] = -1
# Select only if they share interior node, and save which
# node they share.
if (self.obj_node[ie ,0] != -1):
if (self.edgeNodes[e2,0] == self.edgeNodes[e,0]):
self.obj_node[ie2 ,0] = self.obj_node[ie ,0]
dofs_F[nF] = e2
nF += 1
elif (self.edgeNodes[e2,1] == self.edgeNodes[e,0]):
self.obj_node[ie2 ,1] = self.obj_node[ie ,0]
dofs_F[nF] = e2
nF += 1
if (self.obj_node[ie ,1] != -1):
if (self.edgeNodes[e2,0] == self.edgeNodes[e,1]):
self.obj_node[ie2 ,0] = self.obj_node[ie ,1]
dofs_F[nF] = e2
nF += 1
elif (self.edgeNodes[e2,1] == self.edgeNodes[e,1]):
self.obj_node[ie2 ,1] = self.obj_node[ie ,1]
dofs_F[nF] = e2
nF += 1
# Constraint in the new basis
Cnew = np.ones(nE+nF)
Cnew[0] = 0.0
Cnew[nE:] = 0.0
# Change of basis
Gt = np.zeros((nE+nF,nE+nF))
Gt[0,:nE] = sign_E # Phi^t
for j in range(nE): # G_EN^t
ie = dofs_E[j] - self.nI
if (self.obj_node[ie ,0] != -1): Gt[self.obj_node[ie ,0]+1,j] = -1.0
if (self.obj_node[ie ,1] != -1): Gt[self.obj_node[ie ,1]+1,j] = 1.0
for j in range(nF): # G_FN^t
ie = dofs_F[j] - self.nI
if (self.obj_node[ie ,0] != -1): Gt[self.obj_node[ie ,0]+1,j+nE] = -1.0
if (self.obj_node[ie ,1] != -1): Gt[self.obj_node[ie ,1]+1,j+nE] = 1.0
if (self.obj_node[ie ,0] != -1 and self.obj_node[ie ,1] != -1): print('>>>>>> WARNING!!!')
for j in range(nF): # G_FF^t
Gt[j+nE,j+nE] = 1.0
self.C_size[iC+1] = self.C_size[iC] + nE + nF # Cardinal(E) + Cardinal(F)
self.C_idx[self.C_size[iC]:self.C_size[iC]+nE] = dofs_E
if (self.dim == 3): self.C_idx[self.C_size[iC]+nE:self.C_size[iC+1]] = dofs_F[:nF]
self.C[self.C_size[iC]:self.C_size[iC+1]] = np.linalg.solve(Gt,Cnew)
iC += 1
def initFine(self):
# Invert local problem
self.Aii = self.A[:self.nI,:self.nI]
self.invAii = scipy.sparse.linalg.factorized(self.Aii)
self.Aib = self.A[:self.nI,self.nI:]
self.C_csr = csr((self.C,self.C_idx,self.C_size),shape=(self.nC,self.nI+self.nB))
Aaux = bmat([[self.A , self.C_csr.transpose() ] ,
[self.C_csr , None ] ])
self.invFine = scipy.sparse.linalg.factorized(Aaux)
# Get local eigenvectors from Newmann problem
self.Phi = np.zeros((self.nB+self.nI, self.nC))
self.Lambda = np.zeros((self.nC, self.nC))
for j in range(self.nC):
x = np.zeros(self.nI + self.nB + self.nC)
x[self.nI + self.nB + j] = 1.0
y = self.invFine(x)
self.Phi[:,j] = y[:self.nI+self.nB]
self.Lambda[:,j] = y[self.nI+self.nB:]
# Get weights
self.W = np.ones((self.n,1))
for iP in range(self.nP):
for iE in range(self.com_size[iP],self.com_size[iP+1]):
self.W[self.com_loc[iE]] += 1
for iE in range(self.n):
self.W[iE] = 1.0 / self.W[iE]
|
"""
Implementation of the OpenWeatherMap API.
"""
__author__ = '[email protected] (Ajay Roopakalu)'
OPEN_WEATHER_MAP_URL = 'http://api.openweathermap.org/data/2.5'
WEATHER_URL = '/weather'
import requests
import log
logging = log.Log(__name__)
def GetCurrentExternalWeather(app_id, city_id):
"""Get current weather data at the given city_id.
Args:
app_id: (str) OpenWeatherMap API key.
city_id: (str) OpenWeatherMap City ID.
Returns:
Dict containing temperature and humidity data for given location.
"""
params = {
'APPID': app_id,
'id': city_id,
'units': 'imperial'
}
response = requests.get(
OPEN_WEATHER_MAP_URL + WEATHER_URL,
params=params)
if response.status_code != 200:
logging.exception('Unexpected response: %s', response.text)
response_parsed = response.json()
if 'main' not in response_parsed or 'temp' not in response_parsed['main']:
logging.exception('Expected fields not in response: %s', response.text)
return response_parsed['main']
|
# This is a regression example where we calculate the slope of the correlation
# between the variable x and y
mult = lambda x, y: x * y
x = [70, 57, 63, 70, 53, 75, 58]
y = [1, 1, 1, 1, 2, 2, 1]
s = 0
for i in range(len(x)):
s += mult(x[i], y[i])
b = (s - (1/7) * 9 * 446)/(28816 - (1/7) * 446**2)
a = (1/7) * sum(y) - b * (1/7) * sum(x)
print(36 * b + a)
|
import sys
from optparse import OptionParser
from datetime import datetime, timedelta
import os
import json
from haralyzer import HarParser, HarPage
import harparser
import numpy as np
SILENCE_THRESH = 2.0
TDT = 0.95 #95th percentile of bytes cumulative
def compute_aft(path):
print("\tComputing AFT, input-video=%s ..." % path)
aft = compute_speed_index_from_video(path, path[0:-4] + ".speedindex.out", path[0:-4] + "_frames")
aft = max(0.0, aft) #prevent negative in case of incorrect
return aft
def compute_tdt(entries, silence_thresh):
print("\tComputing TDT ...")
if silence_thresh==None:
silence_thresh=SILENCE_THRESH
#find init and end times
global_end_time = 0
init_time = datetime.strptime(entries[0][u'startedDateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
last_byte_ts = 0.0
selected_entries = []
for entry in entries:
#find corresponding domain
url = entry[u'request'][u'url']
#NOTE filter suspicious entries
if "alog" in url or "goupdate" in url or "gofor" in url or "adpush" in url or "goload" in url or "crashlytics" in url or "scorecardresearch" in url or "for-channel" in url:
continue
#entry timings
start_time = datetime.strptime(entry[u'startedDateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
start_from_init = (start_time-init_time).seconds + (start_time-init_time).microseconds/1e6
total_time_secs = entry[u'time']/1e3
#skip if 0-byte request, 0-byte response (e.g., tls handshake)
total_bytes = entry[u'request'][u'bodySize'] + entry[u'response'][u'bodySize']
if total_bytes == 0:
continue
#check for Stop, based on silence threshold (TODO: this won't be needed once we have the partitions of the waterfall)
if start_from_init - last_byte_ts > silence_thresh:
break
#add to subset of valid entries
selected_entries.append(entry)
#update end time
end_from_init = start_from_init + total_time_secs
if end_from_init > last_byte_ts:
last_byte_ts = end_from_init
# print "Last byte ts = " + str(last_byte_ts)
global_end_time = (datetime.strptime(entry[u'startedDateTime'], "%Y-%m-%dT%H:%M:%S.%fZ") + timedelta(milliseconds=entry[u'time']))
#build our ByteIndex (as a CDF)
interval_len = (global_end_time-init_time).seconds + (global_end_time-init_time).microseconds/1e6
#print "Interval length (secs) = " + str(interval_len)
timeline = (np.arange(0.0, interval_len+1e-3, 1e-3)).tolist() # 1ms resolution
byte_list = [0] * len(timeline)
percent_list = [0] * len(timeline)
byte_count = 0
for entry in selected_entries:
#entry timings
start_time = datetime.strptime(entry[u'startedDateTime'], "%Y-%m-%dT%H:%M:%S.%fZ")
start_from_init = (start_time-init_time).seconds + (start_time-init_time).microseconds/1e6
total_time_secs = entry[u'time']/1e3
end_from_init = start_from_init + total_time_secs
#entry size
total_bytes = entry[u'request'][u'bodySize'] + entry[u'response'][u'bodySize']
#skip if 0-byte request, 0-byte response (e.g., tls handshake)
if total_bytes == 0:
continue
byte_count+=total_bytes
time_counter = 0.0
pos = 0
while(time_counter <= start_from_init):
time_counter+=1e-3
pos+=1
while time_counter <= end_from_init:
byte_list[pos] += total_bytes * ( (time_counter-start_from_init) / total_time_secs )
pos+=1
time_counter+=1e-3
#print byte_list[pos]
while pos<len(timeline):
byte_list[pos] += total_bytes
#print byte_list[pos]
time_counter+=1e-3
pos+=1
#create percentile list
for i in range(0, len(timeline)):
percent_list[i] = float(byte_list[i])/byte_count
tdt = None
#get 99th percentile time
for i in range(0, len(timeline)):
if percent_list[i]>=TDT:
tdt = timeline[i]
break
if tdt == None:
tdt = 0
print("\tTDT=%f" % tdt)
return tdt, selected_entries
def compute_speed_index_from_video(video_path, outfile, frames_path):
# os.system("python ./../visualmetrics.py --video " + video_path + " --screenshot " + video_path + ".png -d frames --notification > " + outfile)
try:
os.system("python ./visualmetrics.py --video " + video_path + " -d " + frames_path + " --notification > " + outfile)
except:
print("Error while running visualmetrics.py. Skipping")
with open(outfile) as f:
lines = f.readlines()
str_visual_progress = (lines[3].split(": ")[1]).split(", ")
si_time_secs = []
si_perc_prog = []
i=0
for prog in str_visual_progress:
#avoid 100% speedindex progress in the middle (speedindex curve not monotonic)
if int(prog.split('=')[1].split('%')[0])/1e2 == 1.0 and i!=(len(str_visual_progress)-1):
i+=1
continue
si_time_secs.append(int(prog.split('=')[0])/1e3)
si_perc_prog.append(int(prog.split('=')[1].split('%')[0])/1e2)
i+=1
#NOTE repair speedindex progress list if no monotonic (small ripples may appear..)
ref = si_perc_prog[0]
new_si_perc_prog = []
for item in si_perc_prog:
if item < ref:
new_si_perc_prog.append(ref)
else:
new_si_perc_prog.append(item)
ref = item
#get AFT
for i in range(0, len(new_si_perc_prog)):
if new_si_perc_prog[i] == 1.0:
aft = si_time_secs[i]
print("\tAFT=%f" % aft)
return aft
|
info = {
"UNIT_NUMBERS": {
"walâ": 0,
"isá": 1,
"dalawá": 2,
"tatló": 3,
"ápat": 4,
"limá": 5,
"anim": 6,
"pitó": 7,
"waló": 8,
"siyám": 9
},
"DIRECT_NUMBERS": {
"sampû": 10
},
"TENS": {},
"HUNDREDS": {},
"BIG_POWERS_OF_TEN": {
"daán": 100,
"libó": 1000,
"milyón": 1000000,
"bilyón": 1000000000,
"trilyón": 1000000000000,
"katrilyón": 1000000000000000
},
"SKIP_TOKENS": [],
"USE_LONG_SCALE": False
}
|
import sys
#import nltk
import math
import time
import string
import numpy as np
import gensim
import tagger as t
import pybrain
from pybrain.datasets import supervised
from pybrain.tools.shortcuts import buildNetwork
from sklearn import svm
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
def main():
# start timer
time.clock()
# Training the Tagger:
# open training data
#infile = open("../data/gold/simple_gold_revised_emojis.txt", "r")
infile = open("../results/pos_tagged_4_fold_cv.txt", "r")
t_sents = infile.readlines()
infile.close()
#train_sents = []
train_sents = list(t_sents[102:])
#train_sents = list(t_sents)
# open CMU training data
infile = open("../data/gold/cmu_all_gold.txt")
sents = infile.readlines()
cmu_train_sents = sents
#cmu_train_sents = sents[:1328]
#cmu_train_sents.extend(sents[1428:])
#cmu_train_sents = []
infile.close()
all_sents = list()
all_sents.extend(train_sents)
all_sents.extend(cmu_train_sents)
# Standard implementation of domain adaptation
domain_list = ['tweet']*len(train_sents)
#domain_list.extend(['tweet']*len(cmu_train_sents))
domain_list.extend(['cmu']*len(cmu_train_sents))
#domain_list = None
# Initializing the tagger
tagger = t.tagger(brown_cluster_path='../tools/TweeboParser/pretrained_models/twitter_brown_clustering_full',
word2vec_path='../tools/word2vec/word2vec_twitter_model.bin'
#word2vec_path= '../tools/word2vec/glove.6B/glove.6B.300d.txt',
#word2vec_path= '../tools/word2vec/glove.840B.300d/glove.840B.300d.txt'
#word2vec_path= '../tools/word2vec/glove.twitter.27B/glove.twitter.27B.200d.txt',
#word2vec_path= '../tools/word2vec/GoogleNews-vectors-negative300.bin'
#wiktionary_path='../data/wiktionary'
)
#tagged_sents = tagger.cross_validation(train_sents, domain_list, len(train_sents), folds=4)
#tagger.output_tagged(tagged_sents, '../results/pos_tagged_4_fold_cv.txt',)
tagger.train(all_sents, domain_list)
tagger.save_clf(path='../classifiers/POS-tagger.pkl')
# Using the tagger to tag dev set data
# open Corpus development data
#infile = open("../data/content/simple_content_emoji.txt", "r")
infile = open("../data/gold/simple_gold_revised_emojis.txt", "r")
#infile = open("../data/gold/test_final.txt", "r")
print('Reading Dev')
train_Dev = infile.readlines()[:200]
infile.close()
dev_tokens, _ = tagger.preprocess(train_Dev)
print('Testing Dev')
tagged_sents = tagger.tag_sents(dev_tokens, 'tweet')
print('Writing Results')
tagger.output_tagged(tagged_sents, '../results/pos_tagged_cv.txt')
infile = open("../data/content/test_final_content.txt", "r")
print('Reading Dev')
train_test = infile.readlines()[:200]
infile.close()
test_tokens, _ = tagger.preprocess(train_test)
print('Testing Dev')
tagged_sents = tagger.tag_sents(test_tokens, 'tweet')
print('Writing Results')
tagger.output_tagged(tagged_sents, '../results/pos_tagged_test_cv.txt')
'''
infile = open("../data/gold/cmu_test_gold.txt", "r")
train_cmu = infile.readlines()
cmu_tokens, _ = tagger.preprocess(train_cmu)
tagged_sents = tagger.tag_sents(cmu_tokens, 'cmu')
tagger.output_tagged(tagged_sents, '../results/cmu_pos_tagged_cv.txt')
'''
print("Time: " + str(time.clock()) + ' sec')
if __name__ == "__main__":
main()
import eval_parsing
|
# stdlib imports
import re
# third party imports
import numpy as np
import logging
from strec.subtype import SubductionSelector
def get_weights(origin, config):
"""Get list of GMPEs and their weights for a given earthquake.
Args:
origin (Origin object): ShakeMap Origin object, containing earthquake
info.
config (dict-like): Configuration information regarding earthquake
type.
Returns:
tuple: Tuple with elements that are:
- list of strings indicating the GMPEs selected for this
earthquake.
- ndarray (float) of GMPE weights.
- Pandas series containing STREC output.
"""
tprobs, strec_results = get_probs(origin, config)
gmpelist = []
weightlist = []
# remove all probabilities that are == 0
probs = {}
for key, value in tprobs.items():
if value > 0.0:
probs[key] = value
all_keylist = list(probs.keys())
# let's have the code default to use the slab data
if config["tectonic_regions"]["subduction"]:
use_slab = config["tectonic_regions"]["subduction"]["use_slab"]
else:
use_slab = True
for region, rdict in config["tectonic_regions"].items():
if (region == "subduction") and use_slab:
if "crustal" in probs or "subduction_0" in probs:
if "crustal" in probs:
topkey = "crustal"
else:
topkey = "subduction_0"
gmpelist += rdict["crustal"]["gmpe"]
weightlist.append(probs[topkey])
if "interface" in probs or "subduction_1" in probs:
if "interface" in probs:
midkey = "interface"
else:
midkey = "subduction_1"
gmpelist += rdict["interface"]["gmpe"]
weightlist.append(probs[midkey])
if "intraslab" in probs or "subduction_2" in probs:
if "intraslab" in probs:
botkey = "intraslab"
else:
botkey = "subduction_2"
gmpelist += rdict["intraslab"]["gmpe"]
weightlist.append(probs[botkey])
else:
pat = re.compile(region + "_")
keylist = sorted(list(filter(pat.search, all_keylist)))
if len(keylist):
for key in keylist:
weightlist.append(probs[key])
idx = int(key.split("_")[1])
gmpelist.append(rdict["gmpe"][idx])
weightlist = np.array(weightlist)
logging.debug(f"gmpelist: {gmpelist}")
logging.debug(f"weightlist: {weightlist}")
gmmdict = {"gmpelist": gmpelist, "weightlist": weightlist}
#
# Here we get the region-specific ipe, gmice, and ccf. If they are
# not specified in the config, we use None, and let the value
# fall back to whatever is specified in the system config.
#
if strec_results["TectonicRegion"] == "Active":
gmmdict["ipe"] = config["tectonic_regions"]["acr"].get("ipe", None)
gmmdict["gmice"] = config["tectonic_regions"]["acr"].get("gmice", None)
gmmdict["ccf"] = config["tectonic_regions"]["acr"].get("ccf", None)
elif strec_results["TectonicRegion"] == "Stable":
gmmdict["ipe"] = config["tectonic_regions"]["scr"].get("ipe", None)
gmmdict["gmice"] = config["tectonic_regions"]["scr"].get("gmice", None)
gmmdict["ccf"] = config["tectonic_regions"]["scr"].get("ccf", None)
elif strec_results["TectonicRegion"] == "Subduction":
gmmdict["ipe"] = config["tectonic_regions"]["subduction"].get("ipe", None)
gmmdict["gmice"] = config["tectonic_regions"]["subduction"].get("gmice", None)
gmmdict["ccf"] = config["tectonic_regions"]["subduction"].get("ccf", None)
elif strec_results["TectonicRegion"] == "Volcanic":
gmmdict["ipe"] = config["tectonic_regions"]["volcanic"].get("ipe", None)
gmmdict["gmice"] = config["tectonic_regions"]["volcanic"].get("gmice", None)
gmmdict["ccf"] = config["tectonic_regions"]["volcanic"].get("ccf", None)
return gmmdict, strec_results
def get_probs(origin, config):
"""Calculate probabilities for each earthquake type.
The results here contain probabilities that can be rolled up in many ways:
- The probabilities of acr, scr, volcanic, and subduction should sum to
one.
- The probabilities of acr_X,scr_X,volcanic_X, crustal, interface and
intraslab
should sum to 1.
- The probabilities of acr_X should sum to acr, and so on.
Args:
origin (Origin object): ShakeMap Origin object, containing earthquake
info.
config (dict-like): Configuration information regarding earthquake
type.
Returns:
(dict, dict):
Probabilities for each earthquake type, with fields:
- acr Probability that the earthquake is in an active region.
- acr_X Probability that the earthquake is in a depth layer of
ACR, starting from the top.
- scr Probability that the earthquake is in a stable region.
- scr_X Probability that the earthquake is in a depth layer of
SCR, starting from the top.
- volcanic Probability that the earthquake is in a volcanic
region.
- volcanic_X Probability that the earthquake is in a depth layer
of Volcanic, starting from the top.
- subduction Probability that the earthquake is in a subduction
zone.
- crustal Probability that the earthquake is in the crust above
an interface.
- interface Probability that the earthquake is on the interface.
- intraslab Probability that the earthquake is in the slab below
interface.
STREC results
"""
selector = SubductionSelector()
lat, lon, depth, mag = origin.lat, origin.lon, origin.depth, origin.mag
if origin.id is not None and not origin.id.startswith(origin.netid):
eid = origin.netid + origin.id
else:
eid = origin.id
tensor_params = None
if hasattr(origin, "moment"):
tensor_params = origin.moment
strec_results = selector.getSubductionType(
lat, lon, depth, eid, tensor_params=tensor_params
)
region_probs = get_region_probs(eid, depth, strec_results, config)
in_subduction = strec_results["TectonicRegion"] == "Subduction"
above_slab = not np.isnan(strec_results["SlabModelDepth"])
use_slab = config["tectonic_regions"]["subduction"]["use_slab"]
if use_slab:
if in_subduction:
subduction_probs = get_subduction_probs(
strec_results, depth, mag, config, above_slab
)
for key, value in subduction_probs.items():
subduction_probs[key] = value * region_probs["subduction"]
# If we are in a subduction zone then we don't want the
# keys for subduction_0, 1, 2 (which are the generic vertical
# subduction subtypes that are not informed by the slab model because
# it isn't available)
if "subduction_0" in region_probs:
del region_probs["subduction_0"]
if "subduction_1" in region_probs:
del region_probs["subduction_1"]
if "subduction_2" in region_probs:
del region_probs["subduction_2"]
else:
# If we are NOT in a subduction zone we may or may not need subduction
# probabilities (depending on distance and the configured taper). But
# either way, we will not have access to the slab model and so we have
# to use the generic vertical subtypes
subduction_probs = {
"crustal": region_probs["subduction_0"],
"interface": region_probs["subduction_1"],
"intraslab": region_probs["subduction_2"],
}
region_probs.update(subduction_probs)
else:
logging.info('"use_slab" is False so no slab used in finding GMPE ' "weights.")
return (region_probs, strec_results)
def get_region_probs(eid, depth, strec_results, config):
"""
Calculate the regional probabilities (not including subduction interface
etc).
Args:
eid (str): Earthquake ID (i.e., us1000cdn0)
depth (float): Depth of earthquake.
strec_results (Series): Pandas series containing STREC output.
config (dict-like): Configuration information regarding earthquake
type.
Returns:
dict: Probabilities for each earthquake type, with fields:
- acr Probability that the earthquake is in an active region.
- acr_X Probability that the earthquake is in a depth layer of
ACR, starting from the top.
- scr Probability that the earthquake is in a stable region.
- scr_X Probability that the earthquake is in a depth layer of
SCR, starting from the top.
- volcanic Probability that the earthquake is in a volcanic
region.
- volcanic_X Probability that the earthquake is in a depth layer
of Volcanic, starting from the top.
- subduction Probability that the earthquake is in a subduction
zone.
"""
region_probs = {}
region_mapping = {
"scr": "DistanceToStable",
"acr": "DistanceToActive",
"volcanic": "DistanceToVolcanic",
"subduction": "DistanceToSubduction",
}
layer_probs = {}
for region, rdict in config["tectonic_regions"].items():
distance = strec_results[region_mapping[region]]
# If we're considering subduction zones but not IN a subduction zone
x1 = 0.0
p2 = 0.0
p1 = 1.0
x2 = rdict["horizontal_buffer"]
region_prob = get_probability(distance, x1, p1, x2, p2)
region_probs[region] = region_prob
region_layer_probs = {}
# now do the weights for each depth zone
for i in range(0, len(rdict["min_depth"])):
# First, taper from -1 to 0 for the lower end
x1 = rdict["min_depth"][i] - rdict["vertical_buffer"] / 2
p1 = -1.0
x2 = rdict["min_depth"][i]
p2 = 0.0
p_layer1 = get_probability(depth, x1, p1, x2, p2)
# Then, taper from 0 to -1 for the higher end
x1 = rdict["max_depth"][i]
p1 = 0.0
x2 = rdict["max_depth"][i] + rdict["vertical_buffer"] / 2
p2 = -1.0
p_layer2 = get_probability(depth, x1, p1, x2, p2)
# Lastly, combine to get probability curve for layer i
region_layer_probs["%s_%i" % (region, i)] = 1 + p_layer1 + p_layer2
probsum = sum([lp for lp in list(region_layer_probs.values())])
if probsum > 0:
for key, value in region_layer_probs.items():
region_layer_probs[key] = value / probsum
# running list of all region layer probabilities
layer_probs.update(region_layer_probs)
# divide the weights by the total weight
probsum = sum(list(region_probs.values()))
for region, prob in region_probs.items():
region_probs[region] = prob / probsum
pat = re.compile(region)
layerkeys = list(layer_probs.keys())
reg_layers = list(filter(pat.search, layerkeys))
for reg_layer in reg_layers:
layer_probs[reg_layer] = layer_probs[reg_layer] * region_probs[region]
region_probs.update(layer_probs)
return region_probs
def get_subduction_probs(strec_results, depth, mag, config, above_slab):
"""Get probabilities of earthquake being crustal, interface or intraslab.
Args:
strec_results (Series): Pandas series containing STREC output.
depth (float): Depth of earthquake.
mag (float): Earthquake magnitude.
config (dict-like): Configuration information regarding earthquake
type.
above_slab (bool): Is earthquake above a defined slab?
Returns:
dict: Probabilities for each earthquake type, with fields:
- crustal Probability that the earthquake is in the crust above
an interface.
- interface Probability that the earthquake is on the interface.
- intraslab Probability that the earthquake is in the slab below
interface.
"""
subcfg = config["subduction"]
if above_slab:
# the angle between moment tensor and slab
kagan = strec_results["KaganAngle"] # can be nan
# Depth to slab
slab_depth = strec_results["SlabModelDepth"]
# Error in depth to slab
slab_depth_error = strec_results["SlabModelDepthUncertainty"]
# what is the effective bottom of the interface zone?
max_interface_depth = strec_results["SlabModelMaximumDepth"]
# Calculate the probability of interface given the
# (absolute value of) difference between hypocenter and depth to slab.
dz = np.abs(depth - slab_depth)
x1 = subcfg["p_int_hypo"]["x1"] + slab_depth_error
x2 = subcfg["p_int_hypo"]["x2"] + slab_depth_error
p1 = subcfg["p_int_hypo"]["p1"]
p2 = subcfg["p_int_hypo"]["p2"]
p_int_hypo = get_probability(dz, x1, p1, x2, p2)
# Calculate probability of interface given Kagan's angle
if np.isfinite(kagan):
x1 = subcfg["p_int_kagan"]["x1"]
x2 = subcfg["p_int_kagan"]["x2"]
p1 = subcfg["p_int_kagan"]["p1"]
p2 = subcfg["p_int_kagan"]["p2"]
p_int_kagan = get_probability(kagan, x1, p1, x2, p2)
else:
p_int_kagan = subcfg["p_kagan_default"]
# Calculate probability that event occurred above bottom of seismogenic
# zone, given to us by the Slab model.
x1 = max_interface_depth + subcfg["p_int_sz"]["x1"]
x2 = max_interface_depth + subcfg["p_int_sz"]["x2"]
p1 = subcfg["p_int_sz"]["p1"]
p2 = subcfg["p_int_sz"]["p2"]
p_int_sz = get_probability(depth, x1, p1, x2, p2)
# Calculate combined probability of interface
p_int = p_int_hypo * p_int_kagan * p_int_sz
# Calculate probability that the earthquake lies above the slab
# and is thus crustal.
x1 = subcfg["p_crust_slab"]["x1"]
x2 = subcfg["p_crust_slab"]["x2"]
p1 = subcfg["p_crust_slab"]["p1"]
p2 = subcfg["p_crust_slab"]["p2"]
p_crust_slab = get_probability((depth - slab_depth), x1, p1, x2, p2)
# Calculate probability that the earthquake lies within the crust
x1 = subcfg["p_crust_hypo"]["x1"]
x2 = subcfg["p_crust_hypo"]["x2"]
p1 = subcfg["p_crust_hypo"]["p1"]
p2 = subcfg["p_crust_hypo"]["p2"]
p_crust_hypo = get_probability(depth, x1, p1, x2, p2)
# Calculate probability of crustal
p_crustal = (1 - p_int) * p_crust_slab * p_crust_hypo
# Calculate probability of intraslab
p_slab = 1 - (p_int + p_crustal)
else:
slab_depth = subcfg["default_slab_depth"]
# Calculate the probability that an earthquake is interface
# given magnitude
x1 = subcfg["p_int_mag"]["x1"]
p1 = subcfg["p_int_mag"]["p1"]
x2 = subcfg["p_int_mag"]["x2"]
p2 = subcfg["p_int_mag"]["p2"]
p_int_mag = get_probability(mag, x1, p1, x2, p2)
# Calculate the probability that the earthquake is
# interface given depth (two part function).
# upper portion of function
x1 = subcfg["p_int_dep_no_slab_upper"]["x1"]
p1 = subcfg["p_int_dep_no_slab_upper"]["p1"]
x2 = subcfg["p_int_dep_no_slab_upper"]["x2"]
p2 = subcfg["p_int_dep_no_slab_upper"]["p2"]
p_int_depth_upper = get_probability(depth, x1, p1, x2, p2)
# lower portion of function
x1 = subcfg["p_int_dep_no_slab_lower"]["x1"]
p1 = subcfg["p_int_dep_no_slab_lower"]["p1"]
x2 = subcfg["p_int_dep_no_slab_lower"]["x2"]
p2 = subcfg["p_int_dep_no_slab_lower"]["p2"]
p_int_depth_lower = get_probability(depth, x1, p1, x2, p2)
p_int_depth = p_int_depth_upper + p_int_depth_lower
# This functional form is used so that the probability of interface
# inflates and appraoches 1 as magnitude gets large, assuming
# that the ramp function for p_int_mag is zero at small magnitudes
# and 1 at large magnitudes.
p_int = p_int_depth + (1 - p_int_depth) * p_int_mag
if depth > slab_depth:
p_crustal = 0.0
p_slab = 1 - p_int
else:
p_crustal = 1 - p_int
p_slab = 0.0
probs = {"crustal": p_crustal, "interface": p_int, "intraslab": p_slab}
return probs
def get_probability(x, x1, p1, x2, p2):
"""Calculate probability using a ramped function.
The subsections and parameters below reflect a series of ramp functions
we use to calculate various probabilities.
p1 |----+
| \
| \
| \
p2 | +-------
|
+-----------------
x1 x2
Args:
x (float): Quantity for which we want corresponding probability.
x1 (float): Minimum X value.
p1 (float): Probability at or below minimum X value.
x2 (float): Maximum X value.
p2 (float): Probability at or below maximum X value.
Returns:
float: Probability at input x value.
"""
if x <= x1:
prob = p1
elif x >= x2:
prob = p2
else:
slope = (p1 - p2) / (x1 - x2)
intercept = p1 - slope * x1
prob = x * slope + intercept
return prob
|
"""
Main file to calculate the embeddings with OGRE/DOGRE/WOGRE, and performing link prediction and node classification task.
In order to calculate the embedding, you first must have an edge list file:
"datasets/name_of_dataset.txt" - An edge list txt file. If the graph is unweighted it consists of 2 columns: source, target (with no title, source and target share an edge).
If the graph is weighted, it consists of 3 columns: source target weight.
Example for unweighted graph:
1 2
2 3
1 4
1 3
Example for weighted graph:
1 2 3
1 3 0.3
1 4 4.5
2 4 0.98
You can see examples for this format in "datasets" directory.
If you want to peform vertex classification task or GCN is your initial embedding, you must have labels file:
"labels/{name_of_dataset}_tags.txt" - A txt file which consists of 2 columns: node, label (no title). Notice all node must have labels!
Example:
1 0
2 0
3 1
4 2
Another possibilty is having a .mat file as in NRL_Benchmark (https://pages.github.com/). In this link, go to either `node classification`
or `link prediction` directories, where a link to datasets you can use in .mat format is avaliable. Then this .mat file is both the
edges and labels file.
If you want to perform link prediction task, you must have non edges file:
"evaluation_tasks/non_edges_{name_of_dataset}" - A csv file which consists of two columns: node1, node2 ; where there is no edge between them (again no title).
In order to produce such file, you can go to evaluation_tasks -> calculate_non_edges.py , and follow the instructions there.
When you have all the files you need (depending on what you want to perform), you can run this file.
1. First initialize DATASET parameters dict:
- name: Name of dataset (as the name of the edge list txt file) (string)
- initial_size: List of initial core sizes. (list)
- dim: Embedding dimension (int)
- is_weighted: True if the graph is weighted, else False (bool)
- choose: "degrees" if the vertices of the initial core are the ones with highest degree (as done in our experiments), else "k_core" if the vertices of the initial core are
the ones with highest k-core score. (string)
- "s_a": True if you also want to calculate state-of-the-art embeddings (node2vec/GF/HOPE/GCN), else False.
Params for OGRE:
- epsilon: Weight to the second order neighbours embedding. For more details you can go to the implementation- our_embedding_methods -> OGRE.py (float).
Params for DOGRE/WOGRE:
- "regu_val": Regularization value for regression, only for DOGRE/WOGRE. For more details you can go to the implementation- our_embedding_methods -> D_W_OGRE.py (float).
- "weighted_reg": True for weighted regression, else False.
If the initial embedding method is GCN and/or a vertex classification task is applied, a labels file is also necessary:
- "label_file": path and name (together), so it can be read directly.
2. methods_ : List of our suggested embedding methods (OGRE/DOGRE/WOGRE) with whom you want to embed the given graph.
3. initial_methods_ : List of state-of-the-art embedding methods (node2vec/GF/HOPE/GCN) with whom the initial core will be embed.
4. params_dict_ : Parameters for state-of-the-art embeddings. These are the optimal ones (according to their papers). For more details you can go to-
state_of_the_art -> state_of_the_art_embedding.py
5. save_: True if you want to save the embedding in a .npy format, else False.
Once you have that, you can run "calculate_static_embeddings" function to get the embeddings as dictionaries. You can see function implementation and output format in
evaluation_tasks -> eval_utils.py .
If you only want the embedding of the graph, you can stop here. If you also want to apply link prediction or vertex classification task you should continue.
Line 107: export_time - Export a csv file with running times of each method according to the initial core size.
Lines 123-130- Link prediction task: A csv file of non edges is needed (as explained above), you can see comments in the code. For more details you can go to
evaluation_tasks -> link_prediction.py .
Lines 132-136- Vertex classification task: You can see comments in the code. For more details you can go to evaluation_tasks -> node_classification.py .
"""
from link_prediction import *
from node_classification import *
from static_embeddings import *
import csv
# initialize important variables / parameters
DATASET = {"name": "DBLP", "initial_size": [100, 1000], "dim": 128, "is_weighted": False, "choose": "degrees",
"regu_val": 0, "weighted_reg": False, "s_a": True, "epsilon": 0.1,
"label_file": os.path.join("..", "labels", "dblp_tags.txt")}
# Example for .mat
# DATASET = {"name": "Flickr", "initial_size": [1000], "dim": 128, "is_weighted": False, "choose": "degrees",
# "regu_val": 0, "weighted_reg": False, "s_a": False, "epsilon": 0.01,
# "label_file": os.path.join("..", "datasets", "Flickr.mat")}
datasets_path_ = os.path.join("..", "datasets")
# where to save the embeddings
if DATASET["choose"] == "degrees":
embeddings_path_ = os.path.join("..", "embeddings_degrees")
else:
embeddings_path_ = os.path.join("..", "embeddings_k_core")
# Our suggested embedding method
methods_ = ["OGRE"]
# state-of-the-art embedding methods
initial_methods_ = ["node2vec"]
# Parameters duct for state-of-the-art embedding methods
params_dict_ = {"node2vec": {"dimension": DATASET["dim"], "walk_length": 80, "num_walks": 16, "workers": 2},
"GF": {"dimension": DATASET["dim"], "eta": 0.1, "regularization": 0.1, "max_iter": 3000,
"print_step": 100}, "HOPE": {"dimension": 128, "beta": 0.1},
"GCN": {"dimension": DATASET["dim"], "epochs": 150, "lr": 0.01, "weight_decay": 5e-4, "hidden": 200,
"dropout": 0}}
# if you want to save the embeddings as npy file- save_=True
save_ = True
# calculate dict of embeddings
z, G, initial_size, list_initial_proj_nodes = calculate_static_embeddings(datasets_path_, embeddings_path_, DATASET,
methods_, initial_methods_, params_dict_,
save_=save_)
"""
if the embeddings is all you wanted you can stop here. Otherwise, here are functions to calculate running time, and
applying Link Prediction and Node Classification Tasks.
"""
# where to save resuts files
if DATASET["choose"] == "degrees":
save = "files_degrees"
else:
save = "files_k_core"
# evaluate running time
export_time(z, DATASET["name"], save)
if DATASET["name"] == "Yelp":
mapping = {i: n for i,n in zip(range(G.number_of_nodes()), list(G.nodes()))}
else:
mapping=None
DATASET["initial_size"] = initial_size
print(initial_size)
# Link prediction Task
n = G.number_of_nodes()
non_edges_file = "non_edges_{}.csv".format(DATASET["name"]) # non edges file
# number_true_false: Number of true and false edges, number choose: How many times to choose true and false edges
params_lp_dict = {"number_true_false": 10000, "rounds": 10, "test_ratio": [0.2, 0.3, 0.5], "number_choose": 10}
dict_lp = final_link_prediction(z, params_lp_dict, non_edges_file)
export_results_lp_nc_all(n, save, z, dict_lp, DATASET["initial_size"], DATASET["name"], "Link Prediction")
print("finish link prediction")
# Node Classification Task
params_nc_dict = {"rounds": 10, "test_ratio": [0.5, 0.9]}
# for multi-label node classification add multi=True
dict_nc = final_node_classification(DATASET["name"], z, params_nc_dict, DATASET, mapping=mapping, multi=False)
export_results_lp_nc_all(n, save, z, dict_nc, DATASET["initial_size"], DATASET["name"], "Node Classification")
print("finish node classification")
|
from setuptools import setup
setup(
name = 'mongofs',
packages = ['mongofs'], # this must be the same as the name above
version = '0.1.0.2',
description = 'Access Mongo documents in a FUSE filesystem',
author = 'Paulo Costa',
author_email = '[email protected]',
url = 'https://github.com/paulo-raca/mongofs',
download_url = 'https://github.com/paulo-raca/mongofs/0.1',
keywords = ['fuse', 'mongo'],
entry_points = {
'console_scripts': ['mount.mongofs=mongofs.__main__:main'],
},
install_requires = [
"RouteFS",
"notify2",
"pymongo",
"expiringdict",
"procfs"
]
) |
import torch
import re
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torchvision.datasets import ImageFolder
from skimage import io, transform
import string
import matplotlib.pyplot as plt # for plotting
import numpy as np
from PIL import Image
import pickle
import os
'''Paths'''
IMAGE_DIR = './train_images/'
CAPTIONS_FILE_PATH = './train_captions.tsv'
IMAGE_DIR_TEST = './public_test_images/'
IMAGE_DIR_PRIVATE_TEST = './private_test_images/'
CAPTIONS_FILE_PATH_TEST = './public_test_captions.tsv'
IMAGE_RESIZE = (224, 224)
class Rescale(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, image):
h, w = image.shape[:2]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = transform.resize(image, (new_h, new_w))
return img
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, image):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
# if (idx == 0):
# print("Final Image shape= ", image.shape)
return torch.tensor(image)
class CaptionsPreprocessing():
"""Preprocess the captions, generate vocabulary and convert words to tensor tokens
Args:
captions_file_path (string): captions tsv file path
"""
def __init__(self, captions_file_path):
self.captions_file_path = captions_file_path
# Read raw captions
self.raw_captions_dict = self.read_raw_captions()
# Preprocess captions
self.captions_dict = self.process_captions()
# Create vocabulary
self.vocab = self.generate_vocabulary()
# Max length of the captions in the training set
self.max_length = self.gen_max_length()
def read_raw_captions(self):
"""
Returns:
Dictionary with raw captions list keyed by image ids (integers)
"""
captions_dict = {}
with open(self.captions_file_path, 'r', encoding='utf-8') as f:
for img_caption_line in f.readlines():
img_captions = img_caption_line.strip().split('\t')
captions_dict[int(img_captions[0])] = img_captions[1:]
print('Number of Loaded captions: %d ' % len(captions_dict))
return captions_dict
def process_captions(self):
"""
Use this function to generate dictionary and other preprocessing on captions
"""
raw_captions_dict = self.raw_captions_dict
# Do the preprocessing here
captions_dict = raw_captions_dict
print("---Beginning the pre-processing of the training dataset captions---")
# Pre-processing of the captions by removing the punctuations, digits, all words with numbers since they won't contribute to captions
for idx, all_caption in captions_dict.items():
for i in range(len(all_caption)):
#caption = '' + all_caption[i].translate(str.maketrans('','',string.punctuation)).lower() + ''
#caption = ' '.join([w for w in caption.split() if w.isalpha()]) #Removes words which are numbers
# Add <start> and <end> to the caption as and when they are processed for each image
all_caption[i] = '<start> '+''.join(all_caption[i])+' <end>'
return captions_dict
def gen_max_length(self):
"""
Use this function to return the maximum possible length of the caption
present in the training dataset
This is needed to generate tensors of equal sizes
"""
max_length = 0
captions_dict = self.captions_dict
for idx, all_caption in captions_dict.items():
for caption in all_caption:
if (max_length < len(caption.split())):
max_length = len(caption.split())
#print("Maximum length of the captions in the dataset is = ", max_length)
return max_length
def generate_vocabulary(self):
"""
Use this function to generate dictionary and other preprocessing on captions
"""
captions_dict = self.captions_dict
"""
Generate the vocabulary
We use python set() and use the update() method which does not add repeated words in the set.
Since we only need words and not their frequency we dont use dictionary.
"""
temp = set()
for idx in captions_dict.keys():
[temp.update(caption.split()) for caption in captions_dict[idx]]
#temp.update({'<pad>'})
print("The number of words in the generated vocabulary are=", len(temp))
return temp
def captions_transform(self, img_caption_list):
"""
Use this function to generate tensor tokens for the text captions
Args:
img_caption_list: List of captions for a particular image
"""
vocab = self.vocab
# Enumerate all the words in the vocab so that they can be indexed directly
word_to_ix = {word: i for i, word in enumerate(vocab)}
img_caption_token = []
# Generate tensors
for caption in img_caption_list:
# Generate a tensor for all the captions by using enumeration
lookup_tensor = torch.tensor([word_to_ix[w] for w in caption.split()])
img_caption_token.append(lookup_tensor)
return img_caption_token
#return torch.zeros((len(img_caption_list),10))
class ImageCaptionsDataset(Dataset):
def __init__(self, img_dir, captions_dict, img_transform=None, captions_transform=None):
"""
Args:
img_dir (string): Directory with all the images.
captions_dict: Dictionary with captions list keyed by image ids (integers)
img_transform (callable, optional): Optional transform to be applied
on the image sample.
captions_transform: (callable, optional): Optional transform to be applied
on the caption sample (list).
"""
self.img_dir = img_dir
self.captions_dict = captions_dict
self.img_transform = img_transform
self.captions_transform = captions_transform
self.image_ids = list(captions_dict.keys())
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
img_name = os.path.join(self.img_dir, 'image_{}.jpg'.format(self.image_ids[idx]))
image = io.imread(img_name)
captions = self.captions_dict[self.image_ids[idx]]
if self.img_transform:
image = self.img_transform(image)
if self.captions_transform:
captions = self.captions_transform(captions)
return torch.tensor(image), captions
#return torch.tensor(image), torch.tensor(captions)
return image, captions, self.image_ids[idx]
#sample = {'image': image, 'captions': captions}
def get_device():
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(data, device):
if isinstance(data,(list,tuple)):
return [to_device(x,device) for x in data]
return data.to(device)
class Device_Loader():
def __init__(self, dl ,device):
self.dl= dl
self.device=device
def __iter__(self):
for batch in self.dl:
yield to_device(batch,self.device)
def __len__(self):
return len(self.dl)
'''
def collate_fn(data):
data.sort(key=lambda x: len(x[1]), reverse=True)
images, captions = zip(*data)
images = torch.stack(images, 0)
lengths = [len(cap) for cap in captions]
targets = torch.zeros(len(captions), max(lengths)).long()
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
return images, targets, lengths
'''
'''
Collate function is used to create custom batches in case the user does not want to use the automatic batching
We have used collate function to create 5 different data points from the same image and its 5 captions
Then number of batches effectively would now be
batches = (num_images*5)/batch_size
'''
def collate_fn(data):
repeated_images=[]
images, captions = zip(*data)
captions = [cap_ for cap in captions for cap_ in cap]
for image in images :
repeated_images.append(image.repeat(5,1,1,1))
images_ = torch.cat(repeated_images, 0)
lengths = [len(cap) for cap in captions]
# Change the length of all the captions in one batch to the max of all the captions.
targets = torch.zeros(len(captions), max(lengths)).long()
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
return images_, targets, lengths
def get_data(batch):
'''
Function to generate training data loader
Input : Batch size
Returns : Training data loader, Vocabulary length
'''
device = get_device()
# Sequentially compose the transforms using Compose (chains all the transformations to be applied to all images)
# We normalized the images to the custom normalization used by ImageNet dataset
# Normalize increased performance slightly
#img_transform = transforms.Compose([Rescale(IMAGE_RESIZE), ToTensor()])
img_transform = transforms.Compose([Rescale(IMAGE_RESIZE), ToTensor(),transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225)) ])
captions_preprocessing_obj = CaptionsPreprocessing(CAPTIONS_FILE_PATH)
# Regenerate the enumeration for the vocabulary words and store the opposite enumeration also
word_to_ix = {word: i for i, word in enumerate(captions_preprocessing_obj.vocab)}
ix_to_word = {i: word for i, word in enumerate(captions_preprocessing_obj.vocab)}
vocab_dict = (word_to_ix, ix_to_word)
with open('vocab_dict_rasha.pickle', 'wb') as handle:
pickle.dump(vocab_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
vocab_len = len(captions_preprocessing_obj.vocab)
# Loading the dataset
train_dataset = ImageCaptionsDataset(
IMAGE_DIR, captions_preprocessing_obj.captions_dict, img_transform=img_transform,
captions_transform=captions_preprocessing_obj.captions_transform
)
# train_loader = DataLoader(train_dataset, batch_size=batch, shuffle=True, num_workers=2)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch,
shuffle=True,
num_workers=2,
collate_fn=collate_fn)
return train_loader, vocab_len, vocab_dict
def get_test_data():
'''
Function to generate training data loader
Returns : Test data loader
'''
captions_test = CaptionsPreprocessing(CAPTIONS_FILE_PATH_TEST)
img_transform = transforms.Compose([Rescale(IMAGE_RESIZE), ToTensor(), transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
test_dataset = ImageCaptionsDataset(
IMAGE_DIR_TEST, captions_test.captions_dict, img_transform=img_transform,
captions_transform=None
)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle= False, num_workers=2)
return test_loader
class ImagePrivateDataset(Dataset):
def __init__(self, img_dir, img_ids, img_transform=None):
"""
Args:
img_dir (string): Directory with all the images.
img_transform (callable, optional): Optional transform to be applied
on the image sample.
"""
self.img_dir = img_dir
self.img_transform = img_transform
self.image_ids = img_ids
self.f = open('private_img_ids.txt', 'w')
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
img_name = os.path.join(self.img_dir, 'image_{}.jpg'.format(self.image_ids[idx]))
image = io.imread(img_name)
self.f.write(img_name)
self.f.write("\n")
if self.img_transform:
image = self.img_transform(image)
return image, self.image_ids[idx]
def get_private_test_data():
'''
Function to generate training data loader
Returns : Test data loader
'''
img_ids = []
for file in os.listdir(IMAGE_DIR_PRIVATE_TEST):
file_= re.findall(r"[\w']+", file)[0].split('_')
img_ids.append(file_[1])
#captions_test = CaptionsPreprocessing(CAPTIONS_FILE_PATH_PRIVATE_TEST)
img_transform = transforms.Compose([Rescale(IMAGE_RESIZE), ToTensor(), transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
private_test_dataset = ImagePrivateDataset(
IMAGE_DIR_PRIVATE_TEST, img_ids, img_transform=img_transform,
)
private_test_loader = DataLoader(private_test_dataset, batch_size=1, shuffle= False, num_workers=0)
return private_test_loader
|
"""Formal Concept Analysis concepts."""
import operator
import typing
__all__ = ['Concept',
'Infimum', 'Atom', 'Supremum']
from . import algorithms
class Pair:
"""Formal concept as pair of extent and intent."""
objects = ()
properties = ()
def __init__(self,
lattice,
extent,
intent,
upper,
lower) -> None:
self.lattice = lattice #: The lattice containing the concept.
self._extent = extent
self._intent = intent
self.upper_neighbors = upper #: The directly implied concepts.
self.lower_neighbors = lower #: The directly subsumed concepts.
def __str__(self) -> str:
"""
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> print(lattice['+1',])
{1sg, 1pl} <-> [+1 -2 -3] <=> +1
"""
extent = ', '.join(self._extent.members())
intent = ' '.join(self._intent.members())
objects = ' <=> {}'.format(' '.join(self.objects)) if self.objects else ''
properties = ' <=> {}'.format(' '.join(self.properties)) if self.properties else ''
return f'{{{extent}}} <-> [{intent}]{objects}{properties}'
def __repr__(self) -> str:
"""
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',]
<Concept {1sg, 1pl} <-> [+1 -2 -3] <=> +1>
"""
return f'<{self.__class__.__name__} {self}>'
def _eq(self, other):
if not isinstance(other, Concept):
return NotImplemented
if (other._extent.members() != self._extent.members()
or other._intent.members() != self._intent.members()):
return False
for attname in ('upper_neighbors', 'lower_neighbors'):
s_neighbors = getattr(self, attname)
o_neighbors = getattr(other, attname)
if len(o_neighbors) != len(s_neighbors):
return False
for s, o in zip(s_neighbors, o_neighbors):
if o._extent.members() != s._extent.members():
return False
return True
def __iter__(self):
"""Yield ``extent`` and ``intent`` (e.g. for pair unpacking).
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> extent, intent = lattice['+1',]
>>> print(extent, intent)
('1sg', '1pl') ('+1', '-2', '-3')
"""
yield self._extent.members()
yield self._intent.members()
@property
def extent(self) -> typing.Tuple[str, ...]:
"""The objects subsumed by the concept.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',].extent
('1sg', '1pl')
"""
return self._extent.members()
@property
def intent(self) -> typing.Tuple[str, ...]:
"""The properties implied by the concept."
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',].intent
('+1', '-2', '-3')
"""
return self._intent.members()
def upset(self,
_sortkey=operator.attrgetter('index'),
_next_concepts=operator.attrgetter('upper_neighbors')):
"""Yield implied concepts including ``self``.
Yields:
:class:`.Concept` instances.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> list(lattice['+1',].upset()) # doctest: +NORMALIZE_WHITESPACE
[<Concept {1sg, 1pl} <-> [+1 -2 -3] <=> +1>,
<Concept {1sg, 1pl, 2sg, 2pl} <-> [-3] <=> -3>,
<Concept {1sg, 1pl, 3sg, 3pl} <-> [-2] <=> -2>,
<Supremum {1sg, 1pl, 2sg, 2pl, 3sg, 3pl} <-> []>]
"""
return algorithms.iterunion([self], _sortkey, _next_concepts)
def downset(self,
_sortkey=operator.attrgetter('dindex'),
_next_concepts=operator.attrgetter('lower_neighbors')):
"""Yield subsumed concepts including ``self``.
Yields:
:class:`.Concept` instances.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> list(lattice['+1',].downset()) # doctest: +NORMALIZE_WHITESPACE
[<Concept {1sg, 1pl} <-> [+1 -2 -3] <=> +1>,
<Atom {1sg} <-> [+1 -2 -3 +sg -pl] <=> 1sg>,
<Atom {1pl} <-> [+1 -2 -3 +pl -sg] <=> 1pl>,
<Infimum {} <-> [+1 -1 +2 -2 +3 -3 +sg +pl -sg -pl]>]
"""
return algorithms.iterunion([self], _sortkey, _next_concepts)
class OrderableMixin:
"""Concept implication and subsumption as order comparison operators."""
def implies(self, other: 'Concept') -> bool:
"""Implication comparison.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
bool: ``True`` if ``self`` implies ``other`` else ``False``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',] <= lattice['-3',] <= lattice['-3',] <= lattice[()]
True
>>> lattice['+1',] <= lattice['+sg',] or lattice['+sg',] <= lattice['+1',]
False
"""
return self._extent & other._extent == self._extent
__le__ = implies
def subsumes(self, other: 'Concept') -> bool:
"""Subsumption comparison.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
bool: ``True`` if ``self`` subsumes ``other`` else ``False``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',] >= lattice['+1', '+sg'] >= lattice['+1', '+sg'] >= lattice['+1', '-1']
True
>>> lattice['+1',] >= lattice['+sg',] or lattice['+sg',] >= lattice['+1',]
False
"""
return self._extent | other._extent == self._extent
__ge__ = subsumes
def properly_implies(self, other: 'Concept') -> bool:
"""Proper implication comparison.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
bool: ``True`` if ``self`` properly implies ``other`` else ``False``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',] < lattice['-3',] < lattice[()]
True
"""
return self._extent & other._extent == self._extent != other._extent
__lt__ = properly_implies
def properly_subsumes(self, other: 'Concept') -> bool:
"""Proper subsumption comparison.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
bool: ``True`` if ``self`` properly subsumes ``other`` else ``False``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',] > lattice['+1', '+sg'] > lattice['+1', '-1']
True
"""
return self._extent | other._extent == self._extent != other._extent
__gt__ = properly_subsumes
class TransformableMixin:
"""Concept join and meet as ``|`` and ``&`` operations."""
def join(self, other: 'Concept') -> 'Concept':
"""Least upper bound, supremum, or, generalization.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
:class:`.Concept` instance from the same lattice.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',].join(lattice['+2',])
<Concept {1sg, 1pl, 2sg, 2pl} <-> [-3] <=> -3>
>>> lattice['+2',] | lattice['+1',]
<Concept {1sg, 1pl, 2sg, 2pl} <-> [-3] <=> -3>
"""
common = self._extent | other._extent
extent = self.lattice._context._extents.double(common)
return self.lattice._mapping[extent]
__or__ = join
def meet(self, other: 'Concept') -> 'Concept':
"""Greatest lower bound, infimum, and, unification.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
:class:`.Concept` instance from the same lattice.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['-1', '-2'].meet(lattice['-pl',])
<Atom {3sg} <-> [-1 -2 +3 +sg -pl] <=> 3sg>
>>> lattice['-pl',] & lattice['-1', '-2']
<Atom {3sg} <-> [-1 -2 +3 +sg -pl] <=> 3sg>
"""
common = self._extent & other._extent
extent = self.lattice._context._extents.double(common)
return self.lattice._mapping[extent]
__and__ = meet
class RelationsMixin:
"""Concept logical connective methods."""
def incompatible_with(self, other: 'Concept') -> bool:
"""Infimum meet comparison.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
bool: ``True`` if ``self`` is incompatible with ``other`` else ``False``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',].incompatible_with(lattice['+3',])
True
>>> lattice['+1',].incompatible_with(lattice['+sg',])
False
"""
return not self._extent & other._extent
def complement_of(self, other: 'Concept') -> bool:
"""Infimum meet and supremum join comparison.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
bool: ``True`` if ``self`` is the complement of ``other`` else ``False``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',].complement_of(lattice['-1',])
True
>>> lattice['+1',].complement_of(lattice['+3',])
False
"""
return (not self._extent & other._extent
and (self._extent | other._extent) == self.lattice.supremum._extent)
def subcontrary_with(self, other: 'Concept') -> bool:
"""Non-infimum meet and supremum join comparison.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
bool: ``True`` if ``self`` is the subcontrary to ``other`` else ``False``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['-1',].subcontrary_with(lattice['-3',])
True
>>> lattice['-1',].subcontrary_with(lattice['+sg',])
False
"""
return (self._extent & other._extent
and (self._extent | other._extent) == self.lattice.supremum._extent)
def orthogonal_to(self, other: 'Concept') -> bool:
"""Non-infimum meet, incomparable, and non-supremum join comparison.
Args:
other: :class:`.Concept` instance from the same lattice.
Returns:
bool: ``True`` if ``self`` is orthogonal to ``other`` else ``False``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',].orthogonal_to(lattice['+sg',])
True
>>> lattice['+1',].orthogonal_to(lattice['+3',])
False
"""
meet = self._extent & other._extent
return (not not meet and meet != self._extent and meet != other._extent
and (self._extent | other._extent) != self.lattice.supremum._extent)
class Concept(RelationsMixin, TransformableMixin, OrderableMixin, Pair):
"""Formal concept as pair of extent and intent.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> concept = lattice['+1',]
>>> concept
<Concept {1sg, 1pl} <-> [+1 -2 -3] <=> +1>
>>> concept.index, concept.dindex
(7, 6)
>>> concept.objects, concept.properties
((), ('+1',))
>>> concept.atoms # doctest: +NORMALIZE_WHITESPACE
(<Atom {1sg} <-> [+1 -2 -3 +sg -pl] <=> 1sg>,
<Atom {1pl} <-> [+1 -2 -3 +pl -sg] <=> 1pl>)
>>> concept.upper_neighbors # doctest: +NORMALIZE_WHITESPACE
(<Concept {1sg, 1pl, 2sg, 2pl} <-> [-3] <=> -3>,
<Concept {1sg, 1pl, 3sg, 3pl} <-> [-2] <=> -2>)
>>> concept.lower_neighbors # doctest: +NORMALIZE_WHITESPACE
(<Atom {1sg} <-> [+1 -2 -3 +sg -pl] <=> 1sg>,
<Atom {1pl} <-> [+1 -2 -3 +pl -sg] <=> 1pl>)
"""
def minimal(self) -> typing.Tuple[str, ...]:
"""Shortlex minimal properties generating the concept.
Returns:
Property name strings.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice['+1',].minimal()
('+1',)
Note:
For :class:`.Infimum`, this returns **all** properties instead of
the first contradictory subset of properties.
"""
return self.lattice._context._minimal(self._extent,
self._intent).members()
def attributes(self) -> typing.Iterator[typing.Tuple[str]]:
"""Yield properties generating the concept in shortlex order.
Yields:
Tuples of property name strings.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> list(lattice['+1',].attributes())
[('+1',), ('+1', '-2'), ('+1', '-3'), ('-2', '-3'), ('+1', '-2', '-3')]
"""
minimize = self.lattice._context._minimize(self._extent, self._intent)
return (i.members() for i in minimize)
class Infimum(Concept):
"""Contradiction with empty ``extent`` and universal ``intent``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice.infimum
<Infimum {} <-> [+1 -1 +2 -2 +3 -3 +sg +pl -sg -pl]>
>>> lattice.infimum.index, lattice.infimum.dindex
(0, 21)
>>> lattice.infimum.objects, lattice.infimum.properties
((), ())
>>> lattice.infimum.atoms
()
>>> lattice.infimum.upper_neighbors # doctest: +NORMALIZE_WHITESPACE
(<Atom {1sg} <-> [+1 -2 -3 +sg -pl] <=> 1sg>,
<Atom {1pl} <-> [+1 -2 -3 +pl -sg] <=> 1pl>,
<Atom {2sg} <-> [-1 +2 -3 +sg -pl] <=> 2sg>,
<Atom {2pl} <-> [-1 +2 -3 +pl -sg] <=> 2pl>,
<Atom {3sg} <-> [-1 -2 +3 +sg -pl] <=> 3sg>,
<Atom {3pl} <-> [-1 -2 +3 +pl -sg] <=> 3pl>)
>>> lattice.infimum.lower_neighbors
()
"""
def minimal(self) -> typing.Tuple[str, ...]:
"""Shortlex minimal properties generating the concept.
Returns:
Property name strings.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice.infimum.minimal()
('+1', '-1', '+2', '-2', '+3', '-3', '+sg', '+pl', '-sg', '-pl')
Note:
For :class:`.Infimum`, this returns **all** properties instead of
the first contradictory subset of properties.
"""
return self._intent.members()
class Atom(Concept):
"""Concept which is a minimal non-zero element in its lattice.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice.atoms # doctest: +NORMALIZE_WHITESPACE
(<Atom {1sg} <-> [+1 -2 -3 +sg -pl] <=> 1sg>,
<Atom {1pl} <-> [+1 -2 -3 +pl -sg] <=> 1pl>,
<Atom {2sg} <-> [-1 +2 -3 +sg -pl] <=> 2sg>,
<Atom {2pl} <-> [-1 +2 -3 +pl -sg] <=> 2pl>,
<Atom {3sg} <-> [-1 -2 +3 +sg -pl] <=> 3sg>,
<Atom {3pl} <-> [-1 -2 +3 +pl -sg] <=> 3pl>)
>>> lattice.atoms[0].index, lattice.atoms[0].dindex
(1, 15)
>>> lattice.atoms[0].objects, lattice.atoms[0].properties
(('1sg',), ())
>>> lattice.atoms[0].atoms
(<Atom {1sg} <-> [+1 -2 -3 +sg -pl] <=> 1sg>,)
>>> lattice.atoms[0].upper_neighbors # doctest: +NORMALIZE_WHITESPACE
(<Concept {1sg, 1pl} <-> [+1 -2 -3] <=> +1>,
<Concept {1sg, 2sg} <-> [-3 +sg -pl]>,
<Concept {1sg, 3sg} <-> [-2 +sg -pl]>)
>>> lattice.atoms[0].lower_neighbors # doctest: +NORMALIZE_WHITESPACE
(<Infimum {} <-> [+1 -1 +2 -2 +3 -3 +sg +pl -sg -pl]>,)
"""
class Supremum(Concept):
"""Tautology with universal ``extent`` and empty ``intent``.
Example:
>>> import concepts
>>> lattice = concepts.Context.fromstring(concepts.EXAMPLE).lattice
>>> lattice.supremum
<Supremum {1sg, 1pl, 2sg, 2pl, 3sg, 3pl} <-> []>
>>> lattice.supremum.index, lattice.supremum.dindex
(21, 0)
>>> lattice.supremum.objects, lattice.supremum.properties
((), ())
>>> lattice.supremum.atoms # doctest: +NORMALIZE_WHITESPACE
(<Atom {1sg} <-> [+1 -2 -3 +sg -pl] <=> 1sg>,
<Atom {1pl} <-> [+1 -2 -3 +pl -sg] <=> 1pl>,
<Atom {2sg} <-> [-1 +2 -3 +sg -pl] <=> 2sg>,
<Atom {2pl} <-> [-1 +2 -3 +pl -sg] <=> 2pl>,
<Atom {3sg} <-> [-1 -2 +3 +sg -pl] <=> 3sg>,
<Atom {3pl} <-> [-1 -2 +3 +pl -sg] <=> 3pl>)
>>> lattice.supremum.upper_neighbors
()
>>> lattice.supremum.lower_neighbors # doctest: +NORMALIZE_WHITESPACE
(<Concept {1sg, 1pl, 2sg, 2pl} <-> [-3] <=> -3>,
<Concept {1sg, 1pl, 3sg, 3pl} <-> [-2] <=> -2>,
<Concept {2sg, 2pl, 3sg, 3pl} <-> [-1] <=> -1>,
<Concept {1sg, 2sg, 3sg} <-> [+sg -pl] <=> +sg -pl>,
<Concept {1pl, 2pl, 3pl} <-> [+pl -sg] <=> +pl -sg>)
"""
|
import requests
from ..constants import SITES_URL, USERS_URL
class SiteService:
def __init__(self, vault):
self.vault = vault
def get_sites(self):
"""
get all sites
:return: dict
"""
request_url = self.vault.base_url + SITES_URL
headers = self.vault.get_auth_headers()
resp = requests.get(request_url, headers=headers).json()
return resp
def get_site(self, site_id):
"""
get a site by id
:param site_id: string uuid4
:return: dict
"""
endpoint = SITES_URL + '/' + site_id
request_url = self.vault.base_url + endpoint
headers = self.vault.get_auth_headers()
resp = requests.get(request_url, headers=headers).json()
return resp
def get_site_users(self, site_id):
"""
get all users of a site
:param site_id: string uuid4
:return: dict
"""
endpoint = SITES_URL + '/' + site_id + '/' + USERS_URL
request_url = self.vault.base_url + endpoint
headers = self.vault.get_auth_headers()
resp = requests.get(request_url, headers=headers).json()
return resp
def create_site(self, name, description):
"""
create a new site
:param name: string
:param description: string
:return: dict
"""
request_url = self.vault.base_url + SITES_URL
headers = self.vault.get_auth_headers()
payload = {
'name': name,
'description': description
}
resp = requests.post(request_url, headers=headers, data=payload).json()
return resp
def create_site_user(self, site_id, user_id, first_name, last_name, email, password, must_change_password=False,
send_email=False, password_never_expires=True):
"""
creates a new user for a site
:param site_id: string uuid4
:param user_id: string uuid4
:param first_name: string
:param last_name: string
:param email: string
:param password: string
:param must_change_password: bool, default False
:param send_email: bool, default False
:param password_never_expires: bool, default True
:return: dict
"""
endpoint = SITES_URL + '/' + site_id + '/' + USERS_URL
request_url = self.vault.base_url + endpoint
headers = self.vault.get_auth_headers()
payload = {
'userId': user_id,
'firstName': first_name,
'lastName': last_name,
'emailAddress': email,
'password': password,
'mustChangePassword': must_change_password,
'sendEmail': send_email,
'passwordNeverExpires': password_never_expires
}
resp = requests.post(request_url, headers=headers, data=payload).json()
return resp
|
class CacheException(Exception):
pass
class InvalidCachePackage(CacheException):
pass
class BadLinkError(CacheException):
"""
Error raised when a symbolic link is bad.
Can also arise when the link is possibly malicious.
"""
pass
class BadPathError(CacheException):
pass |
import pytest
from pygears.typing import Uint, Ufixp, Int, Tuple, Fixp, cast
def test_ufixp_type_cast():
assert cast(Ufixp[8, 16], Uint) == Uint[8]
assert cast(Ufixp[8, 16], Uint[16]) == Uint[16]
assert cast(Ufixp[16, 8], Uint) == Uint[16]
with pytest.raises(TypeError):
cast(Ufixp[8, 16], Uint[4])
with pytest.raises(TypeError):
cast(Ufixp[-1, 16], Uint)
def test_ufixp_value_cast():
assert cast(Ufixp[8, 16](2.15), Uint) == Uint[8](2)
assert cast(Ufixp[8, 16](2.15), Uint[16]) == Uint[16](2)
with pytest.raises(TypeError):
cast(Ufixp[-1, 16](0.15), Uint)
assert cast(Ufixp[-1, 16](0.15), Uint[16]) == Uint[16](0)
with pytest.raises(TypeError):
cast(Ufixp[8, 16](56.15), Uint[4])
def test_uint_type_cast():
assert cast(Uint[8], Uint) == Uint[8]
assert cast(Uint[8], Uint[16]) == Uint[16]
with pytest.raises(TypeError):
cast(Uint[16], Uint[8])
def test_uint_value_cast():
assert cast(Uint[8](128), Uint[16]) == Uint[16](128)
with pytest.raises(TypeError):
cast(Uint[16](128), Uint[4])
assert cast(2.15, Uint[4]) == Uint[4](2)
assert cast(15, Uint[4]) == Uint[4](15)
with pytest.raises(ValueError):
cast(27, Uint[4])
def test_unsupported_cast():
for t in [Int[6], Tuple[Int[2], Uint[2]], Fixp[1, 14]]:
with pytest.raises(TypeError):
cast(t, Uint)
|
from __future__ import absolute_import
from .base_datasource import DataSource
from .cuckoo_report import CuckooReport
from .darpa_tc_json import DARPATCJson
from .elasticsearch_qs import ElasticSearchQSSerach
from .fireeye_ax_report import FireEyeAXReport
from .hx_triage import HXTriage
from .memory import WindowsMemory
from .pcap import PCAP
from .procmon_csv import ProcmonCSV
from .splunk_spl import SplunkSPLSearch
from .sysmon_evtx import SysmonEVTX
from .virustotal import GenericVTSandbox, GenericVTSandboxAPI
from .win_evtx import WinEVTX
__all__ = [
"DataSource",
"SplunkSPLSearch",
"CuckooReport",
"FireEyeAXReport",
"HXTriage",
"WindowsMemory",
"ProcmonCSV",
"SysmonEVTX",
"PCAP",
"GenericVTSandbox",
"GenericVTSandboxAPI",
"WinEVTX",
"DARPATCJson",
"ElasticSearchQSSerach",
]
|
from .context import BM25Summarizer, Doc, tf_context, load_raw_corpus
import numpy as np
import pytest
@pytest.mark.skip()
@pytest.mark.parametrize("tf_type, result", [('binary', np.array([0.636, 0.364])),
('raw', np.array([0.636, 0.364])),
('freq', np.array([0.636, 0.3638])),
('log_norm', np.array([0.6357, 0.3638])),
('double_norm', np.array([0.636, 0.364]))])
def test_bm25(tf_type, result):
with tf_context(tf_type) as Doc2:
d = Doc2("Onu hiç sevmedim. Bu iş çok zor.")
assert BM25Summarizer().predict(d) == pytest.approx(result)
def test_bm25_corpus():
raw = load_raw_corpus()
for text in raw:
d = Doc(text)
assert np.sum(BM25Summarizer("log_norm", "smooth").predict(d)) == pytest.approx(1)
|
import xlrd
from itertools import product
def get_rows(filename, sheet, num_relays, relay_id):
#get num of rows
num_labels_rows = 3 -1 #it counts from 0
workbook = xlrd.open_workbook(filename)
worksheet = workbook.sheet_by_name(sheet)
num_rows = worksheet.nrows
num_clean_rows = num_rows - num_labels_rows
#compute rows for this relay
rows_per_relay = num_clean_rows / num_relays
lower_bound = rows_per_relay*relay_id + num_labels_rows + 1
upper_bound = rows_per_relay*(relay_id+1) + num_labels_rows
#upper_bound = 500
#add residual in the first relay
if ((num_relays -1)==relay_id):
residual = num_clean_rows - (rows_per_relay * num_relays)
upper_bound += residual
#print "from: " + str(lower_bound)
#print "to: " + str(upper_bound)
#get labels from these rows
rows = []
for row_index in xrange(worksheet.nrows):
tmp_row_lbl = worksheet.cell(row_index, 1).value #row label
if (row_index<=upper_bound and row_index>=lower_bound and not tmp_row_lbl == ''):
rows.append(tmp_row_lbl)
if ((num_relays-1)==relay_id):
rows.pop() #remove last line, with the average
return rows
#Fetch from the xls cells with matching labels
def read_xls_cell(filename, sheet, column_lbl_1, column_lbl_2, column_lbl_3, row_lbls=[]):
cells = []
workbook = xlrd.open_workbook(filename)
worksheet = workbook.sheet_by_name(sheet)
for row_index in xrange(worksheet.nrows):
for col_index in xrange(worksheet.ncols):
#row label
tmp_row_lbl = worksheet.cell(row_index, 1).value
#column label 1
tmp_counter = col_index
tmp_label = ''
while True:
tmp_label = worksheet.cell(0, tmp_counter).value
if tmp_label != '':
break
tmp_counter -= 1
tmp_col_lbl_1 = tmp_label
#column label 2
tmp_counter = col_index
tmp_label = ''
while True:
tmp_label = worksheet.cell(1, tmp_counter).value
if tmp_label != '':
break
tmp_counter -= 1
tmp_col_lbl_2 = tmp_label
#column label 3
tmp_counter = col_index
tmp_label = ''
while True:
tmp_label = worksheet.cell(2, tmp_counter).value
if tmp_label != '':
break
tmp_counter -= 1
#excel treats every num as float and adds .0
if isinstance(tmp_label, float):
tmp_col_lbl_3 = str(tmp_label)[:-2]
else:
tmp_col_lbl_3 = tmp_label
#print tmp_col_lbl_1,tmp_col_lbl_2,tmp_col_lbl_3,tmp_row_lbl
if (tmp_col_lbl_1 == column_lbl_1
and tmp_col_lbl_2 == column_lbl_2
and tmp_col_lbl_3 == column_lbl_3
and tmp_row_lbl in row_lbls): #if (row and columns labels) match was found
cells.append(int(worksheet.cell(row_index, col_index).value)) #add cell to list
#print int(worksheet.cell(row_index, col_index).value)
return cells
|
if not 0:
print("zero value int is False")
if not 0.0:
print("zero value float is False")
if not None:
print("None is False")
if not '':
print("empty string is False")
if not []:
print("empty list is False")
if not ():
print("empty tuple is False")
if not {}:
print("empty dict is False")
if not set():
print("empty set is False")
|
import logging
from abc import ABC, abstractmethod
from .coupler import NoCoupler, IdentityCoupler, coupler_map
from .coupler import AdditiveCoupler, SubtractiveCoupler, MultiplicativeCoupler
logger = logging.getLogger(__name__)
# =============================================================================
# =============================================================================
# =============================================================================
class IParameter(ABC):
"""Interface class to parameter objects"""
@property
@abstractmethod
def value(self):
raise NotImplementedError
@abstractmethod
def set_value(self, value):
raise NotImplementedError
@abstractmethod
def get_value(self):
raise NotImplementedError
@property
@abstractmethod
def bounds(self):
raise NotImplementedError
def to_contr(self, controller):
controller.add_parameters(self)
return self
def _textify(self):
name = 'Name: "{}"'.format(self.name)
value = '\tValue: {} ({})'.format(self._raw_val, self.value)
bounds = '\tBounds: {}'.format(self.bounds)
fit = '\tFit: {}'.format(self.fit)
coupled_func = '\tCoupling: {}'.format(self.coupler)
lst = [name, value, bounds, fit, coupled_func]
text = ''.join(lst)
return text
def __str__(self):
return self._textify()
def __repr__(self):
text = self._textify()
return 'class: {}, {}'.format(self.__class__.__name__, text)
class Parameter(IParameter):
"""
Basic building block of object oriented parameter treatment.
Keyword arguments:
name -- str
Works as identifier
value -- float
Provides the raw value of the Parameter, which might be modifying an
underlying base Parameter.
bounds -- tuple
Determines upper and lower bounds of fitting range. If
bounds_are_relative=True each tuple element is treated as a factor
upon 'value' attribute
fit -- Bool
Flag indicating whether parameter shall be fitted or not
coupler -- instance of Coupler Class
The Coupler must already be initialised and associated with a base
Parameter. self is then treated as a modifier of base.
bounds_are_relative -- Bool
As explained under bounds
"""
def __init__(self, name, value=None, bounds=None, fit=False, coupler=None,
bounds_are_relative=False):
self.name = name
self._raw_val = value
self._bounds = bounds
self.bounds_are_relative = bounds_are_relative
if type(coupler) == tuple:
identifier, base = coupler
_coupler = coupler_map[identifier]
self.coupler = _coupler(base)
else:
self.coupler = coupler or NoCoupler(modifier=self)
self.coupler.couple(self)
self._fit = None
self.fit = fit
@property
def bounds(self):
if self._bounds is None:
return self._bounds
if not self.bounds_are_relative:
return self._bounds
else:
return self._bounds[0] * self.value, self._bounds[1] * self.value
@bounds.setter
def bounds(self, new_bounds):
self._bounds = new_bounds
@property
def fit(self):
return self._fit
@fit.setter
def fit(self, value):
if (value is False) or (value is None):
self._fit = value
else:
if self.bounds is not None:
self._fit = value
else:
msg = (
f'Toggling fit=True only allowed on set bounds.',
f'(Parameter: {self.name})'
)
raise AttributeError(msg)
@property
def value(self):
logger.debug('calling Parameter.value')
return self.coupler.value
def set_value(self, value):
"""
Sets value of parameter.
If parameter is coupled, sets only the raw value, i.e. modifier value
NOT the coupled value as obtained from calling Parameter.get_value()
"""
self._raw_val = value
def get_value(self, no_coupling=False):
"""
Return parameter value
If parameter is coupled and no_coupling == True, return uncoupled value
"""
if no_coupling:
return self._raw_val
else:
return self.value
# ==============================================================================
# ==============================================================================
# ==============================================================================
class ReferenceParameter(IParameter):
"""
Is identical to a referenced parameter apart from its name
"""
def __init__(self, name, references):
"""
Creates parameter identical to reference apart form its name
:param name: Identifier of this parameter
:param references: Parameter that all other properties are shared with
"""
self.name = name
self.coupler = IdentityCoupler(references)
self.coupler.couple(self)
self._raw_val = None
@property
def value(self):
return self.coupler.value
def set_value(self, value):
msg = (
'Can not assign value to ReferenceParameter\n'
'Assign value to referenced parameter instead.'
)
raise TypeError(msg)
def get_value(self, no_coupling=False):
if no_coupling:
return self.coupler.base._raw_val
else:
return self.coupler.base.value
@property
def bounds(self):
return self.coupler.base.bounds
@bounds.setter
def bounds(self, bounds):
msg = (
'Can not assign to bounds attribute of ReferenceParameter.\n'
'Assign to "bounds" of referenced Parameter instead.'
)
raise TypeError(msg)
@property
def fit(self):
return None
@fit.setter
def fit(self, value):
msg = (
'Can not assign to "fit" attribute of ReferenceParameter.\n'
'Assign to "fit" of referenced Parameter instead.'
)
raise TypeError(msg)
# ==============================================================================
# ==============================================================================
# ==============================================================================
class ComplexParameter(Parameter):
"""
Composite object of two Parameter instances, each representing real and
imaginary part of a complex number.
All interaction should be limited to real- and imag-attributes.
Returns a complex number, given by the values of real- and imaginary
components
"""
def __init__(self, name, real_part, imag_part=None):
"""
:param name: Identifier of this parameter
:param real_part: Parameter representing real part of complex number
:param imag_part: Parameter representing imaginary part of complex number
"""
super().__init__(name)
self.real = real_part
self.imag = imag_part or Parameter(name='imag', value=0.)
@property
def value(self):
return self.real.value + 1J * self.imag.value
def set_value(self, value):
raise TypeError(
'Use set_value() method of attributes "real" and "imag" '
'instead of ComplexParameter.')
def get_value(self, no_coupling=False):
if no_coupling:
return self.real._raw_val + 1J * self.imag._raw_val
else:
return self.value
@property
def bounds(self):
raise TypeError(
'Use "bounds" attribute of "real" and "imag" attributes of '
'ComplexParameter instance.'
)
# ==============================================================================
# ==============================================================================
# ==============================================================================
class ScatteringFactorParameter(Parameter):
"""
Construct of 2 to 4 Parameter instances representing real- and imaginary-,
charge- and magnetic- parts of a scattering element.
"""
def __init__(self, name,
f_charge_real, f_charge_imag,
f_magn_real=None, f_magn_imag=None,
return_mode='full'):
"""
:param name: Identifier of this Parameter
:param f_charge_real: instance of Parameter class
Imaginary part of the charge component of the scattering factor
:param f_charge_imag: instance of Parameter class
Imaginary part of the charge component of the scattering factor
:param f_magn_real: f_magn_real -- instance of Parameter class
Real part of magnetic contribution to scattering factor
:param f_magn_imag: instance of Parameter class
Imaginary part of magnetic contribution to scattering factor
:param return_mode: str, one of 'full', 'charge', 'magn', '+', '-'
Indicates return mode of the scattering factor. Might be only charge,
only magnetic, or adding or subtracting the magnetic contribution,
"""
super().__init__(name)
self.f_ch_r = f_charge_real
self.f_ch_i = f_charge_imag
self.f_m_r = f_magn_real or Parameter('f_mag', 0.)
self.f_m_i = f_magn_imag or Parameter('f_mag', 0.)
self.return_mode = return_mode
def set_return_mode(self, return_mode):
self.return_mode = return_mode
@property
def value(self):
logger.debug(f'Calling ScatteringFactorPara: mode={self.return_mode}')
if self.return_mode == 'full':
return self.f_ch_r.value + self.f_m_r.value \
+ 1J * (self.f_ch_i.value + self.f_m_i.value)
elif self.return_mode in ['charge', 'c']:
return self.f_ch_r.value + 1J * self.f_ch_i.value
elif self.return_mode in ['magn', 'mag', 'magnetic', 'm']:
return self.f_m_r.value + 1J * self.f_m_i.value
elif self.return_mode in ['+', 'plus']:
return (self.f_ch_r.value + self.f_m_r.value) \
+ (self.f_ch_i.value + self.f_m_i.value) * 1J
elif self.return_mode in ['-', 'minus']:
return (self.f_ch_r.value - self.f_m_r.value) \
+ (self.f_ch_i.value - self.f_m_i.value) * 1J
else:
raise NameError('ScatteringFactorParameter return mode unknown.')
def get_value(self):
return self.value
# ==============================================================================
# ==============================================================================
# ==============================================================================
class ParameterGroup(Parameter):
"""
Turns a set of ordered parameters into an iterable (list)
Upon calling "value", return a list of the underlying parameter-values
"""
def __init__(self, group_name, *parameters):
"""
:param group_name: Identifier of this Parameter group
:param parameters: Parameters to join the group in the given order
"""
self.name = group_name
self.group = [p for p in parameters]
self.fit = None
@property
def value(self):
"""
:return: List of values of the individual parameters of the group
"""
return [p.value for p in self.group]
def set_value(self, value):
raise TypeError(
'Use set_value() method of constituent parameters '
'instead of ParameterGroup instance.')
def get_value(self, no_coupling=False):
if no_coupling:
return [p._raw_val for p in self.group]
else:
return self.value
@property
def bounds(self):
raise TypeError(
'Use "bounds" attribute of constituent parameters '
'instead of ParameterGroup instance.'
)
def __repr__(self):
name = 'Name: {}(ParameterGroup)'.format(self.name)
return " ".join([name] + [str(p.value) for p in self.group])
|
#!/usr/bin/env python3
import numpy as np
import rospy
import rospkg
import os
import yaml
from cv_bridge import CvBridge
import debugpy
debugpy.listen(("localhost", 5678))
from duckietown.dtros import DTROS, NodeType, TopicType, DTParam, ParamType
from sensor_msgs.msg import CompressedImage, Image
from geometry_msgs.msg import Point as PointMsg
from duckietown_msgs.msg import SegmentList, Segment, Twist2DStamped, LanePose, WheelsCmdStamped, BoolStamped, FSMState, StopLineReading, AntiInstagramThresholds
from image_processing.anti_instagram import AntiInstagram
from image_processing.ground_projection_geometry import GroundProjectionGeometry, Point
import cv2
from object_detection.model import Wrapper
from cv_bridge import CvBridge
import time
class ObjectDetectionNode(DTROS):
#def __init__(self, node_name, model_type="bezier"):
def __init__(self, node_name, model_type="segmentation"):
# Initialize the DTROS parent class
super(ObjectDetectionNode, self).__init__(
node_name=node_name,
node_type=NodeType.PERCEPTION
)
self.duckie_alert = False
self.duckies_around= False
self.model_type = model_type
self.duckie_location = None
self.duckie_timer = 0
if self.model_type=="bezier":
self.height=320
self.width=240
elif self.model_type=="segmentation":
self.height=160
self.width=120
else:
raise ValueError(f"Unsuported model type: {model_type}")
# Construct publishers
self.pub_obj_dets = rospy.Publisher(
"~duckie_detected_hack",
BoolStamped,
queue_size=1,
dt_topic_type=TopicType.PERCEPTION
)
# Construct publishers
#self.pub_obj_dets = rospy.Publisher(
# "~duckie_detected",
# BoolStamped,
# queue_size=1,
# dt_topic_type=TopicType.PERCEPTION
#)
# Construct subscribers
self.sub_image = rospy.Subscriber(
"~image/compressed",
CompressedImage,
self.image_cb,
buff_size=10000000,
queue_size=1
)
self.sub_thresholds = rospy.Subscriber(
"~thresholds",
AntiInstagramThresholds,
self.thresholds_cb,
queue_size=1
)
self.pub_seglist_filtered = rospy.Publisher("~seglist_filtered",
SegmentList,
queue_size=1,
dt_topic_type=TopicType.DEBUG)
self.pub_segmented_img = rospy.Publisher("~debug/segmented_image/compressed",
CompressedImage,
queue_size=1,
dt_topic_type=TopicType.DEBUG)
self.ai_thresholds_received = False
self.anti_instagram_thresholds=dict()
self.ai = AntiInstagram()
self.bridge = CvBridge()
#model_file = rospy.get_param('~model_file','.')
rospack = rospkg.RosPack()
#model_file_absolute = rospack.get_path('object_detection') + model_file
self.model_wrapper = Wrapper(self.model_type)
self.homography = self.load_extrinsics()
homography = np.array(self.homography).reshape((3, 3))
self.bridge = CvBridge()
self.gpg = GroundProjectionGeometry(160,120, homography)
self.initialized = True
self.log("Initialized!")
def thresholds_cb(self, thresh_msg):
self.anti_instagram_thresholds["lower"] = thresh_msg.low
self.anti_instagram_thresholds["higher"] = thresh_msg.high
self.ai_thresholds_received = True
def image_cb(self, image_msg):
if not self.initialized:
return
# TODO to get better hz, you might want to only call your wrapper's predict function only once ever 4-5 images?
# This way, you're not calling the model again for two practically identical images. Experiment to find a good number of skipped
# images.
# Decode from compressed image with OpenCV
try:
image = self.bridge.compressed_imgmsg_to_cv2(image_msg)
except ValueError as e:
self.logerr('Could not decode image: %s' % e)
return
# Perform color correction
if self.ai_thresholds_received:
image = self.ai.apply_color_balance(
self.anti_instagram_thresholds["lower"],
self.anti_instagram_thresholds["higher"],
image
)
#image = cv2.resize(image, (224,224))
# img_small = cv2.resize(image, (160,120))
# self.model_wrapper.segment_cv2_image(img_small)
# img_small = cv2.resize(image, (160, 120))
img_reg = cv2.resize(image, (self.height,self.width))
self.model_wrapper.segment_cv2_image(img_reg)
seg_img = self.model_wrapper.get_seg()
yellow_segments_px = self.model_wrapper.get_yellow_segments_px() ###
white_segments_px = self.model_wrapper.get_white_segments_px() ###
right_bezier_segments_px = self.model_wrapper.get_right_bezier_px()
# left_bezier_segments_px = self.model_wrapper.get_left_bezier_px()
#ground project segments
yellow_segments = self.ground_project_segments_px(yellow_segments_px)
white_segments = self.ground_project_segments_px(white_segments_px, right_only=True)
bezier_segments = self.ground_project_segments_px(right_bezier_segments_px)
self.lookout_for_duckies()
seg_msg = SegmentList()
seg_msg.header = image_msg.header
self.add_segments(yellow_segments, seg_msg, Segment.YELLOW)
self.add_segments(white_segments, seg_msg, Segment.WHITE)
# no other color besides yellow, white and red, so using red for now, as it is not being used for the moment
self.add_segments(bezier_segments, seg_msg, Segment.RED)
self.pub_seglist_filtered.publish(seg_msg)
bgr = np.zeros((seg_img.shape[0], seg_img.shape[1], 3))
if self.model_type=="bezier":
bgr[(seg_img == 0)] = np.array([0, 0, 0]).astype(int)
bgr[(seg_img == 1)] = np.array([255, 255, 255]).astype(int)
bgr[(seg_img == 2)] = np.array([255, 255, 0]).astype(int)
bgr[(seg_img == 3)] = np.array([255, 0, 0]).astype(int)
bgr[(seg_img == 4)] = np.array([0, 0, 255]).astype(int)
bgr[(seg_img == 5)] = np.array([0, 255, 0]).astype(int)
else:
bgr[(seg_img == 0)] = np.array([0, 0, 0]).astype(int)
bgr[(seg_img == 2)] = np.array([255, 255, 255]).astype(int)
bgr[(seg_img == 1)] = np.array([0, 255, 255]).astype(int)
bgr[(seg_img == 3)] = np.array([0, 0, 255]).astype(int)
# segmented_img_cv = cv2.applyColorMap(self.model_wrapper.seg*64, cv2.COLORMAP_JET)
segmented_img = self.bridge.cv2_to_compressed_imgmsg(bgr)
segmented_img.header.stamp = image_msg.header.stamp
self.pub_segmented_img.publish(segmented_img)
print(f"Found {len(right_bezier_segments_px)} bezier segments")
bboxes, classes, scores = self.model_wrapper.predict(image)
msg = BoolStamped()
msg.header = image_msg.header
msg.data = self.duckie_alert
if self.duckie_alert:
self.log(f"Warning Duckie Citizen Ahead! Location = {self.duckie_location}")
self.pub_obj_dets.publish(msg)
def add_segments(self, yellow_segments, seg_msg, color):
for yellow_segment in yellow_segments:
new_segment = Segment()
ground_pt_msg_1 = PointMsg()
ground_pt_msg_1.z=0
ground_pt_msg_1.x=yellow_segment[0][0]
ground_pt_msg_1.y=yellow_segment[0][1]
ground_pt_msg_2 = PointMsg()
ground_pt_msg_2.z=0
ground_pt_msg_2.x=yellow_segment[1][0]
ground_pt_msg_2.y=yellow_segment[1][1]
new_segment.points[0] = ground_pt_msg_1
new_segment.points[1] = ground_pt_msg_2
new_segment.color = color
seg_msg.segments.append(new_segment)
def lookout_for_duckies(self):
nearest_duckies_px = self.model_wrapper.get_nearest_duckies_px()
ped_distance = rospy.get_param("ped_distance",0.4)
ped_left = -rospy.get_param("ped_left",0.15)
ped_right = rospy.get_param("ped_right",0.15)
if time.time() > self.duckie_timer + rospy.get_param("ped_timeout",5):
self.duckie_alert = False #We almost killed a dukie. We take a break to think about it.
self.duckies_around = False
self.duckie_location = None
nearest_duckies = self.ground_project_segments_px(nearest_duckies_px)
for duckie_segment in nearest_duckies:
#There is some duckies around!
self.duckies_around=True
pt1 = duckie_segment[0]
pt2 = duckie_segment[1]
for pt in [pt1, pt2]:
x = pt[0]
y = pt[1]
#Distance in front of the Duckieboty
#Distance left/right of the Duckiebot
if y > ped_left and y < ped_right:
#There is a duckie bot in front of us!
if x < ped_distance:
# We're getting to close!
self.duckie_alert=True
self.duckie_location = (x,y)
self.duckie_timer = time.time()
def ground_project_segments_px(self, segments_px, right_only=False, xmin=0.0, xmax=1):
x=[]
y=[]
segments=[]
for segment_px in segments_px:
if self.model_type=="bezier":
pixel1 = Point(segment_px[0][0]*2,segment_px[0][1]*2) #Conversion. Points are converted in 640x480 for the homography to work
pixel2 = Point(segment_px[1][0]*2,segment_px[1][1]*2) #Conversion. Points are converted in 640x480 for the homography to work
else:
pixel1 = Point(segment_px[0][0]*4,segment_px[0][1]*4) #Conversion. Points are converted in 640x480 for the homography to work
pixel2 = Point(segment_px[1][0]*4,segment_px[1][1]*4) #Conversion. Points are converted in 640x480 for the homography to work
ground_projected_point1 = self.gpg.pixel2ground(pixel1)
ground_projected_point2 = self.gpg.pixel2ground(pixel2)
pt1 = (ground_projected_point1.x, ground_projected_point1.y)
pt2 = (ground_projected_point2.x, ground_projected_point2.y)
segment = (pt1,pt2)
if right_only: #For the white line, we assume it is right of the duckie.
if pt1[1] > 0 or pt2[1] > 0:
continue
if pt1[0] < xmin or pt2[0] < xmin: #Not to close to the duckiebot.
continue
if pt1[0] > xmax or pt2[0] > xmax: #Neither too far!
continue
segments.append(segment)
return segments
def det2bool(self, bboxes, classes):
# TODO remove these debugging prints
print(bboxes)
print(classes)
# This is a dummy solution, remove this next line
return len(bboxes) > 1
# TODO filter the predictions: the environment here is a bit different versus the data collection environment, and your model might output a bit
# of noise. For example, you might see a bunch of predictions with x1=223.4 and x2=224, which makes
# no sense. You should remove these.
# TODO also filter detections which are outside of the road, or too far away from the bot. Only return True when there's a pedestrian (aka a duckie)
# in front of the bot, which you know the bot will have to avoid. A good heuristic would be "if centroid of bounding box is in the center of the image,
# assume duckie is in the road" and "if bouding box's area is more than X pixels, assume duckie is close to us"
obj_det_list = []
for i in range(len(bboxes)):
x1, y1, x2, y2 = bboxes[i]
label = classes[i]
# TODO if label isn't a duckie, skip
# TODO if detection is a pedestrian in front of us:
# return True
def load_extrinsics(self):
"""
Loads the homography matrix from the extrinsic calibration file.
Returns:
:obj:`numpy array`: the loaded homography matrix
"""
# load intrinsic calibration
cali_file_folder = '/data/config/calibrations/camera_extrinsic/'
cali_file = cali_file_folder + rospy.get_namespace().strip("/") + ".yaml"
# Locate calibration yaml file or use the default otherwise
if not os.path.isfile(cali_file):
self.log("Can't find calibration file: %s.\n Using default calibration instead."
% cali_file, 'warn')
cali_file = (cali_file_folder + "default.yaml")
# Shutdown if no calibration file not found
if not os.path.isfile(cali_file):
msg = 'Found no calibration file ... aborting'
self.log(msg, 'err')
rospy.signal_shutdown(msg)
try:
with open(cali_file,'r') as stream:
calib_data = yaml.load(stream)
except yaml.YAMLError:
msg = 'Error in parsing calibration file %s ... aborting' % cali_file
self.log(msg, 'err')
rospy.signal_shutdown(msg)
return calib_data['homography']
if __name__ == "__main__":
# Initialize the node
object_detection_node = ObjectDetectionNode(node_name='object_detection_node')
# Keep it spinning
rospy.spin()
|
"""Unit tests for editing pre and post bash scripts, comments, etc."""
from __future__ import absolute_import
import os
import unittest
from click.testing import CliRunner
from aiida.cmdline.utils.multi_line_input import edit_pre_post, edit_comment
class TestMultilineInput(unittest.TestCase):
"""Test functions for editing pre and post bash scripts, comments, etc."""
def setUp(self):
## Sleep 1 is needed because on some filesystems (e.g. some pre 10.13 Mac) the
## filesystem returns the time with a precision of 1 second, and
## click uses the timestamp to decide if the file was re-saved or not.
editor_cmd = 'sleep 1 ; vim -c "%s/$/Test/g" -cwq' # appends Test to every line
os.environ['EDITOR'] = editor_cmd
os.environ['VISUAL'] = editor_cmd
self.runner = CliRunner()
def test_pre_post(self):
result = edit_pre_post(summary={'Param 1': 'Value 1', 'Param 2': 'Value 1'})
self.assertEqual(result[0], 'Test\nTest\nTest')
self.assertEqual(result[1], 'Test\nTest\nTest')
def test_edit_pre_post(self):
result = edit_pre_post(pre='OldPre', post='OldPost')
self.assertEqual(result[0], 'Test\nOldPreTest\nTest')
self.assertEqual(result[1], 'Test\nOldPostTest\nTest')
def test_new_comment(self):
new_comment = edit_comment()
self.assertEqual(new_comment, 'Test')
def test_edit_comment(self):
old_comment = 'OldComment'
new_comment = edit_comment(old_cmt=old_comment)
self.assertEqual(new_comment, old_comment + 'Test')
|
import os.path as op
from nose.tools import eq_, ok_
from flask_admin.contrib import fileadmin
from flask_admin import Admin
from flask import Flask
from . import setup
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def create_view():
app, admin = setup()
class MyFileAdmin(fileadmin.FileAdmin):
editable_extensions = ('txt',)
path = op.join(op.dirname(__file__), 'files')
view = MyFileAdmin(path, '/files/', name='Files')
admin.add_view(view)
return app, admin, view
def test_file_admin():
app, admin, view = create_view()
client = app.test_client()
# index
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
# edit
rv = client.get('/admin/myfileadmin/edit/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/edit/?path=dummy.txt', data=dict(
content='new_string'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/edit/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
ok_('new_string' in rv.data.decode('utf-8'))
# rename
rv = client.get('/admin/myfileadmin/rename/?path=dummy.txt')
eq_(rv.status_code, 200)
ok_('dummy.txt' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/rename/?path=dummy.txt', data=dict(
name='dummy_renamed.txt',
path='dummy.txt'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed.txt' in rv.data.decode('utf-8'))
ok_('path=dummy.txt' not in rv.data.decode('utf-8'))
# upload
rv = client.get('/admin/myfileadmin/upload/')
eq_(rv.status_code, 200)
rv = client.post('/admin/myfileadmin/upload/', data=dict(
upload=(StringIO(""), 'dummy.txt'),
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
ok_('path=dummy_renamed.txt' in rv.data.decode('utf-8'))
# delete
rv = client.post('/admin/myfileadmin/delete/', data=dict(
path='dummy_renamed.txt'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed.txt' not in rv.data.decode('utf-8'))
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
# mkdir
rv = client.get('/admin/myfileadmin/mkdir/')
eq_(rv.status_code, 200)
rv = client.post('/admin/myfileadmin/mkdir/', data=dict(
name='dummy_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
ok_('path=dummy_dir' in rv.data.decode('utf-8'))
# rename - directory
rv = client.get('/admin/myfileadmin/rename/?path=dummy_dir')
eq_(rv.status_code, 200)
ok_('dummy_dir' in rv.data.decode('utf-8'))
rv = client.post('/admin/myfileadmin/rename/?path=dummy_dir', data=dict(
name='dummy_renamed_dir',
path='dummy_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed_dir' in rv.data.decode('utf-8'))
ok_('path=dummy_dir' not in rv.data.decode('utf-8'))
# delete - directory
rv = client.post('/admin/myfileadmin/delete/', data=dict(
path='dummy_renamed_dir'
))
eq_(rv.status_code, 302)
rv = client.get('/admin/myfileadmin/')
eq_(rv.status_code, 200)
ok_('path=dummy_renamed_dir' not in rv.data.decode('utf-8'))
ok_('path=dummy.txt' in rv.data.decode('utf-8'))
def test_modal_edit():
# bootstrap 2 - test edit_modal
app_bs2 = Flask(__name__)
admin_bs2 = Admin(app_bs2, template_mode="bootstrap2")
class EditModalOn(fileadmin.FileAdmin):
edit_modal = True
editable_extensions = ('txt',)
class EditModalOff(fileadmin.FileAdmin):
edit_modal = False
editable_extensions = ('txt',)
path = op.join(op.dirname(__file__), 'files')
edit_modal_on = EditModalOn(path, '/files/', endpoint='edit_modal_on')
edit_modal_off = EditModalOff(path, '/files/', endpoint='edit_modal_off')
admin_bs2.add_view(edit_modal_on)
admin_bs2.add_view(edit_modal_off)
client_bs2 = app_bs2.test_client()
# bootstrap 2 - ensure modal window is added when edit_modal is enabled
rv = client_bs2.get('/admin/edit_modal_on/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' in data)
# bootstrap 2 - test edit modal disabled
rv = client_bs2.get('/admin/edit_modal_off/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' not in data)
# bootstrap 3
app_bs3 = Flask(__name__)
admin_bs3 = Admin(app_bs3, template_mode="bootstrap3")
admin_bs3.add_view(edit_modal_on)
admin_bs3.add_view(edit_modal_off)
client_bs3 = app_bs3.test_client()
# bootstrap 3 - ensure modal window is added when edit_modal is enabled
rv = client_bs3.get('/admin/edit_modal_on/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' in data)
# bootstrap 3 - test modal disabled
rv = client_bs3.get('/admin/edit_modal_off/')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('fa_modal_window' not in data)
|
import requests
from requests_html import HTML
import time
def parse_data(html):
summary_stats = html.find(".js-post-summary-stats")
summary_content = html.find(".s-post-summary--content")
posts_lst = []
for i in range(len(summary_content)):
post_dict = {}
post_dict["votes"] = summary_stats[i].text.split("\n")[0].split("v")[0]
post_dict["answers"] = summary_stats[i].text.split("\n")[1].split("a")[0]
post_dict["views"] = summary_stats[i].text.split("\n")[2].split("v")[0]
posts_dict["question_title"] = summary_content[i].text.split("\n")[0]
post_dict["question"] = summary_content[i].text.split("\n")[1]
post_dict["tags"] = summary_content[i].text.split("\n")[2]
post_dict["user"] = summary_content[i].text.split("\n")[4]
post_dict["user_reputation"] = summary_content[i].text.split("\n")[5]
post_dict["time_asked"] = summary_content[i].text.split("\n")[-1]
posts_lst.append(post_dict)
return posts_lst
def extract_data_from_url(url):
r = requests.get(url)
if r.status_code not in range(200,300):
return []
html_text = r.text
html = HTML(html=html_text)
data = parse_data(html)
return data
def scrape_tag(tag='python', query_filter='Votes', max_pages=50, pagesize=25):
base_url = 'https://stackoverflow.com/questions/tagged/'
data_ = []
for p in range(max_pages):
page_num = p + 1
# https://stackoverflow.com/questions/tagged/python?tab=votes&page=5&pagesize=15
url = f'{base_url}{tag}?tab={query_filter}&page={page_num}&pagesize={pagesize}'
data_ += extract_data_from_url(url)
time.sleep(1.2)
return data_ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.